diff --git a/skyvern/forge/agent.py b/skyvern/forge/agent.py
index 6d919297..517d6826 100644
--- a/skyvern/forge/agent.py
+++ b/skyvern/forge/agent.py
@@ -1030,7 +1030,7 @@ class ForgeAgent:
                 results,
             )
             # wait random time between actions to avoid detection
-            await asyncio.sleep(random.uniform(1.0, 2.0))
+            await asyncio.sleep(random.uniform(0.5, 1.0))
             await self.record_artifacts_after_action(task, step, browser_state, engine)
             for result in results:
                 result.step_retry_number = step.retry_index
diff --git a/skyvern/webeye/browser_factory.py b/skyvern/webeye/browser_factory.py
index 5faf84a7..bdba7297 100644
--- a/skyvern/webeye/browser_factory.py
+++ b/skyvern/webeye/browser_factory.py
@@ -593,7 +593,7 @@ class BrowserState:
     async def navigate_to_url(self, page: Page, url: str, retry_times: int = NAVIGATION_MAX_RETRY_TIME) -> None:
         try:
             for retry_time in range(retry_times):
-                LOG.info(f"Trying to navigate to {url} and waiting for 5 seconds.", url=url, retry_time=retry_time)
+                LOG.info(f"Trying to navigate to {url} and waiting for 1 second.", url=url, retry_time=retry_time)
                 try:
                     start_time = time.time()
                     await page.goto(url, timeout=settings.BROWSER_LOADING_TIMEOUT_MS)
@@ -617,8 +617,8 @@
                         url=url,
                         retry_time=retry_time,
                     )
-                    # Wait for 5 seconds before retrying
-                    await asyncio.sleep(5)
+                    # Wait for 1 second before retrying
+                    await asyncio.sleep(1)

        except Exception as e:
            LOG.exception(
diff --git a/skyvern/webeye/scraper/scraper.py b/skyvern/webeye/scraper/scraper.py
index a51d97e1..524b1246 100644
--- a/skyvern/webeye/scraper/scraper.py
+++ b/skyvern/webeye/scraper/scraper.py
@@ -506,8 +506,8 @@ async def scrape_web_unsafe(
     # This also solves the issue where we can't scroll due to a popup.(e.g. geico first popup on the homepage after
     # clicking start my quote)
-    LOG.info("Waiting for 5 seconds before scraping the website.")
-    await asyncio.sleep(5)
+    LOG.info("Waiting for 3 seconds before scraping the website.")
+    await asyncio.sleep(3)

     elements, element_tree = await get_interactable_element_tree(page, scrape_exclude)
     element_tree = await cleanup_element_tree(page, url, copy.deepcopy(element_tree))