disable async operations for CUA agent (#2194)

This commit is contained in:
Shuchang Zheng
2025-04-20 04:54:50 +08:00
committed by GitHub
parent c5bf2f899d
commit c19f937991

View File

@@ -834,7 +834,8 @@ class ForgeAgent:
         ):
             using_cached_action_plan = True
         else:
-            self.async_operation_pool.run_operation(task.task_id, AgentPhase.llm)
+            if engine != RunEngine.openai_cua:
+                self.async_operation_pool.run_operation(task.task_id, AgentPhase.llm)
             json_response = await app.LLM_API_HANDLER(
                 prompt=extract_action_prompt,
                 prompt_name="extract-actions",
@@ -996,7 +997,8 @@ class ForgeAgent:
                 element_id_to_last_action[action.element_id] = action_idx
-            self.async_operation_pool.run_operation(task.task_id, AgentPhase.action)
+            if engine != RunEngine.openai_cua:
+                self.async_operation_pool.run_operation(task.task_id, AgentPhase.action)
             current_page = await browser_state.must_get_working_page()
             if isinstance(action, CompleteAction) and not complete_verification:
                 # Do not verify the complete action when complete_verification is False
@@ -1588,7 +1590,8 @@ class ForgeAgent:
         engine: RunEngine,
     ) -> tuple[ScrapedPage, str]:
         # start the async tasks while running scrape_website
-        self.async_operation_pool.run_operation(task.task_id, AgentPhase.scrape)
+        if engine != RunEngine.openai_cua:
+            self.async_operation_pool.run_operation(task.task_id, AgentPhase.scrape)
         # Scrape the web page and get the screenshot and the elements
         # HACK: try scrape_website three time to handle screenshot timeout