Fix llm_key: render every llm_key for CUA in execute_step (#2556)

This commit is contained in:
Shuchang Zheng
2025-05-31 16:26:02 -07:00
committed by GitHub
parent 48f5f0913e
commit deb38af17d
3 changed files with 6 additions and 20 deletions

View File

@@ -46,7 +46,7 @@ from skyvern.forge.sdk.api.files import (
download_from_s3,
get_path_for_workflow_download_directory,
)
from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory, LLMCaller
from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
from skyvern.forge.sdk.artifact.models import ArtifactType
from skyvern.forge.sdk.core import skyvern_context
from skyvern.forge.sdk.db.enums import TaskType
@@ -628,16 +628,6 @@ class BaseTaskBlock(Block):
try:
current_context = skyvern_context.ensure_context()
current_context.task_id = task.task_id
llm_key = workflow.determine_llm_key(block=self)
screenshot_scaling_enabled = False
if self.engine == RunEngine.anthropic_cua:
screenshot_scaling_enabled = True
llm_caller = (
None
if not llm_key
else LLMCaller(llm_key=llm_key, screenshot_scaling_enabled=screenshot_scaling_enabled)
)
await app.agent.execute_step(
organization=organization,
task=task,
@@ -647,7 +637,6 @@ class BaseTaskBlock(Block):
close_browser_on_completion=browser_session_id is None,
complete_verification=self.complete_verification,
engine=self.engine,
llm_caller=llm_caller,
)
except Exception as e:
# Make sure the task is marked as failed in the database before raising the exception