diff --git a/skyvern/forge/forge_app.py b/skyvern/forge/forge_app.py
index 4b91c72a..69ad945e 100644
--- a/skyvern/forge/forge_app.py
+++ b/skyvern/forge/forge_app.py
@@ -12,8 +12,8 @@ from skyvern.forge.agent_functions import AgentFunction
 from skyvern.forge.forge_openai_client import ForgeAsyncHttpxClientWrapper
 from skyvern.forge.sdk.api.azure import AzureClientFactory
 from skyvern.forge.sdk.api.custom_credential_client import CustomCredentialAPIClient
+from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler
 from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
-from skyvern.forge.sdk.api.llm.models import LLMAPIHandler
 from skyvern.forge.sdk.api.real_azure import RealAzureClientFactory
 from skyvern.forge.sdk.artifact.manager import ArtifactManager
 from skyvern.forge.sdk.artifact.storage.base import BaseStorage
diff --git a/skyvern/forge/sdk/api/llm/api_handler.py b/skyvern/forge/sdk/api/llm/api_handler.py
new file mode 100644
index 00000000..1d68afb5
--- /dev/null
+++ b/skyvern/forge/sdk/api/llm/api_handler.py
@@ -0,0 +1,45 @@
+from typing import Any, Awaitable, Protocol
+
+from skyvern.forge.sdk.models import Step
+from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
+from skyvern.forge.sdk.schemas.task_v2 import TaskV2, Thought
+from skyvern.utils.image_resizer import Resolution
+
+
+class LLMAPIHandler(Protocol):
+    def __call__(
+        self,
+        prompt: str,
+        prompt_name: str,
+        step: Step | None = None,
+        task_v2: TaskV2 | None = None,
+        thought: Thought | None = None,
+        ai_suggestion: AISuggestion | None = None,
+        screenshots: list[bytes] | None = None,
+        parameters: dict[str, Any] | None = None,
+        organization_id: str | None = None,
+        tools: list | None = None,
+        use_message_history: bool = False,
+        raw_response: bool = False,
+        window_dimension: Resolution | None = None,
+        force_dict: bool = True,
+    ) -> Awaitable[dict[str, Any] | Any]: ...
+
+
+async def dummy_llm_api_handler(
+    prompt: str,
+    prompt_name: str,
+    step: Step | None = None,
+    task_v2: TaskV2 | None = None,
+    thought: Thought | None = None,
+    ai_suggestion: AISuggestion | None = None,
+    screenshots: list[bytes] | None = None,
+    parameters: dict[str, Any] | None = None,
+    organization_id: str | None = None,
+    tools: list | None = None,
+    use_message_history: bool = False,
+    raw_response: bool = False,
+    window_dimension: Resolution | None = None,
+    force_dict: bool = True,
+) -> dict[str, Any] | Any:
+    raise NotImplementedError("Your LLM provider is not configured. Please configure it in the .env file.")
diff --git a/skyvern/forge/sdk/api/llm/api_handler_factory.py b/skyvern/forge/sdk/api/llm/api_handler_factory.py
index 635d63c5..db3ece1e 100644
--- a/skyvern/forge/sdk/api/llm/api_handler_factory.py
+++ b/skyvern/forge/sdk/api/llm/api_handler_factory.py
@@ -20,6 +20,7 @@ from skyvern.config import settings
 from skyvern.exceptions import SkyvernContextWindowExceededError
 from skyvern.forge import app
 from skyvern.forge.forge_openai_client import ForgeAsyncHttpxClientWrapper
+from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler, dummy_llm_api_handler
 from skyvern.forge.sdk.api.llm.config_registry import LLMConfigRegistry
 from skyvern.forge.sdk.api.llm.exceptions import (
     DuplicateCustomLLMProviderError,
@@ -29,10 +30,8 @@ from skyvern.forge.sdk.api.llm.exceptions import (
 )
 from skyvern.forge.sdk.api.llm.models import (
     LLMAllowedFailsPolicy,
-    LLMAPIHandler,
     LLMConfig,
     LLMRouterConfig,
-    dummy_llm_api_handler,
 )
 from skyvern.forge.sdk.api.llm.ui_tars_response import UITarsResponse
 from skyvern.forge.sdk.api.llm.utils import llm_messages_builder, llm_messages_builder_with_history, parse_api_response
diff --git a/skyvern/forge/sdk/api/llm/models.py b/skyvern/forge/sdk/api/llm/models.py
index b60273be..faa399d8 100644
--- a/skyvern/forge/sdk/api/llm/models.py
+++ b/skyvern/forge/sdk/api/llm/models.py
@@ -1,11 +1,7 @@
 from dataclasses import dataclass, field
-from typing import Any, Awaitable, Literal, Optional, Protocol, TypedDict
+from typing import Any, Literal, Optional, TypedDict
 
-from skyvern.forge.sdk.models import Step
-from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
-from skyvern.forge.sdk.schemas.task_v2 import TaskV2, Thought
 from skyvern.forge.sdk.settings_manager import SettingsManager
-from skyvern.utils.image_resizer import Resolution
 
 
 class LiteLLMParams(TypedDict, total=False):
@@ -93,42 +89,3 @@ class LLMRouterConfig(LLMConfigBase):
     max_completion_tokens: int | None = None
     reasoning_effort: str | None = None
     temperature: float | None = SettingsManager.get_settings().LLM_CONFIG_TEMPERATURE
-
-
-class LLMAPIHandler(Protocol):
-    def __call__(
-        self,
-        prompt: str,
-        prompt_name: str,
-        step: Step | None = None,
-        task_v2: TaskV2 | None = None,
-        thought: Thought | None = None,
-        ai_suggestion: AISuggestion | None = None,
-        screenshots: list[bytes] | None = None,
-        parameters: dict[str, Any] | None = None,
-        organization_id: str | None = None,
-        tools: list | None = None,
-        use_message_history: bool = False,
-        raw_response: bool = False,
-        window_dimension: Resolution | None = None,
-        force_dict: bool = True,
-    ) -> Awaitable[dict[str, Any] | Any]: ...
-
-
-async def dummy_llm_api_handler(
-    prompt: str,
-    prompt_name: str,
-    step: Step | None = None,
-    task_v2: TaskV2 | None = None,
-    thought: Thought | None = None,
-    ai_suggestion: AISuggestion | None = None,
-    screenshots: list[bytes] | None = None,
-    parameters: dict[str, Any] | None = None,
-    organization_id: str | None = None,
-    tools: list | None = None,
-    use_message_history: bool = False,
-    raw_response: bool = False,
-    window_dimension: Resolution | None = None,
-    force_dict: bool = True,
-) -> dict[str, Any] | Any:
-    raise NotImplementedError("Your LLM provider is not configured. Please configure it in the .env file.")
diff --git a/skyvern/forge/sdk/experimentation/llm_prompt_config.py b/skyvern/forge/sdk/experimentation/llm_prompt_config.py
index 5ba7c6de..39b89837 100644
--- a/skyvern/forge/sdk/experimentation/llm_prompt_config.py
+++ b/skyvern/forge/sdk/experimentation/llm_prompt_config.py
@@ -5,8 +5,8 @@ import json
 import structlog
 
 from skyvern.forge import app
+from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler
 from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
-from skyvern.forge.sdk.api.llm.models import LLMAPIHandler
 
 LOG = structlog.get_logger()
 
diff --git a/skyvern/forge/sdk/workflow/models/block.py b/skyvern/forge/sdk/workflow/models/block.py
index da1786f7..2df60151 100644
--- a/skyvern/forge/sdk/workflow/models/block.py
+++ b/skyvern/forge/sdk/workflow/models/block.py
@@ -56,8 +56,8 @@ from skyvern.forge.sdk.api.files import (
     download_from_s3,
     get_path_for_workflow_download_directory,
 )
+from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler
 from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
-from skyvern.forge.sdk.api.llm.models import LLMAPIHandler
 from skyvern.forge.sdk.artifact.models import ArtifactType
 from skyvern.forge.sdk.core import skyvern_context
 from skyvern.forge.sdk.core.aiohttp_helper import aiohttp_request
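
Note for reviewers: with this change, LLMAPIHandler lives in the leaf module skyvern/forge/sdk/api/llm/api_handler.py instead of models.py, so call sites can type against the handler Protocol without importing the LLM config models. A minimal sketch of a handler that structurally satisfies the relocated Protocol follows, e.g. as a test double; the name echo_llm_api_handler and its canned return payload are illustrative and not part of this diff.

from typing import Any

from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler


async def echo_llm_api_handler(
    prompt: str,
    prompt_name: str,
    *args: Any,
    **kwargs: Any,
) -> dict[str, Any] | Any:
    # Sketch only: absorb the Protocol's optional parameters (step, task_v2,
    # screenshots, ...) via *args/**kwargs; a real handler would forward them
    # to an LLM client and return its parsed response.
    return {"prompt_name": prompt_name, "echoed_prompt": prompt}


# Structural typing: an async function with a compatible signature satisfies
# the Protocol's __call__, so this assignment type-checks.
handler: LLMAPIHandler = echo_llm_api_handler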