Move LLMAPIHandler to its own file (#4216)

This commit is contained in:
Stanislav Novosad
2025-12-05 16:36:59 -07:00
committed by GitHub
parent 1e97c58d4f
commit 0f00a86909
6 changed files with 50 additions and 49 deletions

View File

@@ -12,8 +12,8 @@ from skyvern.forge.agent_functions import AgentFunction
from skyvern.forge.forge_openai_client import ForgeAsyncHttpxClientWrapper from skyvern.forge.forge_openai_client import ForgeAsyncHttpxClientWrapper
from skyvern.forge.sdk.api.azure import AzureClientFactory from skyvern.forge.sdk.api.azure import AzureClientFactory
from skyvern.forge.sdk.api.custom_credential_client import CustomCredentialAPIClient from skyvern.forge.sdk.api.custom_credential_client import CustomCredentialAPIClient
from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler
from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
from skyvern.forge.sdk.api.llm.models import LLMAPIHandler
from skyvern.forge.sdk.api.real_azure import RealAzureClientFactory from skyvern.forge.sdk.api.real_azure import RealAzureClientFactory
from skyvern.forge.sdk.artifact.manager import ArtifactManager from skyvern.forge.sdk.artifact.manager import ArtifactManager
from skyvern.forge.sdk.artifact.storage.base import BaseStorage from skyvern.forge.sdk.artifact.storage.base import BaseStorage

View File

@@ -0,0 +1,45 @@
from typing import Any, Awaitable, Protocol
from skyvern.forge.sdk.models import Step
from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
from skyvern.forge.sdk.schemas.task_v2 import TaskV2, Thought
from skyvern.utils.image_resizer import Resolution
class LLMAPIHandler(Protocol):
    """Structural (duck-typed) interface for asynchronous LLM API handlers.

    Any callable with this signature can be used wherever an LLMAPIHandler is
    expected (e.g. handlers produced by LLMAPIHandlerFactory). The callable is
    invoked with a rendered prompt plus optional context objects and returns an
    awaitable resolving to the parsed LLM response.

    Notes on parameters (NOTE(review): semantics inferred from names where the
    signature alone does not show them — confirm against the factory):
    - prompt / prompt_name: the prompt text and its identifying name.
    - step / task_v2 / thought / ai_suggestion: optional execution-context
      objects, presumably used for artifact/usage attribution.
    - screenshots: optional raw image bytes to attach to the request.
    - parameters: extra provider parameters merged into the call.
    - tools: optional tool/function definitions for tool-calling models.
    - use_message_history: include prior conversation messages when True.
    - raw_response: return the provider's raw response instead of parsed output.
    - window_dimension: browser window resolution, presumably for screenshot
      scaling — TODO confirm.
    - force_dict: when True (default), the resolved value is expected to be a
      dict; otherwise it may be Any.
    """

    def __call__(
        self,
        prompt: str,
        prompt_name: str,
        step: Step | None = None,
        task_v2: TaskV2 | None = None,
        thought: Thought | None = None,
        ai_suggestion: AISuggestion | None = None,
        screenshots: list[bytes] | None = None,
        parameters: dict[str, Any] | None = None,
        organization_id: str | None = None,
        tools: list[Any] | None = None,
        use_message_history: bool = False,
        raw_response: bool = False,
        window_dimension: Resolution | None = None,
        force_dict: bool = True,
    ) -> Awaitable[dict[str, Any] | Any]: ...
async def dummy_llm_api_handler(
    prompt: str,
    prompt_name: str,
    step: Step | None = None,
    task_v2: TaskV2 | None = None,
    thought: Thought | None = None,
    ai_suggestion: AISuggestion | None = None,
    screenshots: list[bytes] | None = None,
    parameters: dict[str, Any] | None = None,
    organization_id: str | None = None,
    tools: list | None = None,
    use_message_history: bool = False,
    raw_response: bool = False,
    window_dimension: Resolution | None = None,
    force_dict: bool = True,
) -> dict[str, Any] | Any:
    """Fallback handler used when no LLM provider has been configured.

    Mirrors the LLMAPIHandler call signature so it can be substituted anywhere
    a real handler is expected, but never produces a response: every call
    raises NotImplementedError so the missing configuration surfaces
    immediately instead of failing somewhere deeper in the pipeline.

    Raises:
        NotImplementedError: always, on every invocation.
    """
    # Arguments are accepted only to satisfy the protocol; none are used.
    message = "Your LLM provider is not configured. Please configure it in the .env file."
    raise NotImplementedError(message)

View File

@@ -20,6 +20,7 @@ from skyvern.config import settings
from skyvern.exceptions import SkyvernContextWindowExceededError from skyvern.exceptions import SkyvernContextWindowExceededError
from skyvern.forge import app from skyvern.forge import app
from skyvern.forge.forge_openai_client import ForgeAsyncHttpxClientWrapper from skyvern.forge.forge_openai_client import ForgeAsyncHttpxClientWrapper
from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler, dummy_llm_api_handler
from skyvern.forge.sdk.api.llm.config_registry import LLMConfigRegistry from skyvern.forge.sdk.api.llm.config_registry import LLMConfigRegistry
from skyvern.forge.sdk.api.llm.exceptions import ( from skyvern.forge.sdk.api.llm.exceptions import (
DuplicateCustomLLMProviderError, DuplicateCustomLLMProviderError,
@@ -29,10 +30,8 @@ from skyvern.forge.sdk.api.llm.exceptions import (
) )
from skyvern.forge.sdk.api.llm.models import ( from skyvern.forge.sdk.api.llm.models import (
LLMAllowedFailsPolicy, LLMAllowedFailsPolicy,
LLMAPIHandler,
LLMConfig, LLMConfig,
LLMRouterConfig, LLMRouterConfig,
dummy_llm_api_handler,
) )
from skyvern.forge.sdk.api.llm.ui_tars_response import UITarsResponse from skyvern.forge.sdk.api.llm.ui_tars_response import UITarsResponse
from skyvern.forge.sdk.api.llm.utils import llm_messages_builder, llm_messages_builder_with_history, parse_api_response from skyvern.forge.sdk.api.llm.utils import llm_messages_builder, llm_messages_builder_with_history, parse_api_response

View File

@@ -1,11 +1,7 @@
from dataclasses import dataclass, field from dataclasses import dataclass, field
from typing import Any, Awaitable, Literal, Optional, Protocol, TypedDict from typing import Any, Literal, Optional, TypedDict
from skyvern.forge.sdk.models import Step
from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
from skyvern.forge.sdk.schemas.task_v2 import TaskV2, Thought
from skyvern.forge.sdk.settings_manager import SettingsManager from skyvern.forge.sdk.settings_manager import SettingsManager
from skyvern.utils.image_resizer import Resolution
class LiteLLMParams(TypedDict, total=False): class LiteLLMParams(TypedDict, total=False):
@@ -93,42 +89,3 @@ class LLMRouterConfig(LLMConfigBase):
max_completion_tokens: int | None = None max_completion_tokens: int | None = None
reasoning_effort: str | None = None reasoning_effort: str | None = None
temperature: float | None = SettingsManager.get_settings().LLM_CONFIG_TEMPERATURE temperature: float | None = SettingsManager.get_settings().LLM_CONFIG_TEMPERATURE
class LLMAPIHandler(Protocol):
def __call__(
self,
prompt: str,
prompt_name: str,
step: Step | None = None,
task_v2: TaskV2 | None = None,
thought: Thought | None = None,
ai_suggestion: AISuggestion | None = None,
screenshots: list[bytes] | None = None,
parameters: dict[str, Any] | None = None,
organization_id: str | None = None,
tools: list | None = None,
use_message_history: bool = False,
raw_response: bool = False,
window_dimension: Resolution | None = None,
force_dict: bool = True,
) -> Awaitable[dict[str, Any] | Any]: ...
async def dummy_llm_api_handler(
prompt: str,
prompt_name: str,
step: Step | None = None,
task_v2: TaskV2 | None = None,
thought: Thought | None = None,
ai_suggestion: AISuggestion | None = None,
screenshots: list[bytes] | None = None,
parameters: dict[str, Any] | None = None,
organization_id: str | None = None,
tools: list | None = None,
use_message_history: bool = False,
raw_response: bool = False,
window_dimension: Resolution | None = None,
force_dict: bool = True,
) -> dict[str, Any] | Any:
raise NotImplementedError("Your LLM provider is not configured. Please configure it in the .env file.")

View File

@@ -5,8 +5,8 @@ import json
import structlog import structlog
from skyvern.forge import app from skyvern.forge import app
from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler
from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
from skyvern.forge.sdk.api.llm.models import LLMAPIHandler
LOG = structlog.get_logger() LOG = structlog.get_logger()

View File

@@ -56,8 +56,8 @@ from skyvern.forge.sdk.api.files import (
download_from_s3, download_from_s3,
get_path_for_workflow_download_directory, get_path_for_workflow_download_directory,
) )
from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler
from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
from skyvern.forge.sdk.api.llm.models import LLMAPIHandler
from skyvern.forge.sdk.artifact.models import ArtifactType from skyvern.forge.sdk.artifact.models import ArtifactType
from skyvern.forge.sdk.core import skyvern_context from skyvern.forge.sdk.core import skyvern_context
from skyvern.forge.sdk.core.aiohttp_helper import aiohttp_request from skyvern.forge.sdk.core.aiohttp_helper import aiohttp_request