Move LLMAPIHandler to its own file (#4216)

Stanislav Novosad
2025-12-05 16:36:59 -07:00
committed by GitHub
parent 1e97c58d4f
commit 0f00a86909
6 changed files with 50 additions and 49 deletions

View File: skyvern/forge/sdk/api/llm/api_handler.py (new file)

@@ -0,0 +1,45 @@
+from typing import Any, Awaitable, Protocol
+
+from skyvern.forge.sdk.models import Step
+from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
+from skyvern.forge.sdk.schemas.task_v2 import TaskV2, Thought
+from skyvern.utils.image_resizer import Resolution
+
+
+class LLMAPIHandler(Protocol):
+    def __call__(
+        self,
+        prompt: str,
+        prompt_name: str,
+        step: Step | None = None,
+        task_v2: TaskV2 | None = None,
+        thought: Thought | None = None,
+        ai_suggestion: AISuggestion | None = None,
+        screenshots: list[bytes] | None = None,
+        parameters: dict[str, Any] | None = None,
+        organization_id: str | None = None,
+        tools: list | None = None,
+        use_message_history: bool = False,
+        raw_response: bool = False,
+        window_dimension: Resolution | None = None,
+        force_dict: bool = True,
+    ) -> Awaitable[dict[str, Any] | Any]: ...
+
+
+async def dummy_llm_api_handler(
+    prompt: str,
+    prompt_name: str,
+    step: Step | None = None,
+    task_v2: TaskV2 | None = None,
+    thought: Thought | None = None,
+    ai_suggestion: AISuggestion | None = None,
+    screenshots: list[bytes] | None = None,
+    parameters: dict[str, Any] | None = None,
+    organization_id: str | None = None,
+    tools: list | None = None,
+    use_message_history: bool = False,
+    raw_response: bool = False,
+    window_dimension: Resolution | None = None,
+    force_dict: bool = True,
+) -> dict[str, Any] | Any:
+    raise NotImplementedError("Your LLM provider is not configured. Please configure it in the .env file.")
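
Because LLMAPIHandler is a typing.Protocol, any callable with a compatible signature satisfies it structurally; no subclassing or registration is required. A minimal sketch of a conforming handler (the echo handler below is hypothetical, for illustration only, not part of this commit):

from typing import Any

from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler


# Hypothetical handler: an async function whose parameters are compatible
# with LLMAPIHandler.__call__ satisfies the Protocol structurally.
async def echo_llm_api_handler(
    prompt: str,
    prompt_name: str,
    **kwargs: Any,
) -> dict[str, Any]:
    return {"prompt_name": prompt_name, "echo": prompt}


handler: LLMAPIHandler = echo_llm_api_handler  # accepted by structural typing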

View File

@@ -20,6 +20,7 @@ from skyvern.config import settings
 from skyvern.exceptions import SkyvernContextWindowExceededError
 from skyvern.forge import app
 from skyvern.forge.forge_openai_client import ForgeAsyncHttpxClientWrapper
+from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler, dummy_llm_api_handler
 from skyvern.forge.sdk.api.llm.config_registry import LLMConfigRegistry
 from skyvern.forge.sdk.api.llm.exceptions import (
     DuplicateCustomLLMProviderError,
@@ -29,10 +30,8 @@ from skyvern.forge.sdk.api.llm.exceptions import (
 )
 from skyvern.forge.sdk.api.llm.models import (
     LLMAllowedFailsPolicy,
-    LLMAPIHandler,
     LLMConfig,
     LLMRouterConfig,
-    dummy_llm_api_handler,
 )
 from skyvern.forge.sdk.api.llm.ui_tars_response import UITarsResponse
 from skyvern.forge.sdk.api.llm.utils import llm_messages_builder, llm_messages_builder_with_history, parse_api_response
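
With the Protocol and the dummy handler now living in their own module, this file imports them from there and the models import shrinks accordingly. Downstream code that imported these names from models needs the same one-line change:

# Before this commit:
# from skyvern.forge.sdk.api.llm.models import LLMAPIHandler, dummy_llm_api_handler

# After this commit:
from skyvern.forge.sdk.api.llm.api_handler import LLMAPIHandler, dummy_llm_api_handler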

View File: skyvern/forge/sdk/api/llm/models.py

@@ -1,11 +1,7 @@
 from dataclasses import dataclass, field
-from typing import Any, Awaitable, Literal, Optional, Protocol, TypedDict
+from typing import Any, Literal, Optional, TypedDict
 
-from skyvern.forge.sdk.models import Step
-from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
-from skyvern.forge.sdk.schemas.task_v2 import TaskV2, Thought
 from skyvern.forge.sdk.settings_manager import SettingsManager
-from skyvern.utils.image_resizer import Resolution
 
 
 class LiteLLMParams(TypedDict, total=False):
@@ -93,42 +89,3 @@ class LLMRouterConfig(LLMConfigBase):
     max_completion_tokens: int | None = None
     reasoning_effort: str | None = None
     temperature: float | None = SettingsManager.get_settings().LLM_CONFIG_TEMPERATURE
-
-
-class LLMAPIHandler(Protocol):
-    def __call__(
-        self,
-        prompt: str,
-        prompt_name: str,
-        step: Step | None = None,
-        task_v2: TaskV2 | None = None,
-        thought: Thought | None = None,
-        ai_suggestion: AISuggestion | None = None,
-        screenshots: list[bytes] | None = None,
-        parameters: dict[str, Any] | None = None,
-        organization_id: str | None = None,
-        tools: list | None = None,
-        use_message_history: bool = False,
-        raw_response: bool = False,
-        window_dimension: Resolution | None = None,
-        force_dict: bool = True,
-    ) -> Awaitable[dict[str, Any] | Any]: ...
-
-
-async def dummy_llm_api_handler(
-    prompt: str,
-    prompt_name: str,
-    step: Step | None = None,
-    task_v2: TaskV2 | None = None,
-    thought: Thought | None = None,
-    ai_suggestion: AISuggestion | None = None,
-    screenshots: list[bytes] | None = None,
-    parameters: dict[str, Any] | None = None,
-    organization_id: str | None = None,
-    tools: list | None = None,
-    use_message_history: bool = False,
-    raw_response: bool = False,
-    window_dimension: Resolution | None = None,
-    force_dict: bool = True,
-) -> dict[str, Any] | Any:
-    raise NotImplementedError("Your LLM provider is not configured. Please configure it in the .env file.")
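
One detail worth noting in the moved code: the Protocol declares __call__ as a synchronous method returning Awaitable[dict[str, Any] | Any], while dummy_llm_api_handler is an async def returning the unwrapped value. These line up because calling an async function produces a coroutine, and a coroutine is an Awaitable. A self-contained sketch of the same pattern (all names here are illustrative, not from the codebase):

import asyncio
from typing import Any, Awaitable, Protocol


class Handler(Protocol):
    # Declared sync, but the result must be awaited: exactly the shape
    # an async def produces when called.
    def __call__(self, prompt: str) -> Awaitable[dict[str, Any]]: ...


async def concrete(prompt: str) -> dict[str, Any]:
    return {"echo": prompt}


h: Handler = concrete  # an async def satisfies an Awaitable-returning Protocol
print(asyncio.run(h("hi")))  # {'echo': 'hi'}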