Add AI suggestion endpoints (#1519)
@@ -22,6 +22,7 @@ from skyvern.forge.sdk.api.llm.utils import llm_messages_builder, parse_api_resp
 from skyvern.forge.sdk.artifact.models import ArtifactType
 from skyvern.forge.sdk.core import skyvern_context
 from skyvern.forge.sdk.models import Step
+from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
 from skyvern.forge.sdk.schemas.observers import ObserverCruise, ObserverThought
 
 LOG = structlog.get_logger()
@@ -63,6 +64,7 @@ class LLMAPIHandlerFactory:
 step: Step | None = None,
 observer_cruise: ObserverCruise | None = None,
 observer_thought: ObserverThought | None = None,
+ai_suggestion: AISuggestion | None = None,
 screenshots: list[bytes] | None = None,
 parameters: dict[str, Any] | None = None,
 ) -> dict[str, Any]:
@@ -89,6 +91,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 await app.ARTIFACT_MANAGER.create_llm_artifact(
@@ -113,6 +116,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 try:
 response = await router.acompletion(model=main_model_group, messages=messages, **parameters)
@@ -140,6 +144,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 if step:
 llm_cost = litellm.completion_cost(completion_response=response)
@@ -160,6 +165,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 if context and len(context.hashed_href_map) > 0:
@@ -172,6 +178,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 return parsed_response
@@ -192,6 +199,7 @@ class LLMAPIHandlerFactory:
 step: Step | None = None,
 observer_cruise: ObserverCruise | None = None,
 observer_thought: ObserverThought | None = None,
+ai_suggestion: AISuggestion | None = None,
 screenshots: list[bytes] | None = None,
 parameters: dict[str, Any] | None = None,
 ) -> dict[str, Any]:
@@ -211,6 +219,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 await app.ARTIFACT_MANAGER.create_llm_artifact(
@@ -220,6 +229,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 if not llm_config.supports_vision:
@@ -239,6 +249,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 t_llm_request = time.perf_counter()
 try:
@@ -274,6 +285,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 if step:
@@ -295,6 +307,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 if context and len(context.hashed_href_map) > 0:
@@ -307,6 +320,7 @@ class LLMAPIHandlerFactory:
 step=step,
 observer_cruise=observer_cruise,
 observer_thought=observer_thought,
+ai_suggestion=ai_suggestion,
 )
 
 return parsed_response
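For context, here is a minimal sketch (not part of this commit) of how a caller might thread an AISuggestion through the updated handler so that the prompt and response artifacts created via create_llm_artifact are keyed to the suggestion rather than to a step or an observer thought. The suggest_data_schema wrapper, the llm_key value, and the factory lookup through get_llm_api_handler are illustrative assumptions, not code from this change.

from typing import Any

from skyvern.forge.sdk.api.llm.api_handler_factory import LLMAPIHandlerFactory
from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion


async def suggest_data_schema(prompt: str, ai_suggestion: AISuggestion) -> dict[str, Any]:
    # Hypothetical helper: resolve a handler from the factory and forward the
    # AISuggestion so the handler's artifact calls can associate the LLM
    # prompt/response with it.
    llm_api_handler = LLMAPIHandlerFactory.get_llm_api_handler("OPENAI_GPT4O")  # placeholder llm_key
    return await llm_api_handler(
        prompt=prompt,
        ai_suggestion=ai_suggestion,  # parameter introduced by this commit
    )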
@@ -4,6 +4,7 @@ from typing import Any, Awaitable, Literal, Optional, Protocol, TypedDict
 from litellm import AllowedFailsPolicy
 
 from skyvern.forge.sdk.models import Step
+from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
 from skyvern.forge.sdk.schemas.observers import ObserverCruise, ObserverThought
 from skyvern.forge.sdk.settings_manager import SettingsManager
@@ -81,6 +82,7 @@ class LLMAPIHandler(Protocol):
 step: Step | None = None,
 observer_cruise: ObserverCruise | None = None,
 observer_thought: ObserverThought | None = None,
+ai_suggestion: AISuggestion | None = None,
 screenshots: list[bytes] | None = None,
 parameters: dict[str, Any] | None = None,
 ) -> Awaitable[dict[str, Any]]: ...
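A minimal stub that satisfies the widened LLMAPIHandler Protocol, shown only to illustrate the full signature after this change; the leading prompt parameter is assumed from the unchanged part of the Protocol, and the body is a placeholder rather than Skyvern code.

from typing import Any

from skyvern.forge.sdk.models import Step
from skyvern.forge.sdk.schemas.ai_suggestions import AISuggestion
from skyvern.forge.sdk.schemas.observers import ObserverCruise, ObserverThought


async def noop_llm_api_handler(
    prompt: str,
    step: Step | None = None,
    observer_cruise: ObserverCruise | None = None,
    observer_thought: ObserverThought | None = None,
    ai_suggestion: AISuggestion | None = None,
    screenshots: list[bytes] | None = None,
    parameters: dict[str, Any] | None = None,
) -> dict[str, Any]:
    # A real handler would build messages, call the LLM, and persist artifacts
    # keyed to whichever of step / observer_cruise / observer_thought /
    # ai_suggestion was supplied; this stub just returns an empty response.
    return {}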