Handle litellm APIError in the router and raise LLMProviderErrorRetryableTask (#750)

This commit is contained in:
Shuchang Zheng
2024-08-28 10:23:16 -07:00
committed by GitHub
parent 9cc290cc80
commit b5d743d6eb

View File

@@ -5,7 +5,6 @@ from asyncio import CancelledError
from typing import Any
import litellm
import openai
import structlog
from skyvern.forge import app
@@ -107,8 +106,8 @@ class LLMAPIHandlerFactory:
LOG.info("Calling LLM API", llm_key=llm_key, model=llm_config.model_name)
response = await router.acompletion(model=main_model_group, messages=messages, **parameters)
LOG.info("LLM API call successful", llm_key=llm_key, model=llm_config.model_name)
except openai.OpenAIError as e:
raise LLMProviderError(llm_key) from e
except litellm.exceptions.APIError as e:
raise LLMProviderErrorRetryableTask(llm_key) from e
except Exception as e:
LOG.exception(
"LLM request failed unexpectedly",