Move the code over from private repository (#3)
This commit is contained in:
0
streamlit_app/visualizer/__init__.py
Normal file
0
streamlit_app/visualizer/__init__.py
Normal file
74
streamlit_app/visualizer/api.py
Normal file
74
streamlit_app/visualizer/api.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import json
|
||||
from typing import Any
|
||||
|
||||
import requests
|
||||
|
||||
from skyvern.forge.sdk.schemas.tasks import TaskRequest
|
||||
|
||||
|
||||
class SkyvernClient:
    """Minimal HTTP client for the Skyvern agent API.

    Every request authenticates with the ``x-api-key`` header carrying the
    credentials supplied at construction time; responses are returned as the
    decoded JSON payloads the API produces.
    """

    def __init__(self, base_url: str, credentials: str):
        # Base URL of the Skyvern API, e.g. "https://host/api/v1" (no trailing slash).
        self.base_url = base_url
        # API key sent as the "x-api-key" header on every request.
        self.credentials = credentials

    def create_task(self, task_request_body: TaskRequest) -> str | None:
        """Create a task and return its id, or None if the API returned no id."""
        url = f"{self.base_url}/tasks"
        payload = task_request_body.model_dump()
        headers = {
            "Content-Type": "application/json",
            "x-api-key": self.credentials,
        }
        response = requests.post(url, headers=headers, data=json.dumps(payload))
        # Decode the body once instead of calling response.json() twice.
        body = response.json()
        if "task_id" not in body:
            return None
        return body["task_id"]

    def get_task(self, task_id: str) -> dict[str, Any] | None:
        """Get a task by id; returns None unless the API answers 200."""
        url = f"{self.base_url}/internal/tasks/{task_id}"
        headers = {"x-api-key": self.credentials}
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            return None
        return response.json()

    def get_agent_tasks(self, page: int = 1, page_size: int = 15) -> dict[str, Any]:
        """Get all tasks with pagination."""
        url = f"{self.base_url}/internal/tasks"
        params = {"page": page, "page_size": page_size}
        headers = {"x-api-key": self.credentials}
        response = requests.get(url, params=params, headers=headers)
        return response.json()

    def get_agent_task_steps(self, task_id: str, page: int = 1, page_size: int = 15) -> list[dict[str, Any]]:
        """Get all steps for a task with pagination.

        Each step's nested ``actions_and_results`` structure is re-serialized
        to a JSON string so it renders as a single cell in dataframes.
        """
        url = f"{self.base_url}/tasks/{task_id}/steps"
        params = {"page": page, "page_size": page_size}
        headers = {"x-api-key": self.credentials}
        response = requests.get(url, params=params, headers=headers)
        steps = response.json()
        for step in steps:
            output = step.get("output")
            # A step may not have produced output yet (the UI elsewhere guards
            # step.get("output")); skip it instead of raising TypeError/KeyError.
            if output and "actions_and_results" in output:
                output["actions_and_results"] = json.dumps(output["actions_and_results"])
        return steps

    def get_agent_task_video_artifact(self, task_id: str) -> dict[str, Any] | None:
        """Return the first step's "recording" artifact, or None if absent."""
        steps = self.get_agent_task_steps(task_id)
        if not steps:
            return None

        first_step_id = steps[0]["step_id"]
        artifacts = self.get_agent_artifacts(task_id, first_step_id)
        for artifact in artifacts:
            if artifact["artifact_type"] == "recording":
                return artifact

        return None

    def get_agent_artifacts(self, task_id: str, step_id: str) -> list[dict[str, Any]]:
        """Get all artifacts for one step of a task."""
        url = f"{self.base_url}/tasks/{task_id}/steps/{step_id}/artifacts"
        headers = {"x-api-key": self.credentials}
        response = requests.get(url, headers=headers)
        return response.json()
|
||||
69
streamlit_app/visualizer/artifact_loader.py
Normal file
69
streamlit_app/visualizer/artifact_loader.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import asyncio
|
||||
import random
|
||||
import string
|
||||
import typing
|
||||
from typing import Any, Callable
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from skyvern.forge.sdk.api.aws import AsyncAWSClient
|
||||
|
||||
async_s3_client = AsyncAWSClient()
|
||||
|
||||
|
||||
def read_artifact(uri: str, is_image: bool = False, is_webm: bool = False) -> Image.Image | str | bytes:
    """Load an artifact from an ``s3://`` or ``file://`` URI.

    Returns raw bytes for image/webm content fetched from S3, a PIL Image for
    local image files, bytes for local webm files, and decoded text otherwise.

    Raises:
        ValueError: if the URI uses an unsupported scheme.
    """
    if uri.startswith("s3://"):
        downloaded_bytes = asyncio.run(async_s3_client.download_file(uri))
        # Binary artifacts (images and webm recordings) stay as raw bytes; the
        # original is_image/is_webm branches were exact duplicates, merged here.
        if is_image or is_webm:
            return downloaded_bytes
        return downloaded_bytes.decode("utf-8")
    elif uri.startswith("file://"):
        # Strip the scheme prefix to get a plain filesystem path.
        uri = uri[len("file://"):]
        if is_image:
            with open(uri, "rb") as f:
                image = Image.open(f)
                # Force pixel data to load before the file handle closes.
                image.load()
                return image
        elif is_webm:
            with open(uri, "rb") as f:
                return f.read()
        else:
            # Read text as UTF-8 for consistency with the S3 branch above;
            # the platform default encoding is not guaranteed to be UTF-8.
            with open(uri, "r", encoding="utf-8") as f:
                return f.read()
    else:
        raise ValueError(f"Unsupported URI: {uri}")
|
||||
|
||||
|
||||
def read_artifact_safe(uri: str, is_image: bool = False, is_webm: bool = False) -> Image.Image | str | bytes:
    """Best-effort wrapper around read_artifact.

    Any failure is reported as a human-readable string instead of raising, so
    one bad artifact never takes down the whole page.
    """
    try:
        return read_artifact(uri, is_image=is_image, is_webm=is_webm)
    except Exception as e:
        return f"Failed to load artifact: {e}"
|
||||
|
||||
|
||||
def streamlit_content_safe(st_obj: Any, f: Callable, content: bytes, message: str, **kwargs: dict[str, Any]) -> None:
    """Render `content` through `f`, falling back to writing `message`.

    Falsy content (None, b"", "") and any exception raised by the renderer
    are both reported by writing `message` to the Streamlit container.
    """
    try:
        if not content:
            st_obj.write(message)
        else:
            f(content, **kwargs)
    except Exception:
        st_obj.write(message)
|
||||
|
||||
|
||||
@typing.no_type_check
def streamlit_show_recording(st_obj: Any, uri: str) -> None:
    """Render a task recording: a download button plus an inline video player."""
    # read_artifact_safe returns bytes for webm content; type checking is
    # disabled because its declared return type is a wider union.
    content = read_artifact_safe(uri, is_webm=True)  # type: ignore
    if content:
        # Streamlit needs a unique widget key; a short random suffix suffices.
        alphabet = string.ascii_uppercase + string.digits
        button_key = "".join(random.choices(alphabet, k=6))
        file_name = f"recording{uri.split('/')[-1]}.webm"
        st_obj.download_button("Download recording", content, file_name, key=button_key)

    streamlit_content_safe(st_obj, st_obj.video, content, "No recording available.", format="video/webm", start_time=0)
|
||||
30
streamlit_app/visualizer/repository.py
Normal file
30
streamlit_app/visualizer/repository.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from typing import Any, Optional
|
||||
|
||||
from streamlit_app.visualizer.api import SkyvernClient
|
||||
|
||||
|
||||
class TaskRepository:
    """Read-side facade over SkyvernClient used by the visualizer UI."""

    def __init__(self, client: SkyvernClient):
        self.client = client

    def get_task(self, task_id: str) -> dict[str, Any] | None:
        """Look a single task up by its id."""
        return self.client.get_task(task_id)

    def get_tasks(self, page: int = 1, page_size: int = 15) -> dict[str, Any]:
        """Fetch one page of tasks."""
        return self.client.get_agent_tasks(page=page, page_size=page_size)

    def get_task_steps(self, task_id: str) -> list[dict[str, Any]]:
        """Fetch the steps belonging to one task."""
        return self.client.get_agent_task_steps(task_id)

    def get_artifacts(self, task_id: str, step_id: str) -> list[dict[str, Any]]:
        """Fetch the artifacts produced by one step of a task."""
        return self.client.get_agent_artifacts(task_id, step_id)

    def get_task_recording_uri(self, task: dict[str, Any]) -> Optional[str]:
        """Return the URI of the task's recording artifact, if any."""
        artifact = self.client.get_agent_task_video_artifact(task["task_id"])
        return None if artifact is None else artifact["uri"]
|
||||
185
streamlit_app/visualizer/sample_data.py
Normal file
185
streamlit_app/visualizer/sample_data.py
Normal file
@@ -0,0 +1,185 @@
|
||||
import json
|
||||
|
||||
|
||||
def get_sample_url() -> str:
    """Starting URL used to pre-fill the demo task form."""
    return "https://www.geico.com"
|
||||
|
||||
|
||||
def get_sample_navigation_goal() -> str:
    """Sample navigation goal used to pre-fill the demo task form."""
    return (
        "Navigate through the website until you generate an auto insurance quote. "
        "Do not generate a home insurance quote. "
        "If this page contains an auto insurance quote, consider the goal achieved"
    )
|
||||
|
||||
|
||||
def get_sample_data_extraction_goal() -> str:
    """Sample data-extraction goal used to pre-fill the demo task form."""
    return (
        "Extract all quote information in JSON format "
        "including the premium amount, the timeframe for the quote."
    )
|
||||
|
||||
|
||||
def get_sample_navigation_payload() -> str:
    """Return a JSON-encoded sample navigation payload for the demo form.

    The payload mimics the applicant details a user would supply when
    requesting an auto insurance quote (driver, spouse, vehicle, address).
    Returned as a JSON string because the form field is a text area.
    """
    navigation_payload = {
        "licensed_at_age": 19,
        "education_level": "HIGH_SCHOOL",
        "phone_number": "8042221111",
        "full_name": "Chris P. Bacon",
        "past_claim": [],
        "has_claims": False,
        "spouse_occupation": "Florist",
        # The string "None" (not null) — presumably means "no current carrier".
        "auto_current_carrier": "None",
        "home_commercial_uses": None,
        "spouse_full_name": "Amy Stake",
        "auto_commercial_uses": None,
        "requires_sr22": False,
        "previous_address_move_date": None,
        "line_of_work": None,
        # NOTE(review): "age" fields hold ISO birth dates, not ages — confirm
        # against the consumer before renaming.
        "spouse_age": "1987-12-12",
        "auto_insurance_deadline": None,
        "email": "chris.p.bacon@abc.com",
        "net_worth_numeric": 1000000,
        "spouse_gender": "F",
        "marital_status": "married",
        "spouse_licensed_at_age": 20,
        "license_number": "AAAAAAA090AA",
        "spouse_license_number": "AAAAAAA080AA",
        "how_much_can_you_lose": 25000,
        "vehicles": [
            {
                "annual_mileage": 10000,
                "commute_mileage": 4000,
                "existing_coverages": None,
                "ideal_coverages": {
                    "bodily_injury_per_incident_limit": 50000,
                    "bodily_injury_per_person_limit": 25000,
                    "collision_deductible": 1000,
                    "comprehensive_deductible": 1000,
                    "personal_injury_protection": None,
                    "property_damage_per_incident_limit": None,
                    "property_damage_per_person_limit": 25000,
                    "rental_reimbursement_per_incident_limit": None,
                    "rental_reimbursement_per_person_limit": None,
                    "roadside_assistance_limit": None,
                    "underinsured_motorist_bodily_injury_per_incident_limit": 50000,
                    "underinsured_motorist_bodily_injury_per_person_limit": 25000,
                    "underinsured_motorist_property_limit": None,
                },
                "ownership": "Owned",
                "parked": "Garage",
                "purpose": "commute",
                "vehicle": {
                    "style": "AWD 3.0 quattro TDI 4dr Sedan",
                    "model": "A8 L",
                    "price_estimate": 29084,
                    "year": 2015,
                    "make": "Audi",
                },
                "vehicle_id": None,
                "vin": None,
            }
        ],
        "additional_drivers": [],
        "home": [
            {
                "home_ownership": "owned",
            }
        ],
        "spouse_line_of_work": "Agriculture, Forestry and Fishing",
        "occupation": "Customer Service Representative",
        "id": None,
        "gender": "M",
        "credit_check_authorized": False,
        "age": "1987-11-11",
        "license_state": "Washington",
        "cash_on_hand": "$10000–14999",
        "address": {
            "city": "HOUSTON",
            "country": "US",
            "state": "TX",
            "street": "9625 GARFIELD AVE.",
            "zip": "77082",
        },
        "spouse_education_level": "MASTERS",
        "spouse_email": "amy.stake@abc.com",
        "spouse_added_to_auto_policy": True,
    }

    return json.dumps(navigation_payload)
|
||||
|
||||
|
||||
def get_sample_extracted_information_schema() -> str:
    """Return a JSON-encoded JSON-Schema describing the quote data to extract.

    Top-level shape: an object with a "quotes" array; each quote carries its
    coverages, premium, quote number, timeframe, and per-vehicle coverages.
    Returned as a JSON string because the form field is a text area.
    """
    extracted_information_schema = {
        "additionalProperties": False,
        "properties": {
            "quotes": {
                "items": {
                    "additionalProperties": False,
                    "properties": {
                        "coverages": {
                            "items": {
                                "additionalProperties": False,
                                "properties": {
                                    "amount": {
                                        "description": "The coverage amount in USD, which can be a single value or a range (e.g., '$300,000' or '$300,000/$300,000').",
                                        "type": "string",
                                    },
                                    "included": {
                                        "description": "Indicates whether the coverage is included in the policy (true or False).",
                                        "type": "boolean",
                                    },
                                    "type": {
                                        "description": "The limit of the coverage (e.g., 'bodily_injury_limit', 'property_damage_limit', 'underinsured_motorist_bodily_injury_limit').\nTranslate the english name of the coverage to snake case values in the following list:\n * bodily_injury_limit\n * property_damage_limit\n * underinsured_motorist_bodily_injury_limit\n * personal_injury_protection\n * accidental_death\n * work_loss_exclusion\n",
                                        "type": "string",
                                    },
                                },
                                "type": "object",
                            },
                            "type": "array",
                        },
                        "premium_amount": {
                            "description": "The total premium amount for the whole quote timeframe in USD, formatted as a string (e.g., '$321.57').",
                            "type": "string",
                        },
                        "quote_number": {
                            "description": "The quote number generated by the carrier that identifies this quote",
                            "type": "string",
                        },
                        "timeframe": {
                            "description": "The duration of the coverage, typically expressed in months or years.",
                            "type": "string",
                        },
                        "vehicle_coverages": {
                            "items": {
                                "additionalProperties": False,
                                "properties": {
                                    "collision_deductible": {
                                        "description": "The collision deductible amount in USD, which is a single value (e.g., '$500') or null if it is not included",
                                        "type": "string",
                                    },
                                    # NOTE(review): description below says
                                    # "collision" but this key is the
                                    # comprehensive deductible — likely a
                                    # copy/paste slip; confirm before fixing.
                                    "comprehensive_deductible": {
                                        "description": "The collision deductible amount in USD, which is a single value (e.g., '$500') or null if it is not included",
                                        "type": "string",
                                    },
                                    "for_vehicle": {
                                        "additionalProperties": False,
                                        "description": "The vehicle that the collision and comprehensive coverage is for",
                                        "properties": {
                                            "make": {"description": "The make of the vehicle", "type": "string"},
                                            "model": {"description": "The model of the vehicle", "type": "string"},
                                            "year": {"description": "The year of the vehicle", "type": "string"},
                                        },
                                        "type": "object",
                                    },
                                    "underinsured_property_damage": {
                                        "description": "The underinsured property damage limit for this vehicle, which is a limit and a deductible (e.g., '$25,000/$250 deductible') or null if it is not included",
                                        "type": "string",
                                    },
                                },
                                "type": "object",
                            },
                            "type": "array",
                        },
                    },
                    "type": "object",
                },
                "type": "array",
            }
        },
        "type": "object",
    }
    return json.dumps(extracted_information_schema)
|
||||
383
streamlit_app/visualizer/streamlit.py
Normal file
383
streamlit_app/visualizer/streamlit.py
Normal file
@@ -0,0 +1,383 @@
|
||||
import pandas as pd
|
||||
import streamlit as st
|
||||
|
||||
from skyvern.forge.sdk.schemas.tasks import ProxyLocation, TaskRequest
|
||||
from streamlit_app.visualizer import styles
|
||||
from streamlit_app.visualizer.api import SkyvernClient
|
||||
from streamlit_app.visualizer.artifact_loader import (
|
||||
read_artifact_safe,
|
||||
streamlit_content_safe,
|
||||
streamlit_show_recording,
|
||||
)
|
||||
from streamlit_app.visualizer.repository import TaskRepository
|
||||
from streamlit_app.visualizer.sample_data import (
|
||||
get_sample_data_extraction_goal,
|
||||
get_sample_extracted_information_schema,
|
||||
get_sample_navigation_goal,
|
||||
get_sample_navigation_payload,
|
||||
get_sample_url,
|
||||
)
|
||||
|
||||
# Streamlit UI Configuration: use the full browser width for the three-column layout.
st.set_page_config(layout="wide")

# Apply styles: raw CSS (global font + button theming) injected as HTML.
st.markdown(styles.page_font_style, unsafe_allow_html=True)
st.markdown(styles.button_style, unsafe_allow_html=True)
|
||||
|
||||
|
||||
# Configuration
|
||||
def reset_session_state() -> None:
    """Clear all Streamlit session state.

    Called when the environment or organization selection changes so that the
    cached client/repository/tasks from the previous selection are dropped.
    """
    # Snapshot the keys first: deleting entries while iterating the live key
    # view of a mapping raises "dictionary changed size during iteration".
    for key in list(st.session_state.keys()):
        del st.session_state[key]
|
||||
|
||||
|
||||
# Load the per-environment connection settings from Streamlit secrets.
CONFIGS_DICT = st.secrets["skyvern"]["configs"]
if not CONFIGS_DICT:
    raise Exception("No configuration found. Copy the values from 1P and restart the app.")

# Shape: {env: {"host": api_host, "orgs": {org_name: credential}}}
SETTINGS = {
    config["env"]: {
        "host": config["host"],
        "orgs": {org["name"]: org["cred"] for org in config["orgs"]},
    }
    for config in CONFIGS_DICT
}

st.sidebar.markdown("#### **Settings**")
# Changing either selector wipes session state so stale clients are rebuilt.
select_env = st.sidebar.selectbox("Environment", list(SETTINGS.keys()), on_change=reset_session_state)
select_org = st.sidebar.selectbox(
    "Organization", list(SETTINGS[select_env]["orgs"].keys()), on_change=reset_session_state
)
|
||||
|
||||
# Initialize session state (runs only once per browser session; Streamlit
# re-executes this whole script on every interaction).
if "client" not in st.session_state:
    st.session_state.client = SkyvernClient(
        base_url=SETTINGS[select_env]["host"], credentials=SETTINGS[select_env]["orgs"][select_org]
    )
if "repository" not in st.session_state:
    st.session_state.repository = TaskRepository(st.session_state.client)
if "task_page_number" not in st.session_state:
    # 1-based page index for the paginated task list.
    st.session_state.task_page_number = 1
if "selected_task" not in st.session_state:
    # The recording URI and cached steps always travel with the selected task.
    st.session_state.selected_task = None
    st.session_state.selected_task_recording_uri = None
    st.session_state.task_steps = None
if "selected_step" not in st.session_state:
    st.session_state.selected_step = None
    st.session_state.selected_step_index = None

# Read-only aliases for this rerun; the onclick handlers below mutate
# st.session_state directly, which takes effect on the next rerun.
client = st.session_state.client
repository = st.session_state.repository
task_page_number = st.session_state.task_page_number
selected_task = st.session_state.selected_task
selected_task_recording_uri = st.session_state.selected_task_recording_uri
task_steps = st.session_state.task_steps
selected_step = st.session_state.selected_step
selected_step_index = st.session_state.selected_step_index
|
||||
|
||||
|
||||
# Onclick handlers
|
||||
def select_task(task: dict) -> None:
    """Make `task` the active task and point the step selection at its first step."""
    state = st.session_state
    state.selected_task = task
    state.selected_task_recording_uri = repository.get_task_recording_uri(task)
    # Drop the previous task's step selection before loading the new steps.
    state.selected_step = None
    steps = repository.get_task_steps(task["task_id"])
    state.task_steps = steps
    if steps:
        state.selected_step = steps[0]
        state.selected_step_index = 0
|
||||
|
||||
|
||||
def go_to_previous_step() -> None:
    """Select the step before the current one, clamped at the first step."""
    select_step(task_steps[max(0, selected_step_index - 1)])
|
||||
|
||||
|
||||
def go_to_next_step() -> None:
    """Select the step after the current one, clamped at the last step."""
    last_index = len(task_steps) - 1
    select_step(task_steps[min(last_index, selected_step_index + 1)])
|
||||
|
||||
|
||||
def select_step(step: dict) -> None:
    """Record `step` (and its position within task_steps) as the current selection."""
    st.session_state.selected_step_index = task_steps.index(step)
    st.session_state.selected_step = step
|
||||
|
||||
|
||||
# Streamlit UI Logic: page title plus the two top-level tabs.
st.markdown("# **:dragon: Skyvern :dragon:**")
# Show which environment/organization the page is currently pointed at.
st.markdown(f"### **{select_env} - {select_org}**")
execute_tab, visualizer_tab = st.tabs(["Execute", "Visualizer"])
|
||||
|
||||
# "Execute" tab: a form to create a new task, plus field documentation.
with execute_tab:
    create_column, explanation_column = st.columns([1, 2])
    with create_column:
        with st.form("task_form"):
            st.markdown("## Run a task")
            # Create all the fields to create a TaskRequest object; each field
            # is pre-filled with the GEICO sample data.
            st_url = st.text_input("URL*", value=get_sample_url(), key="url")
            st_webhook_callback_url = st.text_input("Webhook Callback URL", key="webhook", placeholder="Optional")
            st_navigation_goal = st.text_input(
                "Navigation Goal",
                key="nav_goal",
                placeholder="Describe the navigation goal",
                value=get_sample_navigation_goal(),
            )
            st_data_extraction_goal = st.text_input(
                "Data Extraction Goal",
                key="data_goal",
                placeholder="Describe the data extraction goal",
                value=get_sample_data_extraction_goal(),
            )
            st_navigation_payload = st.text_area(
                "Navigation Payload JSON",
                key="nav_payload",
                placeholder='{"name": "John Doe", "email": "abc@123.com"}',
                value=get_sample_navigation_payload(),
            )
            st_extracted_information_schema = st.text_area(
                "Extracted Information Schema",
                key="extracted_info_schema",
                placeholder='{"quote_price": "float"}',
                value=get_sample_extracted_information_schema(),
            )
            # Create a TaskRequest object from the form fields. NOTE(review):
            # this is built on every rerun, not just on submit — validation
            # errors would surface before the button is pressed; confirm
            # TaskRequest tolerates the sample values.
            task_request_body = TaskRequest(
                url=st_url,
                webhook_callback_url=st_webhook_callback_url,
                navigation_goal=st_navigation_goal,
                data_extraction_goal=st_data_extraction_goal,
                proxy_location=ProxyLocation.NONE,
                navigation_payload=st_navigation_payload,
                extracted_information_schema=st_extracted_information_schema,
            )
            # Submit the form
            if st.form_submit_button("Execute Task", use_container_width=True):
                # Call the API to create a task; create_task returns None on failure.
                task_id = client.create_task(task_request_body)
                if not task_id:
                    st.error("Failed to create task!")
                else:
                    st.success("Task created successfully, task_id: " + task_id)

    # Static field-by-field documentation shown next to the form.
    with explanation_column:
        st.markdown("### **Task Request**")
        st.markdown("#### **URL**")
        st.markdown("The starting URL for the task.")
        st.markdown("#### **Webhook Callback URL**")
        st.markdown("The URL to call with the results when the task is completed.")
        st.markdown("#### **Navigation Goal**")
        st.markdown("The user's goal for the task. Nullable if the task is only for data extraction.")
        st.markdown("#### **Data Extraction Goal**")
        st.markdown("The user's goal for data extraction. Nullable if the task is only for navigation.")
        st.markdown("#### **Navigation Payload**")
        st.markdown("The user's details needed to achieve the task. AI will use this information as needed.")
        st.markdown("#### **Extracted Information Schema**")
        st.markdown("The requested schema of the extracted information for data extraction goal.")
|
||||
|
||||
|
||||
# "Visualizer" tab: browse tasks -> steps -> per-step artifacts.
with visualizer_tab:
    task_id_input = st.text_input("task_id", value="")

    def search_task() -> None:
        """Select the task whose id was typed into the search box, if it exists."""
        if not task_id_input:
            return

        task = repository.get_task(task_id_input)
        if task:
            select_task(task)
        else:
            st.error(f"Task with id {task_id_input} not found.")

    st.button("search task", on_click=search_task)

    # Three content columns (tasks | steps | artifacts) with spacer columns between.
    col_tasks, _, col_steps, _, col_artifacts = st.columns([4, 1, 6, 1, 18])

    col_tasks.markdown(f"#### Tasks")
    col_steps.markdown(f"#### Steps")
    col_artifacts.markdown("#### Artifacts")
    tasks_response = repository.get_tasks(task_page_number)
    if "error" in tasks_response:
        st.write(tasks_response)

    # Display tasks in sidebar for selection.
    # NOTE(review): when tasks_response is an error dict, the comprehension
    # below iterates its keys and will fail on task["task_id"] — presumably
    # get_tasks returns a list on success; confirm the error shape upstream.
    tasks = {task["task_id"]: task for task in tasks_response}
    task_id_buttons = {
        task_id: col_tasks.button(
            f"{task_id}",
            on_click=select_task,
            args=(task,),
            use_container_width=True,
            # Highlight the currently selected task as a "primary" button.
            type="primary" if selected_task and task_id == selected_task["task_id"] else "secondary",
        )
        for task_id, task in tasks.items()
    }

    # Display pagination buttons (labels are escaped so "<"/">" render literally).
    task_page_prev, _, show_task_page_number, _, task_page_next = col_tasks.columns([1, 1, 1, 1, 1])
    show_task_page_number.button(str(task_page_number), disabled=True)
    if task_page_next.button("\>"):
        st.session_state.task_page_number += 1
    if task_page_prev.button("\<", disabled=task_page_number == 1):
        st.session_state.task_page_number = max(1, st.session_state.task_page_number - 1)

    # One tab per artifact type in the artifacts column.
    (
        tab_task,
        tab_step,
        tab_recording,
        tab_screenshot,
        tab_post_action_screenshot,
        tab_id_to_xpath,
        tab_element_tree,
        tab_element_tree_trimmed,
        tab_llm_prompt,
        tab_llm_request,
        tab_llm_response_parsed,
        tab_llm_response_raw,
        tab_html,
    ) = col_artifacts.tabs(
        [
            ":green[Task]",
            ":blue[Step]",
            ":violet[Recording]",
            ":rainbow[Screenshot]",
            ":rainbow[Action Screenshots]",
            ":red[ID -> XPath]",
            ":orange[Element Tree]",
            ":blue[Element Tree (Trimmed)]",
            ":yellow[LLM Prompt]",
            ":green[LLM Request]",
            ":blue[LLM Response (Parsed)]",
            ":violet[LLM Response (Raw)]",
            ":rainbow[Html (Raw)]",
        ]
    )

    tab_task_details, tab_task_steps, tab_task_action_results = tab_task.tabs(["Details", "Steps", "Action Results"])

    if selected_task:
        tab_task_details.json(selected_task)
    if selected_task_recording_uri:
        streamlit_show_recording(tab_recording, selected_task_recording_uri)

    if task_steps:
        # prev/next navigation plus one button per step in the middle column.
        col_steps_prev, _, col_steps_next = col_steps.columns([3, 1, 3])
        col_steps_prev.button(
            "prev", on_click=go_to_previous_step, key="previous_step_button", use_container_width=True
        )
        col_steps_next.button("next", on_click=go_to_next_step, key="next_step_button", use_container_width=True)

        step_id_buttons = {
            step["step_id"]: col_steps.button(
                f"{step['order']} - {step['retry_index']} - {step['step_id']}",
                on_click=select_step,
                args=(step,),
                use_container_width=True,
                type="primary" if selected_step and step["step_id"] == selected_step["step_id"] else "secondary",
            )
            for step in task_steps
        }

        # Flat table of all steps.
        df = pd.json_normalize(task_steps)
        tab_task_steps.dataframe(df, use_container_width=True, height=1000)

        # Flatten each step's action results into one row per action,
        # annotated with the owning step's id/order/retry_index.
        task_action_results = []
        for step in task_steps:
            output = step.get("output")
            step_id = step["step_id"]
            if output:
                step_action_results = output.get("action_results", [])
                for action_result in step_action_results:
                    task_action_results.append(
                        {
                            "step_id": step_id,
                            "order": step["order"],
                            "retry_index": step["retry_index"],
                            **action_result,
                        }
                    )
        df = pd.json_normalize(task_action_results)
        # Alphabetize columns so the table layout is stable across tasks.
        df = df.reindex(sorted(df.columns), axis=1)
        tab_task_action_results.dataframe(df, use_container_width=True, height=1000)

    if selected_step:
        tab_step.json(selected_step)

        # Index the step's artifacts by file name so each can be routed to its tab.
        # NOTE(review): artifacts sharing a file name silently overwrite each
        # other in this dict — confirm names are unique per step.
        artifacts_response = repository.get_artifacts(selected_task["task_id"], selected_step["step_id"])
        split_artifact_uris = [artifact["uri"].split("/") for artifact in artifacts_response]
        file_name_to_uris = {split_uri[-1]: "/".join(split_uri) for split_uri in split_artifact_uris}

        # Dispatch each artifact to the matching tab based on its file-name suffix.
        for file_name, uri in file_name_to_uris.items():
            file_name = file_name.lower()
            if file_name.endswith("screenshot_llm.png") or file_name.endswith("screenshot.png"):
                streamlit_content_safe(
                    tab_screenshot,
                    tab_screenshot.image,
                    read_artifact_safe(uri, is_image=True),
                    "No screenshot available.",
                    use_column_width=True,
                )
            elif file_name.endswith("screenshot_action.png"):
                streamlit_content_safe(
                    tab_post_action_screenshot,
                    tab_post_action_screenshot.image,
                    read_artifact_safe(uri, is_image=True),
                    "No action screenshot available.",
                    use_column_width=True,
                )
            elif file_name.endswith("id_xpath_map.json"):
                streamlit_content_safe(
                    tab_id_to_xpath, tab_id_to_xpath.json, read_artifact_safe(uri), "No ID -> XPath map available."
                )
            elif file_name.endswith("tree.json"):
                streamlit_content_safe(
                    tab_element_tree,
                    tab_element_tree.json,
                    read_artifact_safe(uri),
                    "No element tree available.",
                )
            elif file_name.endswith("tree_trimmed.json"):
                streamlit_content_safe(
                    tab_element_tree_trimmed,
                    tab_element_tree_trimmed.json,
                    read_artifact_safe(uri),
                    "No element tree trimmed available.",
                )
            elif file_name.endswith("llm_prompt.txt"):
                content = read_artifact_safe(uri)
                # this is a hacky way to call this generic method to get it working with st.text_area:
                # the content is passed both positionally (as the label) and as value=.
                streamlit_content_safe(
                    tab_llm_prompt,
                    tab_llm_prompt.text_area,
                    content,
                    "No LLM prompt available.",
                    value=content,
                    height=1000,
                    label_visibility="collapsed",
                )
                # tab_llm_prompt.text_area("collapsed", value=content, label_visibility="collapsed", height=1000)
            elif file_name.endswith("llm_request.json"):
                streamlit_content_safe(
                    tab_llm_request, tab_llm_request.json, read_artifact_safe(uri), "No LLM request available."
                )
            elif file_name.endswith("llm_response_parsed.json"):
                streamlit_content_safe(
                    tab_llm_response_parsed,
                    tab_llm_response_parsed.json,
                    read_artifact_safe(uri),
                    "No parsed LLM response available.",
                )
            elif file_name.endswith("llm_response.json"):
                streamlit_content_safe(
                    tab_llm_response_raw,
                    tab_llm_response_raw.json,
                    read_artifact_safe(uri),
                    "No raw LLM response available.",
                )
            elif file_name.endswith("html_scrape.html"):
                streamlit_content_safe(tab_html, tab_html.text, read_artifact_safe(uri), "No html available.")
            elif file_name.endswith("html_action.html"):
                streamlit_content_safe(tab_html, tab_html.text, read_artifact_safe(uri), "No html available.")
            else:
                st.write(f"Artifact {file_name} not supported.")
|
||||
38
streamlit_app/visualizer/styles.py
Normal file
38
streamlit_app/visualizer/styles.py
Normal file
@@ -0,0 +1,38 @@
|
||||
page_font_style = """
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=Roboto+Mono:wght@300&display=swap');
|
||||
|
||||
* {
|
||||
font-family: 'Roboto Mono', monospace;
|
||||
}
|
||||
</style>
|
||||
"""
|
||||
|
||||
button_style = """
|
||||
<style>
|
||||
/* Apply the custom styles to Streamlit button */
|
||||
.stButton > button {
|
||||
text-align: center; /* Center button text */
|
||||
font-size: 10px; /* Set font size here */
|
||||
border: none; /* No border */
|
||||
border-radius: 20px; /* Rounded corners */
|
||||
background-color: #67748E;
|
||||
color: ##3C414A;
|
||||
padding: 10px 10px; /* Some padding */
|
||||
box-shadow: 0 4px 8px rgba(0,0,0,0.2); /* Box shadow */
|
||||
}
|
||||
|
||||
.stButton > button[kind="primary"] {
|
||||
border: 3px solid #DCFF94; /* Red border */
|
||||
}
|
||||
|
||||
.stButton > button:disabled {
|
||||
background-color: #636B7D;
|
||||
}
|
||||
|
||||
.stButton > button:hover {
|
||||
background-color: #73678F;
|
||||
color: #B6E359;
|
||||
}
|
||||
</style>
|
||||
"""
|
||||
Reference in New Issue
Block a user