diff --git a/skyvern/forge/sdk/workflow/models/block.py b/skyvern/forge/sdk/workflow/models/block.py
index 6f1e9f43..7008f61c 100644
--- a/skyvern/forge/sdk/workflow/models/block.py
+++ b/skyvern/forge/sdk/workflow/models/block.py
@@ -111,6 +111,17 @@ class BlockStatus(StrEnum):
     timed_out = "timed_out"
 
 
+# Mapping from TaskV2Status to the corresponding BlockStatus. Declared once at
+# import time so it is not recreated on each block execution.
+TASKV2_TO_BLOCK_STATUS: dict[TaskV2Status, BlockStatus] = {
+    TaskV2Status.completed: BlockStatus.completed,
+    TaskV2Status.terminated: BlockStatus.terminated,
+    TaskV2Status.failed: BlockStatus.failed,
+    TaskV2Status.canceled: BlockStatus.canceled,
+    TaskV2Status.timed_out: BlockStatus.timed_out,
+}
+
+
 @dataclass(frozen=True)
 class BlockResult:
     success: bool
@@ -2544,11 +2555,18 @@
 
         if task_v2:
             result_dict = task_v2.output
+
+        # Determine block status from task status using module-level mapping
+        block_status = TASKV2_TO_BLOCK_STATUS.get(task_v2.status, BlockStatus.failed)
+        success = task_v2.status == TaskV2Status.completed
+        failure_reason = task_v2.failure_reason
+
+        # If continue_on_failure is True, we treat the block as successful even if the task failed
+        # This allows the workflow to continue execution despite this block's failure
         return await self.build_block_result(
-            success=True,
-            failure_reason=None,
+            success=success or self.continue_on_failure,
+            failure_reason=failure_reason,
             output_parameter_value=result_dict,
-            status=BlockStatus.completed,
+            status=block_status,
             workflow_run_block_id=workflow_run_block_id,
             organization_id=organization_id,
         )