2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "uipath"
version = "2.9.13"
version = "2.9.14"
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.11"
8 changes: 8 additions & 0 deletions src/uipath/_cli/cli_eval.py
@@ -205,6 +205,12 @@ def _resolve_model_settings_override(
    default=False,
    help="Resume execution from a previous suspended state",
)
+@click.option(
+    "--verbose",
+    is_flag=True,
+    default=False,
+    help="Include agent execution output (trace, result) in the output file",
+)
def eval(
    entrypoint: str | None,
    eval_set: str | None,
@@ -220,6 +226,7 @@ def eval(
    max_llm_concurrency: int,
    input_overrides: dict[str, Any],
    resume: bool,
+    verbose: bool,
) -> None:
    """Run an evaluation set against the agent.

@@ -272,6 +279,7 @@ def eval(
    eval_context.report_coverage = report_coverage
    eval_context.input_overrides = input_overrides
    eval_context.resume = resume
+    eval_context.verbose = verbose

    try:

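A rough usage sketch of the new flag via click's test runner. The import path mirrors the file changed above, but whether the decorated eval function is exposed exactly like this, and which entrypoint/eval-set arguments a real run requires, are assumptions; they are omitted here for brevity.

# Sketch only: exercise the new --verbose flag through click's CliRunner.
# Assumption: `eval` in uipath._cli.cli_eval is a click command object; required
# arguments/options (entrypoint, eval set, ...) are left out, so a real
# invocation would have to supply them.
from click.testing import CliRunner

from uipath._cli.cli_eval import eval as eval_cmd

runner = CliRunner()
result = runner.invoke(eval_cmd, ["--verbose"])
# With --verbose the agent execution output (trace, result) is included in the
# output file; the flag defaults to False, so existing runs are unaffected.
print(result.exit_code)
print(result.output)

The flag itself is just threaded onto eval_context.verbose (third hunk above); the runtime decides what extra data to write when it is set.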
12 changes: 10 additions & 2 deletions src/uipath/eval/runtime/_types.py
@@ -55,7 +55,7 @@ class EvaluationResultDto(BaseModel):
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    score: float
-    details: str | BaseModel | None = None
+    details: str | dict[str, Any] | None = None
    evaluation_time: float | None = None

    @model_serializer(mode="wrap")
@@ -82,9 +82,17 @@ def from_evaluation_result(
        else:
            score = evaluation_result.score

+        # Convert BaseModel details to dict so Pydantic doesn't lose subclass fields
+        if isinstance(evaluation_result.details, BaseModel):
+            details: str | dict[str, Any] | None = (
+                evaluation_result.details.model_dump()
+            )
+        else:
+            details = evaluation_result.details
+
        return cls(
            score=score,
-            details=evaluation_result.details,
+            details=details,
            evaluation_time=evaluation_result.evaluation_time,
        )

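The comment in the hunk above is worth unpacking: in Pydantic v2 a field annotated with a base class serializes by that annotation rather than by the runtime type, so subclass fields silently disappear, and a bare BaseModel annotation collapses to an empty dict. A minimal standalone sketch of the failure and the fix; ExactMatchDetails is a hypothetical payload, not a model from this repo.

# Standalone sketch of why details is model_dump()'ed before building the DTO.
from typing import Any

from pydantic import BaseModel


class ExactMatchDetails(BaseModel):  # hypothetical evaluator details payload
    expected: str
    actual: str


class OldDto(BaseModel):
    # Old annotation: serialization follows the declared type, and a bare
    # BaseModel declares no fields, so the payload collapses to {}.
    details: str | BaseModel | None = None


class NewDto(BaseModel):
    # New annotation: the caller converts the model to a plain dict up front.
    details: str | dict[str, Any] | None = None


d = ExactMatchDetails(expected="a", actual="b")

print(OldDto(details=d).model_dump())               # {'details': {}}
print(NewDto(details=d.model_dump()).model_dump())  # {'details': {'expected': 'a', 'actual': 'b'}}

Pydantic's SerializeAsAny marker is another way to opt into duck-typed serialization here; dumping to a dict at construction time keeps the DTO schema free of model-specific typing.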
23 changes: 12 additions & 11 deletions tests/cli/eval/test_evaluate.py
@@ -120,24 +120,25 @@ async def dispose(self) -> None:
    )

    # Assert that the output is json-serializable
-    UiPathEvalOutput.model_validate(result.output).model_dump_json()
+    eval_output = UiPathEvalOutput.model_validate(result.output)
+    eval_output.model_dump_json()
    assert result.output
    output_dict = (
        result.output.model_dump()
        if isinstance(result.output, BaseModel)
        else result.output
    )
    assert isinstance(output_dict, dict)
-    assert (
-        output_dict["evaluationSetResults"][0]["evaluationRunResults"][0]["result"][
-            "score"
-        ]
-        == 1.0
-    )
-    assert (
-        output_dict["evaluationSetResults"][0]["evaluationRunResults"][0]["evaluatorId"]
-        == "ExactMatchEvaluator"
-    )
+    first_result = output_dict["evaluationSetResults"][0]["evaluationRunResults"][0]
+    assert first_result["result"]["score"] == 1.0
+    assert first_result["evaluatorId"] == "ExactMatchEvaluator"
+    # Verify details are properly serialized (not empty dict)
+    details = first_result["result"].get("details")
+    if details is not None:
+        assert details != {}, (
+            "details should not be an empty dict - BaseModel serialization bug"
+        )
+        assert isinstance(details, (str, dict))


async def test_eval_runtime_generates_uuid_when_no_custom_id():
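A side note on the camelCase keys the test indexes ("evaluationSetResults", "evaluationRunResults", "evaluatorId"): they are consistent with the alias_generator=to_camel config shown on EvaluationResultDto above, assuming the output models dump by alias. A standalone sketch of that pattern; RunResult is illustrative, not a model from this repo.

# Standalone sketch of the to_camel alias pattern used by the DTOs above.
from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class RunResult(BaseModel):
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    evaluator_id: str
    evaluation_time: float | None = None


# populate_by_name=True allows construction with the snake_case field names...
r = RunResult(evaluator_id="ExactMatchEvaluator", evaluation_time=0.5)

# ...while dumping by alias yields the camelCase keys the test looks up.
print(r.model_dump(by_alias=True))
# {'evaluatorId': 'ExactMatchEvaluator', 'evaluationTime': 0.5}

# The generated aliases are also accepted on input.
print(RunResult.model_validate({"evaluatorId": "X"}).evaluator_id)  # X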
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default.