Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions packages/sdk/server-ai/src/ldai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,12 @@
from ldai.providers import (
AgentGraphResult,
AgentGraphRunner,
AgentGraphRunnerResult,
AgentResult,
AgentRunner,
GraphMetrics,
GraphMetricSummary,
ManagedGraphResult,
ManagedResult,
Runner,
RunnerResult,
Expand All @@ -51,6 +55,10 @@
'AgentGraphRunner',
'AgentResult',
'AgentGraphResult',
'AgentGraphRunnerResult',
'GraphMetrics',
'GraphMetricSummary',
'ManagedGraphResult',
'ManagedResult',
'Runner',
'RunnerResult',
Expand Down
35 changes: 26 additions & 9 deletions packages/sdk/server-ai/src/ldai/managed_agent_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,16 @@
from typing import Any

from ldai.providers import AgentGraphResult, AgentGraphRunner
from ldai.providers.types import GraphMetricSummary, ManagedGraphResult


class ManagedAgentGraph:
"""
LaunchDarkly managed wrapper for AI agent graph execution.

Holds an AgentGraphRunner. Auto-tracking of path,
tool calls, handoffs, latency, and invocation success/failure is handled
by the runner implementation.
Holds an AgentGraphRunner. Wraps the runner result in a
:class:`~ldai.providers.types.ManagedGraphResult` and builds a
:class:`~ldai.providers.types.GraphMetricSummary` from the runner's metrics.

Obtain an instance via ``LDAIClient.create_agent_graph()``.
"""
Expand All @@ -27,17 +28,33 @@ def __init__(
"""
self._runner = runner

async def run(self, input: Any) -> AgentGraphResult:
async def run(self, input: Any) -> ManagedGraphResult:
"""
Run the agent graph with the given input.

Delegates to the underlying AgentGraphRunner, which handles
execution and all auto-tracking internally.

:param input: The input prompt or structured input for the graph
:return: AgentGraphResult containing the output, raw response, and metrics
:return: ManagedGraphResult containing the content, metric summary, raw response,
and an optional evaluations task (currently always ``None`` for graphs —
per-graph evaluations will be added in a future PR).
"""
return await self._runner.run(input)
result: AgentGraphResult = await self._runner.run(input)

# Build a GraphMetricSummary from the runner result's LDAIMetrics.
# path and node_metrics will be populated once graph runners are migrated
# to return AgentGraphRunnerResult with GraphMetrics (PR 11).
metrics = result.metrics
summary = GraphMetricSummary(
success=metrics.success,
usage=metrics.usage,
duration_ms=getattr(metrics, 'duration_ms', None),
)

return ManagedGraphResult(
content=result.output,
metrics=summary,
raw=result.raw,
evaluations=None,
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This will be added in a follow up PR #153

)

def get_agent_graph_runner(self) -> AgentGraphRunner:
"""
Expand Down
8 changes: 8 additions & 0 deletions packages/sdk/server-ai/src/ldai/providers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,13 @@
from ldai.providers.runner_factory import RunnerFactory
from ldai.providers.types import (
AgentGraphResult,
AgentGraphRunnerResult,
AgentResult,
GraphMetrics,
GraphMetricSummary,
JudgeResult,
LDAIMetrics,
ManagedGraphResult,
ManagedResult,
ModelResponse,
RunnerResult,
Expand All @@ -20,10 +24,14 @@
'AIProvider',
'AgentGraphResult',
'AgentGraphRunner',
'AgentGraphRunnerResult',
'AgentResult',
'AgentRunner',
'GraphMetrics',
'GraphMetricSummary',
'JudgeResult',
'LDAIMetrics',
'ManagedGraphResult',
'ManagedResult',
'ModelResponse',
'ModelRunner',
Expand Down
76 changes: 75 additions & 1 deletion packages/sdk/server-ai/src/ldai/providers/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from __future__ import annotations

import asyncio
from dataclasses import dataclass
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional

from ldai.models import LDMessage
Expand Down Expand Up @@ -114,6 +114,80 @@ class StructuredResponse:
metrics: LDAIMetrics


@dataclass
class GraphMetrics:
    """Raw metrics captured from a single agent graph run.

    Carried as the ``metrics`` field of :class:`AgentGraphRunnerResult`;
    unlike :class:`GraphMetricSummary`, this is the runner-level view and
    has no resumption token.
    """

    success: bool
    """Whether the graph run succeeded."""

    path: List[str] = field(default_factory=list)
    """Ordered list of node keys visited during the run."""

    duration_ms: Optional[int] = None
    """Wall-clock duration of the graph run in milliseconds."""

    usage: Optional[TokenUsage] = None
    """Optional aggregate token usage information across all nodes in the graph run."""

    node_metrics: Dict[str, LDAIMetrics] = field(default_factory=dict)
    """Per-node metrics keyed by node key."""


@dataclass
class GraphMetricSummary:
    """Summary of metrics for an agent graph run.

    Exposed to consumers as the ``metrics`` field of
    :class:`ManagedGraphResult`. Mirrors :class:`GraphMetrics` and adds an
    optional ``resumption_token`` for cross-process resumption.
    """

    success: bool
    """Whether the graph run succeeded."""

    path: List[str] = field(default_factory=list)
    """Ordered list of node keys visited during the run."""

    duration_ms: Optional[int] = None
    """Wall-clock duration of the graph run in milliseconds."""

    usage: Optional[TokenUsage] = None
    """Optional aggregate token usage information across all nodes in the graph run."""

    node_metrics: Dict[str, LDAIMetrics] = field(default_factory=dict)
    """Per-node metrics keyed by node key."""

    resumption_token: Optional[str] = None
    """Optional resumption token from the graph tracker for cross-process resumption."""


@dataclass
class ManagedGraphResult:
    """Result of a managed agent graph run, including metrics and optional judge evaluations.

    Returned by ``ManagedAgentGraph.run()``. The ``evaluations`` task is
    currently always ``None`` for graphs; per-graph evaluations are planned
    for a future release.
    """

    content: str
    """The graph's final output content."""

    metrics: GraphMetricSummary
    """Aggregated metric summary from the graph tracker for this run."""

    raw: Optional[Any] = None
    """Optional provider-native response object for advanced consumers."""

    evaluations: Optional[asyncio.Task[List[JudgeResult]]] = None
    """Optional asyncio Task that resolves to the list of :class:`JudgeResult` instances when awaited."""


@dataclass
class AgentGraphRunnerResult:
    """Result of an agent graph runner invocation.

    Runner-level counterpart of :class:`ManagedGraphResult`: carries raw
    :class:`GraphMetrics` rather than a consumer-facing summary.
    """

    content: str
    """The graph's final output content."""

    metrics: GraphMetrics
    """Metrics from the graph run."""

    raw: Optional[Any] = None
    """Optional provider-native response object for advanced consumers."""


@dataclass
class JudgeResult:
"""Contains the result of a single judge evaluation."""
Expand Down
8 changes: 5 additions & 3 deletions packages/sdk/server-ai/tests/test_managed_agent_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from ldclient import Config, Context, LDClient
from ldclient.integrations.test_data import TestData

from ldai import LDAIClient, ManagedAgentGraph
from ldai import LDAIClient, ManagedAgentGraph, ManagedGraphResult
from ldai.providers.types import LDAIMetrics
from ldai.providers import AgentGraphResult, AgentGraphRunner, ToolRegistry

Expand All @@ -31,7 +31,8 @@ async def test_managed_agent_graph_run_delegates_to_runner():
runner = StubAgentGraphRunner("hello world")
managed = ManagedAgentGraph(runner)
result = await managed.run("test input")
assert result.output == "hello world"
assert isinstance(result, ManagedGraphResult)
assert result.content == "hello world"
assert result.metrics.success is True


Expand Down Expand Up @@ -172,7 +173,8 @@ async def test_create_agent_graph_run_produces_result(ldai_client: LDAIClient):

assert managed is not None
result = await managed.run("find restaurants")
assert result.output == "final answer"
assert isinstance(result, ManagedGraphResult)
assert result.content == "final answer"
assert result.metrics.success is True


Expand Down
Loading