From 5c4181c8ee586b504348c879564af896682cd3b0 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:03:34 -0500 Subject: [PATCH 01/24] feat!: Add ManagedResult, RunnerResult, and Runner protocol; rename invoke() to run() Co-Authored-By: Claude Sonnet 4.6 --- packages/sdk/server-ai/src/ldai/__init__.py | 9 +- .../sdk/server-ai/src/ldai/managed_agent.py | 30 ++- .../sdk/server-ai/src/ldai/managed_model.py | 95 +++++++-- .../server-ai/src/ldai/providers/__init__.py | 6 + .../server-ai/src/ldai/providers/runner.py | 37 ++++ .../sdk/server-ai/src/ldai/providers/types.py | 64 +++++- packages/sdk/server-ai/src/ldai/tracker.py | 83 ++++++-- .../sdk/server-ai/tests/test_managed_agent.py | 19 +- .../sdk/server-ai/tests/test_managed_model.py | 184 ++++++++++-------- 9 files changed, 406 insertions(+), 121 deletions(-) create mode 100644 packages/sdk/server-ai/src/ldai/providers/runner.py diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py index 405ec5a8..f02cee30 100644 --- a/packages/sdk/server-ai/src/ldai/__init__.py +++ b/packages/sdk/server-ai/src/ldai/__init__.py @@ -36,10 +36,13 @@ AgentGraphRunner, AgentResult, AgentRunner, + ManagedResult, + Runner, + RunnerResult, ToolRegistry, ) from ldai.providers.types import JudgeResult -from ldai.tracker import AIGraphTracker +from ldai.tracker import AIGraphTracker, LDAIMetricSummary __all__ = [ 'LDAIClient', @@ -48,6 +51,10 @@ 'AgentGraphRunner', 'AgentResult', 'AgentGraphResult', + 'ManagedResult', + 'Runner', + 'RunnerResult', + 'LDAIMetricSummary', 'ToolRegistry', 'AIAgentConfig', 'AIAgentConfigDefault', diff --git a/packages/sdk/server-ai/src/ldai/managed_agent.py b/packages/sdk/server-ai/src/ldai/managed_agent.py index ab3ee5e6..a2abdf98 100644 --- a/packages/sdk/server-ai/src/ldai/managed_agent.py +++ b/packages/sdk/server-ai/src/ldai/managed_agent.py @@ -1,43 +1,55 @@ """ManagedAgent — LaunchDarkly managed wrapper for agent invocations.""" +from typing import Union + from ldai.models import AIAgentConfig from ldai.providers import AgentResult, AgentRunner +from ldai.providers.runner import Runner +from ldai.providers.types import ManagedResult, RunnerResult class ManagedAgent: """ LaunchDarkly managed wrapper for AI agent invocations. - Holds an AgentRunner. Handles tracking automatically via ``create_tracker()``. + Holds an AgentRunner or Runner. Handles tracking automatically via + ``create_tracker()``. Obtain an instance via ``LDAIClient.create_agent()``. """ def __init__( self, ai_config: AIAgentConfig, - agent_runner: AgentRunner, + agent_runner: Union[Runner, AgentRunner], ): self._ai_config = ai_config self._agent_runner = agent_runner - async def run(self, input: str) -> AgentResult: + async def run(self, input: str) -> ManagedResult: """ Run the agent with the given input string. 
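+        Example (illustrative; assumes an agent obtained via
+        ``LDAIClient.create_agent()``)::
+
+            result = await agent.run("Summarize the incident report")
+            print(result.content, result.metrics.success)
+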
:param input: The user prompt or input to the agent - :return: AgentResult containing the agent's output and metrics + :return: ManagedResult containing the agent's output and metric summary """ tracker = self._ai_config.create_tracker() - return await tracker.track_metrics_of_async( - lambda result: result.metrics, + result: Union[RunnerResult, AgentResult] = await tracker.track_metrics_of_async( + lambda r: r.metrics, lambda: self._agent_runner.run(input), ) + # Support both RunnerResult (content) and legacy AgentResult (output) + content = result.content if isinstance(result, RunnerResult) else result.output # type: ignore[union-attr] + return ManagedResult( + content=content, + metrics=tracker.get_summary(), + raw=result.raw, + ) - def get_agent_runner(self) -> AgentRunner: + def get_agent_runner(self) -> Union[Runner, AgentRunner]: """ - Return the underlying AgentRunner for advanced use. + Return the underlying runner for advanced use. - :return: The AgentRunner instance. + :return: The Runner or AgentRunner instance. """ return self._agent_runner diff --git a/packages/sdk/server-ai/src/ldai/managed_model.py b/packages/sdk/server-ai/src/ldai/managed_model.py index 9cfb503a..3d2949c3 100644 --- a/packages/sdk/server-ai/src/ldai/managed_model.py +++ b/packages/sdk/server-ai/src/ldai/managed_model.py @@ -1,10 +1,12 @@ import asyncio -from typing import List, Optional +import warnings +from typing import List, Union from ldai import log from ldai.models import AICompletionConfig, LDMessage from ldai.providers.model_runner import ModelRunner -from ldai.providers.types import JudgeResult, ModelResponse +from ldai.providers.runner import Runner +from ldai.providers.types import JudgeResult, ManagedResult, ModelResponse, RunnerResult from ldai.tracker import LDAIConfigTracker @@ -12,31 +14,100 @@ class ManagedModel: """ LaunchDarkly managed wrapper for AI model invocations. - Holds a ModelRunner. Handles conversation management, judge evaluation - dispatch, and tracking automatically via ``create_tracker()``. + Holds a Runner (or legacy ModelRunner). Handles conversation management, + judge evaluation dispatch, and tracking automatically via ``create_tracker()``. Obtain an instance via ``LDAIClient.create_model()``. """ def __init__( self, ai_config: AICompletionConfig, - model_runner: ModelRunner, + model_runner: Union[Runner, ModelRunner], ): self._ai_config = ai_config self._model_runner = model_runner self._messages: List[LDMessage] = [] - async def invoke(self, prompt: str) -> ModelResponse: + async def run(self, prompt: str) -> ManagedResult: """ - Invoke the model with a prompt string. + Run the model with a prompt string. Appends the prompt to the conversation history, prepends any system messages from the config, delegates to the runner, and appends the response to the history. 
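+        Example (illustrative; assumes a model obtained via
+        ``LDAIClient.create_model()``)::
+
+            result = await model.run("Hello")
+            print(result.content)
+            if result.evaluations is not None:
+                judge_results = await result.evaluations
+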
+ :param prompt: The user prompt to send to the model + :return: ManagedResult containing the model's response, metric summary, + and an optional evaluations task + """ + tracker = self._ai_config.create_tracker() + + user_message = LDMessage(role='user', content=prompt) + self._messages.append(user_message) + + config_messages = self._ai_config.messages or [] + all_messages = config_messages + self._messages + + result: Union[RunnerResult, ModelResponse] = await tracker.track_metrics_of_async( + lambda r: r.metrics, + lambda: self._invoke_runner(all_messages), + ) + + # Support both new RunnerResult and legacy ModelResponse + if isinstance(result, RunnerResult): + content = result.content + raw = result.raw + parsed = result.parsed + assistant_message = LDMessage(role='assistant', content=content) + else: + content = result.message.content + raw = getattr(result, 'raw', None) + parsed = getattr(result, 'parsed', None) + assistant_message = result.message + + input_text = '\r\n'.join(m.content for m in self._messages) if self._messages else '' + + evaluations_task = self._track_judge_results(tracker, input_text, content) + + self._messages.append(assistant_message) + + return ManagedResult( + content=content, + metrics=tracker.get_summary(), + raw=raw, + parsed=parsed, + evaluations=evaluations_task, + ) + + async def _invoke_runner( + self, all_messages: List[LDMessage] + ) -> Union[RunnerResult, ModelResponse]: + """ + Delegate to the runner. Supports both the new ``Runner`` protocol + (``run(messages) → RunnerResult``) and the legacy ``ModelRunner`` + (``invoke_model(messages) → ModelResponse``). + """ + if isinstance(self._model_runner, Runner): + return await self._model_runner.run(all_messages) + # Legacy ModelRunner path + return await self._model_runner.invoke_model(all_messages) # type: ignore[union-attr] + + async def invoke(self, prompt: str) -> ModelResponse: + """ + Invoke the model with a prompt string. + + .. deprecated:: + Use :meth:`run` instead. This method will be removed in a future + release once the migration to :class:`ManagedResult` is complete. + :param prompt: The user prompt to send to the model :return: ModelResponse containing the model's response and metrics """ + warnings.warn( + "ManagedModel.invoke() is deprecated. Use run() instead.", + DeprecationWarning, + stacklevel=2, + ) tracker = self._ai_config.create_tracker() user_message = LDMessage(role='user', content=prompt) @@ -45,9 +116,9 @@ async def invoke(self, prompt: str) -> ModelResponse: config_messages = self._ai_config.messages or [] all_messages = config_messages + self._messages - response = await tracker.track_metrics_of_async( + response: ModelResponse = await tracker.track_metrics_of_async( lambda result: result.metrics, - lambda: self._model_runner.invoke_model(all_messages), + lambda: self._model_runner.invoke_model(all_messages), # type: ignore[union-attr] ) input_text = '\r\n'.join(m.content for m in self._messages) if self._messages else '' @@ -98,11 +169,11 @@ def append_messages(self, messages: List[LDMessage]) -> None: """ self._messages.extend(messages) - def get_model_runner(self) -> ModelRunner: + def get_model_runner(self) -> Union[Runner, ModelRunner]: """ - Return the underlying ModelRunner for advanced use. + Return the underlying runner for advanced use. - :return: The ModelRunner instance. + :return: The Runner or legacy ModelRunner instance. 
""" return self._model_runner diff --git a/packages/sdk/server-ai/src/ldai/providers/__init__.py b/packages/sdk/server-ai/src/ldai/providers/__init__.py index b2bfa72e..6f472c69 100644 --- a/packages/sdk/server-ai/src/ldai/providers/__init__.py +++ b/packages/sdk/server-ai/src/ldai/providers/__init__.py @@ -2,13 +2,16 @@ from ldai.providers.agent_runner import AgentRunner from ldai.providers.ai_provider import AIProvider from ldai.providers.model_runner import ModelRunner +from ldai.providers.runner import Runner from ldai.providers.runner_factory import RunnerFactory from ldai.providers.types import ( AgentGraphResult, AgentResult, JudgeResult, LDAIMetrics, + ManagedResult, ModelResponse, + RunnerResult, StructuredResponse, ToolRegistry, ) @@ -21,9 +24,12 @@ 'AgentRunner', 'JudgeResult', 'LDAIMetrics', + 'ManagedResult', 'ModelResponse', 'ModelRunner', + 'Runner', 'RunnerFactory', + 'RunnerResult', 'StructuredResponse', 'ToolRegistry', ] diff --git a/packages/sdk/server-ai/src/ldai/providers/runner.py b/packages/sdk/server-ai/src/ldai/providers/runner.py new file mode 100644 index 00000000..c86a8fe8 --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/providers/runner.py @@ -0,0 +1,37 @@ +"""Unified Runner protocol for AI providers.""" + +from typing import Any, Dict, Optional, Protocol, runtime_checkable + +from ldai.providers.types import RunnerResult + + +@runtime_checkable +class Runner(Protocol): + """ + Unified runtime capability interface for all AI provider runners. + + A :class:`Runner` is a focused, configured object that performs a single + AI invocation. Both model runners and agent runners implement this protocol. + + :param input: The input to the runner (string prompt, list of messages, or + other provider-specific input type). + :param output_type: Optional JSON schema dict that requests structured output. + When provided, the runner populates :attr:`~RunnerResult.parsed` on the + returned :class:`RunnerResult`. + :return: :class:`RunnerResult` containing ``content``, ``metrics``, and + optionally ``raw`` and ``parsed``. + """ + + async def run( + self, + input: Any, + output_type: Optional[Dict[str, Any]] = None, + ) -> RunnerResult: + """ + Execute the runner with the given input. + + :param input: The input to the runner. + :param output_type: Optional JSON schema for structured output. + :return: RunnerResult containing content, metrics, raw, and parsed fields. + """ + ... diff --git a/packages/sdk/server-ai/src/ldai/providers/types.py b/packages/sdk/server-ai/src/ldai/providers/types.py index aa537880..72f7198c 100644 --- a/packages/sdk/server-ai/src/ldai/providers/types.py +++ b/packages/sdk/server-ai/src/ldai/providers/types.py @@ -7,7 +7,7 @@ from typing import Any, Callable, Dict, List, Optional from ldai.models import LDMessage -from ldai.tracker import TokenUsage +from ldai.tracker import LDAIMetricSummary, TokenUsage # Type alias for a registry of tools available to an agent. # Keys are tool names; values are the callable implementations. @@ -17,10 +17,21 @@ @dataclass class LDAIMetrics: """ - Metrics information for AI operations that includes success status and token usage. + Metrics information for AI operations that includes success status, token + usage, and optional enrichment fields populated by runners. + + ``tool_calls`` is a list of tool-call names observed during the invocation + (populated by agent runners that execute tool loops). 
+ + ``duration_ms`` is the wall-clock duration of the runner invocation in + milliseconds, when measured by the runner itself rather than externally. + When set, the tracker uses this value directly instead of measuring elapsed + time. """ success: bool usage: Optional[TokenUsage] = None + tool_calls: Optional[List[str]] = None + duration_ms: Optional[int] = None def to_dict(self) -> Dict[str, Any]: """ @@ -35,13 +46,55 @@ def to_dict(self) -> Dict[str, Any]: 'input': self.usage.input, 'output': self.usage.output, } + if self.tool_calls is not None: + result['toolCalls'] = self.tool_calls + if self.duration_ms is not None: + result['durationMs'] = self.duration_ms return result +@dataclass +class RunnerResult: + """ + Result returned by a :class:`~ldai.providers.runner.Runner` from a single + invocation. + + This is the unified return type for all Runner implementations. + ``evaluations`` is intentionally absent — judge evaluations are dispatched + by the managed layer and live on :class:`ManagedResult`. + """ + content: str + metrics: LDAIMetrics + raw: Optional[Any] = None + parsed: Optional[Dict[str, Any]] = None + + +@dataclass +class ManagedResult: + """ + Result returned by the managed layer (:class:`~ldai.ManagedModel` / + :class:`~ldai.ManagedAgent`) after a single invocation. + + ``metrics`` is an :class:`~ldai.tracker.LDAIMetricSummary` (from + ``tracker.get_summary()``) rather than a raw :class:`LDAIMetrics`. + ``evaluations`` is an optional asyncio Task that resolves to a list of + :class:`JudgeResult` instances when awaited. + """ + content: str + metrics: LDAIMetricSummary + raw: Optional[Any] = None + parsed: Optional[Dict[str, Any]] = None + evaluations: Optional[asyncio.Task[List[JudgeResult]]] = None + + @dataclass class ModelResponse: """ Response from a model invocation. + + .. deprecated:: + Use :class:`RunnerResult` (from a runner) and :class:`ManagedResult` + (from the managed layer) instead. """ message: LDMessage metrics: LDAIMetrics @@ -52,6 +105,9 @@ class ModelResponse: class StructuredResponse: """ Structured response from AI models. + + .. deprecated:: + Structured output is now represented by :attr:`RunnerResult.parsed`. """ data: Dict[str, Any] raw_response: str @@ -96,6 +152,10 @@ def to_dict(self) -> Dict[str, Any]: class AgentResult: """ Result from a single-agent run. + + .. deprecated:: + Use :class:`ManagedResult` (managed layer) or :class:`RunnerResult` + (runner layer) instead. """ output: str raw: Any diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py index 0f5a32c5..608297d3 100644 --- a/packages/sdk/server-ai/src/ldai/tracker.py +++ b/packages/sdk/server-ai/src/ldai/tracker.py @@ -41,15 +41,31 @@ class LDAIMetricSummary: """ def __init__(self): - self._duration = None - self._success = None - self._feedback = None - self._usage = None - self._time_to_first_token = None + self._duration_ms: Optional[int] = None + self._success: Optional[bool] = None + self._feedback: Optional[Dict[str, FeedbackKind]] = None + self._usage: Optional[TokenUsage] = None + self._time_to_first_token: Optional[int] = None + self._tool_calls: Optional[List[str]] = None + self._resumption_token: Optional[str] = None + + @property + def duration_ms(self) -> Optional[int]: + """Duration of the AI operation in milliseconds.""" + return self._duration_ms @property def duration(self) -> Optional[int]: - return self._duration + """ + .. deprecated:: + Use :attr:`duration_ms` instead. 
+ """ + warnings.warn( + "LDAIMetricSummary.duration is deprecated. Use duration_ms instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._duration_ms @property def success(self) -> Optional[bool]: @@ -67,6 +83,20 @@ def usage(self) -> Optional[TokenUsage]: def time_to_first_token(self) -> Optional[int]: return self._time_to_first_token + @property + def tool_calls(self) -> Optional[List[str]]: + """List of tool keys that were invoked during this operation.""" + return self._tool_calls + + @property + def resumption_token(self) -> Optional[str]: + """ + URL-safe Base64-encoded resumption token captured at tracker + instantiation. Useful for deferred feedback flows where a downstream + process needs to associate events with the original execution. + """ + return self._resumption_token + class LDAIConfigTracker: """ @@ -107,8 +137,10 @@ def __init__( self._provider_name = provider_name self._context = context self._graph_key = graph_key - self._summary = LDAIMetricSummary() self._run_id = run_id + self._summary = LDAIMetricSummary() + # Capture resumption_token immediately so it's available on the summary at instantiation. + self._summary._resumption_token = self.resumption_token @property def resumption_token(self) -> str: @@ -200,10 +232,10 @@ def track_duration(self, duration: int) -> None: :param duration: Duration in milliseconds. """ - if self._summary.duration is not None: + if self._summary.duration_ms is not None: log.warning("Duration has already been tracked for this execution. %s", self.__get_track_data()) return - self._summary._duration = duration + self._summary._duration_ms = duration self._ld_client.track( "$ld:ai:duration:total", self._context, self.__get_track_data(), duration ) @@ -259,6 +291,8 @@ def _track_from_metrics_extractor( self.track_error() if metrics.usage: self.track_tokens(metrics.usage) + if getattr(metrics, 'tool_calls', None): + self.track_tool_calls(metrics.tool_calls) return result def track_metrics_of( @@ -278,6 +312,10 @@ def track_metrics_of( For async operations, use :meth:`track_metrics_of_async`. + When the extracted :class:`~ldai.providers.types.LDAIMetrics` object has a + non-``None`` ``duration_ms`` field, that value is used as the measured duration + instead of the wall-clock elapsed time. + :param metrics_extractor: Function that extracts LDAIMetrics from the operation result :param func: Synchronous callable that runs the operation :return: The result of the operation @@ -291,8 +329,10 @@ def track_metrics_of( self.track_error() raise err - duration = (time.perf_counter_ns() - start_ns) // 1_000_000 - self.track_duration(duration) + elapsed_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 + metrics = metrics_extractor(result) + reported_ms = getattr(metrics, 'duration_ms', None) if metrics else None + self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) return self._track_from_metrics_extractor(result, metrics_extractor) async def track_metrics_of_async(self, metrics_extractor, func): @@ -301,6 +341,10 @@ async def track_metrics_of_async(self, metrics_extractor, func): Same event semantics as :meth:`track_metrics_of`. + When the extracted :class:`~ldai.providers.types.LDAIMetrics` object has a + non-``None`` ``duration_ms`` field, that value is used as the measured duration + instead of the wall-clock elapsed time. 
+ :param metrics_extractor: Function that extracts LDAIMetrics from the operation result :param func: Async callable or zero-arg callable that returns an awaitable when called :return: The result of the operation @@ -315,8 +359,10 @@ async def track_metrics_of_async(self, metrics_extractor, func): self.track_error() raise err - duration = (time.perf_counter_ns() - start_ns) // 1_000_000 - self.track_duration(duration) + elapsed_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 + metrics = metrics_extractor(result) + reported_ms = getattr(metrics, 'duration_ms', None) if metrics else None + self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) return self._track_from_metrics_extractor(result, metrics_extractor) def track_judge_result(self, judge_result: Any) -> None: @@ -364,6 +410,17 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: 1, ) + def track_tool_calls(self, tool_calls: List[str]) -> None: + """ + Track the tool calls made during an AI operation. + + :param tool_calls: List of tool call names. + """ + if self._summary.tool_calls is not None: + log.warning("Tool calls have already been tracked for this execution. %s", self.__get_track_data()) + return + self._summary._tool_calls = list(tool_calls) + def track_success(self) -> None: """ Track a successful AI generation. diff --git a/packages/sdk/server-ai/tests/test_managed_agent.py b/packages/sdk/server-ai/tests/test_managed_agent.py index 144641fc..c4b94ea5 100644 --- a/packages/sdk/server-ai/tests/test_managed_agent.py +++ b/packages/sdk/server-ai/tests/test_managed_agent.py @@ -7,12 +7,19 @@ from ldai.managed_agent import ManagedAgent from ldai.models import AIAgentConfig, AIAgentConfigDefault, ModelConfig, ProviderConfig from ldai.providers import AgentResult -from ldai.providers.types import LDAIMetrics +from ldai.providers.types import LDAIMetrics, ManagedResult +from ldai.tracker import LDAIMetricSummary from ldclient import Config, Context, LDClient from ldclient.integrations.test_data import TestData +def _make_summary(success: bool = True) -> LDAIMetricSummary: + summary = LDAIMetricSummary() + summary._success = success + return summary + + @pytest.fixture def td() -> TestData: td = TestData.data_source() @@ -53,7 +60,7 @@ class TestManagedAgentRun: @pytest.mark.asyncio async def test_run_delegates_to_agent_runner(self): - """Should delegate run() to the underlying AgentRunner.""" + """Should delegate run() to the underlying AgentRunner and return ManagedResult.""" mock_config = MagicMock(spec=AIAgentConfig) mock_tracker = MagicMock() mock_tracker.track_metrics_of_async = AsyncMock( @@ -63,6 +70,7 @@ async def test_run_delegates_to_agent_runner(self): metrics=LDAIMetrics(success=True, usage=None), ) ) + mock_tracker.get_summary = MagicMock(return_value=_make_summary(True)) mock_config.create_tracker = MagicMock(return_value=mock_tracker) mock_runner = MagicMock() mock_runner.run = AsyncMock( @@ -76,7 +84,8 @@ async def test_run_delegates_to_agent_runner(self): agent = ManagedAgent(mock_config, mock_runner) result = await agent.run("Hello") - assert result.output == "Test response" + assert isinstance(result, ManagedResult) + assert result.content == "Test response" assert result.metrics.success is True mock_config.create_tracker.assert_called_once() mock_tracker.track_metrics_of_async.assert_called_once() @@ -93,6 +102,7 @@ async def test_run_uses_create_tracker_for_fresh_tracker(self): metrics=LDAIMetrics(success=True, usage=None), ) ) + fresh_tracker.get_summary = 
MagicMock(return_value=_make_summary(True)) mock_config.create_tracker = MagicMock(return_value=fresh_tracker) mock_runner = MagicMock() @@ -100,7 +110,8 @@ async def test_run_uses_create_tracker_for_fresh_tracker(self): agent = ManagedAgent(mock_config, mock_runner) result = await agent.run("Hello") - assert result.output == "Fresh tracker response" + assert isinstance(result, ManagedResult) + assert result.content == "Fresh tracker response" mock_config.create_tracker.assert_called_once() fresh_tracker.track_metrics_of_async.assert_called_once() diff --git a/packages/sdk/server-ai/tests/test_managed_model.py b/packages/sdk/server-ai/tests/test_managed_model.py index 36802a14..f81076c5 100644 --- a/packages/sdk/server-ai/tests/test_managed_model.py +++ b/packages/sdk/server-ai/tests/test_managed_model.py @@ -2,15 +2,15 @@ import asyncio from typing import List -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, MagicMock import pytest from ldai.evaluator import Evaluator from ldai.managed_model import ManagedModel from ldai.models import AICompletionConfig, LDMessage, ModelConfig, ProviderConfig -from ldai.providers.types import JudgeResult, LDAIMetrics, ModelResponse -from ldai.tracker import LDAIConfigTracker +from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, ModelResponse +from ldai.tracker import LDAIConfigTracker, LDAIMetricSummary @@ -21,29 +21,46 @@ def _make_model_response(content: str = 'response text') -> ModelResponse: ) -class TestManagedModelInvokeReturnsImmediately: - """invoke() must return before the evaluations task resolves.""" +def _make_summary() -> LDAIMetricSummary: + summary = LDAIMetricSummary() + summary._success = True + return summary + + +def _make_config_with_tracker(evaluator: Evaluator) -> tuple[AICompletionConfig, MagicMock]: + """Build an AICompletionConfig with a fully-mocked tracker.""" + mock_tracker = MagicMock(spec=LDAIConfigTracker) + mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) + mock_tracker.get_summary = MagicMock(return_value=_make_summary()) + config = AICompletionConfig( + key='test-config', + enabled=True, + create_tracker=MagicMock(return_value=mock_tracker), + model=ModelConfig('gpt-4'), + provider=ProviderConfig('openai'), + messages=[], + evaluator=evaluator, + ) + return config, mock_tracker - @pytest.mark.asyncio - async def test_invoke_returns_before_evaluations_resolve(self): - """invoke() should return a ModelResponse before evaluations complete.""" - # Set up a barrier so the evaluation coroutine doesn't complete until we release it - barrier = asyncio.Event() - async def _slow_evaluate(input_text: str, output_text: str) -> List[JudgeResult]: - await barrier.wait() - return [] +class TestManagedModelRunReturnsImmediately: + """run() must return before the evaluations task resolves.""" + @pytest.mark.asyncio + async def test_run_returns_managed_result(self): + """run() should return a ManagedResult with content from the runner.""" evaluator = MagicMock(spec=Evaluator) evaluator.evaluate = MagicMock( - side_effect=lambda i, o: asyncio.create_task(_slow_evaluate(i, o)) + side_effect=lambda i, o: asyncio.create_task(_empty_eval()) ) mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + mock_runner.invoke_model = AsyncMock(return_value=_make_model_response('hi')) mock_tracker = MagicMock(spec=LDAIConfigTracker) - mock_tracker.track_metrics_of_async = 
AsyncMock(return_value=_make_model_response()) + mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response('hi')) + mock_tracker.get_summary = MagicMock(return_value=_make_summary()) config = AICompletionConfig( key='test-config', enabled=True, @@ -55,20 +72,46 @@ async def _slow_evaluate(input_text: str, output_text: str) -> List[JudgeResult] ) model = ManagedModel(config, mock_runner) - response = await model.invoke('Hello') + result = await model.run('Hello') - # invoke() returned — evaluations task should still be pending - assert response is not None - assert response.evaluations is not None - assert not response.evaluations.done(), "evaluations task should still be pending" + assert isinstance(result, ManagedResult) + assert result.content == 'hi' + assert isinstance(result.metrics, LDAIMetricSummary) + # Cleanup the still-pending evaluations task. + if result.evaluations is not None: + await result.evaluations + + @pytest.mark.asyncio + async def test_run_returns_before_evaluations_resolve(self): + """run() should return a ManagedResult before evaluations complete.""" + barrier = asyncio.Event() + + async def _slow_evaluate(input_text: str, output_text: str) -> List[JudgeResult]: + await barrier.wait() + return [] + + evaluator = MagicMock(spec=Evaluator) + evaluator.evaluate = MagicMock( + side_effect=lambda i, o: asyncio.create_task(_slow_evaluate(i, o)) + ) + + mock_runner = MagicMock() + mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + + config, _tracker = _make_config_with_tracker(evaluator) + model = ManagedModel(config, mock_runner) + result = await model.run('Hello') + + assert result is not None + assert result.evaluations is not None + assert not result.evaluations.done(), "evaluations task should still be pending" - # Release the barrier and let it finish cleanly barrier.set() - await response.evaluations + await result.evaluations @pytest.mark.asyncio async def test_await_evaluations_collects_results(self): - """await response.evaluations should return the list of JudgeResult instances.""" + """await result.evaluations should return the list of JudgeResult instances.""" judge_result = JudgeResult( judge_config_key='judge-key', success=True, @@ -89,22 +132,11 @@ async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult] mock_runner = MagicMock() mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) - mock_tracker = MagicMock(spec=LDAIConfigTracker) - mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) - config = AICompletionConfig( - key='test-config', - enabled=True, - create_tracker=MagicMock(return_value=mock_tracker), - model=ModelConfig('gpt-4'), - provider=ProviderConfig('openai'), - messages=[], - evaluator=evaluator, - ) - + config, _tracker = _make_config_with_tracker(evaluator) model = ManagedModel(config, mock_runner) - response = await model.invoke('Hello') + result = await model.run('Hello') - results = await response.evaluations # type: ignore[misc] + results = await result.evaluations # type: ignore[misc] assert results == [judge_result] @pytest.mark.asyncio @@ -130,28 +162,17 @@ async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult] mock_runner = MagicMock() mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) - mock_tracker = MagicMock(spec=LDAIConfigTracker) - mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) + config, mock_tracker = 
_make_config_with_tracker(evaluator) mock_tracker.track_judge_result = MagicMock() - config = AICompletionConfig( - key='test-config', - enabled=True, - create_tracker=MagicMock(return_value=mock_tracker), - model=ModelConfig('gpt-4'), - provider=ProviderConfig('openai'), - messages=[], - evaluator=evaluator, - ) - model = ManagedModel(config, mock_runner) - response = await model.invoke('Hello') + result = await model.run('Hello') # Tracking should NOT have fired yet (before we await evaluations) mock_tracker.track_judge_result.assert_not_called() # Now await the evaluations task — tracking fires inside the chain - await response.evaluations # type: ignore[misc] + await result.evaluations # type: ignore[misc] mock_tracker.track_judge_result.assert_called_once_with(judge_result) @@ -176,23 +197,12 @@ async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult] mock_runner = MagicMock() mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) - mock_tracker = MagicMock(spec=LDAIConfigTracker) - mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) + config, mock_tracker = _make_config_with_tracker(evaluator) mock_tracker.track_judge_result = MagicMock() - config = AICompletionConfig( - key='test-config', - enabled=True, - create_tracker=MagicMock(return_value=mock_tracker), - model=ModelConfig('gpt-4'), - provider=ProviderConfig('openai'), - messages=[], - evaluator=evaluator, - ) - model = ManagedModel(config, mock_runner) - response = await model.invoke('Hello') - await response.evaluations # type: ignore[misc] + result = await model.run('Hello') + await result.evaluations # type: ignore[misc] mock_tracker.track_judge_result.assert_not_called() @@ -204,21 +214,35 @@ async def test_noop_evaluator_returns_empty_list(self): mock_runner = MagicMock() mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) - mock_tracker = MagicMock(spec=LDAIConfigTracker) - mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) + config, _tracker = _make_config_with_tracker(evaluator) + model = ManagedModel(config, mock_runner) + result = await model.run('Hello') + results = await result.evaluations # type: ignore[misc] - config = AICompletionConfig( - key='test-config', - enabled=True, - create_tracker=MagicMock(return_value=mock_tracker), - model=ModelConfig('gpt-4'), - provider=ProviderConfig('openai'), - messages=[], - evaluator=evaluator, - ) + assert results == [] + + +class TestManagedModelInvokeDeprecated: + """The deprecated invoke() method continues to work and emits a DeprecationWarning.""" + @pytest.mark.asyncio + async def test_invoke_emits_deprecation_warning(self): + """invoke() should emit a DeprecationWarning.""" + evaluator = Evaluator.noop() + mock_runner = MagicMock() + mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + + config, _tracker = _make_config_with_tracker(evaluator) model = ManagedModel(config, mock_runner) - response = await model.invoke('Hello') - results = await response.evaluations # type: ignore[misc] - assert results == [] + with pytest.warns(DeprecationWarning, match=r"ManagedModel\.invoke\(\) is deprecated"): + response = await model.invoke('Hello') + + assert response is not None + # invoke() still wires the evaluations chain on the response. 
+ if response.evaluations is not None: + await response.evaluations + + +async def _empty_eval() -> List[JudgeResult]: + return [] From 4e28ae691bcdf45c51e6714b53262189cce0066c Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 11:30:52 -0500 Subject: [PATCH 02/24] refactor: address review feedback on docstrings --- .../server-ai/src/ldai/providers/runner.py | 10 +-- .../sdk/server-ai/src/ldai/providers/types.py | 84 +++++++++++-------- .../sdk/server-ai/tests/test_managed_agent.py | 23 +++-- .../sdk/server-ai/tests/test_managed_model.py | 29 ++++--- .../sdk/server-ai/tests/test_runner_abcs.py | 24 +++--- 5 files changed, 94 insertions(+), 76 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/providers/runner.py b/packages/sdk/server-ai/src/ldai/providers/runner.py index c86a8fe8..5e1b9abc 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner.py @@ -11,15 +11,7 @@ class Runner(Protocol): Unified runtime capability interface for all AI provider runners. A :class:`Runner` is a focused, configured object that performs a single - AI invocation. Both model runners and agent runners implement this protocol. - - :param input: The input to the runner (string prompt, list of messages, or - other provider-specific input type). - :param output_type: Optional JSON schema dict that requests structured output. - When provided, the runner populates :attr:`~RunnerResult.parsed` on the - returned :class:`RunnerResult`. - :return: :class:`RunnerResult` containing ``content``, ``metrics``, and - optionally ``raw`` and ``parsed``. + AI invocation. """ async def run( diff --git a/packages/sdk/server-ai/src/ldai/providers/types.py b/packages/sdk/server-ai/src/ldai/providers/types.py index 72f7198c..f5224e0e 100644 --- a/packages/sdk/server-ai/src/ldai/providers/types.py +++ b/packages/sdk/server-ai/src/ldai/providers/types.py @@ -16,22 +16,19 @@ @dataclass class LDAIMetrics: - """ - Metrics information for AI operations that includes success status, token - usage, and optional enrichment fields populated by runners. - - ``tool_calls`` is a list of tool-call names observed during the invocation - (populated by agent runners that execute tool loops). + """Contains metrics for a single AI invocation.""" - ``duration_ms`` is the wall-clock duration of the runner invocation in - milliseconds, when measured by the runner itself rather than externally. - When set, the tracker uses this value directly instead of measuring elapsed - time. - """ success: bool + """Whether the invocation succeeded.""" + usage: Optional[TokenUsage] = None + """Optional token usage information.""" + tool_calls: Optional[List[str]] = None + """Ordered list of tool-call names observed during the invocation.""" + duration_ms: Optional[int] = None + """Wall-clock duration of the runner invocation in milliseconds.""" def to_dict(self) -> Dict[str, Any]: """ @@ -55,36 +52,39 @@ def to_dict(self) -> Dict[str, Any]: @dataclass class RunnerResult: - """ - Result returned by a :class:`~ldai.providers.runner.Runner` from a single - invocation. + """Contains the result of a single AI model invocation.""" - This is the unified return type for all Runner implementations. - ``evaluations`` is intentionally absent — judge evaluations are dispatched - by the managed layer and live on :class:`ManagedResult`. 
- """ content: str + """The text content returned by the model.""" + metrics: LDAIMetrics + """Metrics for this invocation.""" + raw: Optional[Any] = None + """Optional provider-native response object for advanced consumers.""" + parsed: Optional[Dict[str, Any]] = None + """Optional parsed structured output, populated when ``output_type`` was supplied.""" @dataclass class ManagedResult: - """ - Result returned by the managed layer (:class:`~ldai.ManagedModel` / - :class:`~ldai.ManagedAgent`) after a single invocation. + """Contains the result of a managed AI invocation, including metrics and optional judge evaluations.""" - ``metrics`` is an :class:`~ldai.tracker.LDAIMetricSummary` (from - ``tracker.get_summary()``) rather than a raw :class:`LDAIMetrics`. - ``evaluations`` is an optional asyncio Task that resolves to a list of - :class:`JudgeResult` instances when awaited. - """ content: str + """The text content returned by the model.""" + metrics: LDAIMetricSummary + """Aggregated metric summary from the tracker for this invocation.""" + raw: Optional[Any] = None + """Optional provider-native response object for advanced consumers.""" + parsed: Optional[Dict[str, Any]] = None + """Optional parsed structured output, populated when ``output_type`` was supplied.""" + evaluations: Optional[asyncio.Task[List[JudgeResult]]] = None + """Optional asyncio Task that resolves to the list of :class:`JudgeResult` instances when awaited.""" @dataclass @@ -116,16 +116,28 @@ class StructuredResponse: @dataclass class JudgeResult: - """ - Result from a judge evaluation. - """ + """Contains the result of a single judge evaluation.""" + judge_config_key: Optional[str] = None + """The configuration key of the judge that produced this result.""" + success: bool = False + """Whether the judge evaluation completed successfully.""" + error_message: Optional[str] = None - sampled: bool = False # True when the evaluation was sampled and run + """Error message describing why the evaluation failed, if any.""" + + sampled: bool = False + """True when the evaluation was sampled and run.""" + metric_key: Optional[str] = None + """The metric key under which this judge's score is reported.""" + score: Optional[float] = None + """The numeric score (0-1) returned by the judge.""" + reasoning: Optional[str] = None + """The judge's reasoning text accompanying the score.""" def to_dict(self) -> Dict[str, Any]: """ @@ -164,10 +176,16 @@ class AgentResult: @dataclass class AgentGraphResult: - """ - Result from an agent graph run. 
- """ + """Contains the result of an agent graph run.""" + output: str + """The agent graph's final output content.""" + raw: Any + """The provider-native response object from the graph run.""" + metrics: LDAIMetrics + """Metrics recorded during the graph run.""" + evaluations: Optional[List[JudgeResult]] = None + """Optional list of judge evaluation results produced for the graph run.""" diff --git a/packages/sdk/server-ai/tests/test_managed_agent.py b/packages/sdk/server-ai/tests/test_managed_agent.py index c4b94ea5..0c30637a 100644 --- a/packages/sdk/server-ai/tests/test_managed_agent.py +++ b/packages/sdk/server-ai/tests/test_managed_agent.py @@ -6,8 +6,7 @@ from ldai import LDAIClient, ManagedAgent from ldai.managed_agent import ManagedAgent from ldai.models import AIAgentConfig, AIAgentConfigDefault, ModelConfig, ProviderConfig -from ldai.providers import AgentResult -from ldai.providers.types import LDAIMetrics, ManagedResult +from ldai.providers.types import LDAIMetrics, ManagedResult, RunnerResult from ldai.tracker import LDAIMetricSummary from ldclient import Config, Context, LDClient @@ -64,20 +63,20 @@ async def test_run_delegates_to_agent_runner(self): mock_config = MagicMock(spec=AIAgentConfig) mock_tracker = MagicMock() mock_tracker.track_metrics_of_async = AsyncMock( - return_value=AgentResult( - output="Test response", - raw=None, + return_value=RunnerResult( + content="Test response", metrics=LDAIMetrics(success=True, usage=None), + raw=None, ) ) mock_tracker.get_summary = MagicMock(return_value=_make_summary(True)) mock_config.create_tracker = MagicMock(return_value=mock_tracker) mock_runner = MagicMock() mock_runner.run = AsyncMock( - return_value=AgentResult( - output="Test response", - raw=None, + return_value=RunnerResult( + content="Test response", metrics=LDAIMetrics(success=True, usage=None), + raw=None, ) ) @@ -96,10 +95,10 @@ async def test_run_uses_create_tracker_for_fresh_tracker(self): mock_config = MagicMock(spec=AIAgentConfig) fresh_tracker = MagicMock() fresh_tracker.track_metrics_of_async = AsyncMock( - return_value=AgentResult( - output="Fresh tracker response", - raw=None, + return_value=RunnerResult( + content="Fresh tracker response", metrics=LDAIMetrics(success=True, usage=None), + raw=None, ) ) fresh_tracker.get_summary = MagicMock(return_value=_make_summary(True)) @@ -163,7 +162,7 @@ async def test_returns_managed_agent_when_runner_available(self, ldai_client: LD mock_runner = MagicMock() mock_runner.run = AsyncMock( - return_value=AgentResult(output="Hello!", raw=None, metrics=LDAIMetrics(success=True, usage=None)) + return_value=RunnerResult(content="Hello!", metrics=LDAIMetrics(success=True, usage=None), raw=None) ) original = rf.RunnerFactory.create_agent diff --git a/packages/sdk/server-ai/tests/test_managed_model.py b/packages/sdk/server-ai/tests/test_managed_model.py index f81076c5..cc190abf 100644 --- a/packages/sdk/server-ai/tests/test_managed_model.py +++ b/packages/sdk/server-ai/tests/test_managed_model.py @@ -9,11 +9,18 @@ from ldai.evaluator import Evaluator from ldai.managed_model import ManagedModel from ldai.models import AICompletionConfig, LDMessage, ModelConfig, ProviderConfig -from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, ModelResponse +from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, ModelResponse, RunnerResult from ldai.tracker import LDAIConfigTracker, LDAIMetricSummary +def _make_runner_result(content: str = 'response text') -> RunnerResult: + return RunnerResult( + 
content=content, + metrics=LDAIMetrics(success=True, usage=None), + ) + + def _make_model_response(content: str = 'response text') -> ModelResponse: return ModelResponse( message=LDMessage(role='assistant', content=content), @@ -30,7 +37,7 @@ def _make_summary() -> LDAIMetricSummary: def _make_config_with_tracker(evaluator: Evaluator) -> tuple[AICompletionConfig, MagicMock]: """Build an AICompletionConfig with a fully-mocked tracker.""" mock_tracker = MagicMock(spec=LDAIConfigTracker) - mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) + mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_runner_result()) mock_tracker.get_summary = MagicMock(return_value=_make_summary()) config = AICompletionConfig( key='test-config', @@ -56,10 +63,10 @@ async def test_run_returns_managed_result(self): ) mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response('hi')) + mock_runner.invoke_model = AsyncMock(return_value=_make_runner_result('hi')) mock_tracker = MagicMock(spec=LDAIConfigTracker) - mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response('hi')) + mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_runner_result('hi')) mock_tracker.get_summary = MagicMock(return_value=_make_summary()) config = AICompletionConfig( key='test-config', @@ -96,7 +103,7 @@ async def _slow_evaluate(input_text: str, output_text: str) -> List[JudgeResult] ) mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + mock_runner.invoke_model = AsyncMock(return_value=_make_runner_result()) config, _tracker = _make_config_with_tracker(evaluator) model = ManagedModel(config, mock_runner) @@ -130,7 +137,7 @@ async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult] ) mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + mock_runner.invoke_model = AsyncMock(return_value=_make_runner_result()) config, _tracker = _make_config_with_tracker(evaluator) model = ManagedModel(config, mock_runner) @@ -160,7 +167,7 @@ async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult] ) mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + mock_runner.invoke_model = AsyncMock(return_value=_make_runner_result()) config, mock_tracker = _make_config_with_tracker(evaluator) mock_tracker.track_judge_result = MagicMock() @@ -195,7 +202,7 @@ async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult] ) mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + mock_runner.invoke_model = AsyncMock(return_value=_make_runner_result()) config, mock_tracker = _make_config_with_tracker(evaluator) mock_tracker.track_judge_result = MagicMock() @@ -212,7 +219,7 @@ async def test_noop_evaluator_returns_empty_list(self): evaluator = Evaluator.noop() mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + mock_runner.invoke_model = AsyncMock(return_value=_make_runner_result()) config, _tracker = _make_config_with_tracker(evaluator) model = ManagedModel(config, mock_runner) @@ -232,7 +239,9 @@ async def test_invoke_emits_deprecation_warning(self): mock_runner = MagicMock() mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) - config, _tracker = _make_config_with_tracker(evaluator) + config, mock_tracker = 
_make_config_with_tracker(evaluator) + # invoke() expects a ModelResponse from the tracker, not a RunnerResult. + mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) model = ManagedModel(config, mock_runner) with pytest.warns(DeprecationWarning, match=r"ManagedModel\.invoke\(\) is deprecated"): diff --git a/packages/sdk/server-ai/tests/test_runner_abcs.py b/packages/sdk/server-ai/tests/test_runner_abcs.py index d5136fd0..7e8087cd 100644 --- a/packages/sdk/server-ai/tests/test_runner_abcs.py +++ b/packages/sdk/server-ai/tests/test_runner_abcs.py @@ -1,17 +1,17 @@ import pytest -from ldai.providers import AgentGraphResult, AgentGraphRunner, AgentResult, AgentRunner, ToolRegistry -from ldai.providers.types import LDAIMetrics +from ldai.providers import AgentGraphResult, AgentGraphRunner, AgentRunner, ToolRegistry +from ldai.providers.types import LDAIMetrics, RunnerResult # --- Concrete test doubles --- class ConcreteAgentRunner: async def run(self, input): - return AgentResult( - output=f"agent response to: {input}", - raw={"raw": input}, + return RunnerResult( + content=f"agent response to: {input}", metrics=LDAIMetrics(success=True), + raw={"raw": input}, ) @@ -39,20 +39,20 @@ def test_agent_runner_structural_check_fails_when_run_missing(): @pytest.mark.asyncio -async def test_agent_runner_run_returns_agent_result(): +async def test_agent_runner_run_returns_runner_result(): runner = ConcreteAgentRunner() result = await runner.run("hello") - assert isinstance(result, AgentResult) - assert result.output == "agent response to: hello" + assert isinstance(result, RunnerResult) + assert result.content == "agent response to: hello" assert result.raw == {"raw": "hello"} assert result.metrics.success is True @pytest.mark.asyncio -async def test_agent_result_fields(): +async def test_runner_result_fields(): metrics = LDAIMetrics(success=True) - result = AgentResult(output="done", raw={"key": "val"}, metrics=metrics) - assert result.output == "done" + result = RunnerResult(content="done", metrics=metrics, raw={"key": "val"}) + assert result.content == "done" assert result.raw == {"key": "val"} assert result.metrics is metrics @@ -103,6 +103,6 @@ def test_top_level_exports(): import ldai assert hasattr(ldai, 'AgentRunner') assert hasattr(ldai, 'AgentGraphRunner') - assert hasattr(ldai, 'AgentResult') assert hasattr(ldai, 'AgentGraphResult') + assert hasattr(ldai, 'RunnerResult') assert hasattr(ldai, 'ToolRegistry') From 2dd9329e5ccb9f5a5cb1cf1d00af0b6a2cef3138 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 11:49:16 -0500 Subject: [PATCH 03/24] fix: merge duplicate track_tool_calls methods in LDAIConfigTracker The new track_tool_calls method at line 413 (with summary storage and dedup guard) was being shadowed by the older method at line 559 (which only fired per-tool events). Merge them into a single method that both stores to the summary and fires per-tool events. 
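
Illustrative call pattern after the merge (tool names are hypothetical):

    tracker.track_tool_calls(["search", "fetch_page"])
    # summary.tool_calls == ["search", "fetch_page"], and one
    # $ld:ai:tool_call event fires per key via track_tool_call()
    tracker.track_tool_calls(["search"])  # logs a warning; first call wins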
--- packages/sdk/server-ai/src/ldai/tracker.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py index 608297d3..21995dc5 100644 --- a/packages/sdk/server-ai/src/ldai/tracker.py +++ b/packages/sdk/server-ai/src/ldai/tracker.py @@ -410,16 +410,22 @@ def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None: 1, ) - def track_tool_calls(self, tool_calls: List[str]) -> None: + def track_tool_calls(self, tool_calls: Iterable[str]) -> None: """ Track the tool calls made during an AI operation. - :param tool_calls: List of tool call names. + Stores the tool call names on the summary (guarding against duplicate + tracking) and fires a ``$ld:ai:tool_call`` event for each tool. + + :param tool_calls: Tool identifiers (e.g. from a model response). """ if self._summary.tool_calls is not None: log.warning("Tool calls have already been tracked for this execution. %s", self.__get_track_data()) return - self._summary._tool_calls = list(tool_calls) + tool_calls_list = list(tool_calls) + self._summary._tool_calls = tool_calls_list + for tool_key in tool_calls_list: + self.track_tool_call(tool_key) def track_success(self) -> None: """ @@ -556,15 +562,6 @@ def track_tool_call(self, tool_key: str) -> None: 1, ) - def track_tool_calls(self, tool_keys: Iterable[str]) -> None: - """ - Track multiple tool invocations for this configuration. - - :param tool_keys: Tool identifiers (e.g. from a model response). - """ - for tool_key in tool_keys: - self.track_tool_call(tool_key) - def get_summary(self) -> LDAIMetricSummary: """ Get the current summary of AI metrics. From b4d15df4be4b5cb69bce1c8af6006ece3009c1ff Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 12:12:49 -0500 Subject: [PATCH 04/24] fix: avoid double metrics extraction in track_metrics_of helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, metrics_extractor(result) was called twice — once in the public track_metrics_of/track_metrics_of_async to read duration_ms, and again inside _track_from_metrics_extractor to track success, tokens, and tool calls. Extract metrics once in the public method and pass the resulting metrics + elapsed_ms into the private helper, which now also handles the duration tracking. 
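
Resulting control flow in both helpers (sketch):

    result = func()                      # awaited in the async variant
    elapsed_ms = ...                     # wall-clock measurement
    metrics = metrics_extractor(result)  # extracted exactly once
    self._track_from_metrics_extractor(metrics, elapsed_ms)
    return result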
--- packages/sdk/server-ai/src/ldai/tracker.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py index 21995dc5..414ac34b 100644 --- a/packages/sdk/server-ai/src/ldai/tracker.py +++ b/packages/sdk/server-ai/src/ldai/tracker.py @@ -279,12 +279,9 @@ def track_duration_of(self, func): return result - def _track_from_metrics_extractor( - self, - result: Any, - metrics_extractor: Callable[[Any], Any], - ) -> Any: - metrics = metrics_extractor(result) + def _track_from_metrics_extractor(self, metrics: Any, elapsed_ms: int) -> None: + reported_ms = getattr(metrics, 'duration_ms', None) if metrics else None + self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) if metrics.success: self.track_success() else: @@ -293,7 +290,6 @@ def _track_from_metrics_extractor( self.track_tokens(metrics.usage) if getattr(metrics, 'tool_calls', None): self.track_tool_calls(metrics.tool_calls) - return result def track_metrics_of( self, @@ -331,9 +327,8 @@ def track_metrics_of( elapsed_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 metrics = metrics_extractor(result) - reported_ms = getattr(metrics, 'duration_ms', None) if metrics else None - self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) - return self._track_from_metrics_extractor(result, metrics_extractor) + self._track_from_metrics_extractor(metrics, elapsed_ms) + return result async def track_metrics_of_async(self, metrics_extractor, func): """ @@ -361,9 +356,8 @@ async def track_metrics_of_async(self, metrics_extractor, func): elapsed_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 metrics = metrics_extractor(result) - reported_ms = getattr(metrics, 'duration_ms', None) if metrics else None - self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) - return self._track_from_metrics_extractor(result, metrics_extractor) + self._track_from_metrics_extractor(metrics, elapsed_ms) + return result def track_judge_result(self, judge_result: Any) -> None: """ From 4fe7eb5bd82656868b42ea3b21cd1759f5557e32 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 12:37:04 -0500 Subject: [PATCH 05/24] refactor: drop ModelRunner/AgentRunner compat from managed layer ManagedModel and ManagedAgent now require a Runner. The compat shims (_invoke_runner, isinstance(result, RunnerResult) branches, Union type annotations) are removed; result handling is direct on RunnerResult fields. The deprecated ManagedModel.invoke() is preserved for backwards compat but now delegates to run() and adapts the ManagedResult into the legacy ModelResponse shape. ModelRunner and AgentRunner protocol definitions remain in place so downstream provider packages that import them continue to work. 
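
Migration sketch for callers (illustrative):

    # before
    response = await model.invoke("Hello")
    text = response.message.content

    # after
    result = await model.run("Hello")
    text = result.content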
--- packages/sdk/server-ai/src/ldai/client.py | 4 +- .../sdk/server-ai/src/ldai/managed_agent.py | 20 ++--- .../sdk/server-ai/src/ldai/managed_model.py | 78 ++++++------------- .../sdk/server-ai/tests/test_managed_model.py | 15 +--- 4 files changed, 34 insertions(+), 83 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index 448d5c55..3193095c 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -443,7 +443,7 @@ async def create_model( if not runner: return None - return ManagedModel(config, runner) + return ManagedModel(config, runner) # type: ignore[arg-type] async def create_chat( self, @@ -517,7 +517,7 @@ async def create_agent( if not runner: return None - return ManagedAgent(config, runner) + return ManagedAgent(config, runner) # type: ignore[arg-type] def agent_config( self, diff --git a/packages/sdk/server-ai/src/ldai/managed_agent.py b/packages/sdk/server-ai/src/ldai/managed_agent.py index a2abdf98..9d582ae4 100644 --- a/packages/sdk/server-ai/src/ldai/managed_agent.py +++ b/packages/sdk/server-ai/src/ldai/managed_agent.py @@ -1,26 +1,22 @@ """ManagedAgent — LaunchDarkly managed wrapper for agent invocations.""" -from typing import Union - from ldai.models import AIAgentConfig -from ldai.providers import AgentResult, AgentRunner from ldai.providers.runner import Runner -from ldai.providers.types import ManagedResult, RunnerResult +from ldai.providers.types import ManagedResult class ManagedAgent: """ LaunchDarkly managed wrapper for AI agent invocations. - Holds an AgentRunner or Runner. Handles tracking automatically via - ``create_tracker()``. + Holds a Runner. Handles tracking automatically via ``create_tracker()``. Obtain an instance via ``LDAIClient.create_agent()``. """ def __init__( self, ai_config: AIAgentConfig, - agent_runner: Union[Runner, AgentRunner], + agent_runner: Runner, ): self._ai_config = ai_config self._agent_runner = agent_runner @@ -33,23 +29,21 @@ async def run(self, input: str) -> ManagedResult: :return: ManagedResult containing the agent's output and metric summary """ tracker = self._ai_config.create_tracker() - result: Union[RunnerResult, AgentResult] = await tracker.track_metrics_of_async( + result = await tracker.track_metrics_of_async( lambda r: r.metrics, lambda: self._agent_runner.run(input), ) - # Support both RunnerResult (content) and legacy AgentResult (output) - content = result.content if isinstance(result, RunnerResult) else result.output # type: ignore[union-attr] return ManagedResult( - content=content, + content=result.content, metrics=tracker.get_summary(), raw=result.raw, ) - def get_agent_runner(self) -> Union[Runner, AgentRunner]: + def get_agent_runner(self) -> Runner: """ Return the underlying runner for advanced use. - :return: The Runner or AgentRunner instance. + :return: The Runner instance. 
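+
+        Illustrative direct use, bypassing managed tracking (``schema`` is a
+        hypothetical JSON schema dict)::
+
+            runner = agent.get_agent_runner()
+            result = await runner.run("classify this", output_type=schema)
+            data = result.parsed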
""" return self._agent_runner diff --git a/packages/sdk/server-ai/src/ldai/managed_model.py b/packages/sdk/server-ai/src/ldai/managed_model.py index 3d2949c3..48f1a587 100644 --- a/packages/sdk/server-ai/src/ldai/managed_model.py +++ b/packages/sdk/server-ai/src/ldai/managed_model.py @@ -1,12 +1,11 @@ import asyncio import warnings -from typing import List, Union +from typing import List from ldai import log from ldai.models import AICompletionConfig, LDMessage -from ldai.providers.model_runner import ModelRunner from ldai.providers.runner import Runner -from ldai.providers.types import JudgeResult, ManagedResult, ModelResponse, RunnerResult +from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, ModelResponse from ldai.tracker import LDAIConfigTracker @@ -14,15 +13,15 @@ class ManagedModel: """ LaunchDarkly managed wrapper for AI model invocations. - Holds a Runner (or legacy ModelRunner). Handles conversation management, - judge evaluation dispatch, and tracking automatically via ``create_tracker()``. + Holds a Runner. Handles conversation management, judge evaluation + dispatch, and tracking automatically via ``create_tracker()``. Obtain an instance via ``LDAIClient.create_model()``. """ def __init__( self, ai_config: AICompletionConfig, - model_runner: Union[Runner, ModelRunner], + model_runner: Runner, ): self._ai_config = ai_config self._model_runner = model_runner @@ -48,50 +47,27 @@ async def run(self, prompt: str) -> ManagedResult: config_messages = self._ai_config.messages or [] all_messages = config_messages + self._messages - result: Union[RunnerResult, ModelResponse] = await tracker.track_metrics_of_async( + result = await tracker.track_metrics_of_async( lambda r: r.metrics, - lambda: self._invoke_runner(all_messages), + lambda: self._model_runner.run(all_messages), ) - # Support both new RunnerResult and legacy ModelResponse - if isinstance(result, RunnerResult): - content = result.content - raw = result.raw - parsed = result.parsed - assistant_message = LDMessage(role='assistant', content=content) - else: - content = result.message.content - raw = getattr(result, 'raw', None) - parsed = getattr(result, 'parsed', None) - assistant_message = result.message + assistant_message = LDMessage(role='assistant', content=result.content) input_text = '\r\n'.join(m.content for m in self._messages) if self._messages else '' - evaluations_task = self._track_judge_results(tracker, input_text, content) + evaluations_task = self._track_judge_results(tracker, input_text, result.content) self._messages.append(assistant_message) return ManagedResult( - content=content, + content=result.content, metrics=tracker.get_summary(), - raw=raw, - parsed=parsed, + raw=result.raw, + parsed=result.parsed, evaluations=evaluations_task, ) - async def _invoke_runner( - self, all_messages: List[LDMessage] - ) -> Union[RunnerResult, ModelResponse]: - """ - Delegate to the runner. Supports both the new ``Runner`` protocol - (``run(messages) → RunnerResult``) and the legacy ``ModelRunner`` - (``invoke_model(messages) → ModelResponse``). - """ - if isinstance(self._model_runner, Runner): - return await self._model_runner.run(all_messages) - # Legacy ModelRunner path - return await self._model_runner.invoke_model(all_messages) # type: ignore[union-attr] - async def invoke(self, prompt: str) -> ModelResponse: """ Invoke the model with a prompt string. 
@@ -108,26 +84,16 @@ async def invoke(self, prompt: str) -> ModelResponse: DeprecationWarning, stacklevel=2, ) - tracker = self._ai_config.create_tracker() - - user_message = LDMessage(role='user', content=prompt) - self._messages.append(user_message) - - config_messages = self._ai_config.messages or [] - all_messages = config_messages + self._messages - - response: ModelResponse = await tracker.track_metrics_of_async( - lambda result: result.metrics, - lambda: self._model_runner.invoke_model(all_messages), # type: ignore[union-attr] + result = await self.run(prompt) + return ModelResponse( + message=LDMessage(role='assistant', content=result.content), + metrics=LDAIMetrics( + success=bool(result.metrics.success), + usage=result.metrics.usage, + ), + evaluations=result.evaluations, ) - input_text = '\r\n'.join(m.content for m in self._messages) if self._messages else '' - output_text = response.message.content - response.evaluations = self._track_judge_results(tracker, input_text, output_text) - - self._messages.append(response.message) - return response - def _track_judge_results( self, tracker: LDAIConfigTracker, @@ -169,11 +135,11 @@ def append_messages(self, messages: List[LDMessage]) -> None: """ self._messages.extend(messages) - def get_model_runner(self) -> Union[Runner, ModelRunner]: + def get_model_runner(self) -> Runner: """ Return the underlying runner for advanced use. - :return: The Runner or legacy ModelRunner instance. + :return: The Runner instance. """ return self._model_runner diff --git a/packages/sdk/server-ai/tests/test_managed_model.py b/packages/sdk/server-ai/tests/test_managed_model.py index cc190abf..73739da2 100644 --- a/packages/sdk/server-ai/tests/test_managed_model.py +++ b/packages/sdk/server-ai/tests/test_managed_model.py @@ -9,7 +9,7 @@ from ldai.evaluator import Evaluator from ldai.managed_model import ManagedModel from ldai.models import AICompletionConfig, LDMessage, ModelConfig, ProviderConfig -from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, ModelResponse, RunnerResult +from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, RunnerResult from ldai.tracker import LDAIConfigTracker, LDAIMetricSummary @@ -21,13 +21,6 @@ def _make_runner_result(content: str = 'response text') -> RunnerResult: ) -def _make_model_response(content: str = 'response text') -> ModelResponse: - return ModelResponse( - message=LDMessage(role='assistant', content=content), - metrics=LDAIMetrics(success=True, usage=None), - ) - - def _make_summary() -> LDAIMetricSummary: summary = LDAIMetricSummary() summary._success = True @@ -237,11 +230,9 @@ async def test_invoke_emits_deprecation_warning(self): """invoke() should emit a DeprecationWarning.""" evaluator = Evaluator.noop() mock_runner = MagicMock() - mock_runner.invoke_model = AsyncMock(return_value=_make_model_response()) + mock_runner.run = AsyncMock(return_value=_make_runner_result()) - config, mock_tracker = _make_config_with_tracker(evaluator) - # invoke() expects a ModelResponse from the tracker, not a RunnerResult. 
- mock_tracker.track_metrics_of_async = AsyncMock(return_value=_make_model_response()) + config, _mock_tracker = _make_config_with_tracker(evaluator) model = ManagedModel(config, mock_runner) with pytest.warns(DeprecationWarning, match=r"ManagedModel\.invoke\(\) is deprecated"): From ff871bf9293c3b3b7ae9e04fef51666ecb87a7bb Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 12:58:34 -0500 Subject: [PATCH 06/24] fix: tighten _track_from_metrics_extractor checks - Drop the inconsistent 'if metrics else None' guard on reported_ms; the next line already dereferences metrics.success unconditionally. - Use 'is not None' for tool_calls so an explicit empty list still triggers tracking (preserves the distinction between 'not tracked' and 'tracked with no calls'). --- packages/sdk/server-ai/src/ldai/tracker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py index 414ac34b..9e61a8a4 100644 --- a/packages/sdk/server-ai/src/ldai/tracker.py +++ b/packages/sdk/server-ai/src/ldai/tracker.py @@ -280,7 +280,7 @@ def track_duration_of(self, func): return result def _track_from_metrics_extractor(self, metrics: Any, elapsed_ms: int) -> None: - reported_ms = getattr(metrics, 'duration_ms', None) if metrics else None + reported_ms = getattr(metrics, 'duration_ms', None) self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) if metrics.success: self.track_success() @@ -288,7 +288,7 @@ def _track_from_metrics_extractor(self, metrics: Any, elapsed_ms: int) -> None: self.track_error() if metrics.usage: self.track_tokens(metrics.usage) - if getattr(metrics, 'tool_calls', None): + if getattr(metrics, 'tool_calls', None) is not None: self.track_tool_calls(metrics.tool_calls) def track_metrics_of( From d75467f8a9cec9d4b85a88051c6ae19418f660a9 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 16:06:57 -0500 Subject: [PATCH 07/24] refactor: remove deprecated ManagedModel.invoke() Drop the deprecated invoke() method from the managed layer along with its dedicated test class and the warnings/LDAIMetrics/ModelResponse imports that were only needed by it. Type definitions in providers/ remain so downstream provider packages keep building. --- .../sdk/server-ai/src/ldai/managed_model.py | 29 +------------------ .../sdk/server-ai/tests/test_managed_model.py | 22 -------------- 2 files changed, 1 insertion(+), 50 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/managed_model.py b/packages/sdk/server-ai/src/ldai/managed_model.py index 48f1a587..94605eab 100644 --- a/packages/sdk/server-ai/src/ldai/managed_model.py +++ b/packages/sdk/server-ai/src/ldai/managed_model.py @@ -1,11 +1,10 @@ import asyncio -import warnings from typing import List from ldai import log from ldai.models import AICompletionConfig, LDMessage from ldai.providers.runner import Runner -from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, ModelResponse +from ldai.providers.types import JudgeResult, ManagedResult from ldai.tracker import LDAIConfigTracker @@ -68,32 +67,6 @@ async def run(self, prompt: str) -> ManagedResult: evaluations=evaluations_task, ) - async def invoke(self, prompt: str) -> ModelResponse: - """ - Invoke the model with a prompt string. - - .. deprecated:: - Use :meth:`run` instead. This method will be removed in a future - release once the migration to :class:`ManagedResult` is complete. 
- - :param prompt: The user prompt to send to the model - :return: ModelResponse containing the model's response and metrics - """ - warnings.warn( - "ManagedModel.invoke() is deprecated. Use run() instead.", - DeprecationWarning, - stacklevel=2, - ) - result = await self.run(prompt) - return ModelResponse( - message=LDMessage(role='assistant', content=result.content), - metrics=LDAIMetrics( - success=bool(result.metrics.success), - usage=result.metrics.usage, - ), - evaluations=result.evaluations, - ) - def _track_judge_results( self, tracker: LDAIConfigTracker, diff --git a/packages/sdk/server-ai/tests/test_managed_model.py b/packages/sdk/server-ai/tests/test_managed_model.py index 73739da2..6d679552 100644 --- a/packages/sdk/server-ai/tests/test_managed_model.py +++ b/packages/sdk/server-ai/tests/test_managed_model.py @@ -222,27 +222,5 @@ async def test_noop_evaluator_returns_empty_list(self): assert results == [] -class TestManagedModelInvokeDeprecated: - """The deprecated invoke() method continues to work and emits a DeprecationWarning.""" - - @pytest.mark.asyncio - async def test_invoke_emits_deprecation_warning(self): - """invoke() should emit a DeprecationWarning.""" - evaluator = Evaluator.noop() - mock_runner = MagicMock() - mock_runner.run = AsyncMock(return_value=_make_runner_result()) - - config, _mock_tracker = _make_config_with_tracker(evaluator) - model = ManagedModel(config, mock_runner) - - with pytest.warns(DeprecationWarning, match=r"ManagedModel\.invoke\(\) is deprecated"): - response = await model.invoke('Hello') - - assert response is not None - # invoke() still wires the evaluations chain on the response. - if response.evaluations is not None: - await response.evaluations - - async def _empty_eval() -> List[JudgeResult]: return [] From 89d0ad7cd97c61bd4fe191489c154130c68b7e3d Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 16:13:25 -0500 Subject: [PATCH 08/24] refactor: type RunnerFactory.create_model/agent returns as Optional[Runner] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The factory's downstream consumers (ManagedModel, ManagedAgent) now take Runner; aligning the factory's return types lets us drop the type: ignore comments at the ManagedModel/ManagedAgent call sites. Provider package PRs will update their concrete implementations to match. Judge still takes ModelRunner, so its call site picks up the type: ignore[arg-type] in its place — that's resolved later in the cleanup PR when Judge migrates to Runner. 
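At the call sites this reads roughly as follows (sketch mirroring the create_model path in client.py):

    runner = RunnerFactory.create_model(config, default_ai_provider)  # Optional[Runner]
    if not runner:
        return None
    return ManagedModel(config, runner)  # no type: ignore[arg-type] needed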
--- packages/sdk/server-ai/src/ldai/client.py | 6 +++--- .../server-ai/src/ldai/providers/runner_factory.py | 11 +++++------ 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index 3193095c..ededae36 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -329,7 +329,7 @@ def create_judge( if not provider: return None - return Judge(judge_config, provider) + return Judge(judge_config, provider) # type: ignore[arg-type] except Exception as error: return None @@ -443,7 +443,7 @@ async def create_model( if not runner: return None - return ManagedModel(config, runner) # type: ignore[arg-type] + return ManagedModel(config, runner) async def create_chat( self, @@ -517,7 +517,7 @@ async def create_agent( if not runner: return None - return ManagedAgent(config, runner) # type: ignore[arg-type] + return ManagedAgent(config, runner) def agent_config( self, diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index 9363f8e0..b7548791 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -4,9 +4,8 @@ from ldai import log from ldai.models import AIConfigKind from ldai.providers.agent_graph_runner import AgentGraphRunner -from ldai.providers.agent_runner import AgentRunner from ldai.providers.ai_provider import AIProvider -from ldai.providers.model_runner import ModelRunner +from ldai.providers.runner import Runner T = TypeVar('T') @@ -118,13 +117,13 @@ def _get_providers_to_try( def create_model( config: AIConfigKind, default_ai_provider: Optional[str] = None, - ) -> Optional[ModelRunner]: + ) -> Optional[Runner]: """ Create a model executor for the given AI completion config. :param config: LaunchDarkly AI config (completion or judge) :param default_ai_provider: Optional provider override ('openai', 'langchain', …) - :return: Configured ModelRunner ready to invoke the model, or None + :return: Configured Runner ready to invoke the model, or None """ provider_name = config.provider.name.lower() if config.provider else None providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name) @@ -135,7 +134,7 @@ def create_agent( config: Any, tools: Any, default_ai_provider: Optional[str] = None, - ) -> Optional[AgentRunner]: + ) -> Optional[Runner]: """ CAUTION: This feature is experimental and should NOT be considered ready for production use. @@ -147,7 +146,7 @@ def create_agent( :param config: LaunchDarkly AI agent config :param tools: Tool registry mapping tool names to callables :param default_ai_provider: Optional provider override - :return: AgentRunner instance, or None + :return: Runner instance, or None """ provider_name = config.provider.name.lower() if config.provider else None providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name) From 7a52f24c1b2b3e56ad63f29f6e39ce2b5a4c0bfb Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 29 Apr 2026 16:56:03 -0500 Subject: [PATCH 09/24] refactor: handle metrics extraction failures gracefully MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the metrics_extractor call inside _track_from_metrics_extractor so extraction errors are caught and logged without bubbling up. 
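In sketch form, the guarded extraction this introduces (the shape matches the diff below):

    metrics = None
    try:
        metrics = metrics_extractor(result)
    except Exception as exc:
        log.warning("Failed to extract metrics: %s", exc)

    if metrics is None:
        self.track_duration(elapsed_ms)  # wall-clock fallback only
        return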
When extraction fails or returns None, only the wall-clock duration is tracked — success/error is left untouched since the underlying model call itself succeeded. Also tighten the tool_calls check to access metrics.tool_calls directly, mirroring how metrics.usage is accessed. --- packages/sdk/server-ai/src/ldai/tracker.py | 25 ++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py index 9e61a8a4..df122a24 100644 --- a/packages/sdk/server-ai/src/ldai/tracker.py +++ b/packages/sdk/server-ai/src/ldai/tracker.py @@ -279,7 +279,22 @@ def track_duration_of(self, func): return result - def _track_from_metrics_extractor(self, metrics: Any, elapsed_ms: int) -> None: + def _track_from_metrics_extractor( + self, + result: Any, + metrics_extractor: Callable[[Any], Any], + elapsed_ms: int, + ) -> None: + metrics = None + try: + metrics = metrics_extractor(result) + except Exception as exc: + log.warning("Failed to extract metrics: %s", exc) + + if metrics is None: + self.track_duration(elapsed_ms) + return + reported_ms = getattr(metrics, 'duration_ms', None) self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) if metrics.success: @@ -288,7 +303,7 @@ def _track_from_metrics_extractor(self, metrics: Any, elapsed_ms: int) -> None: self.track_error() if metrics.usage: self.track_tokens(metrics.usage) - if getattr(metrics, 'tool_calls', None) is not None: + if metrics.tool_calls is not None: self.track_tool_calls(metrics.tool_calls) def track_metrics_of( @@ -326,8 +341,7 @@ def track_metrics_of( raise err elapsed_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 - metrics = metrics_extractor(result) - self._track_from_metrics_extractor(metrics, elapsed_ms) + self._track_from_metrics_extractor(result, metrics_extractor, elapsed_ms) return result async def track_metrics_of_async(self, metrics_extractor, func): @@ -355,8 +369,7 @@ async def track_metrics_of_async(self, metrics_extractor, func): raise err elapsed_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 - metrics = metrics_extractor(result) - self._track_from_metrics_extractor(metrics, elapsed_ms) + self._track_from_metrics_extractor(result, metrics_extractor, elapsed_ms) return result def track_judge_result(self, judge_result: Any) -> None: From 45845edb000db7415fc1bd30b44b138026c46212 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 30 Apr 2026 08:58:16 -0500 Subject: [PATCH 10/24] refactor: update Judge to use Runner protocol and RunnerResult - Judge now accepts Runner instead of ModelRunner - evaluate() calls runner.run(output_type=...) 
instead of invoke_structured_model - response.parsed replaces StructuredResponse.data; None guard added - evaluate_messages() accepts RunnerResult instead of ModelResponse - Tests updated to use RunnerResult and mock_runner.run Co-Authored-By: Claude Sonnet 4.6 --- packages/sdk/server-ai/src/ldai/client.py | 2 +- .../sdk/server-ai/src/ldai/judge/__init__.py | 22 +++-- packages/sdk/server-ai/tests/test_judge.py | 84 +++++++++---------- 3 files changed, 55 insertions(+), 53 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index ededae36..448d5c55 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -329,7 +329,7 @@ def create_judge( if not provider: return None - return Judge(judge_config, provider) # type: ignore[arg-type] + return Judge(judge_config, provider) except Exception as error: return None diff --git a/packages/sdk/server-ai/src/ldai/judge/__init__.py b/packages/sdk/server-ai/src/ldai/judge/__init__.py index f2e8c362..6919a7aa 100644 --- a/packages/sdk/server-ai/src/ldai/judge/__init__.py +++ b/packages/sdk/server-ai/src/ldai/judge/__init__.py @@ -8,8 +8,8 @@ from ldai import log from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder from ldai.models import AIJudgeConfig, LDMessage -from ldai.providers.model_runner import ModelRunner -from ldai.providers.types import JudgeResult, ModelResponse +from ldai.providers.runner import Runner +from ldai.providers.types import JudgeResult, RunnerResult class Judge: @@ -23,7 +23,7 @@ class Judge: def __init__( self, ai_config: AIJudgeConfig, - model_runner: ModelRunner, + model_runner: Runner, ): """ Initialize the Judge. @@ -76,10 +76,14 @@ async def evaluate( response = await tracker.track_metrics_of_async( lambda result: result.metrics, - lambda: self._model_runner.invoke_structured_model(messages, self._evaluation_response_structure), + lambda: self._model_runner.run(messages, output_type=self._evaluation_response_structure), ) - parsed = self._parse_evaluation_response(response.data) + if response.parsed is None: + log.warning('Judge evaluation did not return structured output') + return judge_result + + parsed = self._parse_evaluation_response(response.parsed) if parsed is None: log.warning('Judge evaluation did not return the expected evaluation') @@ -99,19 +103,19 @@ async def evaluate( async def evaluate_messages( self, messages: list[LDMessage], - response: ModelResponse, + response: RunnerResult, sampling_ratio: float = 1.0, ) -> JudgeResult: """ Evaluates an AI response from chat messages and response. :param messages: Array of messages representing the conversation history - :param response: The AI response to be evaluated + :param response: The runner result to be evaluated :param sampling_ratio: Sampling ratio (0-1) to determine if evaluation should be processed (defaults to 1) :return: The result of the judge evaluation. """ input_text = '\r\n'.join([msg.content for msg in messages]) if messages else '' - output_text = response.message.content + output_text = response.content return await self.evaluate(input_text, output_text, sampling_ratio) @@ -123,7 +127,7 @@ def get_ai_config(self) -> AIJudgeConfig: """ return self._ai_config - def get_model_runner(self) -> ModelRunner: + def get_model_runner(self) -> Runner: """ Returns the model runner used by this judge. 
diff --git a/packages/sdk/server-ai/tests/test_judge.py b/packages/sdk/server-ai/tests/test_judge.py index c2690b6a..3ca0750b 100644 --- a/packages/sdk/server-ai/tests/test_judge.py +++ b/packages/sdk/server-ai/tests/test_judge.py @@ -9,7 +9,7 @@ from ldai.judge import Judge from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder from ldai.models import AIJudgeConfig, AIJudgeConfigDefault, LDMessage, ModelConfig, ProviderConfig -from ldai.providers.types import JudgeResult, LDAIMetrics, StructuredResponse +from ldai.providers.types import JudgeResult, LDAIMetrics, RunnerResult from ldai.tracker import LDAIConfigTracker @@ -40,9 +40,9 @@ def client(td: TestData) -> LDClient: @pytest.fixture def mock_runner(): - """Create a mock AI provider.""" + """Create a mock AI runner.""" provider = MagicMock() - provider.invoke_structured_model = AsyncMock() + provider.run = AsyncMock() return provider @@ -137,7 +137,7 @@ async def test_evaluate_returns_failure_when_evaluation_metric_key_missing( assert isinstance(result, JudgeResult) assert result.success is False assert result.sampled is False - mock_runner.invoke_structured_model.assert_not_called() + mock_runner.run.assert_not_called() @pytest.mark.asyncio async def test_evaluate_returns_failure_when_messages_missing( @@ -151,23 +151,23 @@ async def test_evaluate_returns_failure_when_messages_missing( assert isinstance(result, JudgeResult) assert result.success is False assert result.sampled is False - mock_runner.invoke_structured_model.assert_not_called() + mock_runner.run.assert_not_called() @pytest.mark.asyncio async def test_evaluate_success_with_valid_response( self, judge_config_with_key: AIJudgeConfig, tracker: LDAIConfigTracker, mock_runner ): """Evaluate should return JudgeResponse with valid evaluation.""" - mock_response = StructuredResponse( - data={ + mock_response = RunnerResult( + content='', + metrics=LDAIMetrics(success=True), + parsed={ 'score': 0.85, 'reasoning': 'The response is highly relevant to the input.' 
}, - raw_response='{"score": 0.85, "reasoning": "..."}', - metrics=LDAIMetrics(success=True) ) - mock_runner.invoke_structured_model.return_value = mock_response + mock_runner.run.return_value = mock_response tracker.track_metrics_of_async = AsyncMock(return_value=mock_response) judge = Judge(judge_config_with_key, mock_runner) @@ -187,15 +187,15 @@ async def test_evaluate_success_with_evaluation_response_shape( self, judge_config_with_key: AIJudgeConfig, tracker: LDAIConfigTracker, mock_runner ): """Evaluate should accept shape { score, reasoning } and key by metric.""" - mock_response = StructuredResponse( - data={ + mock_response = RunnerResult( + content='', + metrics=LDAIMetrics(success=True), + parsed={ 'score': 0.9, 'reasoning': 'The response is accurate and complete.', }, - raw_response='{"score": 0.9, "reasoning": "..."}', - metrics=LDAIMetrics(success=True), ) - mock_runner.invoke_structured_model.return_value = mock_response + mock_runner.run.return_value = mock_response tracker.track_metrics_of_async = AsyncMock(return_value=mock_response) judge = Judge(judge_config_with_key, mock_runner) @@ -214,13 +214,13 @@ async def test_evaluate_handles_missing_evaluation_in_response( self, judge_config_with_key: AIJudgeConfig, tracker: LDAIConfigTracker, mock_runner ): """Evaluate should handle missing score/reasoning in response.""" - mock_response = StructuredResponse( - data={}, - raw_response='{}', - metrics=LDAIMetrics(success=True) + mock_response = RunnerResult( + content='', + metrics=LDAIMetrics(success=True), + parsed={}, ) - mock_runner.invoke_structured_model.return_value = mock_response + mock_runner.run.return_value = mock_response tracker.track_metrics_of_async = AsyncMock(return_value=mock_response) judge = Judge(judge_config_with_key, mock_runner) @@ -236,16 +236,16 @@ async def test_evaluate_handles_invalid_score( self, judge_config_with_key: AIJudgeConfig, tracker: LDAIConfigTracker, mock_runner ): """Evaluate should handle invalid score values.""" - mock_response = StructuredResponse( - data={ + mock_response = RunnerResult( + content='', + metrics=LDAIMetrics(success=True), + parsed={ 'score': 1.5, - 'reasoning': 'Some reasoning' + 'reasoning': 'Some reasoning', }, - raw_response='{"score": 1.5, "reasoning": "..."}', - metrics=LDAIMetrics(success=True) ) - mock_runner.invoke_structured_model.return_value = mock_response + mock_runner.run.return_value = mock_response tracker.track_metrics_of_async = AsyncMock(return_value=mock_response) judge = Judge(judge_config_with_key, mock_runner) @@ -261,13 +261,13 @@ async def test_evaluate_handles_missing_reasoning( self, judge_config_with_key: AIJudgeConfig, tracker: LDAIConfigTracker, mock_runner ): """Evaluate should handle missing reasoning.""" - mock_response = StructuredResponse( - data={'score': 0.8}, - raw_response='{"score": 0.8}', - metrics=LDAIMetrics(success=True) + mock_response = RunnerResult( + content='', + metrics=LDAIMetrics(success=True), + parsed={'score': 0.8}, ) - mock_runner.invoke_structured_model.return_value = mock_response + mock_runner.run.return_value = mock_response tracker.track_metrics_of_async = AsyncMock(return_value=mock_response) judge = Judge(judge_config_with_key, mock_runner) @@ -283,7 +283,7 @@ async def test_evaluate_handles_exception( self, judge_config_with_key: AIJudgeConfig, tracker: LDAIConfigTracker, mock_runner ): """Evaluate should handle exceptions gracefully.""" - mock_runner.invoke_structured_model.side_effect = Exception("Provider error") + mock_runner.run.side_effect = 
Exception("Provider error") tracker.track_metrics_of_async = AsyncMock(side_effect=Exception("Provider error")) judge = Judge(judge_config_with_key, mock_runner) @@ -306,7 +306,7 @@ async def test_evaluate_respects_sampling_rate( assert isinstance(result, JudgeResult) assert result.sampled is False assert result.success is False - mock_runner.invoke_structured_model.assert_not_called() + mock_runner.run.assert_not_called() class TestJudgeEvaluateMessages: @@ -317,15 +317,13 @@ async def test_evaluate_messages_calls_evaluate( self, judge_config_with_key: AIJudgeConfig, tracker: LDAIConfigTracker, mock_runner ): """evaluate_messages should call evaluate with constructed input/output.""" - from ldai.providers.types import ModelResponse - - mock_response = StructuredResponse( - data={'score': 0.9, 'reasoning': 'Very relevant'}, - raw_response='{"score": 0.9, "reasoning": "..."}', - metrics=LDAIMetrics(success=True) + mock_response = RunnerResult( + content='', + metrics=LDAIMetrics(success=True), + parsed={'score': 0.9, 'reasoning': 'Very relevant'}, ) - mock_runner.invoke_structured_model.return_value = mock_response + mock_runner.run.return_value = mock_response tracker.track_metrics_of_async = AsyncMock(return_value=mock_response) judge = Judge(judge_config_with_key, mock_runner) @@ -334,9 +332,9 @@ async def test_evaluate_messages_calls_evaluate( LDMessage(role='user', content='Question 1'), LDMessage(role='assistant', content='Answer 1'), ] - chat_response = ModelResponse( - message=LDMessage(role='assistant', content='Answer 2'), - metrics=LDAIMetrics(success=True) + chat_response = RunnerResult( + content='Answer 2', + metrics=LDAIMetrics(success=True), ) result = await judge.evaluate_messages(messages, chat_response) From 56249a1204291123d7d181c275c75276340c7631 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:22:39 -0500 Subject: [PATCH 11/24] feat: Wire LDAIMetrics tool_calls and duration_ms into tracker --- packages/sdk/server-ai/tests/test_tracker.py | 108 +++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/packages/sdk/server-ai/tests/test_tracker.py b/packages/sdk/server-ai/tests/test_tracker.py index c2ae2dde..930ca633 100644 --- a/packages/sdk/server-ai/tests/test_tracker.py +++ b/packages/sdk/server-ai/tests/test_tracker.py @@ -909,3 +909,111 @@ def test_client_create_tracker_fails_on_invalid_json(): result = ai_client.create_tracker(bad_token, context) assert not result.is_success() assert "Invalid resumption token" in result.error + + +# --- PR 10: LDAIMetrics enrichment + tracker integration --- + + +def test_ldai_metrics_to_dict_includes_tool_calls_and_duration_ms(): + metrics = LDAIMetrics( + success=True, + usage=TokenUsage(total=10, input=4, output=6), + tool_calls=["search", "lookup"], + duration_ms=123, + ) + d = metrics.to_dict() + assert d["success"] is True + assert d["usage"] == {"total": 10, "input": 4, "output": 6} + assert d["toolCalls"] == ["search", "lookup"] + assert d["durationMs"] == 123 + + +def test_ldai_metrics_to_dict_omits_optional_fields_when_none(): + metrics = LDAIMetrics(success=False) + d = metrics.to_dict() + assert d == {"success": False} + + +def test_track_metrics_of_uses_metrics_duration_ms_when_set(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker( + ld_client=client, run_id="test-run-id", config_key="config-key", + variation_key="variation-key", version=3, model_name="m", + provider_name="p", context=context, + ) + + def fn(): + return "done" + + def extract(_r): + 
return LDAIMetrics(success=True, duration_ms=999) + + tracker.track_metrics_of(extract, fn) + assert tracker.get_summary().duration_ms == 999 + + +@pytest.mark.asyncio +async def test_track_metrics_of_async_uses_metrics_duration_ms_when_set(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker( + ld_client=client, run_id="test-run-id", config_key="config-key", + variation_key="variation-key", version=3, model_name="m", + provider_name="p", context=context, + ) + + async def fn(): + return "done" + + def extract(_r): + return LDAIMetrics(success=True, duration_ms=42) + + await tracker.track_metrics_of_async(extract, fn) + assert tracker.get_summary().duration_ms == 42 + + +def test_track_metrics_of_calls_track_tool_calls_when_present(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker( + ld_client=client, run_id="test-run-id", config_key="config-key", + variation_key="variation-key", version=3, model_name="m", + provider_name="p", context=context, + ) + + def fn(): + return "done" + + def extract(_r): + return LDAIMetrics(success=True, tool_calls=["foo", "bar"]) + + tracker.track_metrics_of(extract, fn) + summary = tracker.get_summary() + assert summary.tool_calls == ["foo", "bar"] + # One $ld:ai:tool_call event per tool key. + tool_call_events = [ + c for c in client.track.mock_calls # type: ignore + if c.args[0] == "$ld:ai:tool_call" + ] + assert len(tool_call_events) == 2 + + +def test_track_metrics_of_skips_track_tool_calls_when_absent(client: LDClient): + context = Context.create("user-key") + tracker = LDAIConfigTracker( + ld_client=client, run_id="test-run-id", config_key="config-key", + variation_key="variation-key", version=3, model_name="m", + provider_name="p", context=context, + ) + + def fn(): + return "done" + + def extract(_r): + return LDAIMetrics(success=True, usage=None) + + tracker.track_metrics_of(extract, fn) + assert tracker.get_summary().tool_calls is None + tool_call_events = [ + c for c in client.track.mock_calls # type: ignore + if c.args[0] == "$ld:ai:tool_call" + ] + assert tool_call_events == [] From 4d86c9cf71471e93221ee2865755f284e38c2d2e Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 30 Apr 2026 09:19:11 -0500 Subject: [PATCH 12/24] chore: remove stale PR-10 section comment from test_tracker.py --- packages/sdk/server-ai/tests/test_tracker.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/packages/sdk/server-ai/tests/test_tracker.py b/packages/sdk/server-ai/tests/test_tracker.py index 930ca633..4ed53441 100644 --- a/packages/sdk/server-ai/tests/test_tracker.py +++ b/packages/sdk/server-ai/tests/test_tracker.py @@ -911,9 +911,6 @@ def test_client_create_tracker_fails_on_invalid_json(): assert "Invalid resumption token" in result.error -# --- PR 10: LDAIMetrics enrichment + tracker integration --- - - def test_ldai_metrics_to_dict_includes_tool_calls_and_duration_ms(): metrics = LDAIMetrics( success=True, From cc792ecb1c4acb60b6c52edc3f059427a2ccae51 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 30 Apr 2026 09:41:23 -0500 Subject: [PATCH 13/24] refactor: type metrics_extractor as Callable[[Any], Optional[LDAIMetrics]], remove defensive getattr --- packages/sdk/server-ai/src/ldai/tracker.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py index df122a24..43c836bf 100644 --- a/packages/sdk/server-ai/src/ldai/tracker.py +++ 
b/packages/sdk/server-ai/src/ldai/tracker.py @@ -1,15 +1,20 @@ +from __future__ import annotations + import base64 import json import time import warnings from dataclasses import dataclass from enum import Enum -from typing import Any, Callable, Dict, Iterable, List, Optional +from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional from ldclient import Context, LDClient, Result from ldai import log +if TYPE_CHECKING: + from ldai.providers.types import LDAIMetrics + class FeedbackKind(Enum): """ @@ -282,7 +287,7 @@ def track_duration_of(self, func): def _track_from_metrics_extractor( self, result: Any, - metrics_extractor: Callable[[Any], Any], + metrics_extractor: Callable[[Any], Optional[LDAIMetrics]], elapsed_ms: int, ) -> None: metrics = None @@ -295,8 +300,7 @@ def _track_from_metrics_extractor( self.track_duration(elapsed_ms) return - reported_ms = getattr(metrics, 'duration_ms', None) - self.track_duration(reported_ms if reported_ms is not None else elapsed_ms) + self.track_duration(metrics.duration_ms if metrics.duration_ms is not None else elapsed_ms) if metrics.success: self.track_success() else: @@ -308,7 +312,7 @@ def _track_from_metrics_extractor( def track_metrics_of( self, - metrics_extractor: Callable[[Any], Any], + metrics_extractor: Callable[[Any], Optional[LDAIMetrics]], func: Callable[[], Any], ) -> Any: """ @@ -344,7 +348,11 @@ def track_metrics_of( self._track_from_metrics_extractor(result, metrics_extractor, elapsed_ms) return result - async def track_metrics_of_async(self, metrics_extractor, func): + async def track_metrics_of_async( + self, + metrics_extractor: Callable[[Any], Optional[LDAIMetrics]], + func: Callable[[], Any], + ) -> Any: """ Track metrics for an async AI operation (``func`` is awaited). From 194ad418f413cb13bcd4f82d330c9ae089d19163 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:12:56 -0500 Subject: [PATCH 14/24] feat: Update OpenAI runners to implement Runner protocol returning RunnerResult - OpenAIModelRunner.run() implements the unified Runner protocol; returns RunnerResult with content, metrics (LDAIMetrics), raw, and parsed fields. Structured output is supported via the output_type parameter. - OpenAIAgentRunner.run() updated to return RunnerResult; populates tool_calls in LDAIMetrics from observed openai-agents ToolCallItems. - The legacy invoke_model() and invoke_structured_model() entry points are removed; run() (backed by the private _run_completion()/_run_structured() helpers) replaces them, so callers migrate to RunnerResult instead of the old ModelResponse / StructuredResponse shapes.
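A usage sketch of the unified entry point (runner construction elided; the schema is illustrative):

    result = await runner.run('Hello')  # chat-completion path
    text = result.content

    schema = {
        'type': 'object',
        'properties': {'score': {'type': 'number'}},
        'required': ['score'],
        'additionalProperties': False,
    }
    structured = await runner.run('Rate this answer', output_type=schema)
    data = structured.parsed  # dict parsed from the JSON content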
Co-Authored-By: Claude Sonnet 4.6 --- .../src/ldai_openai/openai_agent_runner.py | 47 +++++-- .../src/ldai_openai/openai_model_runner.py | 127 +++++++++++------- .../tests/test_openai_provider.py | 68 +++++----- 3 files changed, 142 insertions(+), 100 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 7e79c836..6af5a57c 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -1,28 +1,30 @@ -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from ldai import log -from ldai.providers import AgentResult, AgentRunner, ToolRegistry +from ldai.providers import RunnerResult, ToolRegistry from ldai.providers.types import LDAIMetrics from ldai_openai.openai_helper import ( get_ai_usage_from_response, + get_tool_calls_from_run_items, registry_value_to_agent_tool, ) -class OpenAIAgentRunner(AgentRunner): +class OpenAIAgentRunner: """ CAUTION: This feature is experimental and should NOT be considered ready for production use. It may change or be removed without notice and is not subject to backwards compatibility guarantees. - AgentRunner implementation for OpenAI. + Runner implementation for a single OpenAI agent. Executes a single agent using the OpenAI Agents SDK (``openai-agents``). Tool calling and the agentic loop are handled internally by ``Runner.run``. - Returned by OpenAIRunnerFactory.create_agent(config, tools). + Returned by ``OpenAIRunnerFactory.create_agent(config, tools)``. + Implements the unified :class:`~ldai.providers.runner.Runner` protocol. Requires ``openai-agents`` to be installed. """ @@ -40,15 +42,22 @@ def __init__( self._tool_definitions = tool_definitions self._tools = tools - async def run(self, input: Any) -> AgentResult: + async def run( + self, + input: Any, + output_type: Optional[Dict[str, Any]] = None, + ) -> RunnerResult: """ - Run the agent with the given input string. + Run the agent with the given input. Delegates to the OpenAI Agents SDK ``Runner.run``, which handles the tool-calling loop internally. :param input: The user prompt or input to the agent - :return: AgentResult with output, raw response, and aggregated metrics + :param output_type: Reserved for future structured output support; + currently ignored. + :return: :class:`RunnerResult` with ``content``, ``raw`` response, and + metrics including aggregated token usage and observed ``tool_calls``. """ try: from agents import Agent, Runner @@ -57,7 +66,10 @@ async def run(self, input: Any) -> AgentResult: "openai-agents is required for OpenAIAgentRunner. 
" "Install it with: pip install openai-agents" ) - return AgentResult(output="", raw=None, metrics=LDAIMetrics(success=False, usage=None)) + return RunnerResult( + content="", + metrics=LDAIMetrics(success=False, usage=None), + ) try: agent_tools = self._build_agent_tools() @@ -73,17 +85,26 @@ async def run(self, input: Any) -> AgentResult: result = await Runner.run(agent, str(input), max_turns=25) - return AgentResult( - output=str(result.final_output), - raw=result, + tool_calls = [ + tool_name + for _agent_name, tool_name in get_tool_calls_from_run_items(result.new_items) + ] + + return RunnerResult( + content=str(result.final_output), metrics=LDAIMetrics( success=True, usage=get_ai_usage_from_response(result), + tool_calls=tool_calls if tool_calls else None, ), + raw=result, ) except Exception as error: log.warning(f"OpenAI agent run failed: {error}") - return AgentResult(output="", raw=None, metrics=LDAIMetrics(success=False, usage=None)) + return RunnerResult( + content="", + metrics=LDAIMetrics(success=False, usage=None), + ) def _build_agent_tools(self) -> List[Any]: """Build tool instances from LD tool definitions and registry.""" diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py index 9c4a34d8..34fcde2f 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py @@ -1,9 +1,8 @@ import json -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from ldai import LDMessage, log -from ldai.providers.model_runner import ModelRunner -from ldai.providers.types import LDAIMetrics, ModelResponse, StructuredResponse +from ldai.providers.types import LDAIMetrics, RunnerResult from openai import AsyncOpenAI from ldai_openai.openai_helper import ( @@ -12,12 +11,15 @@ ) -class OpenAIModelRunner(ModelRunner): +class OpenAIModelRunner: """ - ModelRunner implementation for OpenAI. + Runner implementation for OpenAI chat completions. Holds a fully-configured AsyncOpenAI client, model name, and parameters. - Returned by OpenAIConnector.create_model(config). + Returned by ``OpenAIRunnerFactory.create_model(config)``. + + Implements the unified :class:`~ldai.providers.runner.Runner` protocol via + :meth:`run`. """ def __init__( @@ -30,13 +32,38 @@ def __init__( self._model_name = model_name self._parameters = parameters - async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse: + async def run( + self, + input: Any, + output_type: Optional[Dict[str, Any]] = None, + ) -> RunnerResult: """ - Invoke the OpenAI model with an array of messages. - - :param messages: Array of LDMessage objects representing the conversation - :return: ModelResponse containing the model's response and metrics + Run the OpenAI model with the given input. + + :param input: A string prompt or a list of :class:`LDMessage` objects + :param output_type: Optional JSON schema dict requesting structured output. + When provided, ``parsed`` on the returned :class:`RunnerResult` is + populated with the parsed JSON document. + :return: :class:`RunnerResult` containing ``content``, ``metrics``, + ``raw`` and (when ``output_type`` is set) ``parsed``. 
""" + messages = self._coerce_input(input) + + if output_type is not None: + return await self._run_structured(messages, output_type) + return await self._run_completion(messages) + + @staticmethod + def _coerce_input(input: Any) -> List[LDMessage]: + if isinstance(input, str): + return [LDMessage(role='user', content=input)] + if isinstance(input, list): + return input + raise TypeError( + f"Unsupported input type for OpenAIModelRunner.run: {type(input).__name__}" + ) + + async def _run_completion(self, messages: List[LDMessage]) -> RunnerResult: try: response = await self._client.chat.completions.create( model=self._model_name, @@ -45,40 +72,29 @@ async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse: ) metrics = get_ai_metrics_from_response(response) - - content = '' - if response.choices and len(response.choices) > 0: - message = response.choices[0].message - if message and message.content: - content = message.content + content = self._extract_content(response) if not content: log.warning('OpenAI response has no content available') - metrics = LDAIMetrics(success=False, usage=metrics.usage) + return RunnerResult( + content='', + metrics=LDAIMetrics(success=False, usage=metrics.usage), + raw=response, + ) - return ModelResponse( - message=LDMessage(role='assistant', content=content), - metrics=metrics, - ) + return RunnerResult(content=content, metrics=metrics, raw=response) except Exception as error: log.warning(f'OpenAI model invocation failed: {error}') - return ModelResponse( - message=LDMessage(role='assistant', content=''), + return RunnerResult( + content='', metrics=LDAIMetrics(success=False, usage=None), ) - async def invoke_structured_model( + async def _run_structured( self, messages: List[LDMessage], - response_structure: Dict[str, Any], - ) -> StructuredResponse: - """ - Invoke the OpenAI model with structured output support. 
- - :param messages: Array of LDMessage objects representing the conversation - :param response_structure: Dictionary defining the JSON schema for output structure - :return: StructuredResponse containing the structured data - """ + output_type: Dict[str, Any], + ) -> RunnerResult: try: response = await self._client.chat.completions.create( model=self._model_name, @@ -87,7 +103,7 @@ async def invoke_structured_model( 'type': 'json_schema', 'json_schema': { 'name': 'structured_output', - 'schema': response_structure, + 'schema': output_type, 'strict': True, }, }, @@ -95,35 +111,42 @@ async def invoke_structured_model( ) metrics = get_ai_metrics_from_response(response) - - content = '' - if response.choices and len(response.choices) > 0: - message = response.choices[0].message - if message and message.content: - content = message.content + content = self._extract_content(response) if not content: log.warning('OpenAI structured response has no content available') - return StructuredResponse( - data={}, - raw_response='', + return RunnerResult( + content='', metrics=LDAIMetrics(success=False, usage=metrics.usage), + raw=response, ) try: - data = json.loads(content) - return StructuredResponse(data=data, raw_response=content, metrics=metrics) + parsed = json.loads(content) + return RunnerResult( + content=content, + metrics=metrics, + raw=response, + parsed=parsed, + ) except json.JSONDecodeError as parse_error: log.warning(f'OpenAI structured response contains invalid JSON: {parse_error}') - return StructuredResponse( - data={}, - raw_response=content, + return RunnerResult( + content=content, metrics=LDAIMetrics(success=False, usage=metrics.usage), + raw=response, ) except Exception as error: log.warning(f'OpenAI structured model invocation failed: {error}') - return StructuredResponse( - data={}, - raw_response='', + return RunnerResult( + content='', metrics=LDAIMetrics(success=False, usage=None), ) + + @staticmethod + def _extract_content(response: Any) -> str: + if response.choices and len(response.choices) > 0: + message = response.choices[0].message + if message and message.content: + return message.content + return '' diff --git a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py index 19d2cff7..3b69d3f6 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py +++ b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py @@ -120,8 +120,8 @@ def test_handles_partial_usage_data(self): assert result.usage.output == 0 -class TestInvokeModel: - """Tests for invoke_model instance method.""" +class TestRunCompletion: + """Tests for the unified run() method (chat-completion path).""" @pytest.fixture def mock_client(self): @@ -144,15 +144,14 @@ async def test_invokes_openai_chat_completions_and_returns_response(self, mock_c provider = OpenAIModelRunner(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] - result = await provider.invoke_model(messages) + result = await provider.run(messages) mock_client.chat.completions.create.assert_called_once_with( model='gpt-3.5-turbo', messages=[{'role': 'user', 'content': 'Hello!'}], ) - assert result.message.role == 'assistant' - assert result.message.content == 'Hello! How can I help you today?' + assert result.content == 'Hello! How can I help you today?' 
assert result.metrics.success is True assert result.metrics.usage is not None assert result.metrics.usage.total == 25 @@ -174,10 +173,9 @@ async def test_returns_unsuccessful_response_when_no_content(self, mock_client): provider = OpenAIModelRunner(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] - result = await provider.invoke_model(messages) + result = await provider.run(messages) - assert result.message.role == 'assistant' - assert result.message.content == '' + assert result.content == '' assert result.metrics.success is False @pytest.mark.asyncio @@ -193,10 +191,9 @@ async def test_returns_unsuccessful_response_when_choices_empty(self, mock_clien provider = OpenAIModelRunner(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] - result = await provider.invoke_model(messages) + result = await provider.run(messages) - assert result.message.role == 'assistant' - assert result.message.content == '' + assert result.content == '' assert result.metrics.success is False @pytest.mark.asyncio @@ -208,15 +205,14 @@ async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_cl provider = OpenAIModelRunner(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] - result = await provider.invoke_model(messages) + result = await provider.run(messages) - assert result.message.role == 'assistant' - assert result.message.content == '' + assert result.content == '' assert result.metrics.success is False -class TestInvokeStructuredModel: - """Tests for invoke_structured_model instance method.""" +class TestRunStructured: + """Tests for the unified run() method (structured-output path).""" @pytest.fixture def mock_client(self): @@ -249,10 +245,10 @@ async def test_invokes_openai_with_structured_output(self, mock_client): 'required': ['name', 'age', 'city'], } - result = await provider.invoke_structured_model(messages, response_structure) + result = await provider.run(messages, output_type=response_structure) - assert result.data == {'name': 'John', 'age': 30, 'city': 'New York'} - assert result.raw_response == '{"name": "John", "age": 30, "city": "New York"}' + assert result.parsed == {'name': 'John', 'age': 30, 'city': 'New York'} + assert result.content == '{"name": "John", "age": 30, "city": "New York"}' assert result.metrics.success is True assert result.metrics.usage is not None assert result.metrics.usage.total == 30 @@ -276,10 +272,10 @@ async def test_returns_unsuccessful_when_no_content_in_structured_response(self, messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} - result = await provider.invoke_structured_model(messages, response_structure) + result = await provider.run(messages, output_type=response_structure) - assert result.data == {} - assert result.raw_response == '' + assert result.parsed is None + assert result.content == '' assert result.metrics.success is False @pytest.mark.asyncio @@ -300,10 +296,10 @@ async def test_handles_json_parsing_errors(self, mock_client): messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} - result = await provider.invoke_structured_model(messages, response_structure) + result = await provider.run(messages, output_type=response_structure) - assert result.data == {} - assert result.raw_response == 'invalid json content' + assert result.parsed is None + assert result.content == 'invalid json content' assert 
result.metrics.success is False assert result.metrics.usage is not None assert result.metrics.usage.total == 15 @@ -319,10 +315,10 @@ async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_cl messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} - result = await provider.invoke_structured_model(messages, response_structure) + result = await provider.run(messages, output_type=response_structure) - assert result.data == {} - assert result.raw_response == '' + assert result.parsed is None + assert result.content == '' assert result.metrics.success is False @@ -465,19 +461,20 @@ def _make_run_result(self, output: str, total: int = 15, input_tokens: int = 10, @pytest.mark.asyncio async def test_runs_agent_and_returns_result_with_no_tool_calls(self): - """Should return AgentResult when Runner.run returns a final output.""" + """Should return RunnerResult when Runner.run returns a final output.""" import sys from ldai_openai import OpenAIAgentRunner mock_run_result = self._make_run_result("The answer is 42.", total=15, input_tokens=10, output_tokens=5) + mock_run_result.new_items = [] agents_mock, tc_mock = _make_agents_mock(AsyncMock(return_value=mock_run_result)) runner = OpenAIAgentRunner('gpt-4', {}, 'You are helpful.', [], {}) with patch.dict(sys.modules, {'agents': agents_mock, 'agents.tool_context': tc_mock}): result = await runner.run("What is the answer?") - assert result.output == "The answer is 42." + assert result.content == "The answer is 42." assert result.metrics.success is True assert result.metrics.usage is not None assert result.metrics.usage.total == 15 @@ -490,6 +487,7 @@ async def test_executes_tool_calls_and_returns_final_response(self): from ldai_openai import OpenAIAgentRunner mock_run_result = self._make_run_result("It is sunny in Paris.", total=43, input_tokens=30, output_tokens=13) + mock_run_result.new_items = [] agents_mock, tc_mock = _make_agents_mock(AsyncMock(return_value=mock_run_result)) weather_fn = MagicMock(return_value="Sunny, 25°C") @@ -501,13 +499,13 @@ async def test_executes_tool_calls_and_returns_final_response(self): with patch.dict(sys.modules, {'agents': agents_mock, 'agents.tool_context': tc_mock}): result = await runner.run("What is the weather in Paris?") - assert result.output == "It is sunny in Paris." + assert result.content == "It is sunny in Paris." 
assert result.metrics.success is True assert result.metrics.usage.total == 43 @pytest.mark.asyncio async def test_returns_failure_when_exception_thrown(self): - """Should return unsuccessful AgentResult when Runner.run raises.""" + """Should return unsuccessful RunnerResult when Runner.run raises.""" import sys from ldai_openai import OpenAIAgentRunner @@ -518,12 +516,12 @@ async def test_returns_failure_when_exception_thrown(self): with patch.dict(sys.modules, {'agents': agents_mock, 'agents.tool_context': tc_mock}): result = await runner.run("Hello") - assert result.output == "" + assert result.content == "" assert result.metrics.success is False @pytest.mark.asyncio async def test_returns_failure_when_openai_agents_not_installed(self): - """Should return unsuccessful AgentResult when openai-agents is not installed.""" + """Should return unsuccessful RunnerResult when openai-agents is not installed.""" import sys from ldai_openai import OpenAIAgentRunner @@ -532,5 +530,5 @@ async def test_returns_failure_when_openai_agents_not_installed(self): with patch.dict(sys.modules, {'agents': None}): result = await runner.run("Hello") - assert result.output == "" + assert result.content == "" assert result.metrics.success is False From 2878bda34f2f773732decd84aaec9dfa2d4298bc Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Thu, 30 Apr 2026 08:58:51 -0500 Subject: [PATCH 15/24] refactor: OpenAIModelRunner and OpenAIAgentRunner formally inherit Runner Co-Authored-By: Claude Sonnet 4.6 --- .../server-ai-openai/src/ldai_openai/openai_agent_runner.py | 3 ++- .../server-ai-openai/src/ldai_openai/openai_model_runner.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py index 6af5a57c..4f6866cc 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_runner.py @@ -2,6 +2,7 @@ from ldai import log from ldai.providers import RunnerResult, ToolRegistry +from ldai.providers.runner import Runner from ldai.providers.types import LDAIMetrics from ldai_openai.openai_helper import ( @@ -11,7 +12,7 @@ ) -class OpenAIAgentRunner: +class OpenAIAgentRunner(Runner): """ CAUTION: This feature is experimental and should NOT be considered ready for production use. diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py index 34fcde2f..75695ba0 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_model_runner.py @@ -2,6 +2,7 @@ from typing import Any, Dict, List, Optional from ldai import LDMessage, log +from ldai.providers.runner import Runner from ldai.providers.types import LDAIMetrics, RunnerResult from openai import AsyncOpenAI @@ -11,7 +12,7 @@ ) -class OpenAIModelRunner: +class OpenAIModelRunner(Runner): """ Runner implementation for OpenAI chat completions. 
From 00361883cbf479533f5d2f4a65ee9be5258d4627 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Tue, 28 Apr 2026 18:15:26 -0500
Subject: [PATCH 16/24] feat: Update LangChain runners to implement Runner
 protocol returning RunnerResult

- LangChainModelRunner.run() implements the unified Runner protocol; returns
  RunnerResult with content, metrics (LDAIMetrics), raw, and parsed fields.
  Structured output is supported via the output_type parameter.
- LangChainAgentRunner.run() updated to return RunnerResult; populates
  tool_calls in LDAIMetrics from observed tool_calls in message responses.
- Legacy invoke_model() and invoke_structured_model() are removed outright
  rather than retained as adapters; run() is now the single entry point, and
  callers that consumed ModelResponse / StructuredResponse should read
  content / parsed on the returned RunnerResult instead.

Co-Authored-By: Claude Sonnet 4.6
---
 .../ldai_langchain/langchain_agent_runner.py  |  52 +++++---
 .../ldai_langchain/langchain_model_runner.py  | 116 +++++++++++-------
 .../langgraph_agent_graph_runner.py           |   7 +-
 .../tests/test_langchain_provider.py          |  41 +++----
 4 files changed, 130 insertions(+), 86 deletions(-)

diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
index 1969ec75..7aef05dc 100644
--- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
+++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
@@ -1,65 +1,83 @@
-from typing import Any
+from typing import Any, Dict, List, Optional
 
 from ldai import log
-from ldai.providers import AgentResult, AgentRunner
-from ldai.providers.types import LDAIMetrics
+from ldai.providers.types import LDAIMetrics, RunnerResult
 
 from ldai_langchain.langchain_helper import (
     extract_last_message_content,
+    get_tool_calls_from_response,
     sum_token_usage_from_messages,
 )
 
 
-class LangChainAgentRunner(AgentRunner):
+class LangChainAgentRunner:
     """
     CAUTION: This feature is experimental and should NOT be considered ready for
     production use. It may change or be removed without notice and is not subject
     to backwards compatibility guarantees.
 
-    AgentRunner implementation for LangChain.
+    Runner implementation for a single LangChain agent.
 
     Wraps a compiled LangChain agent graph (from ``langchain.agents.create_agent``)
     and delegates execution to it. Tool calling and loop management are handled
     internally by the graph.
 
-    Returned by LangChainRunnerFactory.create_agent(config, tools).
+    Returned by ``LangChainRunnerFactory.create_agent(config, tools)``.
+
+    Implements the unified :class:`~ldai.providers.runner.Runner` protocol.
     """
 
     def __init__(self, agent: Any):
        self._agent = agent
 
-    async def run(self, input: Any) -> AgentResult:
+    async def run(
+        self,
+        input: Any,
+        output_type: Optional[Dict[str, Any]] = None,
+    ) -> RunnerResult:
        """
-        Run the agent with the given input string.
+        Run the agent with the given input.
 
        Delegates to the compiled LangChain agent, which handles the
        tool-calling loop internally.
 
        :param input: The user prompt or input to the agent
-        :return: AgentResult with output, raw response, and aggregated metrics
+        :param output_type: Reserved for future structured output support;
+            currently ignored.
+        :return: :class:`RunnerResult` with ``content``, ``raw`` response, and
+            metrics including aggregated token usage and observed ``tool_calls``.
""" try: result = await self._agent.ainvoke({ "messages": [{"role": "user", "content": str(input)}] }) - messages = result.get("messages", []) - output = extract_last_message_content(messages) - return AgentResult( - output=output, - raw=result, + messages: List[Any] = result.get("messages", []) + content = extract_last_message_content(messages) + tool_calls = self._extract_tool_calls(messages) + return RunnerResult( + content=content, metrics=LDAIMetrics( success=True, usage=sum_token_usage_from_messages(messages), + tool_calls=tool_calls if tool_calls else None, ), + raw=result, ) except Exception as error: log.warning(f"LangChain agent run failed: {error}") - return AgentResult( - output="", - raw=None, + return RunnerResult( + content="", metrics=LDAIMetrics(success=False, usage=None), ) + @staticmethod + def _extract_tool_calls(messages: List[Any]) -> List[str]: + """Collect tool call names from all messages in the agent output.""" + names: List[str] = [] + for msg in messages: + names.extend(get_tool_calls_from_response(msg)) + return names + def get_agent(self) -> Any: """Return the underlying compiled LangChain agent.""" return self._agent diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py index d504030b..576f0f4c 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py @@ -1,10 +1,9 @@ -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import BaseMessage from ldai import LDMessage, log -from ldai.providers.model_runner import ModelRunner -from ldai.providers.types import LDAIMetrics, ModelResponse, StructuredResponse +from ldai.providers.types import LDAIMetrics, RunnerResult from ldai_langchain.langchain_helper import ( convert_messages_to_langchain, @@ -13,12 +12,15 @@ ) -class LangChainModelRunner(ModelRunner): +class LangChainModelRunner: """ - ModelRunner implementation for LangChain. + Runner implementation for LangChain chat models. Holds a fully-configured BaseChatModel. - Returned by LangChainConnector.create_model(config). + Returned by ``LangChainRunnerFactory.create_model(config)``. + + Implements the unified :class:`~ldai.providers.runner.Runner` protocol via + :meth:`run`. """ def __init__(self, llm: BaseChatModel): @@ -32,13 +34,37 @@ def get_llm(self) -> BaseChatModel: """ return self._llm - async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse: + async def run( + self, + input: Any, + output_type: Optional[Dict[str, Any]] = None, + ) -> RunnerResult: """ - Invoke the LangChain model with an array of messages. - - :param messages: Array of LDMessage objects representing the conversation - :return: ModelResponse containing the model's response and metrics + Run the LangChain model with the given input. + + :param input: A string prompt or a list of :class:`LDMessage` objects + :param output_type: Optional JSON schema dict requesting structured output. + When provided, ``parsed`` on the returned :class:`RunnerResult` is + populated with the structured data. + :return: :class:`RunnerResult` containing ``content``, ``metrics``, + ``raw`` and (when ``output_type`` is set) ``parsed``. 
""" + messages = self._coerce_input(input) + if output_type is not None: + return await self._run_structured(messages, output_type) + return await self._run_completion(messages) + + @staticmethod + def _coerce_input(input: Any) -> List[LDMessage]: + if isinstance(input, str): + return [LDMessage(role='user', content=input)] + if isinstance(input, list): + return input + raise TypeError( + f"Unsupported input type for LangChainModelRunner.run: {type(input).__name__}" + ) + + async def _run_completion(self, messages: List[LDMessage]) -> RunnerResult: try: langchain_messages = convert_messages_to_langchain(messages) response: BaseMessage = await self._llm.ainvoke(langchain_messages) @@ -52,36 +78,23 @@ async def invoke_model(self, messages: List[LDMessage]) -> ModelResponse: f'Multimodal response not supported, expecting a string. ' f'Content type: {type(response.content)}, Content: {response.content}' ) - metrics = LDAIMetrics(success=False, usage=metrics.usage) + return RunnerResult( + content='', + metrics=LDAIMetrics(success=False, usage=metrics.usage), + raw=response, + ) - return ModelResponse( - message=LDMessage(role='assistant', content=content), - metrics=metrics, - ) + return RunnerResult(content=content, metrics=metrics, raw=response) except Exception as error: log.warning(f'LangChain model invocation failed: {error}') - return ModelResponse( - message=LDMessage(role='assistant', content=''), + return RunnerResult( + content='', metrics=LDAIMetrics(success=False, usage=None), ) - async def invoke_structured_model( - self, - messages: List[LDMessage], - response_structure: Dict[str, Any], - ) -> StructuredResponse: - """ - Invoke the LangChain model with structured output support. - - :param messages: Array of LDMessage objects representing the conversation - :param response_structure: Dictionary defining the output structure - :return: StructuredResponse containing the structured data - """ - structured_response = StructuredResponse( - data={}, - raw_response='', - metrics=LDAIMetrics(success=False, usage=None), - ) + async def _run_structured( + self, messages: List[LDMessage], response_structure: Dict[str, Any] + ) -> RunnerResult: try: langchain_messages = convert_messages_to_langchain(messages) structured_llm = self._llm.with_structured_output(response_structure, include_raw=True) @@ -89,21 +102,34 @@ async def invoke_structured_model( if not isinstance(response, dict): log.warning(f'Structured output did not return a dict. 
Got: {type(response)}') - return structured_response + return RunnerResult( + content='', + metrics=LDAIMetrics(success=False, usage=None), + ) raw_response = response.get('raw') - if raw_response is not None: - if hasattr(raw_response, 'content'): - structured_response.raw_response = raw_response.content - structured_response.metrics.usage = get_ai_usage_from_response(raw_response) + usage = get_ai_usage_from_response(raw_response) if raw_response is not None else None + raw_content = raw_response.content if raw_response is not None and hasattr(raw_response, 'content') else '' if response.get('parsing_error'): log.warning('LangChain structured model invocation had a parsing error') - return structured_response + return RunnerResult( + content=raw_content, + metrics=LDAIMetrics(success=False, usage=usage), + raw=raw_response, + ) - structured_response.metrics.success = True - structured_response.data = response.get('parsed') or {} - return structured_response + parsed = response.get('parsed') or {} + return RunnerResult( + content=raw_content, + metrics=LDAIMetrics(success=True, usage=usage), + raw=raw_response, + parsed=parsed, + ) except Exception as error: log.warning(f'LangChain structured model invocation failed: {error}') - return structured_response + return RunnerResult( + content='', + metrics=LDAIMetrics(success=False, usage=None), + ) + diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index 9ecb2351..15eee41f 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -329,8 +329,10 @@ async def run(self, input: Any) -> AgentGraphResult: messages = result.get('messages', []) output = extract_last_message_content(messages) - # Flush per-node metrics to LD trackers - all_eval_results = await handler.flush(self._graph, pending_eval_tasks) + # Flush per-node metrics to LD trackers; eval results are tracked + # internally and intentionally not exposed on AgentGraphResult here + # — judge dispatch is the managed layer's responsibility. 
+ await handler.flush(self._graph, pending_eval_tasks) tracker.track_path(handler.path) tracker.track_duration(duration) @@ -341,7 +343,6 @@ async def run(self, input: Any) -> AgentGraphResult: output=output, raw=result, metrics=LDAIMetrics(success=True), - evaluations=all_eval_results, ) except Exception as exc: diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py index 4018e7c3..9b8dd69d 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -219,8 +219,8 @@ def test_returns_provider_name_unchanged_for_unmapped_providers(self): assert map_provider('unknown') == 'unknown' -class TestInvokeModel: - """Tests for invoke_model instance method.""" +class TestRunCompletion: + """Tests for the unified run() method (chat-completion path).""" @pytest.fixture def mock_llm(self): @@ -235,10 +235,10 @@ async def test_returns_success_true_for_string_content(self, mock_llm): provider = LangChainModelRunner(mock_llm) messages = [LDMessage(role='user', content='Hello')] - result = await provider.invoke_model(messages) + result = await provider.run(messages) assert result.metrics.success is True - assert result.message.content == 'Test response' + assert result.content == 'Test response' @pytest.mark.asyncio async def test_returns_success_false_for_non_string_content_and_logs_warning(self, mock_llm): @@ -248,10 +248,10 @@ async def test_returns_success_false_for_non_string_content_and_logs_warning(sel provider = LangChainModelRunner(mock_llm) messages = [LDMessage(role='user', content='Hello')] - result = await provider.invoke_model(messages) + result = await provider.run(messages) assert result.metrics.success is False - assert result.message.content == '' + assert result.content == '' @pytest.mark.asyncio async def test_returns_success_false_when_model_invocation_throws_error(self, mock_llm): @@ -261,15 +261,14 @@ async def test_returns_success_false_when_model_invocation_throws_error(self, mo provider = LangChainModelRunner(mock_llm) messages = [LDMessage(role='user', content='Hello')] - result = await provider.invoke_model(messages) + result = await provider.run(messages) assert result.metrics.success is False - assert result.message.content == '' - assert result.message.role == 'assistant' + assert result.content == '' -class TestInvokeStructuredModel: - """Tests for invoke_structured_model instance method.""" +class TestRunStructured: + """Tests for the unified run() method (structured-output path).""" @pytest.fixture def mock_llm(self): @@ -288,10 +287,10 @@ async def test_returns_success_true_for_successful_invocation(self, mock_llm): messages = [LDMessage(role='user', content='Hello')] response_structure = {'type': 'object', 'properties': {}} - result = await provider.invoke_structured_model(messages, response_structure) + result = await provider.run(messages, output_type=response_structure) assert result.metrics.success is True - assert result.data == parsed_data + assert result.parsed == parsed_data @pytest.mark.asyncio async def test_returns_success_false_when_structured_model_invocation_throws_error(self, mock_llm): @@ -304,11 +303,11 @@ async def test_returns_success_false_when_structured_model_invocation_throws_err messages = [LDMessage(role='user', content='Hello')] response_structure = {'type': 'object', 'properties': {}} - result = await 
provider.invoke_structured_model(messages, response_structure)
+        result = await provider.run(messages, output_type=response_structure)
 
         assert result.metrics.success is False
-        assert result.data == {}
-        assert result.raw_response == ''
+        assert result.parsed is None
+        assert result.content == ''
         assert result.metrics.usage is None
 
 
@@ -464,7 +463,7 @@ class TestLangChainAgentRunner:
 
     @pytest.mark.asyncio
     async def test_runs_agent_and_returns_result(self):
-        """Should return AgentResult with the last message content from the graph."""
+        """Should return RunnerResult with the last message content from the graph."""
         from ldai_langchain import LangChainAgentRunner
 
         final_msg = AIMessage(content="The answer is 42.")
@@ -474,7 +473,7 @@ async def test_runs_agent_and_returns_result(self):
         runner = LangChainAgentRunner(mock_agent)
         result = await runner.run("What is the answer?")
 
-        assert result.output == "The answer is 42."
+        assert result.content == "The answer is 42."
         assert result.metrics.success is True
         mock_agent.ainvoke.assert_called_once_with(
             {"messages": [{"role": "user", "content": "What is the answer?"}]}
@@ -496,7 +495,7 @@ async def test_aggregates_token_usage_across_messages(self):
         runner = LangChainAgentRunner(mock_agent)
         result = await runner.run("Hello")
 
-        assert result.output == "final answer"
+        assert result.content == "final answer"
         assert result.metrics.success is True
         assert result.metrics.usage is not None
         assert result.metrics.usage.total == 30
@@ -505,7 +504,7 @@ async def test_aggregates_token_usage_across_messages(self):
 
     @pytest.mark.asyncio
     async def test_returns_failure_when_exception_thrown(self):
-        """Should return unsuccessful AgentResult when exception is thrown."""
+        """Should return unsuccessful RunnerResult when exception is thrown."""
         from ldai_langchain import LangChainAgentRunner
 
         mock_agent = MagicMock()
@@ -514,7 +513,7 @@ async def test_returns_failure_when_exception_thrown(self):
         runner = LangChainAgentRunner(mock_agent)
         result = await runner.run("Hello")
 
-        assert result.output == ""
+        assert result.content == ""
         assert result.metrics.success is False
 

From a2db8cb0fb38accee98b1b560eec72182e3f40cd Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Thu, 30 Apr 2026 09:00:33 -0500
Subject: [PATCH 17/24] refactor: LangChainModelRunner and LangChainAgentRunner
 formally inherit Runner

- LangChainModelRunner and LangChainAgentRunner now explicitly subclass
  Runner instead of only matching the protocol structurally
- LangChainAgentRunner: drops the interim tool_calls extraction introduced in
  the previous commit (get_tool_calls_from_response import and
  _extract_tool_calls helper removed)
- LangChainModelRunner: renames _run_structured's response_structure
  parameter to output_type and tightens raw content/usage extraction
- Tests: structured-output failure now asserts result.raw is None; test class
  docstrings simplified

Co-Authored-By: Claude Sonnet 4.6
---
 .../ldai_langchain/langchain_agent_runner.py  | 33 +++++++-------------
 .../ldai_langchain/langchain_model_runner.py  | 23 ++++++++-----
 .../tests/test_langchain_provider.py          |  6 ++--
 3 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
index 7aef05dc..8e3af61d 100644
--- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
+++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_agent_runner.py
@@ -1,30 +1,31 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, Optional
 
 from ldai import log
+from ldai.providers.runner import Runner
 from ldai.providers.types import LDAIMetrics, 
RunnerResult from ldai_langchain.langchain_helper import ( extract_last_message_content, - get_tool_calls_from_response, sum_token_usage_from_messages, ) -class LangChainAgentRunner: +class LangChainAgentRunner(Runner): """ CAUTION: This feature is experimental and should NOT be considered ready for production use. It may change or be removed without notice and is not subject to backwards compatibility guarantees. - Runner implementation for a single LangChain agent. + Runner implementation for LangChain agents. Wraps a compiled LangChain agent graph (from ``langchain.agents.create_agent``) and delegates execution to it. Tool calling and loop management are handled internally by the graph. - Returned by ``LangChainRunnerFactory.create_agent(config, tools)``. + Returned by LangChainRunnerFactory.create_agent(config, tools). - Implements the unified :class:`~ldai.providers.runner.Runner` protocol. + Implements the unified :class:`~ldai.providers.runner.Runner` protocol via + :meth:`run`. """ def __init__(self, agent: Any): @@ -36,7 +37,7 @@ async def run( output_type: Optional[Dict[str, Any]] = None, ) -> RunnerResult: """ - Run the agent with the given input. + Run the agent with the given input string. Delegates to the compiled LangChain agent, which handles the tool-calling loop internally. @@ -45,21 +46,19 @@ async def run( :param output_type: Reserved for future structured output support; currently ignored. :return: :class:`RunnerResult` with ``content``, ``raw`` response, and - metrics including aggregated token usage and observed ``tool_calls``. + aggregated metrics. """ try: result = await self._agent.ainvoke({ "messages": [{"role": "user", "content": str(input)}] }) - messages: List[Any] = result.get("messages", []) - content = extract_last_message_content(messages) - tool_calls = self._extract_tool_calls(messages) + messages = result.get("messages", []) + output = extract_last_message_content(messages) return RunnerResult( - content=content, + content=output, metrics=LDAIMetrics( success=True, usage=sum_token_usage_from_messages(messages), - tool_calls=tool_calls if tool_calls else None, ), raw=result, ) @@ -70,14 +69,6 @@ async def run( metrics=LDAIMetrics(success=False, usage=None), ) - @staticmethod - def _extract_tool_calls(messages: List[Any]) -> List[str]: - """Collect tool call names from all messages in the agent output.""" - names: List[str] = [] - for msg in messages: - names.extend(get_tool_calls_from_response(msg)) - return names - def get_agent(self) -> Any: """Return the underlying compiled LangChain agent.""" return self._agent diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py index 576f0f4c..213c072d 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_model_runner.py @@ -3,6 +3,7 @@ from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import BaseMessage from ldai import LDMessage, log +from ldai.providers.runner import Runner from ldai.providers.types import LDAIMetrics, RunnerResult from ldai_langchain.langchain_helper import ( @@ -12,12 +13,12 @@ ) -class LangChainModelRunner: +class LangChainModelRunner(Runner): """ Runner implementation for LangChain chat models. Holds a fully-configured BaseChatModel. 
- Returned by ``LangChainRunnerFactory.create_model(config)``. + Returned by LangChainRunnerFactory.create_model(config). Implements the unified :class:`~ldai.providers.runner.Runner` protocol via :meth:`run`. @@ -45,11 +46,12 @@ async def run( :param input: A string prompt or a list of :class:`LDMessage` objects :param output_type: Optional JSON schema dict requesting structured output. When provided, ``parsed`` on the returned :class:`RunnerResult` is - populated with the structured data. + populated with the parsed JSON document. :return: :class:`RunnerResult` containing ``content``, ``metrics``, ``raw`` and (when ``output_type`` is set) ``parsed``. """ messages = self._coerce_input(input) + if output_type is not None: return await self._run_structured(messages, output_type) return await self._run_completion(messages) @@ -93,11 +95,13 @@ async def _run_completion(self, messages: List[LDMessage]) -> RunnerResult: ) async def _run_structured( - self, messages: List[LDMessage], response_structure: Dict[str, Any] + self, + messages: List[LDMessage], + output_type: Dict[str, Any], ) -> RunnerResult: try: langchain_messages = convert_messages_to_langchain(messages) - structured_llm = self._llm.with_structured_output(response_structure, include_raw=True) + structured_llm = self._llm.with_structured_output(output_type, include_raw=True) response = await structured_llm.ainvoke(langchain_messages) if not isinstance(response, dict): @@ -108,8 +112,12 @@ async def _run_structured( ) raw_response = response.get('raw') - usage = get_ai_usage_from_response(raw_response) if raw_response is not None else None - raw_content = raw_response.content if raw_response is not None and hasattr(raw_response, 'content') else '' + usage = None + raw_content = '' + if raw_response is not None: + if hasattr(raw_response, 'content'): + raw_content = raw_response.content or '' + usage = get_ai_usage_from_response(raw_response) if response.get('parsing_error'): log.warning('LangChain structured model invocation had a parsing error') @@ -132,4 +140,3 @@ async def _run_structured( content='', metrics=LDAIMetrics(success=False, usage=None), ) - diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py index 9b8dd69d..a8fc46cf 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -220,7 +220,7 @@ def test_returns_provider_name_unchanged_for_unmapped_providers(self): class TestRunCompletion: - """Tests for the unified run() method (chat-completion path).""" + """Tests for run() without structured output.""" @pytest.fixture def mock_llm(self): @@ -268,7 +268,7 @@ async def test_returns_success_false_when_model_invocation_throws_error(self, mo class TestRunStructured: - """Tests for the unified run() method (structured-output path).""" + """Tests for run() with structured output.""" @pytest.fixture def mock_llm(self): @@ -307,7 +307,7 @@ async def test_returns_success_false_when_structured_model_invocation_throws_err assert result.metrics.success is False assert result.parsed is None - assert result.content == '' + assert result.raw is None assert result.metrics.usage is None From 8e60f797cc79348fc7818b0dd1c8125c4a3f6d7e Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:19:21 -0500 Subject: [PATCH 18/24] feat: Add ManagedGraphResult, GraphMetricSummary, and AgentGraphRunnerResult types - 
Add GraphMetrics dataclass (runner-layer return type for graph runs) - Add GraphMetricSummary dataclass (managed-layer metrics, analogous to LDAIMetricSummary for single-model invocations) - Add ManagedGraphResult dataclass (managed-layer return type from ManagedAgentGraph) - Add AgentGraphRunnerResult dataclass (future runner return type, no evaluations field) - ManagedAgentGraph.run() now returns ManagedGraphResult with GraphMetricSummary built from the runner's AgentGraphResult metrics - Export all new types from ldai package Co-Authored-By: Claude Sonnet 4.6 --- packages/sdk/server-ai/src/ldai/__init__.py | 8 ++ .../server-ai/src/ldai/managed_agent_graph.py | 40 +++++++--- .../server-ai/src/ldai/providers/__init__.py | 8 ++ .../sdk/server-ai/src/ldai/providers/types.py | 76 ++++++++++++++++++- packages/sdk/server-ai/src/ldai/tracker.py | 1 + .../tests/test_managed_agent_graph.py | 8 +- 6 files changed, 128 insertions(+), 13 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py index f02cee30..56d780d3 100644 --- a/packages/sdk/server-ai/src/ldai/__init__.py +++ b/packages/sdk/server-ai/src/ldai/__init__.py @@ -34,8 +34,12 @@ from ldai.providers import ( AgentGraphResult, AgentGraphRunner, + AgentGraphRunnerResult, AgentResult, AgentRunner, + GraphMetrics, + GraphMetricSummary, + ManagedGraphResult, ManagedResult, Runner, RunnerResult, @@ -51,6 +55,10 @@ 'AgentGraphRunner', 'AgentResult', 'AgentGraphResult', + 'AgentGraphRunnerResult', + 'GraphMetrics', + 'GraphMetricSummary', + 'ManagedGraphResult', 'ManagedResult', 'Runner', 'RunnerResult', diff --git a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py index a146e60e..50b3440e 100644 --- a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py +++ b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py @@ -1,17 +1,19 @@ """ManagedAgentGraph — LaunchDarkly managed wrapper for agent graph execution.""" -from typing import Any +import asyncio +from typing import Any, List from ldai.providers import AgentGraphResult, AgentGraphRunner +from ldai.providers.types import GraphMetricSummary, JudgeResult, ManagedGraphResult class ManagedAgentGraph: """ LaunchDarkly managed wrapper for AI agent graph execution. - Holds an AgentGraphRunner. Auto-tracking of path, - tool calls, handoffs, latency, and invocation success/failure is handled - by the runner implementation. + Holds an AgentGraphRunner. Wraps the runner result in a + :class:`~ldai.providers.types.ManagedGraphResult` and builds a + :class:`~ldai.providers.types.GraphMetricSummary` from the runner's metrics. Obtain an instance via ``LDAIClient.create_agent_graph()``. """ @@ -27,17 +29,37 @@ def __init__( """ self._runner = runner - async def run(self, input: Any) -> AgentGraphResult: + async def run(self, input: Any) -> ManagedGraphResult: """ Run the agent graph with the given input. - Delegates to the underlying AgentGraphRunner, which handles - execution and all auto-tracking internally. + Delegates to the underlying AgentGraphRunner, builds a + :class:`GraphMetricSummary` from the result, and wraps everything in a + :class:`ManagedGraphResult`. 
:param input: The input prompt or structured input for the graph - :return: AgentGraphResult containing the output, raw response, and metrics + :return: ManagedGraphResult containing the content, metric summary, raw response, + and an optional evaluations task (currently always ``None`` for graphs — + per-graph evaluations will be added in a future PR). """ - return await self._runner.run(input) + result: AgentGraphResult = await self._runner.run(input) + + # Build a GraphMetricSummary from the runner result's LDAIMetrics. + # path and node_metrics will be populated once graph runners are migrated + # to return AgentGraphRunnerResult with GraphMetrics (PR 11). + metrics = result.metrics + summary = GraphMetricSummary( + success=metrics.success, + usage=metrics.usage, + duration_ms=getattr(metrics, 'duration_ms', None), + ) + + return ManagedGraphResult( + content=result.output, + metrics=summary, + raw=result.raw, + evaluations=None, + ) def get_agent_graph_runner(self) -> AgentGraphRunner: """ diff --git a/packages/sdk/server-ai/src/ldai/providers/__init__.py b/packages/sdk/server-ai/src/ldai/providers/__init__.py index 6f472c69..22dce784 100644 --- a/packages/sdk/server-ai/src/ldai/providers/__init__.py +++ b/packages/sdk/server-ai/src/ldai/providers/__init__.py @@ -6,9 +6,13 @@ from ldai.providers.runner_factory import RunnerFactory from ldai.providers.types import ( AgentGraphResult, + AgentGraphRunnerResult, AgentResult, + GraphMetrics, + GraphMetricSummary, JudgeResult, LDAIMetrics, + ManagedGraphResult, ManagedResult, ModelResponse, RunnerResult, @@ -20,10 +24,14 @@ 'AIProvider', 'AgentGraphResult', 'AgentGraphRunner', + 'AgentGraphRunnerResult', 'AgentResult', 'AgentRunner', + 'GraphMetrics', + 'GraphMetricSummary', 'JudgeResult', 'LDAIMetrics', + 'ManagedGraphResult', 'ManagedResult', 'ModelResponse', 'ModelRunner', diff --git a/packages/sdk/server-ai/src/ldai/providers/types.py b/packages/sdk/server-ai/src/ldai/providers/types.py index f5224e0e..5bb8ce47 100644 --- a/packages/sdk/server-ai/src/ldai/providers/types.py +++ b/packages/sdk/server-ai/src/ldai/providers/types.py @@ -3,7 +3,7 @@ from __future__ import annotations import asyncio -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import Any, Callable, Dict, List, Optional from ldai.models import LDMessage @@ -114,6 +114,80 @@ class StructuredResponse: metrics: LDAIMetrics +@dataclass +class GraphMetrics: + """Contains raw metrics from a single agent graph run.""" + + success: bool + """Whether the graph run succeeded.""" + + path: List[str] = field(default_factory=list) + """Ordered list of node keys visited during the run.""" + + duration_ms: Optional[int] = None + """Wall-clock duration of the graph run in milliseconds.""" + + usage: Optional[TokenUsage] = None + """Optional aggregate token usage information across all nodes in the graph run.""" + + node_metrics: Dict[str, LDAIMetrics] = field(default_factory=dict) + """Per-node metrics keyed by node key.""" + + +@dataclass +class GraphMetricSummary: + """Contains a summary of metrics for an agent graph run.""" + + success: bool + """Whether the graph run succeeded.""" + + path: List[str] = field(default_factory=list) + """Ordered list of node keys visited during the run.""" + + duration_ms: Optional[int] = None + """Wall-clock duration of the graph run in milliseconds.""" + + usage: Optional[TokenUsage] = None + """Optional aggregate token usage information across all nodes in the graph run.""" + + node_metrics: Dict[str, 
LDAIMetrics] = field(default_factory=dict) + """Per-node metrics keyed by node key.""" + + resumption_token: Optional[str] = None + """Optional resumption token from the graph tracker for cross-process resumption.""" + + +@dataclass +class ManagedGraphResult: + """Contains the result of a managed agent graph run, including metrics and optional judge evaluations.""" + + content: str + """The graph's final output content.""" + + metrics: GraphMetricSummary + """Aggregated metric summary from the graph tracker for this run.""" + + raw: Optional[Any] = None + """Optional provider-native response object for advanced consumers.""" + + evaluations: Optional[asyncio.Task[List[JudgeResult]]] = None + """Optional asyncio Task that resolves to the list of :class:`JudgeResult` instances when awaited.""" + + +@dataclass +class AgentGraphRunnerResult: + """Contains the result of an agent graph runner invocation.""" + + content: str + """The graph's final output content.""" + + metrics: GraphMetrics + """Metrics from the graph run.""" + + raw: Optional[Any] = None + """Optional provider-native response object for advanced consumers.""" + + @dataclass class JudgeResult: """Contains the result of a single judge evaluation.""" diff --git a/packages/sdk/server-ai/src/ldai/tracker.py b/packages/sdk/server-ai/src/ldai/tracker.py index 43c836bf..31416649 100644 --- a/packages/sdk/server-ai/src/ldai/tracker.py +++ b/packages/sdk/server-ai/src/ldai/tracker.py @@ -442,6 +442,7 @@ def track_tool_calls(self, tool_calls: Iterable[str]) -> None: for tool_key in tool_calls_list: self.track_tool_call(tool_key) + def track_success(self) -> None: """ Track a successful AI generation. diff --git a/packages/sdk/server-ai/tests/test_managed_agent_graph.py b/packages/sdk/server-ai/tests/test_managed_agent_graph.py index 35be2766..9cdceaed 100644 --- a/packages/sdk/server-ai/tests/test_managed_agent_graph.py +++ b/packages/sdk/server-ai/tests/test_managed_agent_graph.py @@ -5,7 +5,7 @@ from ldclient import Config, Context, LDClient from ldclient.integrations.test_data import TestData -from ldai import LDAIClient, ManagedAgentGraph +from ldai import LDAIClient, ManagedAgentGraph, ManagedGraphResult from ldai.providers.types import LDAIMetrics from ldai.providers import AgentGraphResult, AgentGraphRunner, ToolRegistry @@ -31,7 +31,8 @@ async def test_managed_agent_graph_run_delegates_to_runner(): runner = StubAgentGraphRunner("hello world") managed = ManagedAgentGraph(runner) result = await managed.run("test input") - assert result.output == "hello world" + assert isinstance(result, ManagedGraphResult) + assert result.content == "hello world" assert result.metrics.success is True @@ -172,7 +173,8 @@ async def test_create_agent_graph_run_produces_result(ldai_client: LDAIClient): assert managed is not None result = await managed.run("find restaurants") - assert result.output == "final answer" + assert isinstance(result, ManagedGraphResult) + assert result.content == "final answer" assert result.metrics.success is True From f016b0d559fdff23bb59ad4b0f067705b851253c Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:31:06 -0500 Subject: [PATCH 19/24] =?UTF-8?q?feat:=20Graph=20tracking=20refactor=20?= =?UTF-8?q?=E2=80=94=20ManagedAgentGraph=20drives=20tracking=20for=20new?= =?UTF-8?q?=20runner=20shape?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ManagedAgentGraph.run() now detects the runner result type and dispatches accordingly: - AgentGraphRunnerResult (new shape): 
managed layer drives all graph-level tracking from result.metrics (path, duration, success/failure, total tokens) via the graph tracker. Node-level tracking from node_metrics will be wired once runners populate that field (PR 11-openai/langchain). - AgentGraphResult (legacy shape): tracking already occurred inside the runner; managed layer wraps result without additional tracking. ManagedAgentGraph now accepts an optional graph parameter (AgentGraphDefinition) used to create the graph tracker. LDAIClient.create_agent_graph() passes the resolved graph definition. This is a deliberate bridge pattern: the legacy detection branch will be removed once both runners are migrated. Co-Authored-By: Claude Sonnet 4.6 --- packages/sdk/server-ai/src/ldai/client.py | 2 +- .../server-ai/src/ldai/managed_agent_graph.py | 109 +++++++++++++++--- .../tests/test_managed_agent_graph.py | 78 ++++++++++++- 3 files changed, 167 insertions(+), 22 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index 448d5c55..eeff1f1b 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -815,7 +815,7 @@ async def create_agent_graph( if not runner: return None - return ManagedAgentGraph(runner) + return ManagedAgentGraph(runner, graph=graph) def agents( self, diff --git a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py index 50b3440e..94026973 100644 --- a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py +++ b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py @@ -1,19 +1,33 @@ """ManagedAgentGraph — LaunchDarkly managed wrapper for agent graph execution.""" -import asyncio -from typing import Any, List +from typing import Any, Optional from ldai.providers import AgentGraphResult, AgentGraphRunner -from ldai.providers.types import GraphMetricSummary, JudgeResult, ManagedGraphResult +from ldai.providers.types import ( + AgentGraphRunnerResult, + GraphMetricSummary, + LDAIMetrics, + ManagedGraphResult, +) class ManagedAgentGraph: """ LaunchDarkly managed wrapper for AI agent graph execution. - Holds an AgentGraphRunner. Wraps the runner result in a - :class:`~ldai.providers.types.ManagedGraphResult` and builds a - :class:`~ldai.providers.types.GraphMetricSummary` from the runner's metrics. + Holds an AgentGraphRunner and an optional AgentGraphDefinition. Wraps the + runner result in a :class:`~ldai.providers.types.ManagedGraphResult` and + builds a :class:`~ldai.providers.types.GraphMetricSummary` from the runner's + metrics. + + When the runner returns an :class:`~ldai.providers.types.AgentGraphRunnerResult` + (new shape), the managed layer drives all graph-level tracking from + ``result.metrics``. When the runner returns the legacy + :class:`~ldai.providers.AgentGraphResult`, tracking has already been performed + inside the runner; the managed layer simply wraps the result. This detection + branch exists as a deliberate bridge: once PR 11-openai and PR 11-langchain + migrate both runners to return ``AgentGraphRunnerResult``, the legacy branch + becomes dead code and will be removed in PR 11-langchain's final cleanup commit. Obtain an instance via ``LDAIClient.create_agent_graph()``. """ @@ -21,46 +35,105 @@ class ManagedAgentGraph: def __init__( self, runner: AgentGraphRunner, + graph: Optional[Any] = None, ): """ Initialize ManagedAgentGraph. 
:param runner: The AgentGraphRunner to delegate execution to + :param graph: Optional AgentGraphDefinition used to create the + graph-level tracker when the runner returns an + :class:`AgentGraphRunnerResult` (new shape). Not needed for + legacy runners that still return :class:`AgentGraphResult`. """ self._runner = runner + self._graph = graph async def run(self, input: Any) -> ManagedGraphResult: """ Run the agent graph with the given input. - Delegates to the underlying AgentGraphRunner, builds a - :class:`GraphMetricSummary` from the result, and wraps everything in a - :class:`ManagedGraphResult`. + Delegates to the underlying AgentGraphRunner. The returned type + determines which tracking path is taken: + + - :class:`AgentGraphRunnerResult` (new shape): the managed layer drives + graph-level tracking from ``result.metrics`` via the graph tracker. + Per-node tracking from ``result.metrics.node_metrics`` will be wired + in a follow-up commit once the runners populate ``node_metrics``. + - :class:`AgentGraphResult` (legacy shape): tracking already occurred + inside the runner; the managed layer wraps the result without + additional tracking. :param input: The input prompt or structured input for the graph - :return: ManagedGraphResult containing the content, metric summary, raw response, - and an optional evaluations task (currently always ``None`` for graphs — - per-graph evaluations will be added in a future PR). + :return: ManagedGraphResult containing the content, metric summary, + raw response, and an optional evaluations task (always ``None`` + for now — per-graph evaluations will be added in a future PR). """ - result: AgentGraphResult = await self._runner.run(input) + raw_result = await self._runner.run(input) + + if isinstance(raw_result, AgentGraphRunnerResult): + # New shape: managed layer drives all tracking. + summary = self._build_summary_from_runner_result(raw_result) + if self._graph is not None: + self._flush_graph_tracking(raw_result, self._graph.create_tracker()) + return ManagedGraphResult( + content=raw_result.content, + metrics=summary, + raw=raw_result.raw, + evaluations=None, + ) + # Legacy shape (AgentGraphResult): tracking already happened in the runner. # Build a GraphMetricSummary from the runner result's LDAIMetrics. # path and node_metrics will be populated once graph runners are migrated - # to return AgentGraphRunnerResult with GraphMetrics (PR 11). - metrics = result.metrics + # to return AgentGraphRunnerResult with GraphMetrics (PR 11-openai/langchain). + metrics: LDAIMetrics = raw_result.metrics summary = GraphMetricSummary( success=metrics.success, usage=metrics.usage, duration_ms=getattr(metrics, 'duration_ms', None), ) - return ManagedGraphResult( - content=result.output, + content=raw_result.output, metrics=summary, - raw=result.raw, + raw=raw_result.raw, evaluations=None, ) + def _build_summary_from_runner_result( + self, + result: AgentGraphRunnerResult, + ) -> GraphMetricSummary: + """Build a GraphMetricSummary from an AgentGraphRunnerResult.""" + m = result.metrics + return GraphMetricSummary( + success=m.success, + path=list(m.path), + duration_ms=m.duration_ms, + usage=m.usage, + node_metrics=dict(m.node_metrics), + ) + + def _flush_graph_tracking(self, result: AgentGraphRunnerResult, tracker: Any) -> None: + """ + Drive graph-level LaunchDarkly tracking events from runner result metrics. + + Called only when the runner returns the new ``AgentGraphRunnerResult`` + shape. 
Node-level tracking (from ``result.metrics.node_metrics``) will + be wired once the runners start populating that field. + """ + m = result.metrics + if m.path: + tracker.track_path(m.path) + if m.duration_ms is not None: + tracker.track_duration(m.duration_ms) + if m.success: + tracker.track_invocation_success() + else: + tracker.track_invocation_failure() + if m.usage is not None: + tracker.track_total_tokens(m.usage) + def get_agent_graph_runner(self) -> AgentGraphRunner: """ Return the underlying AgentGraphRunner for advanced use. diff --git a/packages/sdk/server-ai/tests/test_managed_agent_graph.py b/packages/sdk/server-ai/tests/test_managed_agent_graph.py index 9cdceaed..05b0ed27 100644 --- a/packages/sdk/server-ai/tests/test_managed_agent_graph.py +++ b/packages/sdk/server-ai/tests/test_managed_agent_graph.py @@ -6,13 +6,15 @@ from ldclient.integrations.test_data import TestData from ldai import LDAIClient, ManagedAgentGraph, ManagedGraphResult -from ldai.providers.types import LDAIMetrics +from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics, LDAIMetrics from ldai.providers import AgentGraphResult, AgentGraphRunner, ToolRegistry +from ldai.tracker import TokenUsage -# --- Test double --- +# --- Test doubles --- class StubAgentGraphRunner(AgentGraphRunner): + """Legacy runner that returns AgentGraphResult (old shape).""" def __init__(self, output: str = "stub output"): self._output = output @@ -24,10 +26,30 @@ async def run(self, input) -> AgentGraphResult: ) -# --- ManagedAgentGraph unit tests --- +class StubNewShapeRunner(AgentGraphRunner): + """New-shape runner that returns AgentGraphRunnerResult.""" + def __init__(self, content: str = "new shape output"): + self._content = content + + async def run(self, input) -> AgentGraphRunnerResult: + return AgentGraphRunnerResult( + content=self._content, + metrics=GraphMetrics( + success=True, + path=["root", "specialist"], + duration_ms=42, + usage=TokenUsage(total=10, input=5, output=5), + node_metrics={}, + ), + raw={"input": input}, + ) + + +# --- ManagedAgentGraph unit tests (legacy shape) --- @pytest.mark.asyncio async def test_managed_agent_graph_run_delegates_to_runner(): + """Legacy AgentGraphResult shape: content comes from output field.""" runner = StubAgentGraphRunner("hello world") managed = ManagedAgentGraph(runner) result = await managed.run("test input") @@ -42,6 +64,56 @@ def test_managed_agent_graph_get_runner(): assert managed.get_agent_graph_runner() is runner +# --- ManagedAgentGraph unit tests (new AgentGraphRunnerResult shape) --- + +@pytest.mark.asyncio +async def test_managed_agent_graph_run_handles_new_shape(): + """New AgentGraphRunnerResult shape: content and GraphMetrics are surfaced.""" + runner = StubNewShapeRunner("final answer") + mock_graph = MagicMock() + mock_tracker = MagicMock() + mock_graph.create_tracker = MagicMock(return_value=mock_tracker) + + managed = ManagedAgentGraph(runner, graph=mock_graph) + result = await managed.run("test input") + + assert isinstance(result, ManagedGraphResult) + assert result.content == "final answer" + assert result.metrics.success is True + assert result.metrics.path == ["root", "specialist"] + assert result.metrics.duration_ms == 42 + assert result.metrics.usage is not None + assert result.metrics.usage.total == 10 + + +@pytest.mark.asyncio +async def test_managed_agent_graph_new_shape_drives_tracking(): + """New shape: managed layer calls tracker methods from result.metrics.""" + runner = StubNewShapeRunner() + mock_graph = MagicMock() + 
mock_tracker = MagicMock() + mock_graph.create_tracker = MagicMock(return_value=mock_tracker) + + managed = ManagedAgentGraph(runner, graph=mock_graph) + await managed.run("test input") + + mock_tracker.track_path.assert_called_once_with(["root", "specialist"]) + mock_tracker.track_duration.assert_called_once_with(42) + mock_tracker.track_invocation_success.assert_called_once() + mock_tracker.track_total_tokens.assert_called_once() + + +@pytest.mark.asyncio +async def test_managed_agent_graph_new_shape_no_graph_skips_tracking(): + """New shape without graph: no tracking called (graph not available).""" + runner = StubNewShapeRunner() + managed = ManagedAgentGraph(runner, graph=None) + # Should not raise even without a graph reference + result = await managed.run("test input") + assert result.content == "new shape output" + assert result.metrics.success is True + + # --- LDAIClient.create_agent_graph() integration tests --- From 43bc87994c218da111b80c8318fb8858d7769c9d Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:35:54 -0500 Subject: [PATCH 20/24] feat: Update OpenAI graph runner to return AgentGraphRunnerResult with GraphMetrics Remove all direct LaunchDarkly tracker calls from OpenAIAgentGraphRunner. The runner now collects per-node metrics via _NodeMetricsAccumulator (a lightweight accumulator replacing the per-node LDAIConfigTracker) and returns AgentGraphRunnerResult with populated GraphMetrics (path, duration_ms, usage, node_metrics). Graph-level and per-node tracking events are emitted by ManagedAgentGraph._flush_graph_tracking() from the result. ManagedAgentGraph._flush_graph_tracking() is extended to also drive per-node tracking from result.metrics.node_metrics using the graph definition's node tracker factories. Integration tests in test_tracking_openai_agents.py are updated to run through the full ManagedAgentGraph pipeline (ManagedAgentGraph.run()) so tracking events are emitted by the managed layer as intended. 
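
For reference, a rough sketch of the flow these tests now exercise (names
like graph_def are illustrative placeholders, not shared fixtures):

    runner = OpenAIAgentGraphRunner(graph_def, {})
    managed = ManagedAgentGraph(runner, graph=graph_def)
    result = await managed.run("find restaurants")
    # ManagedAgentGraph builds the GraphMetricSummary from the runner's
    # GraphMetrics and emits track_path / track_duration /
    # track_invocation_success / track_total_tokens on the graph tracker
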
Co-Authored-By: Claude Sonnet 4.6 --- .../ldai_openai/openai_agent_graph_runner.py | 142 ++++++++++-------- .../tests/test_openai_agent_graph_runner.py | 67 ++++----- .../tests/test_tracking_openai_agents.py | 53 ++++--- .../server-ai/src/ldai/managed_agent_graph.py | 30 +++- 4 files changed, 168 insertions(+), 124 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py index 6d35328c..2a4a12a1 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_agent_graph_runner.py @@ -4,8 +4,8 @@ from ldai import log from ldai.agent_graph import AgentGraphDefinition, AgentGraphNode -from ldai.providers import AgentGraphResult, AgentGraphRunner, ToolRegistry -from ldai.providers.types import LDAIMetrics +from ldai.providers import AgentGraphRunner, ToolRegistry +from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics, LDAIMetrics from ldai.tracker import TokenUsage from ldai_openai.openai_helper import ( @@ -22,6 +22,34 @@ def _sanitize_agent_name(key: str) -> str: return re.sub(r'[^a-zA-Z0-9_]', '_', key) +class _NodeMetricsAccumulator: + """Mutable per-node metrics collected during a run (replaces LDAIConfigTracker).""" + + def __init__(self) -> None: + self.usage: Optional[TokenUsage] = None + self.duration_ms: Optional[int] = None + self.tool_calls: List[str] = [] + self.success: bool = True + + def set_usage(self, usage: Optional[TokenUsage]) -> None: + if usage is not None: + self.usage = usage + + def set_duration_ms(self, duration_ms: int) -> None: + self.duration_ms = duration_ms + + def add_tool_call(self, tool_name: str) -> None: + self.tool_calls.append(tool_name) + + def to_ldai_metrics(self) -> LDAIMetrics: + return LDAIMetrics( + success=self.success, + usage=self.usage, + duration_ms=self.duration_ms, + tool_calls=self.tool_calls if self.tool_calls else None, + ) + + class _RunState: """Mutable state shared across handoff and tool callbacks during a single run.""" @@ -39,9 +67,10 @@ class OpenAIAgentGraphRunner(AgentGraphRunner): AgentGraphRunner implementation for the OpenAI Agents SDK. - Runs the agent graph with the OpenAI Agents SDK and automatically records - graph- and node-level AI metric data to the LaunchDarkly trackers on the - graph definition and each node. + Runs the agent graph with the OpenAI Agents SDK and collects graph- and + node-level metrics. Tracking events are emitted by the managed layer + (:class:`~ldai.ManagedAgentGraph`) from the returned + :class:`~ldai.providers.types.AgentGraphRunnerResult`. Requires ``openai-agents`` to be installed. """ @@ -61,20 +90,19 @@ def __init__( self._tools = tools self._agent_name_map: Dict[str, str] = {} self._tool_name_map: Dict[str, str] = {} - self._node_trackers: Dict[str, Any] = {} + self._node_accumulators: Dict[str, _NodeMetricsAccumulator] = {} - async def run(self, input: Any) -> AgentGraphResult: + async def run(self, input: Any) -> AgentGraphRunnerResult: """ Run the agent graph with the given input. Builds the agent tree via reverse_traverse, then invokes the root - agent with Runner.run(). Tracks path, latency, and invocation - success/failure. + agent with Runner.run(). Collects path, latency, and per-node metrics. + Graph-level tracking events are emitted by the managed layer. 
:param input: The string prompt to send to the agent graph - :return: AgentGraphResult with the final output and metrics + :return: AgentGraphRunnerResult with the final content and GraphMetrics """ - tracker = self._graph.create_tracker() path: List[str] = [] root_node = self._graph.root() root_key = root_node.get_key() if root_node else '' @@ -86,24 +114,29 @@ async def run(self, input: Any) -> AgentGraphResult: state = _RunState(last_handoff_ns=start_ns, last_node_key=root_key) try: from agents import Runner - root_agent = self._build_agents(path, state, tracker) + root_agent = self._build_agents(path, state) result = await Runner.run(root_agent, input_str) self._flush_final_segment(state, result) - self._track_tool_calls(result) + self._collect_tool_calls(result) - duration = (time.perf_counter_ns() - start_ns) // 1_000_000 + duration_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 token_usage = get_ai_usage_from_response(result) - tracker.track_path(path) - tracker.track_duration(duration) - tracker.track_invocation_success() - if token_usage is not None: - tracker.track_total_tokens(token_usage) + node_metrics = { + key: acc.to_ldai_metrics() + for key, acc in self._node_accumulators.items() + } - return AgentGraphResult( - output=str(result.final_output), + return AgentGraphRunnerResult( + content=str(result.final_output), raw=result, - metrics=LDAIMetrics(success=True, usage=token_usage), + metrics=GraphMetrics( + success=True, + path=path, + duration_ms=duration_ms, + usage=token_usage, + node_metrics=node_metrics, + ), ) except Exception as exc: if isinstance(exc, ImportError): @@ -113,17 +146,19 @@ async def run(self, input: Any) -> AgentGraphResult: ) else: log.warning(f'OpenAIAgentGraphRunner run failed: {exc}') - duration = (time.perf_counter_ns() - start_ns) // 1_000_000 - tracker.track_duration(duration) - tracker.track_invocation_failure() - return AgentGraphResult( - output='', + duration_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 + return AgentGraphRunnerResult( + content='', raw=None, - metrics=LDAIMetrics(success=False), + metrics=GraphMetrics( + success=False, + path=path, + duration_ms=duration_ms, + ), ) def _build_agents( - self, path: List[str], state: _RunState, tracker: Any + self, path: List[str], state: _RunState ) -> Any: """ Build the agent tree from the graph definition via reverse_traverse. 
@@ -133,7 +168,6 @@ def _build_agents( :param path: Mutable list to accumulate the execution path :param state: Shared run state for tracking handoff timing and last node - :param tracker: Graph-level tracker shared across the entire run :return: The root Agent instance """ try: @@ -151,12 +185,12 @@ def _build_agents( name_map: Dict[str, str] = {} tool_name_map: Dict[str, str] = {} - node_trackers: Dict[str, Any] = {} + node_accumulators: Dict[str, _NodeMetricsAccumulator] = {} def build_node(node: AgentGraphNode, ctx: dict) -> Any: node_config = node.get_config() - config_tracker = node_config.create_tracker() - node_trackers[node_config.key] = config_tracker + acc = _NodeMetricsAccumulator() + node_accumulators[node_config.key] = acc model = node_config.model if not model: @@ -177,8 +211,7 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: node_config.key, target_key, path, - tracker, - config_tracker, + acc, state, ), ) @@ -212,7 +245,7 @@ def build_node(node: AgentGraphNode, ctx: dict) -> Any: root = self._graph.reverse_traverse(fn=build_node) self._agent_name_map = name_map self._tool_name_map = tool_name_map - self._node_trackers = node_trackers + self._node_accumulators = node_accumulators return root def _make_on_handoff( @@ -220,12 +253,11 @@ def _make_on_handoff( src: str, tgt: str, path: List[str], - tracker: Any, - config_tracker: Any, + acc: _NodeMetricsAccumulator, state: _RunState, ): def on_handoff(run_ctx: Any) -> None: - self._handle_handoff(run_ctx, src, tgt, path, tracker, config_tracker, state) + self._handle_handoff(run_ctx, src, tgt, path, acc, state) return on_handoff def _handle_handoff( @@ -234,13 +266,11 @@ def _handle_handoff( src: str, tgt: str, path: List[str], - tracker: Any, - config_tracker: Any, + acc: _NodeMetricsAccumulator, state: _RunState, ) -> None: path.append(tgt) state.last_node_key = tgt - tracker.track_handoff_success(src, tgt) now_ns = time.perf_counter_ns() duration_ms = (now_ns - state.last_handoff_ns) // 1_000_000 @@ -254,19 +284,15 @@ def _handle_handoff( except Exception: pass - if config_tracker is not None: - if usage is not None: - config_tracker.track_tokens(usage) - if duration_ms is not None: - config_tracker.track_duration(int(duration_ms)) - config_tracker.track_success() + acc.set_usage(usage) + acc.set_duration_ms(int(duration_ms)) def _flush_final_segment(self, state: _RunState, result: Any) -> None: """Record duration/tokens for the last active agent (no handoff after it).""" if not state.last_node_key: return - config_tracker = self._node_trackers.get(state.last_node_key) - if config_tracker is None: + acc = self._node_accumulators.get(state.last_node_key) + if acc is None: return now_ns = time.perf_counter_ns() @@ -280,18 +306,16 @@ def _flush_final_segment(self, state: _RunState, result: Any) -> None: except Exception: pass - if usage is not None: - config_tracker.track_tokens(usage) - config_tracker.track_duration(int(duration_ms)) - config_tracker.track_success() + acc.set_usage(usage) + acc.set_duration_ms(int(duration_ms)) - def _track_tool_calls(self, result: Any) -> None: - """Track all tool calls from the run result, attributed to the node that called them.""" + def _collect_tool_calls(self, result: Any) -> None: + """Collect all tool calls from the run result, attributed to the node that called them.""" for agent_name, tool_fn_name in get_tool_calls_from_run_items(result.new_items): agent_key = self._agent_name_map.get(agent_name, agent_name) tool_name = self._tool_name_map.get(tool_fn_name) if tool_name is 
None: continue - config_tracker = self._node_trackers.get(agent_key) - if config_tracker is not None: - config_tracker.track_tool_call(tool_name) + acc = self._node_accumulators.get(agent_key) + if acc is not None: + acc.add_tool_call(tool_name) diff --git a/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py b/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py index bba5a84d..b5470de8 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-openai/tests/test_openai_agent_graph_runner.py @@ -5,7 +5,8 @@ from ldai.agent_graph import AgentGraphDefinition from ldai.models import AIAgentGraphConfig, AIAgentConfig, Edge, ModelConfig, ProviderConfig -from ldai.providers import AgentGraphResult, ToolRegistry +from ldai.providers import ToolRegistry +from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner from ldai_openai.openai_runner_factory import OpenAIRunnerFactory from ldai.evaluator import Evaluator @@ -13,10 +14,8 @@ def _make_graph(enabled: bool = True) -> AgentGraphDefinition: """Build a minimal single-node AgentGraphDefinition for testing.""" - node_tracker = MagicMock() - graph_tracker = MagicMock() - node_factory = MagicMock(return_value=node_tracker) - graph_factory = MagicMock(return_value=graph_tracker) + node_factory = MagicMock() + graph_factory = MagicMock() root_config = AIAgentConfig( key='root-agent', enabled=enabled, @@ -73,41 +72,44 @@ def test_openai_agent_graph_runner_stores_graph_and_tools(): @pytest.mark.asyncio async def test_openai_agent_graph_runner_run_raises_when_agents_not_installed(): + """Import failure returns AgentGraphRunnerResult with success=False.""" graph = _make_graph() runner = OpenAIAgentGraphRunner(graph, {}) with patch.dict('sys.modules', {'agents': None}): - # The import inside run() will fail — runner should return failure result - # rather than propagate the ImportError, since it's caught by the except block result = await runner.run("test input") - assert isinstance(result, AgentGraphResult) + assert isinstance(result, AgentGraphRunnerResult) assert result.metrics.success is False @pytest.mark.asyncio -async def test_openai_agent_graph_runner_run_tracks_invocation_failure_on_exception(): +async def test_openai_agent_graph_runner_run_failure_returns_metrics(): + """On import failure, returned GraphMetrics has success=False (no tracker needed).""" graph = _make_graph() - tracker = graph.create_tracker.return_value runner = OpenAIAgentGraphRunner(graph, {}) with patch.dict('sys.modules', {'agents': None}): result = await runner.run("fail") + assert isinstance(result, AgentGraphRunnerResult) assert result.metrics.success is False - tracker.track_invocation_failure.assert_called_once() - tracker.track_duration.assert_called_once() + assert result.metrics.duration_ms is not None + # Runner no longer calls graph tracker — graph.create_tracker should NOT be called + graph.create_tracker.assert_not_called() @pytest.mark.asyncio async def test_openai_agent_graph_runner_run_success(): + """Successful run returns AgentGraphRunnerResult with populated GraphMetrics.""" graph = _make_graph() - tracker = graph.create_tracker.return_value mock_result = MagicMock() mock_result.final_output = "agent answer" - mock_result.context_wrapper.usage.total_tokens = 0 - mock_result.context_wrapper.usage.input_tokens = 0 - 
mock_result.context_wrapper.usage.output_tokens = 0 + mock_result.new_items = [] + mock_result.context_wrapper.usage.total_tokens = 10 + mock_result.context_wrapper.usage.input_tokens = 5 + mock_result.context_wrapper.usage.output_tokens = 5 + mock_result.context_wrapper.usage.request_usage_entries = [] mock_runner_module = MagicMock() mock_runner_module.run = AsyncMock(return_value=mock_result) @@ -135,28 +137,19 @@ async def test_openai_agent_graph_runner_run_success(): runner = OpenAIAgentGraphRunner(graph, {}) result = await runner.run("find restaurants") - assert isinstance(result, AgentGraphResult) - assert result.output == "agent answer" + assert isinstance(result, AgentGraphRunnerResult) + assert result.content == "agent answer" + assert isinstance(result.metrics, GraphMetrics) assert result.metrics.success is True - tracker.track_invocation_success.assert_called_once() - tracker.track_path.assert_called_once() - tracker.track_duration.assert_called_once() + assert result.metrics.duration_ms is not None + assert 'root-agent' in result.metrics.path - # The runner caches one tracker per node — verify it is the same instance - # returned by create_tracker() and that all tracking calls hit it. - node_factory = graph.get_node('root-agent').get_config().create_tracker - - # The runner caches one tracker per node — verify it is the same instance - # returned by create_tracker and that all tracking calls hit it. - cached = runner._node_trackers['root-agent'] - assert cached is node_factory.return_value - cached.track_duration.assert_called_once() - cached.track_tokens.assert_called_once() - cached.track_success.assert_called_once() + # Runner no longer creates or calls the graph tracker + graph.create_tracker.assert_not_called() - # Graph-level create_tracker is called exactly once per run (not twice) - # so that handoff callbacks and run() share the same tracker instance. - graph.create_tracker.assert_called_once() + # Runner no longer creates per-node LDAIConfigTracker instances + node_factory = graph.get_node('root-agent').get_config().create_tracker + node_factory.assert_not_called() - # Node-level create_tracker is called exactly once per node. - node_factory.assert_called_once() + # Runner accumulates per-node metrics in _node_accumulators + assert 'root-agent' in runner._node_accumulators diff --git a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py index 6d8cbc4d..d46c9b45 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py +++ b/packages/ai-providers/server-ai-openai/tests/test_tracking_openai_agents.py @@ -1,9 +1,13 @@ """ -Integration tests for OpenAIAgentGraphRunner tracking pipeline. +Integration tests for OpenAIAgentGraphRunner + ManagedAgentGraph tracking pipeline. Uses real AIGraphTracker and LDAIConfigTracker backed by a mock LD client, and a crafted RunResult to verify that the correct LD events are emitted with the correct payloads — without making real API calls. + +Tracking events are now emitted by ManagedAgentGraph._flush_graph_tracking() +from the GraphMetrics returned by the runner, rather than directly inside the +runner. These tests exercise the full pipeline through ManagedAgentGraph.run(). 
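+
+A minimal sketch of the pipeline under test (mirrors the _run_through_managed
+helper defined below):
+
+    managed = ManagedAgentGraph(runner, graph=graph)
+    result = await managed.run('hello')  # graph- and node-level events fire here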
""" import pytest @@ -11,6 +15,7 @@ from unittest.mock import AsyncMock, MagicMock, patch from ldai.agent_graph import AgentGraphDefinition +from ldai.managed_agent_graph import ManagedAgentGraph from ldai.models import AIAgentGraphConfig, AIAgentConfig, Edge, ModelConfig, ProviderConfig from ldai.tracker import AIGraphTracker, LDAIConfigTracker from ldai_openai.openai_agent_graph_runner import OpenAIAgentGraphRunner @@ -253,6 +258,12 @@ def _events(mock_ld_client: MagicMock) -> dict: return dict(result) +async def _run_through_managed(graph: AgentGraphDefinition, runner: OpenAIAgentGraphRunner, input: str): + """Run through the full ManagedAgentGraph pipeline so tracking events are emitted.""" + managed = ManagedAgentGraph(runner, graph=graph) + return await managed.run(input) + + # --------------------------------------------------------------------------- # Tests # --------------------------------------------------------------------------- @@ -266,10 +277,10 @@ async def test_tracks_graph_invocation_success_and_latency(): with patch.dict('sys.modules', _make_agents_modules(run_result)): runner = OpenAIAgentGraphRunner(graph, {}) - result = await runner.run('hello') + result = await _run_through_managed(graph, runner, 'hello') assert result.metrics.success is True - assert result.output == 'done' + assert result.content == 'done' ev = _events(mock_ld_client) assert ev['$ld:ai:graph:invocation_success'][0][1] == 1 @@ -279,7 +290,7 @@ async def test_tracks_graph_invocation_success_and_latency(): @pytest.mark.asyncio async def test_tracks_per_node_tokens_and_success(): - """Node-level token and success events fire with correct values.""" + """Node-level token and success events fire with correct values via managed layer.""" mock_ld_client = MagicMock() graph = _make_graph(mock_ld_client, node_key='root-agent', graph_key='test-graph') run_result = _make_run_result( @@ -291,11 +302,11 @@ async def test_tracks_per_node_tokens_and_success(): with patch.dict('sys.modules', _make_agents_modules(run_result)): runner = OpenAIAgentGraphRunner(graph, {}) - await runner.run('hello') + await _run_through_managed(graph, runner, 'hello') ev = _events(mock_ld_client) - # Node-level events + # Node-level events (emitted by ManagedAgentGraph from node_metrics) assert ev['$ld:ai:tokens:total'][0][1] == 30 assert ev['$ld:ai:tokens:input'][0][1] == 20 assert ev['$ld:ai:tokens:output'][0][1] == 10 @@ -314,7 +325,7 @@ async def test_tracks_graph_key_on_node_events(): with patch.dict('sys.modules', _make_agents_modules(run_result)): runner = OpenAIAgentGraphRunner(graph, {}) - await runner.run('hello') + await _run_through_managed(graph, runner, 'hello') ev = _events(mock_ld_client) token_data = ev['$ld:ai:tokens:total'][0][0] @@ -332,7 +343,7 @@ async def test_tracks_tool_calls_from_run_items(): with patch.dict('sys.modules', _make_agents_modules(run_result)): runner = OpenAIAgentGraphRunner(graph, _tool_registry('get_weather')) - await runner.run('What is the weather?') + await _run_through_managed(graph, runner, 'What is the weather?') ev = _events(mock_ld_client) tool_events = ev.get('$ld:ai:tool_call', []) @@ -356,7 +367,7 @@ async def test_tracks_multiple_tool_calls(): with patch.dict('sys.modules', _make_agents_modules(run_result)): runner = OpenAIAgentGraphRunner(graph, _tool_registry('search', 'summarize')) - await runner.run('Search and summarize.') + await _run_through_managed(graph, runner, 'Search and summarize.') ev = _events(mock_ld_client) tool_keys = [data['toolKey'] for data, _ in 
ev.get('$ld:ai:tool_call', [])] @@ -379,7 +390,7 @@ async def test_same_run_id_across_token_success_and_tool_call_events(): with patch.dict('sys.modules', _make_agents_modules(run_result)): runner = OpenAIAgentGraphRunner(graph, _tool_registry('search')) - await runner.run('go') + await _run_through_managed(graph, runner, 'go') ev = _events(mock_ld_client) @@ -408,7 +419,7 @@ async def test_does_not_track_tool_calls_without_graph_and_registry_config(): with patch.dict('sys.modules', _make_agents_modules(run_result)): runner = OpenAIAgentGraphRunner(graph, {}) - await runner.run('prompt') + await _run_through_managed(graph, runner, 'prompt') ev = _events(mock_ld_client) assert ev.get('$ld:ai:tool_call', []) == [] @@ -420,10 +431,10 @@ async def test_tracks_failure_and_latency_on_runner_error(): mock_ld_client = MagicMock() graph = _make_graph(mock_ld_client) - mock_runner = MagicMock() - mock_runner.run = AsyncMock(side_effect=RuntimeError('runner error')) + mock_runner_module = MagicMock() + mock_runner_module.run = AsyncMock(side_effect=RuntimeError('runner error')) mock_agents = MagicMock() - mock_agents.Runner = mock_runner + mock_agents.Runner = mock_runner_module mock_agents.Agent = MagicMock(return_value=MagicMock()) mock_agents.Handoff = MagicMock() mock_agents.Tool = MagicMock() @@ -439,7 +450,7 @@ async def test_tracks_failure_and_latency_on_runner_error(): 'agents.tool_context': MagicMock(), }): runner = OpenAIAgentGraphRunner(graph, {}) - result = await runner.run('fail') + result = await _run_through_managed(graph, runner, 'fail') assert result.metrics.success is False @@ -450,8 +461,8 @@ async def test_tracks_failure_and_latency_on_runner_error(): @pytest.mark.asyncio -async def test_multi_node_tracks_per_node_tokens_and_handoff(): - """Each node emits its own token events; handoff event fires between them.""" +async def test_multi_node_tracks_per_node_tokens(): + """Each node emits its own token events via the managed layer.""" mock_ld_client = MagicMock() graph = _make_two_node_graph(mock_ld_client) @@ -511,7 +522,7 @@ async def mock_run(agent, input_str, **kwargs): 'agents.tool_context': MagicMock(), }): runner = OpenAIAgentGraphRunner(graph, {}) - result = await runner.run('hello') + result = await _run_through_managed(graph, runner, 'hello') assert result.metrics.success is True @@ -527,9 +538,3 @@ async def mock_run(agent, input_str, **kwargs): path_data = ev['$ld:ai:graph:path'][0][0] assert 'root-agent' in path_data['path'] assert 'child-agent' in path_data['path'] - - # Handoff event fires with correct source and target - handoff_events = ev.get('$ld:ai:graph:handoff_success', []) - assert len(handoff_events) == 1 - assert handoff_events[0][0]['sourceKey'] == 'root-agent' - assert handoff_events[0][0]['targetKey'] == 'child-agent' diff --git a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py index 94026973..4b06b409 100644 --- a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py +++ b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py @@ -116,11 +116,14 @@ def _build_summary_from_runner_result( def _flush_graph_tracking(self, result: AgentGraphRunnerResult, tracker: Any) -> None: """ - Drive graph-level LaunchDarkly tracking events from runner result metrics. + Drive graph-level and per-node LaunchDarkly tracking events from runner result metrics. - Called only when the runner returns the new ``AgentGraphRunnerResult`` - shape. 
Node-level tracking (from ``result.metrics.node_metrics``) will - be wired once the runners start populating that field. + Called only when the runner returns the new ``AgentGraphRunnerResult`` shape. + + Graph-level events (path, duration, success/failure, total tokens) are always + emitted. Per-node events are emitted for each entry in + ``result.metrics.node_metrics`` when ``self._graph`` is available — the node + tracker is created via the node's ``AIAgentConfig.create_tracker()`` factory. """ m = result.metrics if m.path: @@ -134,6 +137,25 @@ def _flush_graph_tracking(self, result: AgentGraphRunnerResult, tracker: Any) -> if m.usage is not None: tracker.track_total_tokens(m.usage) + # Per-node tracking: flush LDAIMetrics for each node that the runner captured. + if self._graph is not None and m.node_metrics: + for node_key, node_metrics in m.node_metrics.items(): + node = self._graph.get_node(node_key) + if node is None: + continue + node_tracker = node.get_config().create_tracker() + if node_metrics.usage is not None: + node_tracker.track_tokens(node_metrics.usage) + if node_metrics.duration_ms is not None: + node_tracker.track_duration(node_metrics.duration_ms) + if node_metrics.tool_calls: + for tool_call in node_metrics.tool_calls: + node_tracker.track_tool_call(tool_call) + if node_metrics.success: + node_tracker.track_success() + else: + node_tracker.track_error() + def get_agent_graph_runner(self) -> AgentGraphRunner: """ Return the underlying AgentGraphRunner for advanced use. From 2c5671d8d77e788d5153d454a91dc34653ee3ff2 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:45:41 -0500 Subject: [PATCH 21/24] feat: Migrate LangGraph runner to AgentGraphRunnerResult; remove legacy shape detection Updates LangGraphAgentGraphRunner to return AgentGraphRunnerResult with GraphMetrics (success, path, duration_ms, usage, node_metrics) instead of the legacy AgentGraphResult. Adds collect_node_metrics() to LDMetricsCallbackHandler for pure data extraction. Removes the transitional AgentGraphResult detection branch from ManagedAgentGraph now that both the OpenAI and LangGraph runners return AgentGraphRunnerResult. All graph-level and per-node tracking events are driven exclusively by the managed layer. 
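
For illustration, the result shape a migrated runner now returns looks roughly
like this (field values are placeholders, not taken from a real run):

    return AgentGraphRunnerResult(
        content=output,
        raw=result,
        metrics=GraphMetrics(
            success=True,
            path=['root-agent', 'child-agent'],
            duration_ms=12,
            usage=total_usage,
            node_metrics=node_metrics,
        ),
    )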
Co-Authored-By: Claude Sonnet 4.6 --- .../langgraph_agent_graph_runner.py | 99 ++++++++----------- .../langgraph_callback_handler.py | 39 +++++++- .../test_langgraph_agent_graph_runner.py | 26 ++--- .../tests/test_tracking_langgraph.py | 40 ++++---- .../server-ai/src/ldai/managed_agent_graph.py | 65 +++--------- .../src/ldai/providers/agent_graph_runner.py | 8 +- .../tests/test_managed_agent_graph.py | 43 ++++---- 7 files changed, 154 insertions(+), 166 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py index 15eee41f..75843376 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_agent_graph_runner.py @@ -1,14 +1,12 @@ """LangGraph agent graph runner for LaunchDarkly AI SDK.""" -import asyncio import time -from contextvars import ContextVar from typing import Annotated, Any, Dict, List, Set, Tuple from ldai import log from ldai.agent_graph import AgentGraphDefinition, AgentGraphNode -from ldai.providers import AgentGraphResult, AgentGraphRunner, ToolRegistry -from ldai.providers.types import LDAIMetrics +from ldai.providers import AgentGraphRunner, ToolRegistry +from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics, LDAIMetrics from ldai_langchain.langchain_helper import ( build_structured_tools, @@ -18,9 +16,6 @@ ) from ldai_langchain.langgraph_callback_handler import LDMetricsCallbackHandler -# Per-run eval task accumulator, isolated per concurrent run() call via ContextVar. -_run_eval_tasks: ContextVar[Dict[str, List[asyncio.Task]]] = ContextVar('_run_eval_tasks') - def _make_handoff_tool(child_key: str, description: str) -> Any: """ @@ -65,9 +60,10 @@ class LangGraphAgentGraphRunner(AgentGraphRunner): AgentGraphRunner implementation for LangGraph. - Compiles and runs the agent graph with LangGraph and automatically records - graph- and node-level AI metric data to the LaunchDarkly trackers on the - graph definition and each node. + Compiles and runs the agent graph with LangGraph and collects graph- and + node-level metrics via a LangChain callback handler. Tracking events are + emitted by the managed layer (:class:`~ldai.ManagedAgentGraph`) from the + returned :class:`~ldai.providers.types.AgentGraphRunnerResult`. Requires ``langgraph`` to be installed. 
""" @@ -181,26 +177,6 @@ async def invoke(state: WorkflowState) -> dict: if node_instructions: msgs = [SystemMessage(content=node_instructions)] + msgs response = await bound_model.ainvoke(msgs) - - node_obj = self._graph.get_node(nk) - if node_obj is not None: - input_text = '\r\n'.join( - m.content if isinstance(m.content, str) else str(m.content) - for m in msgs - ) if msgs else '' - output_text = ( - response.content if hasattr(response, 'content') else str(response) - ) - task = node_obj.get_config().evaluator.evaluate(input_text, output_text) - run_tasks = _run_eval_tasks.get(None) - if run_tasks is not None: - run_tasks.setdefault(nk, []).append(task) - else: - log.warning( - f"LangGraphAgentGraphRunner: eval task for node '{nk}' " - "has no run context; judge results will not be tracked" - ) - return {'messages': [response]} invoke.__name__ = nk @@ -298,20 +274,18 @@ def route(state: WorkflowState) -> str: compiled = agent_builder.compile() return compiled, fn_name_to_config_key, node_keys - async def run(self, input: Any) -> AgentGraphResult: + async def run(self, input: Any) -> AgentGraphRunnerResult: """ Run the agent graph with the given input. Builds a LangGraph StateGraph from the AgentGraphDefinition, compiles it, and invokes it. Uses a LangChain callback handler to collect - per-node metrics, then flushes them to LaunchDarkly trackers. + per-node metrics. Graph-level tracking events are emitted by the + managed layer from the returned GraphMetrics. :param input: The string prompt to send to the agent graph - :return: AgentGraphResult with the final output and metrics + :return: AgentGraphRunnerResult with the final content and GraphMetrics """ - pending_eval_tasks: Dict[str, List[asyncio.Task]] = {} - token = _run_eval_tasks.set(pending_eval_tasks) - tracker = self._graph.create_tracker() start_ns = time.perf_counter_ns() try: @@ -325,24 +299,34 @@ async def run(self, input: Any) -> AgentGraphResult: config={'callbacks': [handler], 'recursion_limit': 25}, ) - duration = (time.perf_counter_ns() - start_ns) // 1_000_000 + duration_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 messages = result.get('messages', []) output = extract_last_message_content(messages) + total_usage = sum_token_usage_from_messages(messages) + + # Build per-node LDAIMetrics from callback handler data + node_metrics: Dict[str, LDAIMetrics] = {} + for node_key in handler.path: + usage = handler.node_tokens.get(node_key) + duration = handler.node_durations_ms.get(node_key) + tool_calls = handler.node_tool_calls.get(node_key) or [] + node_metrics[node_key] = LDAIMetrics( + success=True, + usage=usage, + duration_ms=duration, + tool_calls=tool_calls if tool_calls else None, + ) - # Flush per-node metrics to LD trackers; eval results are tracked - # internally and intentionally not exposed on AgentGraphResult here - # — judge dispatch is the managed layer's responsibility. 
- await handler.flush(self._graph, pending_eval_tasks) - - tracker.track_path(handler.path) - tracker.track_duration(duration) - tracker.track_invocation_success() - tracker.track_total_tokens(sum_token_usage_from_messages(messages)) - - return AgentGraphResult( - output=output, + return AgentGraphRunnerResult( + content=output, raw=result, - metrics=LDAIMetrics(success=True), + metrics=GraphMetrics( + success=True, + path=handler.path, + duration_ms=duration_ms, + usage=total_usage if (total_usage is not None and total_usage.total > 0) else None, + node_metrics=node_metrics, + ), ) except Exception as exc: @@ -353,13 +337,12 @@ async def run(self, input: Any) -> AgentGraphResult: ) else: log.warning(f'LangGraphAgentGraphRunner run failed: {exc}') - duration = (time.perf_counter_ns() - start_ns) // 1_000_000 - tracker.track_duration(duration) - tracker.track_invocation_failure() - return AgentGraphResult( - output='', + duration_ms = (time.perf_counter_ns() - start_ns) // 1_000_000 + return AgentGraphRunnerResult( + content='', raw=None, - metrics=LDAIMetrics(success=False), + metrics=GraphMetrics( + success=False, + duration_ms=duration_ms, + ), ) - finally: - _run_eval_tasks.reset(token) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_callback_handler.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_callback_handler.py index 183a3eb7..61aaab4d 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_callback_handler.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langgraph_callback_handler.py @@ -5,7 +5,7 @@ from langchain_core.callbacks import BaseCallbackHandler from langchain_core.outputs import ChatGeneration, LLMResult from ldai.agent_graph import AgentGraphDefinition -from ldai.providers.types import JudgeResult +from ldai.providers.types import JudgeResult, LDAIMetrics from ldai.tracker import TokenUsage from ldai_langchain.langchain_helper import get_ai_usage_from_response @@ -193,14 +193,19 @@ async def flush( self, graph: AgentGraphDefinition, eval_tasks=None ) -> List[JudgeResult]: """ - Emit all collected per-node metrics to the LaunchDarkly trackers. + Emit collected per-node metrics to LaunchDarkly trackers. - Call this once after the graph run completes. + .. deprecated:: + Per-node tracking is now driven by the managed layer + (:class:`ManagedAgentGraph`) from + :attr:`AgentGraphRunnerResult.metrics.node_metrics`. This method + is retained for tests and any external callers that still rely on + the original handler-driven tracking path; production code should + not call it. :param graph: The AgentGraphDefinition whose nodes hold the LD config trackers. :param eval_tasks: Optional dict mapping node key to a list of awaitables that - return judge evaluation results. Multiple tasks arise when a node is visited - more than once (e.g. in a graph with cycles). + return judge evaluation results. :return: All judge results collected across all nodes. """ node_trackers: Dict[str, Any] = {} @@ -240,3 +245,27 @@ async def flush( config_tracker.track_judge_result(r) return all_eval_results + + def collect_node_metrics(self) -> Dict[str, LDAIMetrics]: + """ + Build a per-node ``LDAIMetrics`` map from data collected during the run. + + Pure data extraction — no LaunchDarkly tracker events are emitted. + :class:`LangGraphAgentGraphRunner` uses this to populate + ``GraphMetrics.node_metrics`` so the managed layer can drive per-node + events. 
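+
+        A minimal sketch of the intended call site (mirroring the runner's
+        success path)::
+
+            node_metrics = handler.collect_node_metrics()
+            # e.g. {'root-agent': LDAIMetrics(success=True, ...)}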
+ + :return: Mapping of node key to its accumulated ``LDAIMetrics``. + """ + node_metrics: Dict[str, LDAIMetrics] = {} + for node_key in self._path: + if node_key in node_metrics: + continue + tool_calls = self._node_tool_calls.get(node_key, []) + node_metrics[node_key] = LDAIMetrics( + success=True, + usage=self._node_tokens.get(node_key), + tool_calls=list(tool_calls) if tool_calls else None, + duration_ms=self._node_duration_ms.get(node_key), + ) + return node_metrics diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langgraph_agent_graph_runner.py b/packages/ai-providers/server-ai-langchain/tests/test_langgraph_agent_graph_runner.py index 0a3ff6ca..02b40fba 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_langgraph_agent_graph_runner.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_langgraph_agent_graph_runner.py @@ -6,7 +6,8 @@ from ldai.agent_graph import AgentGraphDefinition from ldai.evaluator import Evaluator from ldai.models import AIAgentGraphConfig, AIAgentConfig, ModelConfig, ProviderConfig -from ldai.providers import AgentGraphResult, ToolRegistry +from ldai.providers import ToolRegistry +from ldai.providers.types import AgentGraphRunnerResult from ldai_langchain.langgraph_agent_graph_runner import LangGraphAgentGraphRunner from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory @@ -75,22 +76,22 @@ async def test_langgraph_runner_run_raises_when_langgraph_not_installed(): with patch.dict('sys.modules', {'langgraph': None, 'langgraph.graph': None}): result = await runner.run("test") - assert isinstance(result, AgentGraphResult) + assert isinstance(result, AgentGraphRunnerResult) assert result.metrics.success is False @pytest.mark.asyncio -async def test_langgraph_runner_run_tracks_failure_on_exception(): +async def test_langgraph_runner_run_returns_failure_on_exception(): + """Runner now returns AgentGraphRunnerResult; managed layer drives tracker events.""" graph = _make_graph() - tracker = graph.create_tracker() runner = LangGraphAgentGraphRunner(graph, {}) with patch.dict('sys.modules', {'langgraph': None, 'langgraph.graph': None}): result = await runner.run("fail") + assert isinstance(result, AgentGraphRunnerResult) assert result.metrics.success is False - tracker.track_invocation_failure.assert_called_once() - tracker.track_duration.assert_called_once() + assert result.metrics.duration_ms is not None @pytest.mark.asyncio @@ -147,9 +148,10 @@ async def test_langgraph_runner_run_success(): runner = LangGraphAgentGraphRunner(graph, {}) result = await runner.run("find restaurants") - assert isinstance(result, AgentGraphResult) - assert result.output == "langgraph answer" - assert result.metrics.success is True - tracker.track_path.assert_called_once_with([]) - tracker.track_invocation_success.assert_called_once() - tracker.track_duration.assert_called_once() + assert isinstance(result, AgentGraphRunnerResult) + assert result.metrics.duration_ms is not None + # Tracker events now fire from the managed layer (ManagedAgentGraph) using + # result.metrics; the runner no longer touches the graph tracker directly. 
+ tracker.track_path.assert_not_called() + tracker.track_invocation_success.assert_not_called() + tracker.track_duration.assert_not_called() diff --git a/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py index 3b45783d..6fc8eee7 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py @@ -11,11 +11,18 @@ from unittest.mock import AsyncMock, MagicMock, patch from ldai.agent_graph import AgentGraphDefinition +from ldai.managed_agent_graph import ManagedAgentGraph from ldai.models import AIAgentGraphConfig, AIAgentConfig, Edge, ModelConfig, ProviderConfig from ldai.tracker import AIGraphTracker, LDAIConfigTracker from ldai.evaluator import Evaluator from ldai_langchain.langgraph_agent_graph_runner import LangGraphAgentGraphRunner + +async def _run_through_managed(runner: LangGraphAgentGraphRunner, graph: AgentGraphDefinition, input: str): + """Run the runner through the managed layer so graph-level tracking events fire.""" + managed = ManagedAgentGraph(runner, graph=graph) + return await managed.run(input) + pytestmark = pytest.mark.skipif( pytest.importorskip('langgraph', reason='langgraph not installed') is None, reason='langgraph not installed', @@ -229,7 +236,7 @@ async def test_tracks_node_and_graph_tokens_on_success(): result = await runner.run("What's the weather?") assert result.metrics.success is True - assert result.output == 'Sunny.' + assert result.content == 'Sunny.' # Manually simulate what the callback handler would collect and flush # (mock models don't fire LangChain callbacks, so we test flush directly) @@ -259,12 +266,9 @@ async def test_tracks_node_and_graph_tokens_on_success(): assert ev2['$ld:ai:generation:success'][0][1] == 1 assert '$ld:ai:duration:total' in ev2 - # Graph-level events from the real run - ev = _events(mock_ld_client) - assert ev['$ld:ai:graph:total_tokens'][0][1] == 15 - assert ev['$ld:ai:graph:invocation_success'][0][1] == 1 - assert '$ld:ai:graph:duration:total' in ev - assert '$ld:ai:graph:path' in ev + # Graph-level events are now driven by ManagedAgentGraph from + # AgentGraphRunnerResult.metrics — see test_managed_agent_graph.py for the + # managed-layer flow. The runner itself no longer fires graph-level events. @pytest.mark.asyncio @@ -277,11 +281,11 @@ async def test_tracks_execution_path(): with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', return_value=_mock_model(fake_response)): runner = LangGraphAgentGraphRunner(graph, {}) - await runner.run('hello') + result = await runner.run('hello') - ev = _events(mock_ld_client) - path_data = ev['$ld:ai:graph:path'][0][0] - assert 'my-agent' in path_data['path'] + # Path now lives on AgentGraphRunnerResult.metrics.path; the runner no + # longer emits the $ld:ai:graph:path event directly (the managed layer does). 
+ assert 'my-agent' in result.metrics.path @pytest.mark.asyncio @@ -432,11 +436,9 @@ async def test_tracks_failure_and_latency_on_model_error(): result = await runner.run('fail') assert result.metrics.success is False - - ev = _events(mock_ld_client) - assert '$ld:ai:graph:invocation_failure' in ev - assert '$ld:ai:graph:duration:total' in ev - assert '$ld:ai:graph:invocation_success' not in ev + assert result.metrics.duration_ms is not None + # Graph-level events (invocation_failure, duration) are now driven by + # ManagedAgentGraph from result.metrics, not by the runner directly. @pytest.mark.asyncio @@ -461,7 +463,7 @@ def model_factory(node_config, **kwargs): with patch('ldai_langchain.langgraph_agent_graph_runner.create_langchain_model', side_effect=model_factory): runner = LangGraphAgentGraphRunner(graph, {}) - result = await runner.run('hello') + result = await _run_through_managed(runner, graph, 'hello') assert result.metrics.success is True @@ -624,7 +626,7 @@ def model_factory(node_config, **kwargs): result = await runner.run('hello') assert result.metrics.success is True - assert 'Agent A' in result.output + assert 'Agent A' in result.content # Agent B's model must never have been invoked — no fan-out agent_b_model.ainvoke.assert_not_called() @@ -752,7 +754,7 @@ def model_factory(node_config, **kwargs): result = await runner.run('Find info and route to the right agent.') assert result.metrics.success is True - assert 'Agent A' in result.output + assert 'Agent A' in result.content # Orchestrator must have been called twice: once before tool result, once after assert orchestrator_model.ainvoke.call_count == 2 # Agent B must never have been invoked diff --git a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py index 4b06b409..1e4a7d42 100644 --- a/packages/sdk/server-ai/src/ldai/managed_agent_graph.py +++ b/packages/sdk/server-ai/src/ldai/managed_agent_graph.py @@ -2,7 +2,7 @@ from typing import Any, Optional -from ldai.providers import AgentGraphResult, AgentGraphRunner +from ldai.providers import AgentGraphRunner from ldai.providers.types import ( AgentGraphRunnerResult, GraphMetricSummary, @@ -20,14 +20,9 @@ class ManagedAgentGraph: builds a :class:`~ldai.providers.types.GraphMetricSummary` from the runner's metrics. - When the runner returns an :class:`~ldai.providers.types.AgentGraphRunnerResult` - (new shape), the managed layer drives all graph-level tracking from - ``result.metrics``. When the runner returns the legacy - :class:`~ldai.providers.AgentGraphResult`, tracking has already been performed - inside the runner; the managed layer simply wraps the result. This detection - branch exists as a deliberate bridge: once PR 11-openai and PR 11-langchain - migrate both runners to return ``AgentGraphRunnerResult``, the legacy branch - becomes dead code and will be removed in PR 11-langchain's final cleanup commit. + The runner must return :class:`~ldai.providers.types.AgentGraphRunnerResult`. + Graph-level and per-node tracking events are emitted by this managed layer from + the returned :class:`~ldai.providers.types.GraphMetrics`. Obtain an instance via ``LDAIClient.create_agent_graph()``. """ @@ -42,9 +37,7 @@ def __init__( :param runner: The AgentGraphRunner to delegate execution to :param graph: Optional AgentGraphDefinition used to create the - graph-level tracker when the runner returns an - :class:`AgentGraphRunnerResult` (new shape). Not needed for - legacy runners that still return :class:`AgentGraphResult`. 
+ graph-level tracker and per-node trackers when flushing tracking events. """ self._runner = runner self._graph = graph @@ -53,48 +46,24 @@ async def run(self, input: Any) -> ManagedGraphResult: """ Run the agent graph with the given input. - Delegates to the underlying AgentGraphRunner. The returned type - determines which tracking path is taken: - - - :class:`AgentGraphRunnerResult` (new shape): the managed layer drives - graph-level tracking from ``result.metrics`` via the graph tracker. - Per-node tracking from ``result.metrics.node_metrics`` will be wired - in a follow-up commit once the runners populate ``node_metrics``. - - :class:`AgentGraphResult` (legacy shape): tracking already occurred - inside the runner; the managed layer wraps the result without - additional tracking. + Delegates to the underlying AgentGraphRunner, which must return an + :class:`AgentGraphRunnerResult` with populated :class:`GraphMetrics`. + The managed layer drives all graph-level and per-node tracking from + ``result.metrics`` and wraps everything in a :class:`ManagedGraphResult`. :param input: The input prompt or structured input for the graph :return: ManagedGraphResult containing the content, metric summary, raw response, and an optional evaluations task (always ``None`` for now — per-graph evaluations will be added in a future PR). """ - raw_result = await self._runner.run(input) - - if isinstance(raw_result, AgentGraphRunnerResult): - # New shape: managed layer drives all tracking. - summary = self._build_summary_from_runner_result(raw_result) - if self._graph is not None: - self._flush_graph_tracking(raw_result, self._graph.create_tracker()) - return ManagedGraphResult( - content=raw_result.content, - metrics=summary, - raw=raw_result.raw, - evaluations=None, - ) - - # Legacy shape (AgentGraphResult): tracking already happened in the runner. - # Build a GraphMetricSummary from the runner result's LDAIMetrics. - # path and node_metrics will be populated once graph runners are migrated - # to return AgentGraphRunnerResult with GraphMetrics (PR 11-openai/langchain). - metrics: LDAIMetrics = raw_result.metrics - summary = GraphMetricSummary( - success=metrics.success, - usage=metrics.usage, - duration_ms=getattr(metrics, 'duration_ms', None), - ) + raw_result: AgentGraphRunnerResult = await self._runner.run(input) + + summary = self._build_summary_from_runner_result(raw_result) + if self._graph is not None: + self._flush_graph_tracking(raw_result, self._graph.create_tracker()) + return ManagedGraphResult( - content=raw_result.output, + content=raw_result.content, metrics=summary, raw=raw_result.raw, evaluations=None, @@ -118,8 +87,6 @@ def _flush_graph_tracking(self, result: AgentGraphRunnerResult, tracker: Any) -> """ Drive graph-level and per-node LaunchDarkly tracking events from runner result metrics. - Called only when the runner returns the new ``AgentGraphRunnerResult`` shape. - Graph-level events (path, duration, success/failure, total tokens) are always emitted. 
Per-node events are emitted for each entry in ``result.metrics.node_metrics`` when ``self._graph`` is available — the node diff --git a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py index 6cc45670..e5af2ca2 100644 --- a/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py +++ b/packages/sdk/server-ai/src/ldai/providers/agent_graph_runner.py @@ -1,6 +1,6 @@ from typing import Any, Protocol, runtime_checkable -from ldai.providers.types import AgentGraphResult +from ldai.providers.types import AgentGraphRunnerResult @runtime_checkable @@ -18,11 +18,13 @@ class AgentGraphRunner(Protocol): the caller just passes input. """ - async def run(self, input: Any) -> AgentGraphResult: + async def run(self, input: Any) -> AgentGraphRunnerResult: """ Run the agent graph with the given input. :param input: The input to the agent graph (string prompt or structured input) - :return: AgentGraphResult containing the output, raw response, and metrics + :return: :class:`AgentGraphRunnerResult` containing content, raw response, + and :class:`GraphMetrics`. The managed layer drives all tracking + events from the returned metrics. """ ... diff --git a/packages/sdk/server-ai/tests/test_managed_agent_graph.py b/packages/sdk/server-ai/tests/test_managed_agent_graph.py index 05b0ed27..8b2e5b06 100644 --- a/packages/sdk/server-ai/tests/test_managed_agent_graph.py +++ b/packages/sdk/server-ai/tests/test_managed_agent_graph.py @@ -7,27 +7,32 @@ from ldai import LDAIClient, ManagedAgentGraph, ManagedGraphResult from ldai.providers.types import AgentGraphRunnerResult, GraphMetrics, LDAIMetrics -from ldai.providers import AgentGraphResult, AgentGraphRunner, ToolRegistry +from ldai.providers import AgentGraphRunner, ToolRegistry from ldai.tracker import TokenUsage -# --- Test doubles --- +# --- Test double --- class StubAgentGraphRunner(AgentGraphRunner): - """Legacy runner that returns AgentGraphResult (old shape).""" - def __init__(self, output: str = "stub output"): - self._output = output + """Runner that returns AgentGraphRunnerResult (new shape).""" + def __init__(self, content: str = "stub output"): + self._content = content - async def run(self, input) -> AgentGraphResult: - return AgentGraphResult( - output=self._output, + async def run(self, input) -> AgentGraphRunnerResult: + return AgentGraphRunnerResult( + content=self._content, + metrics=GraphMetrics( + success=True, + path=["root"], + duration_ms=10, + node_metrics={}, + ), raw={"input": input}, - metrics=LDAIMetrics(success=True), ) class StubNewShapeRunner(AgentGraphRunner): - """New-shape runner that returns AgentGraphRunnerResult.""" + """New-shape runner that returns AgentGraphRunnerResult with path and metrics.""" def __init__(self, content: str = "new shape output"): self._content = content @@ -45,11 +50,11 @@ async def run(self, input) -> AgentGraphRunnerResult: ) -# --- ManagedAgentGraph unit tests (legacy shape) --- +# --- ManagedAgentGraph unit tests --- @pytest.mark.asyncio async def test_managed_agent_graph_run_delegates_to_runner(): - """Legacy AgentGraphResult shape: content comes from output field.""" + """Runner returns AgentGraphRunnerResult: content comes from content field.""" runner = StubAgentGraphRunner("hello world") managed = ManagedAgentGraph(runner) result = await managed.run("test input") @@ -64,11 +69,9 @@ def test_managed_agent_graph_get_runner(): assert managed.get_agent_graph_runner() is runner -# --- ManagedAgentGraph unit tests (new 
AgentGraphRunnerResult shape) --- - @pytest.mark.asyncio -async def test_managed_agent_graph_run_handles_new_shape(): - """New AgentGraphRunnerResult shape: content and GraphMetrics are surfaced.""" +async def test_managed_agent_graph_run_surfaces_graph_metrics(): + """AgentGraphRunnerResult: content and GraphMetrics are surfaced.""" runner = StubNewShapeRunner("final answer") mock_graph = MagicMock() mock_tracker = MagicMock() @@ -87,8 +90,8 @@ async def test_managed_agent_graph_run_handles_new_shape(): @pytest.mark.asyncio -async def test_managed_agent_graph_new_shape_drives_tracking(): - """New shape: managed layer calls tracker methods from result.metrics.""" +async def test_managed_agent_graph_drives_tracking(): + """Managed layer calls tracker methods from result.metrics.""" runner = StubNewShapeRunner() mock_graph = MagicMock() mock_tracker = MagicMock() @@ -104,8 +107,8 @@ async def test_managed_agent_graph_new_shape_drives_tracking(): @pytest.mark.asyncio -async def test_managed_agent_graph_new_shape_no_graph_skips_tracking(): - """New shape without graph: no tracking called (graph not available).""" +async def test_managed_agent_graph_no_graph_skips_tracking(): + """Without graph: tracking is skipped (no graph tracker available).""" runner = StubNewShapeRunner() managed = ManagedAgentGraph(runner, graph=None) # Should not raise even without a graph reference From c44785b9d2feac134bdb333cfee095794827e661 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 28 Apr 2026 18:27:33 -0500 Subject: [PATCH 22/24] feat: Add evaluations support to ManagedAgent.run() Wire judge evaluations into ManagedAgent.run() via an asyncio.Task, mirroring ManagedModel.run(). Awaiting result.evaluations guarantees both evaluation and tracker.track_judge_result() complete. run() returns immediately; the evaluations task resolves asynchronously. Co-Authored-By: Claude Sonnet 4.6 --- .../sdk/server-ai/src/ldai/managed_agent.py | 37 +++- .../sdk/server-ai/tests/test_managed_agent.py | 208 ++++++++++++++++-- 2 files changed, 227 insertions(+), 18 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/managed_agent.py b/packages/sdk/server-ai/src/ldai/managed_agent.py index 9d582ae4..b780f5ab 100644 --- a/packages/sdk/server-ai/src/ldai/managed_agent.py +++ b/packages/sdk/server-ai/src/ldai/managed_agent.py @@ -1,15 +1,20 @@ """ManagedAgent — LaunchDarkly managed wrapper for agent invocations.""" +import asyncio +from typing import List + from ldai.models import AIAgentConfig from ldai.providers.runner import Runner -from ldai.providers.types import ManagedResult +from ldai.providers.types import JudgeResult, ManagedResult +from ldai.tracker import LDAIConfigTracker class ManagedAgent: """ LaunchDarkly managed wrapper for AI agent invocations. - Holds a Runner. Handles tracking automatically via ``create_tracker()``. + Holds a Runner. Handles tracking and judge evaluation + dispatch automatically via ``create_tracker()``. Obtain an instance via ``LDAIClient.create_agent()``. """ @@ -25,20 +30,46 @@ async def run(self, input: str) -> ManagedResult: """ Run the agent with the given input string. + Invokes the runner, tracks metrics, and dispatches judge evaluations + asynchronously. Returns immediately; awaiting ``result.evaluations`` + guarantees both evaluation and tracking complete. 
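+
+        A sketch of the expected call pattern (``agent`` is a ManagedAgent
+        obtained from ``LDAIClient.create_agent()``)::
+
+            result = await agent.run("summarize this document")
+            judge_results = await result.evaluations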
+ :param input: The user prompt or input to the agent - :return: ManagedResult containing the agent's output and metric summary + :return: ManagedResult containing the agent's output, metric summary, + and an optional evaluations task """ tracker = self._ai_config.create_tracker() result = await tracker.track_metrics_of_async( lambda r: r.metrics, lambda: self._agent_runner.run(input), ) + + evaluations_task = self._track_judge_results(tracker, input, result.content) + return ManagedResult( content=result.content, metrics=tracker.get_summary(), raw=result.raw, + evaluations=evaluations_task, ) + def _track_judge_results( + self, + tracker: LDAIConfigTracker, + input_text: str, + output_text: str, + ) -> asyncio.Task[List[JudgeResult]]: + evaluator_task = self._ai_config.evaluator.evaluate(input_text, output_text) + + async def _run_and_track(eval_task: asyncio.Task) -> List[JudgeResult]: + results = await eval_task + for r in results: + if r.success: + tracker.track_judge_result(r) + return results + + return asyncio.create_task(_run_and_track(evaluator_task)) + def get_agent_runner(self) -> Runner: """ Return the underlying runner for advanced use. diff --git a/packages/sdk/server-ai/tests/test_managed_agent.py b/packages/sdk/server-ai/tests/test_managed_agent.py index 0c30637a..2e4f5a63 100644 --- a/packages/sdk/server-ai/tests/test_managed_agent.py +++ b/packages/sdk/server-ai/tests/test_managed_agent.py @@ -1,13 +1,16 @@ """Tests for ManagedAgent.""" +import asyncio import pytest +from typing import List from unittest.mock import AsyncMock, MagicMock from ldai import LDAIClient, ManagedAgent +from ldai.evaluator import Evaluator from ldai.managed_agent import ManagedAgent from ldai.models import AIAgentConfig, AIAgentConfigDefault, ModelConfig, ProviderConfig -from ldai.providers.types import LDAIMetrics, ManagedResult, RunnerResult -from ldai.tracker import LDAIMetricSummary +from ldai.providers.types import JudgeResult, LDAIMetrics, ManagedResult, RunnerResult +from ldai.tracker import LDAIConfigTracker, LDAIMetricSummary from ldclient import Config, Context, LDClient from ldclient.integrations.test_data import TestData @@ -19,6 +22,23 @@ def _make_summary(success: bool = True) -> LDAIMetricSummary: return summary +def _make_noop_evaluator_config() -> MagicMock: + """Build a minimal mock AIAgentConfig with a noop evaluator and a mock tracker.""" + mock_config = MagicMock(spec=AIAgentConfig) + mock_tracker = MagicMock(spec=LDAIConfigTracker) + mock_tracker.track_metrics_of_async = AsyncMock( + return_value=RunnerResult( + content="Test response", + raw=None, + metrics=LDAIMetrics(success=True, usage=None), + ) + ) + mock_tracker.get_summary = MagicMock(return_value=_make_summary(True)) + mock_config.create_tracker = MagicMock(return_value=mock_tracker) + mock_config.evaluator = Evaluator.noop() + return mock_config + + @pytest.fixture def td() -> TestData: td = TestData.data_source() @@ -60,17 +80,7 @@ class TestManagedAgentRun: @pytest.mark.asyncio async def test_run_delegates_to_agent_runner(self): """Should delegate run() to the underlying AgentRunner and return ManagedResult.""" - mock_config = MagicMock(spec=AIAgentConfig) - mock_tracker = MagicMock() - mock_tracker.track_metrics_of_async = AsyncMock( - return_value=RunnerResult( - content="Test response", - metrics=LDAIMetrics(success=True, usage=None), - raw=None, - ) - ) - mock_tracker.get_summary = MagicMock(return_value=_make_summary(True)) - mock_config.create_tracker = MagicMock(return_value=mock_tracker) + mock_config = 
_make_noop_evaluator_config() mock_runner = MagicMock() mock_runner.run = AsyncMock( return_value=RunnerResult( @@ -87,13 +97,16 @@ async def test_run_delegates_to_agent_runner(self): assert result.content == "Test response" assert result.metrics.success is True mock_config.create_tracker.assert_called_once() - mock_tracker.track_metrics_of_async.assert_called_once() + mock_config.create_tracker.return_value.track_metrics_of_async.assert_called_once() + # evaluations should be present (from noop evaluator) + if result.evaluations is not None: + await result.evaluations @pytest.mark.asyncio async def test_run_uses_create_tracker_for_fresh_tracker(self): """Should use create_tracker() factory for a fresh tracker per invocation.""" mock_config = MagicMock(spec=AIAgentConfig) - fresh_tracker = MagicMock() + fresh_tracker = MagicMock(spec=LDAIConfigTracker) fresh_tracker.track_metrics_of_async = AsyncMock( return_value=RunnerResult( content="Fresh tracker response", @@ -103,6 +116,7 @@ async def test_run_uses_create_tracker_for_fresh_tracker(self): ) fresh_tracker.get_summary = MagicMock(return_value=_make_summary(True)) mock_config.create_tracker = MagicMock(return_value=fresh_tracker) + mock_config.evaluator = Evaluator.noop() mock_runner = MagicMock() @@ -113,6 +127,8 @@ async def test_run_uses_create_tracker_for_fresh_tracker(self): assert result.content == "Fresh tracker response" mock_config.create_tracker.assert_called_once() fresh_tracker.track_metrics_of_async.assert_called_once() + if result.evaluations is not None: + await result.evaluations def test_get_agent_runner_returns_runner(self): """Should return the underlying AgentRunner.""" @@ -129,6 +145,168 @@ def test_get_config_returns_config(self): assert agent.get_config() is mock_config +class TestManagedAgentEvaluations: + """Tests for ManagedAgent evaluations chain (PR 12).""" + + @pytest.mark.asyncio + async def test_run_returns_before_evaluations_resolve(self): + """run() should return before evaluations complete.""" + barrier = asyncio.Event() + + async def _slow_evaluate(input_text: str, output_text: str) -> List[JudgeResult]: + await barrier.wait() + return [] + + mock_evaluator = MagicMock(spec=Evaluator) + mock_evaluator.evaluate = MagicMock( + side_effect=lambda i, o: asyncio.create_task(_slow_evaluate(i, o)) + ) + + mock_config = MagicMock(spec=AIAgentConfig) + mock_tracker = MagicMock(spec=LDAIConfigTracker) + mock_tracker.track_metrics_of_async = AsyncMock( + return_value=RunnerResult(content="resp", raw=None, metrics=LDAIMetrics(success=True)) + ) + mock_tracker.get_summary = MagicMock(return_value=_make_summary(True)) + mock_config.create_tracker = MagicMock(return_value=mock_tracker) + mock_config.evaluator = mock_evaluator + + mock_runner = MagicMock() + agent = ManagedAgent(mock_config, mock_runner) + result = await agent.run("Hello") + + assert result is not None + assert result.evaluations is not None + assert not result.evaluations.done(), "evaluations task should still be pending" + + barrier.set() + await result.evaluations + + @pytest.mark.asyncio + async def test_await_evaluations_collects_results(self): + """await result.evaluations should return the list of JudgeResult instances.""" + judge_result = JudgeResult( + judge_config_key='judge-key', + success=True, + sampled=True, + metric_key='$ld:ai:judge:relevance', + score=0.9, + reasoning='Good agent response', + ) + + async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult]: + return [judge_result] + + mock_evaluator = 
MagicMock(spec=Evaluator) + mock_evaluator.evaluate = MagicMock( + side_effect=lambda i, o: asyncio.create_task(_evaluate_coro(i, o)) + ) + + mock_config = MagicMock(spec=AIAgentConfig) + mock_tracker = MagicMock(spec=LDAIConfigTracker) + mock_tracker.track_metrics_of_async = AsyncMock( + return_value=RunnerResult(content="resp", raw=None, metrics=LDAIMetrics(success=True)) + ) + mock_tracker.get_summary = MagicMock(return_value=_make_summary(True)) + mock_tracker.track_judge_result = MagicMock() + mock_config.create_tracker = MagicMock(return_value=mock_tracker) + mock_config.evaluator = mock_evaluator + + mock_runner = MagicMock() + agent = ManagedAgent(mock_config, mock_runner) + result = await agent.run("Hello") + + results = await result.evaluations # type: ignore[misc] + assert results == [judge_result] + + @pytest.mark.asyncio + async def test_tracking_fires_inside_awaited_chain(self): + """tracker.track_judge_result() must be called when evaluations are awaited.""" + judge_result = JudgeResult( + judge_config_key='agent-judge', + success=True, + sampled=True, + metric_key='$ld:ai:judge:relevance', + score=0.85, + ) + + async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult]: + return [judge_result] + + mock_evaluator = MagicMock(spec=Evaluator) + mock_evaluator.evaluate = MagicMock( + side_effect=lambda i, o: asyncio.create_task(_evaluate_coro(i, o)) + ) + + mock_config = MagicMock(spec=AIAgentConfig) + mock_tracker = MagicMock(spec=LDAIConfigTracker) + mock_tracker.track_metrics_of_async = AsyncMock( + return_value=RunnerResult(content="resp", raw=None, metrics=LDAIMetrics(success=True)) + ) + mock_tracker.get_summary = MagicMock(return_value=_make_summary(True)) + mock_tracker.track_judge_result = MagicMock() + mock_config.create_tracker = MagicMock(return_value=mock_tracker) + mock_config.evaluator = mock_evaluator + + mock_runner = MagicMock() + agent = ManagedAgent(mock_config, mock_runner) + result = await agent.run("Hello") + + # Tracking should NOT have fired yet (before we await evaluations) + mock_tracker.track_judge_result.assert_not_called() + + # Now await the evaluations task — tracking fires inside the chain + await result.evaluations # type: ignore[misc] + + mock_tracker.track_judge_result.assert_called_once_with(judge_result) + + @pytest.mark.asyncio + async def test_noop_evaluator_returns_empty_list(self): + """With a noop evaluator, awaiting evaluations should return an empty list.""" + mock_config = _make_noop_evaluator_config() + mock_runner = MagicMock() + agent = ManagedAgent(mock_config, mock_runner) + result = await agent.run("Hello") + + results = await result.evaluations # type: ignore[misc] + assert results == [] + + @pytest.mark.asyncio + async def test_tracking_not_called_for_failed_judge_result(self): + """tracker.track_judge_result() should NOT be called for unsuccessful judge results.""" + failed_result = JudgeResult( + success=False, + sampled=True, + metric_key='$ld:ai:judge:relevance', + error_message='Judge evaluation failed', + ) + + async def _evaluate_coro(input_text: str, output_text: str) -> List[JudgeResult]: + return [failed_result] + + mock_evaluator = MagicMock(spec=Evaluator) + mock_evaluator.evaluate = MagicMock( + side_effect=lambda i, o: asyncio.create_task(_evaluate_coro(i, o)) + ) + + mock_config = MagicMock(spec=AIAgentConfig) + mock_tracker = MagicMock(spec=LDAIConfigTracker) + mock_tracker.track_metrics_of_async = AsyncMock( + return_value=RunnerResult(content="resp", raw=None, 
metrics=LDAIMetrics(success=True))
+        )
+        mock_tracker.get_summary = MagicMock(return_value=_make_summary(True))
+        mock_tracker.track_judge_result = MagicMock()
+        mock_config.create_tracker = MagicMock(return_value=mock_tracker)
+        mock_config.evaluator = mock_evaluator
+
+        mock_runner = MagicMock()
+        agent = ManagedAgent(mock_config, mock_runner)
+        result = await agent.run("Hello")
+        await result.evaluations  # type: ignore[misc]
+
+        mock_tracker.track_judge_result.assert_not_called()
+
+
 class TestLDAIClientCreateAgent:
     """Tests for LDAIClient.create_agent."""


From fb562e968333f627b85c6e606b2a42ada1e25efc Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Wed, 29 Apr 2026 08:23:22 -0500
Subject: [PATCH 23/24] fix: Isolate tracking failures and log failed judge
 evaluations in agent

Mirror the managed_model.py fix in managed_agent.py: wrap
tracker.track_judge_result() in try/except so a tracking failure does not
destroy successfully computed evaluation results, and log a warning when a
judge evaluation fails (r.success is False) so failures are visible rather
than silently skipped.

---
 packages/sdk/server-ai/src/ldai/managed_agent.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/packages/sdk/server-ai/src/ldai/managed_agent.py b/packages/sdk/server-ai/src/ldai/managed_agent.py
index b780f5ab..27e0826e 100644
--- a/packages/sdk/server-ai/src/ldai/managed_agent.py
+++ b/packages/sdk/server-ai/src/ldai/managed_agent.py
@@ -3,6 +3,7 @@
 import asyncio
 from typing import List
 
+from ldai import log
 from ldai.models import AIAgentConfig
 from ldai.providers.runner import Runner
 from ldai.providers.types import JudgeResult, ManagedResult
@@ -65,7 +66,12 @@ async def _run_and_track(eval_task: asyncio.Task) -> List[JudgeResult]:
             results = await eval_task
             for r in results:
                 if r.success:
-                    tracker.track_judge_result(r)
+                    try:
+                        tracker.track_judge_result(r)
+                    except Exception:
+                        pass
+                else:
+                    log.warning("Judge evaluation failed: %s", r.error_message)
             return results
 
         return asyncio.create_task(_run_and_track(evaluator_task))

From 9f9c880c506579155be0ae6443145cbe07793321 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Wed, 29 Apr 2026 08:59:53 -0500
Subject: [PATCH 24/24] fix: log warning when judge result tracking fails in
 ManagedAgent

Co-Authored-By: Claude Sonnet 4.6
---
 packages/sdk/server-ai/src/ldai/managed_agent.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packages/sdk/server-ai/src/ldai/managed_agent.py b/packages/sdk/server-ai/src/ldai/managed_agent.py
index 27e0826e..02fff012 100644
--- a/packages/sdk/server-ai/src/ldai/managed_agent.py
+++ b/packages/sdk/server-ai/src/ldai/managed_agent.py
@@ -68,8 +68,8 @@ async def _run_and_track(eval_task: asyncio.Task) -> List[JudgeResult]:
             if r.success:
                 try:
                     tracker.track_judge_result(r)
-                except Exception:
-                    pass
+                except Exception as exc:
+                    log.warning("Failed to track judge result: %s", exc)
             else:
                 log.warning("Judge evaluation failed: %s", r.error_message)