Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions docs/models/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,8 @@ You can integrate other LLM providers with these built-in paths:
2. [`ModelProvider`][agents.models.interface.ModelProvider] is at the `Runner.run` level. This lets you say "use a custom model provider for all agents in this run". See a configurable example in [examples/model_providers/custom_example_provider.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_provider.py).
3. [`Agent.model`][agents.agent.Agent.model] lets you specify the model on a specific Agent instance. This enables you to mix and match different providers for different agents. See a configurable example in [examples/model_providers/custom_example_agent.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_agent.py).

For a concrete OpenAI-compatible provider example, see [examples/model_providers/qianfan_provider.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/qianfan_provider.py), which applies the same built-in pattern to Baidu Qianfan using `AsyncOpenAI`, `set_default_openai_client`, and the Chat Completions API path.

In cases where you do not have an API key from `platform.openai.com`, we recommend disabling tracing via `set_tracing_disabled()`, or setting up a [different tracing processor](../tracing.md).

``` python
Expand Down
8 changes: 8 additions & 0 deletions examples/model_providers/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,11 @@ Direct-model examples let you override the target model:
uv run examples/model_providers/any_llm_provider.py --model openrouter/openai/gpt-5.4-mini
uv run examples/model_providers/litellm_provider.py --model openrouter/openai/gpt-5.4-mini
```

For a built-in OpenAI-compatible provider example using Baidu Qianfan:

```bash
export QIANFAN_API_KEY="..."
export QIANFAN_MODEL="ernie-5.0"
uv run examples/model_providers/qianfan_provider.py
```
72 changes: 72 additions & 0 deletions examples/model_providers/qianfan_provider.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
from __future__ import annotations

import asyncio
import os
from dataclasses import dataclass

from openai import AsyncOpenAI

from agents import (
Agent,
Runner,
function_tool,
set_default_openai_api,
set_default_openai_client,
set_tracing_disabled,
)

# Qianfan's OpenAI-compatible endpoint (v2 API); override via QIANFAN_BASE_URL.
DEFAULT_QIANFAN_BASE_URL = "https://qianfan.baidubce.com/v2"
# Model name used when QIANFAN_MODEL is not set.
DEFAULT_QIANFAN_MODEL = "ernie-5.0"


@dataclass(frozen=True)
class QianfanExampleSettings:
    """Immutable bundle of the three values needed to talk to Qianfan.

    Populated from environment variables by :func:`load_settings`.
    """

    base_url: str  # OpenAI-compatible endpoint URL
    api_key: str  # "dummy" when QIANFAN_API_KEY is unset (signals skip)
    model_name: str  # Qianfan model identifier, e.g. "ernie-5.0"


def load_settings() -> QianfanExampleSettings:
    """Build settings from QIANFAN_* environment variables.

    Missing variables fall back to the module defaults; a missing API key
    falls back to the "dummy" sentinel, which `main` treats as "skip run".
    """
    env = os.getenv
    base_url = env("QIANFAN_BASE_URL", DEFAULT_QIANFAN_BASE_URL)
    api_key = env("QIANFAN_API_KEY", "dummy")
    model_name = env("QIANFAN_MODEL", DEFAULT_QIANFAN_MODEL)
    return QianfanExampleSettings(base_url=base_url, api_key=api_key, model_name=model_name)


def configure_client(settings: QianfanExampleSettings) -> None:
    """Point the Agents SDK defaults at the Qianfan endpoint.

    Installs an `AsyncOpenAI` client as the default, switches the default
    API to Chat Completions (Qianfan does not serve the Responses API),
    and disables tracing since no platform.openai.com key is available.
    """
    set_tracing_disabled(disabled=True)
    set_default_openai_client(
        client=AsyncOpenAI(base_url=settings.base_url, api_key=settings.api_key),
        use_for_tracing=False,
    )
    set_default_openai_api("chat_completions")


@function_tool
def get_weather(city: str) -> str:
    """Stub weather tool: always reports sunny weather for *city*."""
    print(f"[debug] getting weather for {city}")
    report = f"The weather in {city} is sunny."
    return report


async def main(settings: QianfanExampleSettings | None = None) -> str | None:
    """Run a single haiku-weather agent turn against Qianfan.

    Loads settings from the environment when none are passed. When the API
    key is the "dummy" sentinel (no QIANFAN_API_KEY set), prints and returns
    a skip message instead of making a network call; otherwise returns the
    agent's final output as a string.
    """
    if settings is None:
        settings = load_settings()

    # Without a real key there is nothing to call — bail out loudly.
    if settings.api_key == "dummy":
        skip_notice = "Skipping run because no valid QIANFAN_API_KEY was provided."
        print(skip_notice)
        return skip_notice

    configure_client(settings)

    haiku_agent = Agent(
        name="Assistant",
        instructions="You only respond in haikus.",
        model=settings.model_name,
        tools=[get_weather],
    )

    run_result = await Runner.run(haiku_agent, "What's the weather in Beijing?")
    print(run_result.final_output)
    return str(run_result.final_output)


# Script entry point: run the example once with environment-derived settings.
if __name__ == "__main__":
    asyncio.run(main())
30 changes: 30 additions & 0 deletions tests/test_qianfan_model_provider_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
from __future__ import annotations

import importlib

import pytest

qianfan_provider = importlib.import_module("examples.model_providers.qianfan_provider")


def test_load_settings_uses_qianfan_defaults(monkeypatch) -> None:
    """With no QIANFAN_* variables set, all defaults should apply."""
    for var in ("QIANFAN_BASE_URL", "QIANFAN_API_KEY", "QIANFAN_MODEL"):
        monkeypatch.delenv(var, raising=False)

    settings = qianfan_provider.load_settings()

    assert settings.base_url == "https://qianfan.baidubce.com/v2"
    assert settings.api_key == "dummy"
    assert settings.model_name == "ernie-5.0"


@pytest.mark.asyncio
async def test_main_skips_without_api_key(monkeypatch) -> None:
    """main() must skip (not call the network) when no API key is set."""
    for var in ("QIANFAN_API_KEY", "QIANFAN_BASE_URL", "QIANFAN_MODEL"):
        monkeypatch.delenv(var, raising=False)

    result = await qianfan_provider.main()

    assert result == "Skipping run because no valid QIANFAN_API_KEY was provided."
7 changes: 7 additions & 0 deletions tests/test_run_examples_script.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,13 @@ def test_artifact_dir_for_example_uses_tmp_safe_stem(tmp_path: Path) -> None:
assert artifact_dir == tmp_path / "examples__sandbox__tutorials__vision_website_clone__main"


def test_discover_examples_finds_qianfan_provider() -> None:
    """The example discovery helper should pick up the new Qianfan script."""
    discovered = run_examples.discover_examples(["qianfan_provider.py"])

    assert any(
        example.relpath == "examples/model_providers/qianfan_provider.py"
        for example in discovered
    )


def test_prepare_redis_for_example_uses_existing_local_redis(monkeypatch) -> None:
env: dict[str, str] = {}
monkeypatch.setattr(run_examples, "redis_ping_url", lambda url, timeout=0.5: True)
Expand Down