From 024fd0190d57b01ea4d89f0aa28c9e647a33edf6 Mon Sep 17 00:00:00 2001 From: tfwang Date: Wed, 31 Dec 2025 10:37:36 +0800 Subject: [PATCH 1/4] Add Langchain quickstart guide, notebook demo --- ...ow_to_Create_an_AI_Agent_with_Langchain.md | 38 ++ .../langchain/langchain_quickstart.ipynb | 424 ++++++++++++++++++ 2 files changed, 462 insertions(+) create mode 100644 docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md create mode 100644 docs/public/langchain/langchain_quickstart.ipynb diff --git a/docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md b/docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md new file mode 100644 index 0000000..40999c9 --- /dev/null +++ b/docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md @@ -0,0 +1,38 @@ +--- +products: + - Alauda AI +kind: + - Solution +ProductsVersion: + - 4.x +--- + +# How To Create AI Agent with Langchain + +## Overview + +Langchain is a framework for developing applications powered by language models. It provides tools and abstractions for building AI agents that can interact with users, access external tools, and perform complex reasoning tasks. This guide provides a quickstart example for creating an AI Agent using Langchain. + +## Prerequisites + +- Access to a Notebook environment (e.g., Jupyter Notebook, JupyterLab, or similar) +- Python environment with Langchain dependencies installed + +## Quickstart + +A simple example of creating an AI Agent with Langchain is available here: [langchain_quickstart.ipynb](/langchain/langchain_quickstart.ipynb). Download and upload it to a Notebook environment to run. 
+ +The notebook demonstrates: +- Environment setup and dependency installation +- Tool definition using the `@tool` decorator (weather query tool example) +- LLM model initialization and configuration +- Agent creation with tools and system prompts +- Agent execution and result handling +- FastAPI service deployment for production use + +## Additional Resources + +For more resources on developing AI Agents with Langchain, see: + +- [Langchain Documentation](https://docs.langchain.com/oss/python/langchain/overview) - The official Langchain documentation where all usage-related documentation can be found. +- [Langchain Academy](https://academy.langchain.com/) - The official Langchain Academy provides extensive educational resources. The [Foundation Introduction to Langchain Python](https://academy.langchain.com/courses/foundation-introduction-to-langchain-python) course introduces the fundamentals of developing Agents with Langchain. \ No newline at end of file diff --git a/docs/public/langchain/langchain_quickstart.ipynb b/docs/public/langchain/langchain_quickstart.ipynb new file mode 100644 index 0000000..def734e --- /dev/null +++ b/docs/public/langchain/langchain_quickstart.ipynb @@ -0,0 +1,424 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LangChain Quick Start Demo\n", + "\n", + "This notebook demonstrates how to create a simple LangChain agent with tools.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Install Dependencies\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install \"langchain>=1.0.0\" \"langchain-openai>=1.0.0\" \"requests\" \"fastapi\" \"uvicorn\" --target ~/packages" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 
Import Libraries\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "from pathlib import Path\n", + "\n", + "user_site_packages = Path.home() / \"packages\"\n", + "if str(user_site_packages) not in sys.path:\n", + " sys.path.insert(0, str(user_site_packages))\n", + "\n", + "from langchain.agents import create_agent\n", + "from langchain_openai import ChatOpenAI\n", + "from langchain_core.tools import tool\n", + "from typing import Dict, Any\n", + "import requests\n", + "import os\n", + "\n", + "print(\"Libraries imported\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Set Configuration\n", + "\n", + "Set your API key and model configuration (optional, can also use environment variables):\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Set your API key by the OPENAI_API_KEY environment variable\n", + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"\")\n", + "MODEL_NAME = os.getenv(\"MODEL_NAME\", \"deepseek-chat\")\n", + "BASE_URL = os.getenv(\"BASE_URL\", \"https://api.deepseek.com\")\n", + "\n", + "print(\"Environ configured\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
Define a Tool\n", + "\n", + "Create a simple weather tool using the `@tool` decorator:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@tool\n", + "def get_weather(city: str) -> Dict[str, Any]:\n", + " \"\"\"Get current weather for a city.\n", + " \n", + " Args:\n", + " city: City name (e.g., Beijing, Shanghai, New York)\n", + " \n", + " Returns:\n", + " Dictionary with weather information\n", + " \"\"\"\n", + " try:\n", + " url = f\"https://wttr.in/{city}?format=j1\"\n", + " response = requests.get(url, timeout=30)\n", + " response.raise_for_status()\n", + " data = response.json()\n", + " \n", + " current = data[\"current_condition\"][0]\n", + " return {\n", + " \"city\": city,\n", + " \"temperature\": f\"{current['temp_C']}°C\",\n", + " \"humidity\": f\"{current['humidity']}%\",\n", + " }\n", + " except Exception as e:\n", + " return {\"error\": f\"Failed to get weather: {str(e)}\"}\n", + "\n", + "print(\"Defined get_weather tool\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Create LLM Model\n", + "\n", + "Initialize the language model (using DeepSeek in this example):\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "llm = ChatOpenAI(\n", + " model=MODEL_NAME,\n", + " base_url=BASE_URL if BASE_URL else None, # None means use OpenAI default\n", + " temperature=0\n", + ")\n", + "\n", + "\n", + "print(\"Created LLM instance\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. 
Create Agent\n", + "\n", + "Create an agent with the weather tool:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "agent = create_agent(\n", + " model=llm,\n", + " tools=[get_weather],\n", + " system_prompt=\"You are a helpful weather assistant.\"\n", + ")\n", + "\n", + "print(\"Created Agent\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. Run the Agent\n", + "\n", + "Invoke the agent with a query:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = await agent.ainvoke({\n", + " \"messages\": [\n", + " {\"role\": \"user\", \"content\": \"What's the weather in Beijing?\"}\n", + " ]\n", + "})\n", + "\n", + "print(\"Invoked\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 8. Display the Result\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def print_result(result: Dict[str, Any]):\n", + " if isinstance(result, dict) and \"messages\" in result:\n", + " last_message = result[\"messages\"][-1]\n", + " if hasattr(last_message, \"content\"):\n", + " print(last_message.content)\n", + " else:\n", + " print(result)\n", + " else:\n", + " print(result)\n", + "\n", + "print_result(result)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Try Different Queries\n", + "\n", + "You can try asking different questions:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Try another query\n", + "result2 = await agent.ainvoke({\n", + " \"messages\": [\n", + " {\"role\": \"user\", \"content\": \"What's the weather in Shanghai?\"}\n", + " ]\n", + "})\n", + "\n", + "print_result(result2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 9. 
FastAPI Service Example\n", + "\n", + "You can also run the agent as a FastAPI web service for production use:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import FastAPI components\n", + "from fastapi import FastAPI\n", + "from pydantic import BaseModel\n", + "from threading import Thread\n", + "import time\n", + "import requests\n", + "\n", + "# Create a simple FastAPI app\n", + "api_app = FastAPI(title=\"LangChain Agent API\")\n", + "\n", + "class ChatRequest(BaseModel):\n", + " message: str\n", + "\n", + "@api_app.post(\"/chat\")\n", + "async def chat(request: ChatRequest):\n", + " \"\"\" Chat endpoint \"\"\"\n", + " result = await agent.ainvoke({\n", + " \"messages\": [\n", + " {\"role\": \"user\", \"content\": request.message}\n", + " ]\n", + " })\n", + "\n", + " if isinstance(result, dict) and \"messages\" in result:\n", + " last_message = result[\"messages\"][-1]\n", + " if hasattr(last_message, \"content\"):\n", + " return {\"response\": last_message.content}\n", + " return {\"response\": str(result)}\n", + "\n", + "\n", + "print(\"FastAPI app created. Use the next cell to start the server.\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Start the FastAPI Server\n", + "\n", + "**Note**: In a notebook, you can start the server in a background thread. 
For production, run it as a separate process.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Start server in background thread (for notebook demonstration)\n", + "\n", + "from uvicorn import Config, Server\n", + "\n", + "# Create a server instance that can be controlled\n", + "config = Config(api_app, host=\"127.0.0.1\", port=8000, log_level=\"info\")\n", + "server = Server(config)\n", + "\n", + "def run_server():\n", + " server.run()\n", + "\n", + "# Use daemon=True so the thread stops automatically when the kernel restarts\n", + "# This is safe for notebook demonstrations\n", + "# For production, use process managers instead of threads\n", + "server_thread = Thread(target=run_server, daemon=True)\n", + "server_thread.start()\n", + "\n", + "# Wait a moment for the server to start\n", + "time.sleep(2)\n", + "print(\"✓ FastAPI server started at http://127.0.0.1:8000\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Test the API\n", + "\n", + "Now you can call the API using HTTP requests:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test the API endpoint\n", + "response = requests.post(\n", + " \"http://127.0.0.1:8000/chat\",\n", + " json={\"message\": \"What's the weather in Shanghai?\"},\n", + " timeout=30\n", + ")\n", + "\n", + "print(f\"Status Code: {response.status_code}\")\n", + "print(\"Response:\")\n", + "print(response.json().get('response'))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Stop the Server\n", + "\n", + "You can stop the server by calling its shutdown method:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Stop the server\n", + "if 'server' in globals() and server.started:\n", + " server.should_exit = True\n", + " print(\"✓ Server shutdown requested. 
It will stop after handling current requests.\")\n", + " print(\" Note: The server will also stop automatically when you restart the kernel.\")\n", + "else:\n", + " print(\"Server is not running or has already stopped.\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 10. More Resources\n", + "\n", + "For more resources on developing AI Agents with Langchain, see:\n", + "\n", + "- [Langchain Documentation](https://docs.langchain.com/oss/python/langchain/overview) - The official Langchain documentation where all usage-related documentation can be found.\n", + "- [Langchain Academy](https://academy.langchain.com/) - The official Langchain Academy provides extensive educational resources. The [Foundation Introduction to Langchain Python](https://academy.langchain.com/courses/foundation-introduction-to-langchain-python) course introduces the fundamentals of developing Agents with Langchain." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From eb3eae69880dd398866189aa680c196f2aee36ab Mon Sep 17 00:00:00 2001 From: tfwang Date: Thu, 15 Jan 2026 10:08:45 +0800 Subject: [PATCH 2/4] update --- docs/public/langchain/langchain_quickstart.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/public/langchain/langchain_quickstart.ipynb b/docs/public/langchain/langchain_quickstart.ipynb index def734e..a3eb625 100644 --- a/docs/public/langchain/langchain_quickstart.ipynb +++ b/docs/public/langchain/langchain_quickstart.ipynb @@ -224,7 +224,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Try Different Queries\n", + "### Try Different Queries\n", "\n", "You can try asking different questions:\n" ] From fb34056b92e5072d9d219cfa157783c39efda55b Mon Sep 17 00:00:00 2001 From: tfwang Date: Thu, 15 Jan 2026 17:13:11 +0800 Subject: [PATCH 3/4] update --- docs/public/langchain/langchain_quickstart.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/public/langchain/langchain_quickstart.ipynb b/docs/public/langchain/langchain_quickstart.ipynb index a3eb625..d6c2371 100644 --- a/docs/public/langchain/langchain_quickstart.ipynb +++ b/docs/public/langchain/langchain_quickstart.ipynb @@ -198,7 +198,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 8. Display the Result\n" + "### Display the Result\n" ] }, { @@ -249,7 +249,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 9. FastAPI Service Example\n", + "## 8. 
FastAPI Service Example\n", "\n", "You can also run the agent as a FastAPI web service for production use:\n" ] @@ -391,7 +391,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 10. More Resources\n", + "## 9. More Resources\n", "\n", "For more resources on developing AI Agents with Langchain, see:\n", "\n", From e019b0347327b2e0e8c82a6516dfdd1504c178f2 Mon Sep 17 00:00:00 2001 From: tfwang Date: Sat, 28 Feb 2026 17:22:33 +0800 Subject: [PATCH 4/4] update --- ...ow_to_Create_an_AI_Agent_with_Langchain.md | 6 +- .../langchain/langchain_quickstart.ipynb | 943 ++++++++++-------- 2 files changed, 525 insertions(+), 424 deletions(-) diff --git a/docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md b/docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md index 40999c9..65e1932 100644 --- a/docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md +++ b/docs/en/solutions/How_to_Create_an_AI_Agent_with_Langchain.md @@ -15,8 +15,8 @@ Langchain is a framework for developing applications powered by language models. 
## Prerequisites -- Access to a Notebook environment (e.g., Jupyter Notebook, JupyterLab, or similar) -- Python environment with Langchain dependencies installed +- A Notebook environment (e.g., Jupyter Notebook, JupyterLab, or similar) +- Python 3 with pip to install Langchain and other dependencies ## Quickstart @@ -24,7 +24,7 @@ A simple example of creating an AI Agent with Langchain is available here: [lang The notebook demonstrates: - Environment setup and dependency installation -- Tool definition using the `@tool` decorator (weather query tool example) +- Tool definition (choose one): built-in with the `@tool` decorator, or external via MCP using `MultiServerMCPClient` from `langchain-mcp-adapters` (weather example for both) - LLM model initialization and configuration - Agent creation with tools and system prompts - Agent execution and result handling diff --git a/docs/public/langchain/langchain_quickstart.ipynb b/docs/public/langchain/langchain_quickstart.ipynb index d6c2371..09d929d 100644 --- a/docs/public/langchain/langchain_quickstart.ipynb +++ b/docs/public/langchain/langchain_quickstart.ipynb @@ -1,424 +1,525 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# LangChain Quick Start Demo\n", - "\n", - "This notebook demonstrates how to create a simple LangChain agent with tools.\n" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# LangChain Quick Start Demo\n", + "\n", + "This notebook demonstrates how to create a simple LangChain agent with tools.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Install Dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# If download is slow, add: -i https://pypi.tuna.tsinghua.edu.cn/simple\n", + "!pip install \"langchain>=1.0.0\" \"langchain-openai>=1.0.0\" \"requests\" \"fastapi\" \"uvicorn\" \"langchain-mcp-adapters\" \"fastmcp\" --target ~/packages" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Import Libraries\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "from pathlib import Path\n", + "\n", + "user_site_packages = Path.home() / \"packages\"\n", + "if str(user_site_packages) not in sys.path:\n", + " sys.path.insert(0, str(user_site_packages))\n", + "\n", + "from langchain.agents import create_agent\n", + "from langchain_openai import ChatOpenAI\n", + "from langchain_core.tools import tool\n", + "from typing import Dict, Any\n", + "import requests\n", + "import os\n", + "\n", + "print(\"Libraries imported\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Define a Tool\n", + "\n", + "Two equivalent ways to define tools (choose one). This example uses the same `get_weather(city)` for both:\n", + "\n", + "1. **Option A: Built-in Tool** — Define with the `@tool` decorator in LangChain, same process as the Agent.\n", + "2. **Option B: External MCP** — Provided by a separate MCP server. 
Use `MultiServerMCPClient` from the `langchain-mcp-adapters` package to connect and load tools.\n", + "\n", + "Run either Option A or Option B (not both); the variable `tools` is set in the option you choose and is used in section 6.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Option A: built-in @tool" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@tool\n", + "def get_weather(city: str) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get current weather for a city.\n", + " Args:\n", + " city: City name (e.g., Beijing, Shanghai, New York)\n", + " Returns:\n", + " Dict with weather information\n", + " \"\"\"\n", + " try:\n", + " url = f\"https://wttr.in/{city}?format=j1\"\n", + " response = requests.get(url, timeout=10)\n", + " response.raise_for_status()\n", + " data = response.json()\n", + " current = data[\"current_condition\"][0]\n", + " return {\n", + " \"city\": city,\n", + " \"temperature\": f\"{current['temp_C']}°C\",\n", + " \"humidity\": f\"{current['humidity']}%\",\n", + " }\n", + " except Exception as e:\n", + " return {\"error\": str(e)}\n", + "\n", + "tools = [get_weather]\n", + "print(\"Defined get_weather tool\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Option B: External MCP weather server\n", + "\n", + "Start the MCP server (or use an existing one), then connect and load tools with `MultiServerMCPClient` below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# MCP weather server: plain function run in subprocess (easy to maintain, no temp files)\n", + "import time\n", + "from multiprocessing import Process\n", + "from fastmcp import FastMCP\n", + "\n", + "\n", + "def run_mcp_weather_server():\n", + " \"\"\"Subprocess entry: MCP server equivalent to built-in get_weather, SSE on port 8001.\"\"\"\n", + " mcp = FastMCP(\"weather\")\n", + "\n", + " @mcp.tool()\n", + " def get_weather(city: str) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get current weather for a city.\n", + " Args:\n", + " city: City name (e.g., Beijing, Shanghai, New York)\n", + " Returns:\n", + " Dict with weather information\n", + " \"\"\"\n", + " try:\n", + " url = f\"https://wttr.in/{city}?format=j1\"\n", + " response = requests.get(url, timeout=10)\n", + " response.raise_for_status()\n", + " data = response.json()\n", + " current = data[\"current_condition\"][0]\n", + " return {\n", + " \"city\": city,\n", + " \"temperature\": f\"{current['temp_C']}°C\",\n", + " \"humidity\": f\"{current['humidity']}%\",\n", + " }\n", + " except Exception as e:\n", + " return {\"error\": str(e)}\n", + "\n", + " mcp.run(transport=\"sse\", host=\"127.0.0.1\", port=8001)\n", + "\n", + "\n", + "mcp_process = Process(target=run_mcp_weather_server, daemon=True)\n", + "mcp_process.start()\n", + "time.sleep(2) # wait for SSE server to be ready\n", + "print(\"MCP weather server started (SSE, port 8001). 
Stop via the Stop the Server cell.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to MCP server and load tools\n", + "from langchain_core.messages import ToolMessage\n", + "from langchain_mcp_adapters.client import MultiServerMCPClient\n", + "from langchain_mcp_adapters.interceptors import MCPToolCallRequest\n", + "\n", + "\n", + "async def _flatten_tool_content_to_string(request: MCPToolCallRequest, handler):\n", + " \"\"\"Force tool result content to a single string (some APIs reject list content).\"\"\"\n", + " result = await handler(request)\n", + " if not hasattr(result, \"content\"):\n", + " return result\n", + " content = result.content\n", + " if isinstance(content, str):\n", + " return result\n", + " if isinstance(content, list):\n", + " parts = []\n", + " for block in content:\n", + " if isinstance(block, dict) and block.get(\"type\") == \"text\":\n", + " parts.append(block.get(\"text\", \"\"))\n", + " elif hasattr(block, \"text\"):\n", + " parts.append(getattr(block, \"text\", \"\"))\n", + " content = \"\\n\".join(parts) if parts else str(content)\n", + " else:\n", + " content = str(content)\n", + " tool_call_id = getattr(request.runtime, \"tool_call_id\", \"\")\n", + " return ToolMessage(content=content, tool_call_id=tool_call_id)\n", + "\n", + "\n", + "client = MultiServerMCPClient(\n", + " {\n", + " \"weather\": {\n", + " \"transport\": \"sse\",\n", + " \"url\": \"http://127.0.0.1:8001/sse\",\n", + " }\n", + " },\n", + " tool_interceptors=[_flatten_tool_content_to_string],\n", + ")\n", + "tools = await client.get_tools()\n", + "print(f\"Loaded {len(tools)} MCP tool(s):\", [t.name for t in tools])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
Create LLM Model\n", + "\n", + "Initialize the language model (using DeepSeek in this example):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# API key and endpoint (required for OpenAI-compatible APIs like DeepSeek).\n", + "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"\")\n", + "MODEL_NAME = os.getenv(\"MODEL_NAME\", \"deepseek-chat\")\n", + "BASE_URL = os.getenv(\"BASE_URL\", \"https://api.deepseek.com\")\n", + "# If you get APIConnectionError: check BASE_URL is reachable, proxy, and OPENAI_API_KEY.\n", + "\n", + "# Use ChatOpenAI with explicit base_url so you control the endpoint (avoids connection issues).\n", + "llm = ChatOpenAI(\n", + " model=MODEL_NAME,\n", + " base_url=BASE_URL if BASE_URL else None,\n", + " temperature=0,\n", + ")\n", + "\n", + "print(\"Created LLM instance\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Create Agent\n", + "\n", + "Create an agent with `tools` (from Option A or Option B above):\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "agent = create_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " system_prompt=\"You are a helpful weather assistant.\"\n", + ")\n", + "\n", + "print(\"Created Agent\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. 
Run the Agent\n", + "\n", + "Invoke the agent with a query:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = await agent.ainvoke({\n", + " \"messages\": [\n", + " {\"role\": \"user\", \"content\": \"What's the weather in Beijing?\"}\n", + " ]\n", + "})\n", + "\n", + "print(\"Invoked\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Display the Result\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def print_result(result: Dict[str, Any]):\n", + " if isinstance(result, dict) and \"messages\" in result:\n", + " last_message = result[\"messages\"][-1]\n", + " if hasattr(last_message, \"content\"):\n", + " print(last_message.content)\n", + " else:\n", + " print(result)\n", + " else:\n", + " print(result)\n", + "\n", + "print_result(result)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Try Different Queries\n", + "\n", + "You can try asking different questions:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Try another query\n", + "result2 = await agent.ainvoke({\n", + " \"messages\": [\n", + " {\"role\": \"user\", \"content\": \"What's the weather in Shanghai?\"}\n", + " ]\n", + "})\n", + "\n", + "print_result(result2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. 
FastAPI Service Example\n", + "\n", + "You can also run the agent as a FastAPI web service for production use:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import FastAPI components\n", + "from fastapi import FastAPI\n", + "from pydantic import BaseModel\n", + "from threading import Thread\n", + "import time\n", + "import requests\n", + "\n", + "# Create a simple FastAPI app\n", + "api_app = FastAPI(title=\"LangChain Agent API\")\n", + "\n", + "class ChatRequest(BaseModel):\n", + " message: str\n", + "\n", + "@api_app.post(\"/chat\")\n", + "async def chat(request: ChatRequest):\n", + " \"\"\" Chat endpoint \"\"\"\n", + " result = await agent.ainvoke({\n", + " \"messages\": [\n", + " {\"role\": \"user\", \"content\": request.message}\n", + " ]\n", + " })\n", + "\n", + " if isinstance(result, dict) and \"messages\" in result:\n", + " last_message = result[\"messages\"][-1]\n", + " if hasattr(last_message, \"content\"):\n", + " return {\"response\": last_message.content}\n", + " return {\"response\": str(result)}\n", + "\n", + "\n", + "print(\"FastAPI app created. Use the next cell to start the server.\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Start the FastAPI Server\n", + "\n", + "**Note**: In a notebook, you can start the server in a background thread. 
For production, run it as a separate process.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Start server in background thread (for notebook demonstration)\n", + "\n", + "from uvicorn import Config, Server\n", + "\n", + "# Create a server instance that can be controlled\n", + "config = Config(api_app, host=\"127.0.0.1\", port=8000, log_level=\"info\")\n", + "server = Server(config)\n", + "\n", + "def run_server():\n", + " server.run()\n", + "\n", + "# Use daemon=True so the thread stops automatically when the kernel restarts\n", + "# This is safe for notebook demonstrations\n", + "# For production, use process managers instead of threads\n", + "server_thread = Thread(target=run_server, daemon=True)\n", + "server_thread.start()\n", + "\n", + "# Wait a moment for the server to start\n", + "time.sleep(2)\n", + "print(\"✓ FastAPI server started at http://127.0.0.1:8000\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Test the API\n", + "\n", + "Now you can call the API using HTTP requests:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test the API endpoint\n", + "response = requests.post(\n", + " \"http://127.0.0.1:8000/chat\",\n", + " json={\"message\": \"What's the weather in Shanghai?\"},\n", + " timeout=30\n", + ")\n", + "\n", + "print(f\"Status Code: {response.status_code}\")\n", + "print(\"Response:\")\n", + "print(response.json().get('response'))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Stop the Server\n", + "\n", + "You can stop both the FastAPI server and the MCP weather server by running the cell below:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Stop the FastAPI server\n", + "if 'server' in globals() and server.started:\n", + " server.should_exit = True\n", + " 
print(\"✓ FastAPI server shutdown requested.\")\n", + "else:\n", + " print(\"FastAPI server is not running or has already stopped.\")\n", + "\n", + "# Stop the MCP weather server (terminate subprocess)\n", + "if 'mcp_process' in globals() and mcp_process.is_alive():\n", + " mcp_process.terminate()\n", + " mcp_process.join(timeout=3)\n", + " print(\"✓ MCP server (subprocess) terminated.\")\n", + "else:\n", + " print(\"MCP server is not running or has already stopped.\")\n", + "\n", + "print(\" Note: Both servers will also stop when you restart the kernel.\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 8. More Resources\n", + "\n", + "For more resources on developing AI Agents with Langchain, see:\n", + "\n", + "- [Langchain Documentation](https://docs.langchain.com/oss/python/langchain/overview) - The official Langchain documentation where all usage-related documentation can be found.\n", + "- [Langchain Academy](https://academy.langchain.com/) - The official Langchain Academy provides extensive educational resources. The [Foundation Introduction to Langchain Python](https://academy.langchain.com/courses/foundation-introduction-to-langchain-python) course introduces the fundamentals of developing Agents with Langchain." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. 
Install Dependencies\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install \"langchain>=1.0.0\" \"langchain-openai>=1.0.0\" \"requests\" \"fastapi\" \"uvicorn\" --target ~/packages" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Import Libraries\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "from pathlib import Path\n", - "\n", - "user_site_packages = Path.home() / \"packages\"\n", - "if str(user_site_packages) not in sys.path:\n", - " sys.path.insert(0, str(user_site_packages))\n", - "\n", - "from langchain.agents import create_agent\n", - "from langchain_openai import ChatOpenAI\n", - "from langchain_core.tools import tool\n", - "from typing import Dict, Any\n", - "import requests\n", - "import os\n", - "\n", - "print(\"Libraries imported\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Set Configuration\n", - "\n", - "Set your API key and model configuration (optional, can also use environment variables):\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Set your API key by the OPENAI_API_KEY environment variable\n", - "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\", \"\")\n", - "MODEL_NAME = os.getenv(\"MODEL_NAME\", \"deepseek-chat\")\n", - "BASE_URL = os.getenv(\"BASE_URL\", \"https://api.deepseek.com\")\n", - "\n", - "print(\"Environ configured\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. 
# ## 4. Define a Tool
# A weather-lookup tool declared with the @tool decorator; the docstring
# doubles as the tool description the LLM uses when deciding to call it.

@tool
def get_weather(city: str) -> Dict[str, Any]:
    """Get current weather for a city.

    Args:
        city: City name (e.g., Beijing, Shanghai, New York)

    Returns:
        Dictionary with weather information
    """
    try:
        reply = requests.get(f"https://wttr.in/{city}?format=j1", timeout=30)
        reply.raise_for_status()
        conditions = reply.json()["current_condition"][0]
        return {
            "city": city,
            "temperature": f"{conditions['temp_C']}°C",
            "humidity": f"{conditions['humidity']}%",
        }
    except Exception as e:
        # Best-effort tool: surface the failure to the agent as data
        # instead of raising, so the conversation can continue.
        return {"error": f"Failed to get weather: {str(e)}"}

print("Defined get_weather tool")

# ## 5. Create LLM Model
# Initialize the language model (DeepSeek's OpenAI-compatible API here).
llm = ChatOpenAI(
    model=MODEL_NAME,
    base_url=BASE_URL if BASE_URL else None,  # None means use OpenAI default
    temperature=0,
)

print("Created LLM instance")
Create Agent\n", - "\n", - "Create an agent with the weather tool:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "agent = create_agent(\n", - " model=llm,\n", - " tools=[get_weather],\n", - " system_prompt=\"You are a helpful weather assistant.\"\n", - ")\n", - "\n", - "print(\"Created Agent\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 7. Run the Agent\n", - "\n", - "Invoke the agent with a query:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "result = await agent.ainvoke({\n", - " \"messages\": [\n", - " {\"role\": \"user\", \"content\": \"What's the weather in Beijing?\"}\n", - " ]\n", - "})\n", - "\n", - "print(\"Invoked\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Display the Result\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def print_result(result: Dict[str, Any]):\n", - " if isinstance(result, dict) and \"messages\" in result:\n", - " last_message = result[\"messages\"][-1]\n", - " if hasattr(last_message, \"content\"):\n", - " print(last_message.content)\n", - " else:\n", - " print(result)\n", - " else:\n", - " print(result)\n", - "\n", - "print_result(result)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Try Different Queries\n", - "\n", - "You can try asking different questions:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Try another query\n", - "result2 = await agent.ainvoke({\n", - " \"messages\": [\n", - " {\"role\": \"user\", \"content\": \"What's the weather in Shanghai?\"}\n", - " ]\n", - "})\n", - "\n", - "print_result(result2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 8. 
# ## 8. FastAPI Service Example
# Expose the agent as a small web service for production-style use.
from fastapi import FastAPI
from pydantic import BaseModel
from threading import Thread
import time
import requests

api_app = FastAPI(title="LangChain Agent API")


class ChatRequest(BaseModel):
    # Single user message to forward to the agent.
    message: str


@api_app.post("/chat")
async def chat(request: ChatRequest):
    """Forward one user message to the agent and return its final reply."""
    reply = await agent.ainvoke(
        {"messages": [{"role": "user", "content": request.message}]}
    )
    if isinstance(reply, dict) and "messages" in reply:
        final = reply["messages"][-1]
        if hasattr(final, "content"):
            return {"response": final.content}
    # Fallback: stringify whatever the agent produced.
    return {"response": str(reply)}


print("FastAPI app created. Use the next cell to start the server.")
# ### Start the FastAPI Server
# NOTE: a daemon background thread is fine for a notebook demo — it dies
# with the kernel. For production, run uvicorn as a separate process under
# a process manager instead of a thread.
from uvicorn import Config, Server

# A Server instance (rather than uvicorn.run) can be shut down later
# via its should_exit flag.
config = Config(api_app, host="127.0.0.1", port=8000, log_level="info")
server = Server(config)

server_thread = Thread(target=server.run, daemon=True)
server_thread.start()

time.sleep(2)  # give uvicorn a moment to bind the port
print("✓ FastAPI server started at http://127.0.0.1:8000")

# ### Test the API
# Call the running service over HTTP.
response = requests.post(
    "http://127.0.0.1:8000/chat",
    json={"message": "What's the weather in Shanghai?"},
    timeout=30,
)

print(f"Status Code: {response.status_code}")
print("Response:")
print(response.json().get('response'))

# ### Stop the Server
# Setting should_exit asks uvicorn to exit after in-flight requests finish.
if 'server' in globals() and server.started:
    server.should_exit = True
    print("✓ Server shutdown requested. It will stop after handling current requests.")
    print(" Note: The server will also stop automatically when you restart the kernel.")
else:
    print("Server is not running or has already stopped.")

# ## 9. More Resources
# - LangChain Documentation: https://docs.langchain.com/oss/python/langchain/overview
# - LangChain Academy: https://academy.langchain.com/
#   (Foundation Introduction to LangChain Python:
#    https://academy.langchain.com/courses/foundation-introduction-to-langchain-python)