LangGraph 完整教學:從基礎到進階的 Agent 構建指南 什麼是 LangGraph? LangGraph 是一個用於構建可控 Agent 的低級編排框架。相比於 LangChain 提供的整合和可組合元件,LangGraph 專注於代理編排,提供可定製的架構、長期記憶體和人機協同功能,以可靠地處理複雜任務。 核心優勢 可靠性和可控性:通過審核檢查和人機協同審批來指導代理行動 低級且可擴展:使用完全描述性的低級基元構建自定義代理 一流的串流支援:支援逐 token 的串流輸出以及中間步驟的串流處理 快速開始 安裝 pip install -U langgraph "langchain[anthropic]" 第一個簡單 Agent from langgraph.prebuilt import create_react_agent # 定義一個模擬的天氣資料抓取工具 def get_weather(city: str) -> str: """Get weather for a given city.""" return f"It's always sunny in {city}!" # 創建 Agent agent = create_react_agent( model="anthropic:claude-3-5-sonnet-latest", tools=[get_weather], prompt="You are a helpful assistant" ) # 執行 Agent response = agent.invoke({ "messages": [{"role": "user", "content": "what is the weather in sf"}] }) 核心概念與進階功能 1. 動態提示配置 LangGraph 支援靜態和動態兩種提示配置: 靜態提示: agent = create_react_agent( model="anthropic:claude-3-5-sonnet-latest", tools=[get_weather], prompt="Never answer questions about the weather." ) 動態提示: from langchain_core.messages import AnyMessage from langchain_core.runnables import RunnableConfig from langgraph.prebuilt.chat_agent_executor import AgentState def prompt(state: AgentState, config: RunnableConfig) -> list[AnyMessage]: user_name = config["configurable"].get("user_name") system_msg = f"You are a helpful assistant. Address the user as {user_name}." return [{"role": "system", "content": system_msg}] + state["messages"] agent = create_react_agent( model="anthropic:claude-3-5-sonnet-latest", tools=[get_weather], prompt=prompt ) 2. 
多輪對話與記憶體 要實現多輪對話,需要配置檢查點程式(checkpointer): from langgraph.checkpoint.memory import MemorySaver checkpointer = MemorySaver() agent = create_react_agent( model="anthropic:claude-3-5-sonnet-latest", tools=[get_weather], checkpointer=checkpointer ) config = {"configurable": {"thread_id": "1"}} # 第一次對話 sf_response = agent.invoke( {"messages": [{"role": "user", "content": "what is the weather in sf"}]}, config ) # 第二次對話(會記住之前的內容) ny_response = agent.invoke( {"messages": [{"role": "user", "content": "what about new york?"}]}, config ) 3. 結構化回應 使用 response_format 參數生成符合特定架構的結構化回應: from pydantic import BaseModel class WeatherResponse(BaseModel): conditions: str agent = create_react_agent( model="anthropic:claude-3-5-sonnet-latest", tools=[get_weather], response_format=WeatherResponse ) response = agent.invoke({ "messages": [{"role": "user", "content": "what is the weather in sf"}] }) # 取得結構化回應 structured_data = response["structured_response"] 自訂 Agent 架構 基本圖形構建 from typing import Annotated from typing_extensions import TypedDict from langgraph.graph import StateGraph, START, END from langgraph.graph.message import add_messages class State(TypedDict): messages: Annotated[list, add_messages] # 建立圖形構建器 graph_builder = StateGraph(State) # 定義聊天機器人節點 def chatbot(state: State): return {"messages": [llm_with_tools.invoke(state["messages"])]} graph_builder.add_node("chatbot", chatbot) 添加工具節點 from langchain_core.messages import ToolMessage import json class BasicToolNode: """執行工具呼叫的節點""" def __init__(self, tools: list) -> None: self.tools_by_name = {tool.name: tool for tool in tools} def __call__(self, inputs: dict): if messages := inputs.get("messages", []): message = messages[-1] else: raise ValueError("No message found in input") outputs = [] for tool_call in message.tool_calls: tool_result = self.tools_by_name[tool_call["name"]].invoke( tool_call["args"] ) outputs.append( ToolMessage( content=json.dumps(tool_result), name=tool_call["name"], tool_call_id=tool_call["id"], ) ) return 
{"messages": outputs} tool_node = BasicToolNode(tools=[tool]) graph_builder.add_node("tools", tool_node) 條件邊(Conditional Edges) def route_tools(state: State): """根據工具呼叫情況決定下一步""" if isinstance(state, list): ai_message = state[-1] elif messages := state.get("messages", []): ai_message = messages[-1] else: raise ValueError(f"No messages found in input state: {state}") if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0: return "tools" return END # 添加條件邊 graph_builder.add_conditional_edges( "chatbot", route_tools, {"tools": "tools", END: END}, ) graph_builder.add_edge("tools", "chatbot") graph_builder.add_edge(START, "chatbot") # 編譯圖形 graph = graph_builder.compile() 進階功能 1. 持久化記憶體 同步版本 - SQLite Checkpointer from langgraph.checkpoint.memory import MemorySaver memory = MemorySaver() graph = graph_builder.compile(checkpointer=memory) 非同步版本 - AsyncSqliteSaver import asyncio from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver async def main(): db_path = "./production_chatbot_async.db" async with AsyncSqliteSaver.from_conn_string(db_path) as checkpointer: graph = graph_builder.compile(checkpointer=checkpointer) config = {"configurable": {"thread_id": "user_456"}} result = await graph.ainvoke( {"messages": [{"role": "user", "content": "Hi, I'm Bob"}]}, config=config ) print(result["messages"][-1].content) if __name__ == "__main__": asyncio.run(main()) 2. 
人機協同控制(Human-in-the-Loop) from langchain_core.tools import tool from langgraph.types import Command, interrupt @tool def human_assistance(query: str) -> str: """Request assistance from a human.""" human_response = interrupt({"query": query}) return human_response["data"] # 設定工具(此處的 tool 指先前建立的搜尋工具實例,例如 TavilySearch,而非上方匯入的 @tool 裝飾器) tools = [tool, human_assistance] llm_with_tools = llm.bind_tools(tools) def chatbot(state: State): message = llm_with_tools.invoke(state["messages"]) # 斷言一次最多只有一個工具呼叫;若模型支援並行工具呼叫,應在綁定工具時將其停用,以免 interrupt 在恢復執行時被重複觸發 assert len(message.tool_calls) <= 1 return {"messages": [message]} # 編譯圖形 memory = MemorySaver() graph = graph_builder.compile(checkpointer=memory) # 執行並處理中斷 config = {"configurable": {"thread_id": "1"}} # 開始對話 events = graph.stream( {"messages": [{"role": "user", "content": "I need expert guidance"}]}, config, stream_mode="values", ) # 當執行暫停時,提供人類回應 human_response = "We recommend using LangGraph for building reliable agents." human_command = Command(resume={"data": human_response}) # 恢復執行 events = graph.stream(human_command, config, stream_mode="values") 3. 自訂狀態欄位 from langchain_core.tools import InjectedToolCallId class State(TypedDict): messages: Annotated[list, add_messages] name: str birthday: str @tool def human_assistance( name: str, birthday: str, tool_call_id: Annotated[str, InjectedToolCallId] ) -> Command: """Request human verification of information.""" human_response = interrupt({ "question": "Is this correct?", "name": name, "birthday": birthday, }) if human_response.get("correct", "").lower().startswith("y"): verified_name = name verified_birthday = birthday response = "Correct" else: verified_name = human_response.get("name", name) verified_birthday = human_response.get("birthday", birthday) response = f"Corrected: {human_response}" # 使用 Command 更新狀態 state_update = { "name": verified_name, "birthday": verified_birthday, "messages": [ToolMessage(response, tool_call_id=tool_call_id)], } return Command(update=state_update) 4. 
時間旅行功能 LangGraph 的檢查點機制支援「時間旅行」,讓您可以回到對話的任何之前狀態: # 查看狀態歷史 for state in graph.get_state_history(config): print(f"訊息數量: {len(state.values['messages'])}") print(f"檢查點ID: {state.config.get('configurable', {}).get('checkpoint_id')}") # 從特定檢查點恢復 specific_config = { "configurable": { "thread_id": "1", "checkpoint_id": "特定的檢查點ID" } } # 從該時間點繼續執行 events = graph.stream(None, specific_config, stream_mode="values") 實際應用案例 完整的搜尋助手 from langchain_tavily import TavilySearch # 設定搜尋工具 tool = TavilySearch(max_results=2) tools = [tool] # 建立具有記憶體的搜尋助手 memory = MemorySaver() agent = create_react_agent( model="anthropic:claude-3-5-sonnet-latest", tools=tools, checkpointer=memory ) def stream_graph_updates(user_input: str): config = {"configurable": {"thread_id": "search_session"}} for event in agent.stream( {"messages": [{"role": "user", "content": user_input}]}, config ): for value in event.values(): print("Assistant:", value["messages"][-1].content) # 互動式對話循環 while True: try: user_input = input("User: ") if user_input.lower() in ["quit", "exit", "q"]: print("Goodbye!") break stream_graph_updates(user_input) except KeyboardInterrupt: break 視覺化與除錯 from IPython.display import Image, display # 視覺化圖形結構 try: display(Image(graph.get_graph().draw_mermaid_png())) except Exception: print("需要安裝額外的依賴來顯示圖形") 最佳實踐 記憶體管理:始終為需要多輪對話的應用程式配置檢查點程式 錯誤處理:在工具節點中添加適當的錯誤處理機制 狀態設計:仔細設計狀態結構,包含應用程式所需的所有資訊 人機協同:在關鍵決策點添加人類干預機制 測試與調試:使用 LangGraph Studio 進行視覺化調試 生態系統整合 LangSmith:提供代理評估和可觀察性 LangGraph 平台:專門為長時間運行的有狀態工作流構建的部署平台 LangGraph Studio:視覺化 IDE,支援工作流的交互和調試 總結 LangGraph 提供了一個強大而靈活的框架來構建複雜的 AI 代理系統。通過其狀態管理、檢查點機制和人機協同功能,您可以構建可靠、可控的智能應用程式。從簡單的聊天機器人到複雜的多代理系統,LangGraph 都能提供所需的工具和抽象。 開始您的 LangGraph 之旅,探索 AI 代理的無限可能!