Custom LangGraph architecture for strict message history and summary-free tool execution
| """ | |
| LangGraph Solution for "Inconsistent history when using tool that returns Command with goto=E | |
| and graph=Command.PARENT" | |
| This script provides a LangGraph solution for scenarios where fine-grained control over | |
| agent state and message history is required, particularly to prevent agents from | |
| unintentionally summarizing tool outputs. | |
| Problem Addressed: | |
| The original problem (linked below) highlighted issues with LangGraph's message history | |
| when an agent's tool returns a `Command` object, leading to a `BadRequestError` in | |
| subsequent LLM calls due to a corrupted message sequence. It also tackled the agent's | |
| tendency to summarize tool outputs, even when direct tool results were desired. | |
| Core Solution: | |
| This solution demonstrates a fully custom LangGraph architecture, avoiding high-level | |
| abstractions like `langchain.agents.create_agent` for components that require precise | |
| state management. Instead, it uses: | |
| - Custom LLM caller nodes that *only* invoke the LLM and return its raw AIMessage. | |
| - Explicit `ToolNode`s for tool execution. | |
| - Detailed conditional routing functions to ensure the graph's flow aligns precisely | |
| with the desired state changes, preventing implicit summarization and maintaining | |
| a valid message history. | |
| Original Problem Context (LangChain Forum): | |
| https://forum.langchain.com/t/inconsistent-history-when-using-tool-that-returns-command-with | |
| -goto-end-and-graph-command-parent/2450 | |
| """ | |
from typing import Annotated, Callable, TypedDict

from langchain_core.messages import AIMessage, AnyMessage, HumanMessage, ToolMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langchain_openai import AzureChatOpenAI
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode
# ==============================================================================
# 0. Model Initialization
# ==============================================================================
model = ...  # define your own chat model here; it must support .bind_tools()
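# For example, with Azure OpenAI (a sketch; the deployment name and API
# version are placeholders for your own values, and the usual
# AZURE_OPENAI_* environment variables are assumed to be set):
#
#     model = AzureChatOpenAI(azure_deployment="gpt-4o", api_version="2024-06-01")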
# ==============================================================================
# 1. Agent State Definition
# (Crucial for LangGraph's state management)
# ==============================================================================
class AgentState(TypedDict):
    """
    Represents the state of our graph.

    'messages' is a list of chat messages, with 'add_messages' as a reducer
    to ensure messages are correctly appended rather than overwritten.
    """
    messages: Annotated[list[AnyMessage], add_messages]
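# The reducer means each node can return only its new messages; LangGraph
# merges them into the existing list. A sketch of the effect:
#
#     state = {"messages": [HumanMessage("hi")]}
#     node returns {"messages": [AIMessage("hello")]}
#     -> state["messages"] == [HumanMessage("hi"), AIMessage("hello")]
#
# (add_messages also matches on message ID, so returning a message with an
# existing ID updates it in place instead of appending a duplicate.)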
# ==============================================================================
# 2. Tool Definitions
# (The functionalities our graph can call)
# ==============================================================================
@tool
def weather_tool(city: str) -> str:
    """
    Returns the weather in the specified city.

    This is a simple, pure tool that just returns its output string.
    """
    return f"It's sunny in {city}"


@tool
def handoff_tool(agent_name: str) -> str:
    """
    Simulates a handoff by returning a message. The graph will route based
    on which tool was called, rather than using a Command object.
    """
    return f"Control handed off to {agent_name}"
# ==============================================================================
# 3. Custom LLM Caller Node Definitions
# (These nodes ONLY call the LLM and return its raw AIMessage output)
# ==============================================================================
def create_llm_caller_node(system_prompt: str, tools_list: list) -> Callable:
    """
    A factory that creates a graph node function. This node will:
      1. Apply a system prompt.
      2. Bind a list of tools to the LLM.
      3. Invoke the LLM with the current message history.
      4. Return the raw AIMessage from the LLM.

    This ensures no implicit tool execution or summarization happens within
    the node.
    """
    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    llm_runnable = prompt_template | model.bind_tools(tools_list)

    def llm_caller_node(state: AgentState) -> dict:
        ai_message = llm_runnable.invoke({"messages": state["messages"]})
        return {"messages": [ai_message]}

    return llm_caller_node
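# When the LLM decides to call a tool, the AIMessage it returns carries a
# `tool_calls` list rather than text content, roughly of this shape:
#
#     AIMessage(content="", tool_calls=[
#         {"name": "handoff_tool", "args": {"agent_name": "weather_agent"},
#          "id": "call_abc123", "type": "tool_call"},
#     ])
#
# The routers in section 4 key off this field.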
# Supervisor LLM Caller: decides whether to hand off to the weather agent.
# Its only tool is the handoff_tool.
supervisor_llm_caller = create_llm_caller_node(
    system_prompt="You are a supervisor. Your primary task is to identify weather-related questions "
    "and hand them off to the 'weather_agent' using the 'handoff_tool'. "
    "For other questions, respond directly.",
    tools_list=[handoff_tool],
)

# Weather LLM Caller: decides whether to use the weather_tool.
# Its only tool is the weather_tool.
weather_llm_caller = create_llm_caller_node(
    system_prompt="You are a weather agent. You MUST use the 'weather_tool' to answer any questions about the weather. "
    "Do not answer from memory. Your only job is to call the 'weather_tool' for weather questions.",
    tools_list=[weather_tool],
)
# ==============================================================================
# 4. Conditional Routing Logic
# (Functions that guide the graph's flow based on state)
# ==============================================================================
def router_after_llm_call(state: AgentState) -> str:
    """
    Routes after an LLM caller node has run. If the LLM's output contains
    tool calls, it routes to the appropriate tool executor. Otherwise, it ends.
    """
    last_message = state["messages"][-1]
    if isinstance(last_message, AIMessage) and last_message.tool_calls:
        tool_name = last_message.tool_calls[0]["name"]
        if tool_name == handoff_tool.name:
            return "supervisor_tools"
        elif tool_name == weather_tool.name:
            return "weather_tools"
    return END

def router_after_supervisor_tools(state: AgentState) -> str:
    """
    Routes after the supervisor's tools have run. In this graph, a successful
    handoff tool call always transitions to the weather agent.
    """
    last_message = state["messages"][-1]
    if isinstance(last_message, ToolMessage) and last_message.name == handoff_tool.name:
        return "weather_llm_caller"
    return END  # End if handoff failed or was not the last action.


def router_after_weather_tools(state: AgentState) -> str:
    """
    Routes after the weather agent's tools have run. This always ends the graph
    to prevent summarization, achieving the OP's primary goal.
    """
    return END
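# Design note: because router_after_weather_tools unconditionally returns END,
# a plain graph.add_edge("weather_tools", END) would behave identically. The
# conditional edge is kept so that every transition out of a tool node goes
# through an explicit, inspectable routing function.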
# ==============================================================================
# 5. Graph Definition and Wiring
# ==============================================================================
def main():
    print("==============================================================")
    print("== LangGraph Solution for Problem 01                        ==")
    print("== (Preventing Agent Summarization in a Fully Custom Graph) ==")
    print("==============================================================")
    print("\nThis script demonstrates a robust LangGraph solution for:")
    print(" - Preventing agents from summarizing tool output.")
    print(" - Maintaining a valid message history throughout the graph.")
    print(" - Implementing a fully custom, explicit graph flow.\n")

    graph = StateGraph(state_schema=AgentState)

    # Add all nodes to the graph
    graph.add_node("supervisor_llm_caller", supervisor_llm_caller)
    graph.add_node("supervisor_tools", ToolNode([handoff_tool]))
    graph.add_node("weather_llm_caller", weather_llm_caller)
    graph.add_node("weather_tools", ToolNode([weather_tool]))

    # --- Define Graph Edges and Flow ---
    graph.add_edge(START, "supervisor_llm_caller")
    graph.add_conditional_edges(
        "supervisor_llm_caller",
        router_after_llm_call,
        {"supervisor_tools": "supervisor_tools", END: END},
    )
    graph.add_conditional_edges(
        "supervisor_tools",
        router_after_supervisor_tools,
        {"weather_llm_caller": "weather_llm_caller", END: END},
    )
    graph.add_conditional_edges(
        "weather_llm_caller",
        router_after_llm_call,
        {"weather_tools": "weather_tools", END: END},
    )
    graph.add_conditional_edges(
        "weather_tools",
        router_after_weather_tools,
        {END: END},
    )

    # Compile the graph
    compiled_graph = graph.compile(checkpointer=InMemorySaver(), name="solution_graph")
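    # Because the graph was compiled with a checkpointer, every invocation
    # below must supply a thread_id under config["configurable"]; state is
    # persisted per thread and reused across invocations on the same thread.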
    # ==========================================================================
    # 6. Execution and Verification
    # ==========================================================================
    print("\n--- Scenario 1: Weather in Sydney (direct tool call expected) ---")
    thread_config_1 = RunnableConfig({"configurable": {"thread_id": "thread_sydney_final"}})
    output_sydney = compiled_graph.invoke(
        {"messages": [HumanMessage(content="What's the weather in Sydney?")]},
        config=thread_config_1,
    )
    print("\n--- History after Sydney invocation (VALID and summary-free) ---")
    for msg in output_sydney["messages"]:
        msg.pretty_print()

    print("\n--- Scenario 2: Weather in Paris (another direct tool call) ---")
    thread_config_2 = RunnableConfig({"configurable": {"thread_id": "thread_paris_final"}})
    output_paris = compiled_graph.invoke(
        {"messages": [HumanMessage(content="What's the weather in Paris?")]},
        config=thread_config_2,
    )
    print("\n--- History after Paris invocation (VALID and summary-free) ---")
    for msg in output_paris["messages"]:
        msg.pretty_print()

    print("\n--- Scenario 3: Simple greeting (no tool call expected from supervisor) ---")
    thread_config_3 = RunnableConfig({"configurable": {"thread_id": "thread_greeting_final"}})
    output_greeting = compiled_graph.invoke(
        {"messages": [HumanMessage(content="Hello there!")]},
        config=thread_config_3,
    )
    print("\n--- History after greeting invocation (VALID and direct response) ---")
    for msg in output_greeting["messages"]:
        msg.pretty_print()

    print("\nSUCCESS! The fully custom solution graph completed all scenarios with correct behavior.")


if __name__ == "__main__":
    main()