Building Agents with LangGraph

State machines, conditional routing, cycles, and human-in-the-loop checkpoints for retrieval workflows

Open In Colab

📖 Read the full article


Table of Contents

  1. Setup & Installation
  2. Core Concepts: State, Nodes, Edges
  3. Quick Start: create_react_agent
  4. Building the Graph Manually
  5. Checkpointing & Persistence
  6. Human-in-the-Loop
  7. Streaming

1. Setup & Installation

!pip install -q langchain-openai langgraph langchain-core httpx
import os
# os.environ["OPENAI_API_KEY"] = "your-api-key"

2. Core Concepts: State, Nodes, Edges

Every LangGraph app starts with a state definition — a TypedDict declaring what data flows through the graph.

from typing import TypedDict, Annotated
from langgraph.graph import StateGraph, MessagesState
from langgraph.graph.message import add_messages
from langchain_openai import ChatOpenAI


# Custom state with reducer
class AgentState(TypedDict):
    """State schema for the graph.

    The reducer in Annotated controls how node updates are merged:
    `add_messages` appends; un-annotated fields are overwritten.
    """
    messages: Annotated[list, add_messages]  # Append-only message list
    iteration_count: int                      # Overwrite on each update


# Or use the built-in MessagesState
# Equivalent to: class MessagesState(TypedDict): messages: Annotated[list, add_messages]

# Print a short recap of the reducer semantics.
# (Fixed: the arrow characters were mojibake — "β†’" is UTF-8 "→" mis-decoded.)
print("State definitions ready")
print("- Annotated[list, add_messages] → new messages APPENDED")
print("- Plain int → new values OVERWRITE")
from langgraph.graph import END, START
from langchain_openai import ChatOpenAI

# temperature=0 for deterministic, reproducible routing decisions
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)


# --- Node: a function that takes state and returns partial update ---
def agent_node(state: AgentState) -> dict:
    """Invoke the model on the accumulated conversation.

    Returns a partial state update; the add_messages reducer appends the reply.
    """
    reply = llm.invoke(state["messages"])
    return {"messages": [reply]}


# --- Edges: normal, conditional, entry ---
# Normal: graph.add_edge("tools", "agent")
# Conditional: graph.add_conditional_edges("agent", routing_fn, {"tools": "tools", END: END})
# Entry: graph.add_edge(START, "agent")

# (Fixed: arrow characters were mojibake — "β†’" is UTF-8 "→" mis-decoded.)
print("Core concepts: State → Nodes → Edges → Compile → Run")

3. Quick Start: create_react_agent

For the common ReAct pattern, LangGraph provides a prebuilt one-liner.

from langgraph.prebuilt import create_react_agent
from langchain_core.tools import tool
import httpx
import math


@tool
def search_wikipedia(query: str) -> str:
    """Search Wikipedia and return the first paragraph."""
    api_url = "https://en.wikipedia.org/w/api.php"
    # Step 1: full-text search, keep only the top hit
    search_response = httpx.get(
        api_url,
        params={"action": "query", "list": "search", "srsearch": query, "format": "json", "srlimit": 1},
        timeout=10,
    )
    hits = search_response.json().get("query", {}).get("search", [])
    if not hits:
        return "No results found."
    # Step 2: fetch the plain-text intro extract for that page id
    page_id = hits[0]["pageid"]
    extract_params = {
        "action": "query", "prop": "extracts", "exintro": True,
        "explaintext": True, "pageids": page_id, "format": "json",
    }
    extract_payload = httpx.get(api_url, params=extract_params, timeout=10).json()
    pages = extract_payload.get("query", {}).get("pages", {})
    return pages.get(str(page_id), {}).get("extract", "No extract available.")


@tool
def calculator(expression: str) -> str:
    """Evaluate a mathematical expression. Supports +, -, *, /, **, sqrt(), abs().

    Returns the result as a string, or "Error: ..." on invalid input.
    """
    # SECURITY: the previous eval()-based version is not a safe sandbox even
    # with empty __builtins__ (attribute chains like ().__class__ stay
    # reachable). The expression — which comes from the LLM — is instead
    # evaluated by walking a whitelisted AST.
    import ast
    import operator as op

    binary_ops = {
        ast.Add: op.add,
        ast.Sub: op.sub,
        ast.Mult: op.mul,
        ast.Div: op.truediv,
        ast.Pow: op.pow,
    }
    unary_ops = {ast.USub: op.neg, ast.UAdd: op.pos}
    functions = {"sqrt": math.sqrt, "abs": abs}

    def _eval(node):
        # Recursively evaluate one whitelisted AST node; reject anything else.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in binary_ops:
            return binary_ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
            return unary_ops[type(node.op)](_eval(node.operand))
        if (isinstance(node, ast.Call) and isinstance(node.func, ast.Name)
                and node.func.id in functions and not node.keywords):
            return functions[node.func.id](*(_eval(arg) for arg in node.args))
        raise ValueError(f"unsupported syntax: {ast.dump(node)}")

    try:
        result = _eval(ast.parse(expression, mode="eval"))
        return str(result)
    except Exception as e:  # surface the error as text so the agent can retry
        return f"Error: {e}"


# Create agent in one line
agent = create_react_agent(
    model=llm,
    tools=[search_wikipedia, calculator],
    prompt="You are a research assistant. Always verify facts with tools before answering.",
)

# Run one query end-to-end (model -> tools -> model until done)
result = agent.invoke({
    "messages": [{"role": "user", "content": "What is the population of Tokyo divided by 3?"}]
})

# Dump the transcript; truncate long contents, mark tool-call-only messages
for msg in result["messages"]:
    preview = msg.content[:200] if msg.content else "[tool_calls]"
    print(f"{msg.type}: {preview}")

4. Building the Graph Manually

For full control over the agent's control flow.

from langgraph.prebuilt import ToolNode
from langchain_core.messages import AIMessage


class AgentState(TypedDict):
    """Graph state for the manual build: just a reducer-managed message list.

    NOTE: redefines the AgentState from section 2 (drops iteration_count).
    """
    messages: Annotated[list, add_messages]


tools = [search_wikipedia, calculator]
# Attach the tool schemas so the model can emit tool_calls for them
llm_with_tools = llm.bind_tools(tools)


def agent_node(state: AgentState) -> dict:
    """Call the tool-aware model on the conversation and append its reply."""
    ai_message = llm_with_tools.invoke(state["messages"])
    return {"messages": [ai_message]}


def should_continue(state: AgentState) -> str:
    """Route to the tools node while the model is still requesting tool calls."""
    latest = state["messages"][-1]
    wants_tools = isinstance(latest, AIMessage) and bool(latest.tool_calls)
    return "tools" if wants_tools else END


# Build the graph
graph = StateGraph(AgentState)
graph.add_node("agent", agent_node)
graph.add_node("tools", ToolNode(tools))

graph.add_edge(START, "agent")
# Conditional edge: keep looping agent -> tools -> agent until no tool calls
graph.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
graph.add_edge("tools", "agent")

app = graph.compile()

result = app.invoke({
    "messages": [{"role": "user", "content": "What's 15 * 7?"}]
})

# Print the full transcript; empty content means a tool-call-only AI message
for msg in result["messages"]:
    print(f"{msg.type}: {msg.content if msg.content else '[tool_calls]'}")

5. Checkpointing & Persistence

Checkpointers save state after every node — enabling resume-from-failure, multi-turn conversations, and time travel.

from langgraph.checkpoint.memory import MemorySaver

# MemorySaver keeps checkpoints in process memory (fine for demos)
checkpointer = MemorySaver()
app = graph.compile(checkpointer=checkpointer)

# Multi-turn conversation with thread_id
config = {"configurable": {"thread_id": "session-42"}}

result1 = app.invoke(
    {"messages": [{"role": "user", "content": "What is the population of Tokyo?"}]},
    config=config,
)
print("Turn 1:", result1["messages"][-1].content[:200])

# Same thread_id -> turn 2 resumes from saved state, so "that" refers to Tokyo
result2 = app.invoke(
    {"messages": [{"role": "user", "content": "How does that compare to Osaka?"}]},
    config=config,
)
print("Turn 2:", result2["messages"][-1].content[:200])
# Inspect state history (one snapshot per checkpoint, newest first)
for snapshot in app.get_state_history(config):
    print(f"Step: {snapshot.metadata.get('step', '?')}, "
          f"Node: {snapshot.metadata.get('source', '?')}, "
          f"Messages: {len(snapshot.values.get('messages', []))}")

6. Human-in-the-Loop

Pause before the tools node to let a human review proposed tool calls.

# Compile with interrupt before tools
# (requires a checkpointer so the paused state can be saved and resumed)
app_hitl = graph.compile(
    checkpointer=MemorySaver(),
    interrupt_before=["tools"],
)

config = {"configurable": {"thread_id": "review-session"}}

# Step 1: Run until the interrupt
result = app_hitl.invoke(
    {"messages": [{"role": "user", "content": "What is 25 * 4?"}]},
    config=config,
)

# Inspect proposed tool calls
# The graph paused before "tools", so the last message carries the pending tool_calls
state = app_hitl.get_state(config)
last_message = state.values["messages"][-1]
print("Proposed tool calls:")
for tc in last_message.tool_calls:
    print(f"  {tc['name']}({tc['args']})")

# Step 2: Approve and resume (pass None to continue)
# invoke(None) resumes from the checkpoint without appending new input
result = app_hitl.invoke(None, config=config)
print(f"\nFinal: {result['messages'][-1].content}")

7. Streaming

Stream each reasoning step as it happens.

app_stream = graph.compile()

# Each streamed event maps a node name to that node's partial state update
for update in app_stream.stream(
    {"messages": [{"role": "user", "content": "What is 100 / 4?"}]}
):
    for node_name, node_output in update.items():
        print(f"--- {node_name} ---")
        if "messages" not in node_output:
            continue
        for msg in node_output["messages"]:
            text = msg.content if msg.content else "[tool_calls]"
            print(f"  {msg.type}: {text[:150]}")