!pip install -q openai httpx langchain-openai langgraph langchain-core
Building a ReAct Agent from Scratch
Implementing the Reason-Act loop with tool calling, observation parsing, and stopping conditions in LangGraph and LlamaIndex
Table of Contents
1. Setup & Installation
import os
# os.environ["OPENAI_API_KEY"] = "your-api-key-here" # Uncomment and set
2. Define Tools
Tools are Python functions with clear docstrings that the LLM will use.
import json
import math
import httpx
def search_wikipedia(query: str) -> str:
    """Search Wikipedia for a query and return the first paragraph of the result."""
    # Two-step MediaWiki API flow: (1) search for the best-matching page id,
    # (2) fetch that page's plain-text intro extract.
    # Network/HTTP failures are RETURNED as error strings rather than raised:
    # not every agent loop in this notebook wraps tool calls in try/except,
    # and an error observation lets the model recover instead of crashing.
    url = "https://en.wikipedia.org/w/api.php"
    try:
        resp = httpx.get(url, params={
            "action": "query", "list": "search",
            "srsearch": query, "format": "json", "srlimit": 1,
        }, timeout=10)
        # Surface 4xx/5xx explicitly instead of trying to JSON-parse an error page.
        resp.raise_for_status()
        results = resp.json().get("query", {}).get("search", [])
        if not results:
            return "No results found."
        page_id = results[0]["pageid"]
        extract_resp = httpx.get(url, params={
            "action": "query", "prop": "extracts", "exintro": True,
            "explaintext": True, "pageids": page_id, "format": "json",
        }, timeout=10)
        extract_resp.raise_for_status()
        pages = extract_resp.json().get("query", {}).get("pages", {})
        return pages.get(str(page_id), {}).get("extract", "No extract available.")
    except httpx.HTTPError as e:
        return f"Error: Wikipedia request failed ({e})"
def calculator(expression: str) -> str:
    """Evaluate a mathematical expression. Supports +, -, *, /, **, sqrt(), abs()."""
    # SECURITY: eval() on model-generated text is dangerous even with empty
    # __builtins__ — dunder-attribute escapes (e.g. ().__class__...) remain
    # possible. Reject anything outside the arithmetic character set first;
    # the letters cover only the advertised names "sqrt" and "abs" (plus 'e'
    # for scientific notation), and underscores/quotes are never allowed.
    allowed = set("0123456789+-*/%(). ,\t\neabsqrt")
    if not expression or not set(expression) <= allowed:
        return "Error: expression contains unsupported characters"
    try:
        result = eval(expression, {"__builtins__": {}}, {"sqrt": math.sqrt, "abs": abs})
        return str(result)
    except Exception as e:
        # Report evaluation problems (syntax errors, division by zero, ...)
        # as an observation string the agent can reason about.
        return f"Error: {e}"
def get_current_weather(city: str) -> str:
    """Get the current weather for a city."""
    # Canned demo data — a real deployment would call a weather API here.
    known_conditions = {
        "paris": "15°C, partly cloudy",
        "london": "12°C, rainy",
        "tokyo": "22°C, sunny",
        "new york": "18°C, clear",
    }
    key = city.lower()
    if key in known_conditions:
        return known_conditions[key]
    return f"Weather data not available for {city}"
# Tool registry: maps the name the LLM writes in its Action step to the
# Python callable that implements it.
TOOLS = {
    fn.__name__: fn
    for fn in (search_wikipedia, calculator, get_current_weather)
}
# Quick test
print(search_wikipedia("Python programming language")[:200])
print(calculator("25 * 4"))
3. ReAct System Prompt
The prompt defines the Thought-Action-Observation format and available tools.
def build_react_prompt(tools: dict) -> str:
    """Render the ReAct system prompt, listing every tool with its docstring."""
    # One "- name: description" line per registered tool.
    description_lines = []
    for name, func in tools.items():
        description_lines.append(f"- {name}: {func.__doc__}")
    tool_descriptions = "\n".join(description_lines)
    return f"""You are a helpful assistant that answers questions by reasoning
step-by-step and using tools when needed.
## Available Tools
{tool_descriptions}
## Output Format
Always use this exact format:
Thought: <your reasoning about what to do next>
Action: <tool_name>
Action Input: <input string for the tool>
After receiving a tool result, it will appear as:
Observation: <tool output>
Continue the Thought/Action/Observation cycle until you have enough
information. Then respond with:
Thought: I now have enough information to answer.
Answer: <your final answer>
## Rules
- ALWAYS start with a Thought.
- Use exactly ONE tool per Action step.
- If a tool returns an error, reason about it and try a different approach.
- Never make up information — use tools to verify facts.
- Stop after at most 8 reasoning steps.
"""
print(build_react_prompt(TOOLS))
4. Text-Parsed ReAct Agent Loop
The raw ReAct loop: send conversation to LLM, parse output, execute tools, append observation.
from openai import OpenAI
import re
# Module-level client; reads OPENAI_API_KEY from the environment (set in §1).
client = OpenAI()
def parse_react_output(text: str) -> dict:
    """Parse LLM output into thought, action, action_input, or answer."""
    # A final answer takes priority over any action in the same message.
    final = re.search(r"Answer:\s*(.+)", text, re.DOTALL)
    if final is not None:
        return {"type": "answer", "content": final.group(1).strip()}
    # An action is only valid if both the tool name and its input are present.
    tool_match = re.search(r"Action:\s*(\w+)", text)
    arg_match = re.search(r"Action Input:\s*(.+?)(?:\n|$)", text)
    if tool_match is None or arg_match is None:
        # Neither answer nor complete action: let the agent loop continue.
        return {"type": "continue", "content": text}
    return {
        "type": "action",
        "tool": tool_match.group(1).strip(),
        "input": arg_match.group(1).strip(),
    }
def run_react_agent(
    query: str,
    tools: dict = TOOLS,
    model: str = "gpt-4o-mini",
    max_steps: int = 8,
    verbose: bool = True,
) -> str:
    """Run a ReAct agent loop until it produces a final answer."""
    # Seed the conversation: system prompt (format + tool list) plus the query.
    conversation = [
        {"role": "system", "content": build_react_prompt(tools)},
        {"role": "user", "content": query},
    ]
    for step_idx in range(max_steps):
        completion = client.chat.completions.create(
            model=model, messages=conversation, temperature=0, max_tokens=1024,
        )
        reply = completion.choices[0].message.content.strip()
        conversation.append({"role": "assistant", "content": reply})
        if verbose:
            print(f"\n--- Step {step_idx + 1} ---")
            print(reply)
        parsed = parse_react_output(reply)
        if parsed["type"] == "answer":
            # Stopping condition: the model emitted a final Answer.
            return parsed["content"]
        if parsed["type"] != "action":
            # Malformed output — give the model another turn.
            continue
        tool_name, tool_input = parsed["tool"], parsed["input"]
        if tool_name in tools:
            try:
                observation = tools[tool_name](tool_input)
            except Exception as e:
                # Feed tool failures back as observations so the model can retry.
                observation = f"Error executing {tool_name}: {e}"
        else:
            observation = f"Error: Unknown tool '{tool_name}'. Available: {list(tools.keys())}"
        # Observations ride in as user messages per the ReAct text protocol.
        conversation.append({"role": "user", "content": f"Observation: {observation}"})
        if verbose:
            print(f"Observation: {observation[:200]}...")
    return "Agent reached maximum steps without producing a final answer."
# Run the text-parsed ReAct agent
answer = run_react_agent("What is the population of the capital of France?")
print(f"\n🎯 Final Answer: {answer}")
5. ReAct with OpenAI Function Calling
Native function calling eliminates parsing errors — the model returns structured tool_calls objects.
def _make_tool_schema(name: str, description: str, param: str, param_desc: str) -> dict:
    """Build an OpenAI function-calling schema for a tool with one required string parameter."""
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": {
                    param: {"type": "string", "description": param_desc}
                },
                "required": [param],
            },
        },
    }

# JSON schemas advertised to the model; each tool takes a single string argument.
TOOL_SCHEMAS = [
    _make_tool_schema(
        "search_wikipedia",
        "Search Wikipedia for a query and return the first paragraph.",
        "query", "The search query",
    ),
    _make_tool_schema(
        "calculator",
        "Evaluate a mathematical expression.",
        "expression", "Math expression to evaluate",
    ),
    _make_tool_schema(
        "get_current_weather",
        "Get current weather for a city.",
        "city", "City name",
    ),
]
def run_function_calling_agent(
    query: str,
    tools: dict = TOOLS,
    tool_schemas: list = TOOL_SCHEMAS,
    model: str = "gpt-4o-mini",
    max_steps: int = 8,
    verbose: bool = True,
) -> str:
    """ReAct agent using OpenAI's native function calling.

    Unlike the text-parsed loop, the model returns structured tool_calls;
    each is executed and the result appended as a "tool" message. Tool
    execution and argument decoding are guarded (matching run_react_agent)
    so a failing tool becomes an observation instead of crashing the loop.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant. Use tools to answer questions accurately."},
        {"role": "user", "content": query},
    ]
    for step in range(max_steps):
        response = client.chat.completions.create(
            model=model, messages=messages,
            tools=tool_schemas, tool_choice="auto", temperature=0,
        )
        msg = response.choices[0].message
        messages.append(msg)
        # Stopping condition: no tool calls means a final natural-language answer.
        if not msg.tool_calls:
            if verbose:
                print(f"\n--- Step {step + 1}: Final Answer ---")
                print(msg.content)
            return msg.content
        for tool_call in msg.tool_calls:
            name = tool_call.function.name
            args = None
            try:
                # Arguments are model-generated JSON — may be malformed.
                args = json.loads(tool_call.function.arguments)
            except json.JSONDecodeError as e:
                result = f"Error: malformed tool arguments ({e})"
            if verbose:
                print(f"\n--- Step {step + 1}: Tool Call ---")
                print(f" Tool: {name}, Args: {args}")
            if args is not None:
                if name not in tools:
                    result = f"Error: Unknown tool '{name}'"
                else:
                    try:
                        result = tools[name](**args)
                    except Exception as e:
                        # Feed failures back as observations so the model can retry.
                        result = f"Error executing {name}: {e}"
            if verbose:
                print(f" Result: {str(result)[:200]}...")
            # Every tool_call must get a matching "tool" message, even on error.
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": str(result),
            })
    return "Agent reached maximum steps."
# Run the function-calling agent
answer = run_function_calling_agent("What is the population of Tokyo divided by 3?")
print(f"\n🎯 Final Answer: {answer}")
Text Parsing vs. Function Calling
| Aspect | Text Parsing | Native Function Calling |
|---|---|---|
| Reliability | Fragile — regex can break | Robust — structured JSON |
| Model compatibility | Any LLM | Requires function-calling support |
| Parallel tool calls | One per step | Multiple in one step |
| Transparency | Explicit Thought: traces | Thoughts may be hidden |
6. ReAct Agent with LangGraph
Build the ReAct loop as a state graph — nodes are processing steps, edges define flow.
from typing import TypedDict, Annotated
from langgraph.graph import StateGraph, END, START
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool
from langchain_core.messages import AIMessage
# --- State ---
class AgentState(TypedDict):
    """Graph state for the LangGraph agent: just the running message list."""
    # add_messages is a reducer: node returns are APPENDED to the list
    # rather than replacing it.
    messages: Annotated[list, add_messages]
# --- Tools ---
@tool
def search_wikipedia_lc(query: str) -> str:
    """Search Wikipedia and return the first paragraph."""
    # NOTE: the docstring above doubles as the tool description LangChain
    # sends to the model — keep it short and accurate.
    return search_wikipedia(query)  # Reuse our earlier function
@tool
def calculator_lc(expression: str) -> str:
    """Evaluate a math expression. Supports +, -, *, /, **."""
    # Delegates to the plain-Python calculator defined in §2.
    return calculator(expression)
# Tools exposed to the LangGraph agent (weather omitted in this variant).
tools = [search_wikipedia_lc, calculator_lc]
# --- LLM with tools ---
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
# bind_tools attaches the tool schemas so the model can emit tool_calls.
llm_with_tools = llm.bind_tools(tools)
# --- Agent node ---
def agent_node(state: AgentState) -> dict:
    """Run the tool-bound LLM on the conversation and emit its reply."""
    reply = llm_with_tools.invoke(state["messages"])
    # add_messages appends this to the state's message list.
    return {"messages": [reply]}
# --- Routing ---
def should_continue(state: AgentState) -> str:
    """Route to the tool node while the last AI message requests tool calls."""
    latest = state["messages"][-1]
    wants_tools = isinstance(latest, AIMessage) and bool(latest.tool_calls)
    return "tools" if wants_tools else END
# --- Build graph ---
# Wire the ReAct loop as a graph: agent -> (tools -> agent)* -> END.
graph = StateGraph(AgentState)
graph.add_node("agent", agent_node)
graph.add_node("tools", ToolNode(tools))
graph.add_edge(START, "agent")
# After each agent turn, either execute the requested tools or finish.
graph.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
# Tool results always flow back to the agent for another reasoning turn.
graph.add_edge("tools", "agent")
app = graph.compile()
print("✅ LangGraph ReAct agent compiled")
# Run the LangGraph agent
# Invoke the compiled graph with a single user message.
result = app.invoke({
    "messages": [{"role": "user", "content": "What is 25 * 4?"}]
})
# Print a compact trace; tool-call-only AI messages have empty content.
for msg in result["messages"]:
    role = msg.type
    content = msg.content if msg.content else "[tool_calls]"
print(f"{role}: {content[:200]}")
7. Quick Start: create_react_agent
For the common ReAct pattern, LangGraph provides a prebuilt one-liner.
from langgraph.prebuilt import create_react_agent
# Prebuilt one-liner that wires up the same agent/tools loop built by hand above.
agent = create_react_agent(
    model=llm,
    tools=[search_wikipedia_lc, calculator_lc],
    prompt="You are a research assistant. Always verify facts with tools before answering.",
)
result = agent.invoke({
    "messages": [{"role": "user", "content": "What is the population of Tokyo divided by 3?"}]
})
# Print the trace; messages that only carry tool calls have empty content.
for msg in result["messages"]:
    print(f"{msg.type}: {msg.content[:200] if msg.content else '[tool_calls]'}")