Agentic AI · Python · Verified
ReAct Agent Pattern in Python
Interleaves chain-of-thought Reasoning with Action execution, enabling LLMs to dynamically plan, act, and observe in a loop.
How to Implement the ReAct Agent Pattern in Python
Step 1: Define the Tool and AgentStep data structures
from dataclasses import dataclass, field
from typing import Callable, Awaitable
@dataclass
class Tool:
    """A named, callable capability the agent can invoke during the ReAct loop."""

    # Identifier the LLM writes in its "Action" line to select this tool.
    name: str
    # Human-readable summary given to the LLM so it knows when to use the tool.
    description: str
    # Async callable: takes the action-input string, returns an observation string.
    execute: Callable[[str], Awaitable[str]]
@dataclass
class AgentStep:
    """One Thought -> Action -> Observation cycle recorded by the ReAct loop."""

    # Chain-of-thought reasoning the LLM produced for this step.
    thought: str
    # Name of the tool the LLM chose to invoke.
    action: str
    # Raw input string passed to the tool.
    action_input: str
    # Tool output fed back into the next prompt.
    observation: str

# Step 2: Implement the ReAct reasoning loop
MAX_STEPS = 10


async def react_loop(
    query: str,
    tools: list[Tool],
    llm: Callable[[str], Awaitable[str]],
) -> str:
    """Drive the ReAct Thought/Action/Observation loop until a final answer.

    Each iteration prompts the LLM with the query, available tools, and the
    accumulated step history, then either returns the LLM's final answer or
    executes the tool it requested and records the observation. Gives up
    after MAX_STEPS iterations.

    Raises:
        ValueError: if the LLM names a tool that is not in *tools*.
    """
    history: list[AgentStep] = []
    iteration = 0
    while iteration < MAX_STEPS:
        iteration += 1
        reply = await llm(build_prompt(query, tools, history))
        thought, action, action_input, is_final, final_answer = parse_response(reply)
        if is_final:
            return final_answer
        # Resolve the tool the LLM asked for; an unknown name is a hard error.
        chosen = None
        for candidate in tools:
            if candidate.name == action:
                chosen = candidate
                break
        if chosen is None:
            raise ValueError(f"Unknown tool: {action}")
        observation = await chosen.execute(action_input)
        history.append(AgentStep(thought, action, action_input, observation))
    return "Max steps reached without final answer."

# Step 3: Build the prompt and parse LLM responses
def build_prompt(query: str, tools: "list[Tool]", steps: "list[AgentStep]") -> str:
    """Assemble the ReAct prompt for one LLM call.

    The original version dropped the tool descriptions and the step history,
    so the LLM could never see prior observations. This version includes the
    tool catalog, the expected output format, and every recorded step.

    Args:
        query: The user's question.
        tools: Tools the LLM may invoke (name + description are shown).
        steps: Thought/Action/Observation history so far, in order.

    Returns:
        A single newline-joined prompt string.
    """
    tool_lines = "\n".join(f"- {t.name}: {t.description}" for t in tools)
    lines = [
        f"Answer the following query: {query}",
        "Available tools:",
        tool_lines,
        "Respond with either:",
        "Thought: <reasoning>",
        "Action: <tool name>",
        "Action Input: <input>",
        "or, when you can answer directly:",
        "Final Answer: <answer>",
    ]
    # Replay the history so the LLM can reason over prior observations.
    for step in steps:
        lines.append(f"Thought: {step.thought}")
        lines.append(f"Action: {step.action}")
        lines.append(f"Action Input: {step.action_input}")
        lines.append(f"Observation: {step.observation}")
    return "\n".join(lines)
def parse_response(response: str) -> tuple[str, str, str, bool, str]:
    """Parse an LLM reply into (thought, action, action_input, is_final, final_answer).

    The original stub always returned an empty, non-final tuple, so the loop
    could never produce an answer. This version recognizes the ReAct format:

        Thought: <reasoning>
        Action: <tool name>
        Action Input: <input>

    or a terminal reply:

        Final Answer: <answer>

    Lines matching none of the labels are ignored; missing fields stay "".
    """
    thought = action = action_input = final_answer = ""
    is_final = False
    for raw_line in response.splitlines():
        line = raw_line.strip()
        if line.startswith("Final Answer:"):
            final_answer = line[len("Final Answer:"):].strip()
            is_final = True
        elif line.startswith("Thought:"):
            thought = line[len("Thought:"):].strip()
        # Check "Action Input:" before "Action:" is unnecessary for prefix
        # correctness ("Action " != "Action:") but kept explicit for clarity.
        elif line.startswith("Action Input:"):
            action_input = line[len("Action Input:"):].strip()
        elif line.startswith("Action:"):
            action = line[len("Action:"):].strip()
    return (thought, action, action_input, is_final, final_answer)
import asyncio
import logging
import time
from dataclasses import dataclass, field
from typing import Any, Callable, Awaitable, Protocol
logger = logging.getLogger(__name__)
# [step] Define tool and agent data structures
@dataclass(frozen=True)
class ToolResult:
    """Immutable outcome of a single tool invocation."""

    # Whether the tool ran without error.
    success: bool
    # Tool output (or error text) used as the observation for the LLM.
    data: str
    # Optional extra details attached by the tool (contents tool-defined).
    metadata: dict[str, Any] = field(default_factory=dict)
class Tool(Protocol):
    """Structural (duck-typed) interface every agent tool must satisfy."""

    # Identifier matched against the LLM's "action" field.
    name: str
    # Human-readable summary for tool selection.
    description: str
    # Parameter name -> description; presumably shown to the LLM — confirm
    # against the real prompt builder (unused in the visible code).
    parameters: dict[str, str]

    async def execute(self, params: dict[str, str]) -> ToolResult: ...
@dataclass
class AgentStep:
    """One Thought -> Action -> Observation cycle, with a capture timestamp."""

    # LLM's reasoning for this step.
    thought: str
    # Name of the tool invoked.
    action: str
    # Structured parameters passed to the tool.
    action_input: dict[str, str]
    # Result the tool produced.
    observation: ToolResult
    # Wall-clock time the step was created (seconds since the epoch).
    timestamp: float = field(default_factory=time.time)
@dataclass(frozen=True)
class AgentConfig:
    """Immutable configuration for a ReActAgent."""

    # Upper bound on reasoning/tool-execution iterations per run.
    max_steps: int
    # LLM model identifier; consumed once _call_llm is wired to a real API
    # (unused by the visible mock).
    model: str
    # LLM sampling temperature; likewise only meaningful for a real backend.
    temperature: float
    # Tools the agent may invoke, matched by name against the LLM's action.
    tools: list[Tool]
    # System prompt included in every message bundle sent to the LLM.
    system_prompt: str
@dataclass(frozen=True)
class AgentResult:
    """Immutable summary returned by ReActAgent.run()."""

    # Final answer text, or the max-steps fallback message.
    answer: str
    # Full Thought/Action/Observation trace, in execution order.
    steps: list[AgentStep]
    # Sum of the token counts reported by each LLM call.
    total_tokens: int
    # Wall-clock duration of the run in milliseconds.
    duration_ms: float
# [step] Implement the ReAct agent class with full lifecycle
class ReActAgent:
    """ReAct agent: alternates LLM reasoning with tool execution.

    Each run() iteration builds a message bundle from the query and the step
    history, calls the LLM, and either returns its final answer or executes
    the requested tool and records the observation. Supports cooperative
    cancellation via an asyncio.Event and stops after config.max_steps.

    Fix over the original: the step history was kept only on ``self._steps``
    and reset at the top of every run, so two concurrent ``run()`` calls on
    the same agent would clobber each other's history. Each run now owns a
    local list; ``self._steps`` still exposes the most recent run's trace.
    """

    def __init__(self, config: AgentConfig) -> None:
        self._config = config
        # Trace of the most recent run, kept for post-run inspection.
        self._steps: list[AgentStep] = []

    async def run(
        self, query: str, *, cancel_event: asyncio.Event | None = None
    ) -> AgentResult:
        """Execute the ReAct loop for *query* and return an AgentResult.

        Args:
            query: The user's question.
            cancel_event: Optional abort signal, checked between steps.

        Raises:
            asyncio.CancelledError: if *cancel_event* is set between steps.
            ValueError: if the LLM names a tool that is not configured.
        """
        start = time.monotonic()
        # Local history so concurrent runs on one agent stay isolated;
        # self._steps aliases it for callers that inspect the last run.
        steps: list[AgentStep] = []
        self._steps = steps
        total_tokens = 0
        for _ in range(self._config.max_steps):
            # Honor an external abort request before doing more work.
            if cancel_event and cancel_event.is_set():
                raise asyncio.CancelledError("Agent execution aborted")
            messages = self._build_messages(query, steps)
            response = await self._call_llm(messages)
            total_tokens += response["tokens"]
            if response["is_final_answer"]:
                return AgentResult(
                    answer=response["final_answer"],
                    steps=list(steps),
                    total_tokens=total_tokens,
                    duration_ms=(time.monotonic() - start) * 1000,
                )
            # Find and execute the requested tool.
            action = response["action"]
            tool = next(
                (t for t in self._config.tools if t.name == action), None
            )
            if tool is None:
                available = ", ".join(t.name for t in self._config.tools)
                raise ValueError(
                    f'Tool "{action}" not found. Available: {available}'
                )
            observation = await tool.execute(response["action_input"])
            logger.debug(
                "step %d: action=%s success=%s",
                len(steps) + 1, action, observation.success,
            )
            steps.append(
                AgentStep(
                    thought=response["thought"],
                    action=action,
                    action_input=response["action_input"],
                    observation=observation,
                )
            )
        # Loop exhausted without the LLM declaring a final answer.
        return AgentResult(
            answer="Reached maximum steps without a final answer.",
            steps=list(steps),
            total_tokens=total_tokens,
            duration_ms=(time.monotonic() - start) * 1000,
        )

    def _build_messages(
        self, query: str, steps: "list[AgentStep] | None" = None
    ) -> dict[str, Any]:
        """Package system prompt, user query, and step history for the LLM.

        *steps* defaults to the last run's history for backward compatibility
        with callers of the original single-argument form.
        """
        return {
            "system": self._config.system_prompt,
            "query": query,
            "history": self._steps if steps is None else steps,
        }

    async def _call_llm(self, _messages: Any) -> dict[str, Any]:
        # Replace with an actual LLM API call. The returned dict must carry:
        # thought/action/action_input for a tool step, or is_final_answer
        # plus final_answer to terminate, and a tokens count either way.
        return {
            "thought": "",
            "action": "",
            "action_input": {},
            "is_final_answer": True,
            "final_answer": "Mock response",
            "tokens": 150,
        }
[Diagram placeholder: ReAct agent loop — Thought → Action → Observation]
lightbulb
ReAct Agent Pattern in the Real World
“Like a detective investigating a case: they form a hypothesis (Thought), gather evidence by interviewing witnesses or examining clues (Action), analyze what they found (Observation), and then refine their theory. They keep investigating until they solve the case.”