Agentic AI · Python · Verified
Tool Use Agent Pattern in Python
Augment an LLM with callable external tools — APIs, code interpreters, databases — so it can take actions and retrieve real-time information beyond its training data.
How to Implement the Tool Use Agent Pattern in Python
Step 1: Define the tool and message types
from dataclasses import dataclass
from typing import Any, Callable, Awaitable
@dataclass
class ToolDefinition:
name: str
description: str
parameters: dict[str, dict[str, str]]
@dataclass
class ToolResult:
tool_name: str
output: str
is_error: bool
@dataclass
class AgentMessage:
role: str # "user" | "assistant" | "tool"
content: str
tool_name: str | None = None
ToolHandler = Callable[[dict[str, Any]], Awaitable[str]]2Step 2: Create the ToolRegistry for registration and execution
class ToolRegistry:
    """Holds tool definitions alongside their async handlers."""

    def __init__(self) -> None:
        # name -> (definition, handler)
        self._tools: dict[str, tuple[ToolDefinition, ToolHandler]] = {}

    def register(self, definition: ToolDefinition, handler: ToolHandler) -> None:
        """Make a tool executable under its declared name."""
        self._tools[definition.name] = (definition, handler)

    def get_definitions(self) -> list[ToolDefinition]:
        """Return every registered definition (sent to the LLM as context)."""
        return [definition for definition, _handler in self._tools.values()]

    async def execute(self, name: str, params: dict[str, Any]) -> ToolResult:
        """Run the named tool; failures are reported in-band, never raised."""
        if (entry := self._tools.get(name)) is None:
            return ToolResult(name, f"Unknown tool: {name}", is_error=True)
        _definition, handler = entry
        try:
            output = await handler(params)
        except Exception as exc:  # surface handler failure to the LLM
            return ToolResult(name, str(exc), is_error=True)
        return ToolResult(name, output, is_error=False)

# Step 3: Implement the tool-use loop
async def tool_use_loop(
    user_message: str,
    registry: ToolRegistry,
    llm: Callable[
        [list[AgentMessage], list[ToolDefinition]],
        Awaitable[dict[str, Any]],
    ],
) -> str:
    """Drive the LLM/tool conversation until a final text answer is produced.

    The loop is bounded at ten LLM round-trips; each pass sends the full
    transcript plus the current tool definitions to the LLM.
    """
    messages: list[AgentMessage] = [AgentMessage("user", user_message)]
    remaining = 10
    while remaining > 0:
        remaining -= 1
        response = await llm(messages, registry.get_definitions())
        messages.append(AgentMessage("assistant", response["content"]))
        if (tool_call := response.get("tool_call")) is None:
            # No tool requested: the assistant's text is the final answer.
            return response["content"]
        result = await registry.execute(tool_call["name"], tool_call["params"])
        messages.append(AgentMessage("tool", result.output, result.tool_name))
    return "Max iterations reached"

"""Structured Tool-Use Agent with schema validation and execution history."""
import asyncio
import logging
import re
import time
import uuid
from dataclasses import dataclass, field
from typing import Any, Callable, Awaitable
logger = logging.getLogger(__name__)
# [step] Define tool schema and message types
@dataclass(frozen=True)
class ToolParam:
type: str
description: str
enum: list[str] | None = None
@dataclass(frozen=True)
class ToolDefinition:
name: str
description: str
parameters: dict[str, ToolParam]
required: list[str]
def __post_init__(self) -> None:
if not re.match(r"^[a-z_][a-z0-9_]*$", self.name):
raise ValueError(f"Invalid tool name: {self.name}")
if len(self.description) < 10:
raise ValueError("Description must be at least 10 characters")
@dataclass(frozen=True)
class ToolCall:
    """A single LLM request to invoke one tool."""

    id: str  # unique id correlating this call with its ToolResult
    tool_name: str
    params: dict[str, Any]
@dataclass(frozen=True)
class ToolResult:
    """Outcome of executing one tool call, with timing metadata."""

    call_id: str  # matches the originating call's id
    tool_name: str
    output: Any  # handler return value, or an error message on failure
    is_error: bool
    duration_ms: float  # wall-clock execution time in milliseconds
@dataclass
class LLMMessage:
role: str # "system" | "user" | "assistant" | "tool"
content: str
tool_call_id: str | None = None
ToolHandler = Callable[[dict[str, Any]], Awaitable[Any]]
# [step] Implement the ToolUseAgent with validation and history
class ToolUseAgent:
    """LLM agent that can call registered tools in a bounded loop.

    Conversation history (including tool results) accumulates across calls
    to :meth:`run`, so one agent instance models one ongoing session.
    """

    def __init__(self, system_prompt: str) -> None:
        # name -> (definition, handler)
        self._tools: dict[str, tuple[ToolDefinition, ToolHandler]] = {}
        self._history: list[LLMMessage] = [
            LLMMessage("system", system_prompt)
        ]

    def register_tool(
        self, definition: ToolDefinition, handler: ToolHandler
    ) -> "ToolUseAgent":
        """Register a tool; returns self so registrations can be chained."""
        self._tools[definition.name] = (definition, handler)
        return self

    async def run(
        self,
        user_message: str,
        *,
        cancel_event: asyncio.Event | None = None,
        max_iterations: int = 15,
    ) -> str:
        """Process one user turn, looping through tool calls as needed.

        Args:
            user_message: The user's input for this turn.
            cancel_event: Optional cooperative-cancellation flag, checked
                before each LLM round-trip.
            max_iterations: Upper bound on LLM round-trips per turn
                (default preserves the previous hard-coded limit of 15).

        Returns:
            The assistant's final text response.

        Raises:
            asyncio.CancelledError: if ``cancel_event`` is set.
            RuntimeError: if the loop exceeds ``max_iterations``.
        """
        self._history.append(LLMMessage("user", user_message))
        for _ in range(max_iterations):
            if cancel_event and cancel_event.is_set():
                raise asyncio.CancelledError("Agent aborted")
            # Recomputed each pass so tools registered mid-run are visible.
            definitions = [d for d, _ in self._tools.values()]
            response = await self._call_llm(self._history, definitions)
            if response.get("tool_call") is None:
                # No tool requested -> this is the final answer.
                self._history.append(
                    LLMMessage("assistant", response["content"])
                )
                return response["content"]
            tc = response["tool_call"]
            tool_call = ToolCall(
                id=str(uuid.uuid4()), tool_name=tc["tool_name"], params=tc["params"]
            )
            self._history.append(
                LLMMessage("assistant", f"Using tool: {tool_call.tool_name}",
                           tool_call.id)
            )
            result = await self._execute_tool(tool_call)
            # Errors are fed back to the LLM in-band so it can recover.
            content = f"Error: {result.output}" if result.is_error else str(result.output)
            self._history.append(
                LLMMessage("tool", content, result.call_id)
            )
        raise RuntimeError("Exceeded max tool-use iterations")

    async def _execute_tool(self, call: ToolCall) -> ToolResult:
        """Validate required params, run the handler, and time the call."""
        entry = self._tools.get(call.tool_name)
        start = time.monotonic()
        if entry is None:
            return ToolResult(call.id, call.tool_name,
                              f'Tool "{call.tool_name}" not registered',
                              True, 0.0)
        definition, handler = entry
        # Validate required parameters before touching the handler.
        for key in definition.required:
            if key not in call.params:
                return ToolResult(
                    call.id, call.tool_name,
                    f'Missing required parameter: "{key}"',
                    True, (time.monotonic() - start) * 1000,
                )
        try:
            output = await handler(call.params)
            return ToolResult(call.id, call.tool_name, output, False,
                              (time.monotonic() - start) * 1000)
        except Exception as exc:  # report handler failure, never crash the loop
            return ToolResult(call.id, call.tool_name, str(exc), True,
                              (time.monotonic() - start) * 1000)

    async def _call_llm(
        self, _messages: list[LLMMessage], _tools: list[ToolDefinition]
    ) -> dict[str, Any]:
        """Stub transport layer. Replace with an actual LLM API call."""
        return {"content": "Mock LLM response"}

# Tool Use Agent Pattern Architecture
(Architecture diagram loads here.)
lightbulb
Tool Use Agent Pattern in the Real World
“A lawyer (the LLM) in a courtroom knows the law but needs a paralegal team (the tools) to pull case files, run searches, and retrieve exhibits. The lawyer directs which file to fetch, the paralegal returns it, and the lawyer integrates that information into their argument — the lawyer's intelligence is amplified by the support staff's ability to reach into the real world.”