Select frameworks to compare
Pick one or more frameworks from the bar above
Tool Call
OpenAI
import json

from openai import OpenAI

LLM_MODEL = "gpt-5.4"
client = OpenAI()


def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    print(f"-> call: get_weather({city})")
    result = f"The weather in {city} is 72°F and sunny."
    print(f"-> result: {result}")
    return result


# JSON-schema tool declaration for the Responses API.
tools = [{
    "type": "function",
    "name": "get_weather",
    "description": "Get the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {
            "city": {"type": "string"},
        },
        "required": ["city"],
    },
}]

# NOTE: renamed from `input` so we don't shadow the builtin input().
input_items = [{
    "role": "user",
    "content": "What's the weather in Paris?",
}]

# step 1: LLM decides to call the tool
response = client.responses.create(
    model=LLM_MODEL, input=input_items, tools=tools,
)
tool_call = next(i for i in response.output if i.type == "function_call")
# tool_call.arguments is a JSON string — decode before dispatch
result = get_weather(**json.loads(tool_call.arguments))

# step 2: send tool result back, LLM generates final response
input_items += response.output
input_items.append({
    "type": "function_call_output",
    "call_id": tool_call.call_id,
    "output": result,
})
response = client.responses.create(
    model=LLM_MODEL, input=input_items, tools=tools,
)
print(response.output_text)
# -> call: get_weather(Paris)
# -> result: The weather in Paris is 72°F and sunny.
# "It's 72°F and sunny in Paris."
Anthropic
# Anthropic Messages API: one manual tool-call round trip.
import anthropic

LLM_MODEL = "claude-opus-4-6"
client = anthropic.Anthropic()


def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    print(f"-> call: get_weather({city})")
    result = f"The weather in {city} is 72°F and sunny."
    print(f"-> result: {result}")
    return result


# Tool declaration — Anthropic's schema key is "input_schema"
# (the OpenAI equivalent above uses "parameters").
tools = [{
    "name": "get_weather",
    "description": "Get the current weather for a city.",
    "input_schema": {
        "type": "object",
        "properties": {
            "city": {"type": "string"},
        },
        "required": ["city"],
    },
}]
messages = [{"role": "user", "content": "What's the weather in Paris?"}]

# step 1: LLM decides to call the tool
response = client.messages.create(
    model=LLM_MODEL, max_tokens=1024, tools=tools, messages=messages,
)
# block.input is already a dict — no json.loads needed
tool_block = next(b for b in response.content if b.type == "tool_use")
result = get_weather(**tool_block.input)

# step 2: send tool result back, LLM generates final response.
# The assistant turn (with its tool_use block) must be appended before
# the tool_result user turn that references tool_block.id.
messages.append({"role": "assistant", "content": response.content})
messages.append({"role": "user", "content": [{
    "type": "tool_result",
    "tool_use_id": tool_block.id,
    "content": result,
}]})
response = client.messages.create(
    model=LLM_MODEL, max_tokens=1024, tools=tools, messages=messages,
)
print(response.content[0].text)
# -> call: get_weather(Paris)
# -> result: The weather in Paris is 72°F and sunny.
# "It's 72°F and sunny in Paris."
Gemini
# Google GenAI SDK: pass the plain Python function as a tool and let the
# SDK execute it and feed the result back (automatic function calling).
from google import genai
from google.genai import types

LLM_MODEL = "gemini-pro-latest"
client = genai.Client()


def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    print(f"-> call: get_weather({city})")
    result = f"The weather in {city} is 72°F and sunny."
    print(f"-> result: {result}")
    return result


# automatic function calling: SDK executes the tool and feeds results back
config = types.GenerateContentConfig(tools=[get_weather])
response = client.models.generate_content(
    model=LLM_MODEL,
    config=config,
    contents="What's the weather in Paris?",
)
print(response.text)
# -> call: get_weather(Paris)
# -> result: The weather in Paris is 72°F and sunny.
# "It's 72°F and sunny in Paris."
Pydantic AI
# Pydantic AI: register a tool via decorator; the agent runs the loop.
from pydantic_ai import Agent

LLM_MODEL = "openai:gpt-5.4"
agent = Agent(LLM_MODEL)


# @agent.tool_plain registers a function the LLM can call
# docstring → tool description, type hints → input schema
@agent.tool_plain
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    print(f"-> call: get_weather({city})")
    result = f"The weather in {city} is 72°F and sunny."
    print(f"-> result: {result}")
    return result


result = agent.run_sync("What's the weather in Paris?")
print(result.output)
# -> call: get_weather(Paris)
# -> result: The weather in Paris is 72°F and sunny.
# "It's 72°F and sunny in Paris."
LangGraph
# LangGraph / LangChain: prebuilt ReAct agent with one decorated tool.
from langchain.tools import tool
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI

LLM_MODEL = "gpt-5.4"
model = ChatOpenAI(model=LLM_MODEL)


# docstring → tool description, type hints → input schema
@tool
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    print(f"-> call: get_weather({city})")
    result = f"The weather in {city} is 72°F and sunny."
    print(f"-> result: {result}")
    return result


# the agent runs the ReAct loop: LLM → tool call → LLM → response
agent = create_agent(model, [get_weather])
result = agent.invoke({
    "messages": [("user", "What's the weather in Paris?")]
})
print(result["messages"][-1].content)
# -> call: get_weather(Paris)
# -> result: The weather in Paris is 72°F and sunny.
# "It's 72°F and sunny in Paris."
AI SDK
// Vercel AI SDK: agent with one typed tool; the agent loops automatically.
import { ToolLoopAgent, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const LLM_MODEL = "gpt-5.4";

// tool() creates a typed tool with Zod inputSchema
const getWeather = tool({
  description: "Get the current weather for a city",
  inputSchema: z.object({ city: z.string() }),
  execute: async ({ city }) => {
    console.log(`-> call: getWeather(${city})`);
    const result = `The weather in ${city} is 72°F and sunny.`;
    console.log(`-> result: ${result}`);
    return result;
  },
});

// ToolLoopAgent loops automatically — no step count needed for single tool
const agent = new ToolLoopAgent({
  model: openai(LLM_MODEL),
  tools: { getWeather },
});

const result = await agent.generate({
  prompt: "What's the weather in Paris?",
});
console.log(result.text);
// -> call: getWeather(Paris)
// -> result: The weather in Paris is 72°F and sunny.
// "It's 72°F and sunny in Paris."
Mastra
// Mastra: agent with one createTool() tool.
import { Agent } from "@mastra/core/agent";
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";

// inputSchema defines typed parameters the LLM must provide
const getWeather = createTool({
  id: "get-weather",
  description: "Get the current weather for a city",
  inputSchema: z.object({ city: z.string() }),
  execute: async ({ city }) => {
    console.log(`-> call: getWeather(${city})`);
    const result = `The weather in ${city} is 72°F and sunny.`;
    console.log(`-> result: ${result}`);
    return result;
  },
});

const agent = new Agent({
  name: "weather-agent",
  instructions: "You are a helpful assistant.",
  model: LLM_MODEL,
  tools: { getWeather },
});

const result = await agent.generate("What's the weather in Paris?");
console.log(result.text);
// -> call: getWeather(Paris)
// -> result: The weather in Paris is 72°F and sunny.
// "It's 72°F and sunny in Paris."

Multi-step
OpenAI
import json

from openai import OpenAI
from pydantic import BaseModel

LLM_MODEL = "gpt-5.4"
client = OpenAI()

# two tools with a data dependency:
# lookup_customer returns an ID that get_balance needs
CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    print(f"-> call: lookup_customer({email})")
    result = CUSTOMERS[email]
    print(f"-> result: {result}")
    return result


def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    print(f"-> call: get_balance({customer_id})")
    result = BALANCES[customer_id]
    print(f"-> result: {result}")
    return result


class LookupCustomerParams(BaseModel):
    email: str


class GetBalanceParams(BaseModel):
    customer_id: str


# build tool schema and register for dispatch
registry = {}


def to_tool(fn, params):
    """Register fn by name for dispatch and return its Responses API tool schema."""
    registry[fn.__name__] = fn
    return {
        "type": "function",
        "name": fn.__name__,
        "description": fn.__doc__,
        "parameters": params.model_json_schema(),
    }


tools = [
    to_tool(lookup_customer, LookupCustomerParams),
    to_tool(get_balance, GetBalanceParams),
]

# NOTE: renamed from `input` so we don't shadow the builtin input().
input_items = [{
    "role": "user",
    "content": "What's the balance for alice@example.com?",
}]

# ReAct loop: LLM calls tools until it can answer
while True:
    response = client.responses.create(
        model=LLM_MODEL, input=input_items, tools=tools,
    )
    tool_calls = [i for i in response.output if i.type == "function_call"]
    if not tool_calls:
        break
    input_items += response.output
    for tc in tool_calls:
        # arguments is a JSON string; decode, then dispatch by tool name
        result = registry[tc.name](**json.loads(tc.arguments))
        input_items.append({
            "type": "function_call_output",
            "call_id": tc.call_id,
            "output": result,
        })
print(response.output_text)
# -> call: lookup_customer(alice@example.com)
# -> result: CUS_8f3a2b
# -> call: get_balance(CUS_8f3a2b)
# -> result: $1,432.50
# "The balance for alice@example.com is $1,432.50."
Anthropic
# Anthropic Messages API: manual ReAct loop over two dependent tools.
import anthropic

LLM_MODEL = "claude-opus-4-6"
client = anthropic.Anthropic()

# two tools with a data dependency:
# lookup_customer returns an ID that get_balance needs
CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    print(f"-> call: lookup_customer({email})")
    result = CUSTOMERS[email]
    print(f"-> result: {result}")
    return result


def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    print(f"-> call: get_balance({customer_id})")
    result = BALANCES[customer_id]
    print(f"-> result: {result}")
    return result


# build tool schema and register for dispatch
registry = {}


def to_tool(fn, input_schema):
    # Register fn by name so the loop below can dispatch tool_use blocks.
    registry[fn.__name__] = fn
    return {
        "name": fn.__name__,
        "description": fn.__doc__,
        "input_schema": input_schema,
    }


tools = [
    to_tool(lookup_customer, {
        "type": "object",
        "properties": {"email": {"type": "string"}},
        "required": ["email"],
    }),
    to_tool(get_balance, {
        "type": "object",
        "properties": {"customer_id": {"type": "string"}},
        "required": ["customer_id"],
    }),
]
messages = [{"role": "user", "content": "What's the balance for alice@example.com?"}]
# ReAct loop: LLM calls tools until it can answer
while True:
    response = client.messages.create(
        model=LLM_MODEL, max_tokens=1024, tools=tools, messages=messages,
    )
    # stop_reason "tool_use" signals the model requested at least one tool
    if response.stop_reason != "tool_use":
        break
    # assistant turn (with tool_use blocks) goes in before the tool results
    messages.append({"role": "assistant", "content": response.content})
    tool_results = []
    for block in response.content:
        if block.type == "tool_use":
            result = registry[block.name](**block.input)
            tool_results.append({
                "type": "tool_result",
                "tool_use_id": block.id,
                "content": result,
            })
    messages.append({"role": "user", "content": tool_results})
print(response.content[0].text)
# -> call: lookup_customer(alice@example.com)
# -> result: CUS_8f3a2b
# -> call: get_balance(CUS_8f3a2b)
# -> result: $1,432.50
# "The balance for alice@example.com is $1,432.50."
Gemini
# Google GenAI SDK: automatic function calling chains both tools for us.
from google import genai
from google.genai import types

LLM_MODEL = "gemini-pro-latest"
client = genai.Client()

# two tools with a data dependency:
# lookup_customer returns an ID that get_balance needs
CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    print(f"-> call: lookup_customer({email})")
    result = CUSTOMERS[email]
    print(f"-> result: {result}")
    return result


def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    print(f"-> call: get_balance({customer_id})")
    result = BALANCES[customer_id]
    print(f"-> result: {result}")
    return result


# automatic function calling: SDK runs the ReAct loop for you
config = types.GenerateContentConfig(
    tools=[lookup_customer, get_balance],
)
response = client.models.generate_content(
    model=LLM_MODEL,
    config=config,
    contents="What's the balance for alice@example.com?",
)
print(response.text)
# -> call: lookup_customer(alice@example.com)
# -> result: CUS_8f3a2b
# -> call: get_balance(CUS_8f3a2b)
# -> result: $1,432.50
# "The balance for alice@example.com is $1,432.50."
Pydantic AI
# Pydantic AI: two decorated tools; the agent chains them automatically.
from pydantic_ai import Agent

LLM_MODEL = "openai:gpt-5.4"
agent = Agent(LLM_MODEL)

CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


@agent.tool_plain
def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    print(f"-> call: lookup_customer({email})")
    result = CUSTOMERS[email]
    print(f"-> result: {result}")
    return result


@agent.tool_plain
def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    print(f"-> call: get_balance({customer_id})")
    result = BALANCES[customer_id]
    print(f"-> result: {result}")
    return result


# the LLM must call lookup_customer first to get the ID,
# then pass it to get_balance — a true data dependency
result = agent.run_sync("What's the balance for alice@example.com?")
print(result.output)
# -> call: lookup_customer(alice@example.com)
# -> result: CUS_8f3a2b
# -> call: get_balance(CUS_8f3a2b)
# -> result: $1,432.50
# "The balance for alice@example.com is $1,432.50."
LangGraph
# LangGraph / LangChain: prebuilt agent loops over two dependent tools.
from langchain.tools import tool
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI

LLM_MODEL = "gpt-5.4"
model = ChatOpenAI(model=LLM_MODEL)

CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


@tool
def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    print(f"-> call: lookup_customer({email})")
    result = CUSTOMERS[email]
    print(f"-> result: {result}")
    return result


@tool
def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    print(f"-> call: get_balance({customer_id})")
    result = BALANCES[customer_id]
    print(f"-> result: {result}")
    return result


# the graph loops: LLM → lookup_customer → LLM → get_balance → LLM
# each iteration is one "step" in the ReAct cycle
agent = create_agent(model, [lookup_customer, get_balance])
result = agent.invoke({
    "messages": [("user", "What's the balance for alice@example.com?")]
})
print(result["messages"][-1].content)
# -> call: lookup_customer(alice@example.com)
# -> result: CUS_8f3a2b
# -> call: get_balance(CUS_8f3a2b)
# -> result: $1,432.50
# "The balance for alice@example.com is $1,432.50."
AI SDK
// Vercel AI SDK: two dependent tools; ToolLoopAgent chains the calls.
import { ToolLoopAgent, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const LLM_MODEL = "gpt-5.4";

const CUSTOMERS: Record<string, string> = { "alice@example.com": "CUS_8f3a2b" };
const BALANCES: Record<string, string> = { CUS_8f3a2b: "$1,432.50" };

const lookupCustomer = tool({
  description: "Look up a customer by email and return their internal ID",
  inputSchema: z.object({ email: z.string() }),
  execute: async ({ email }) => {
    console.log(`-> call: lookupCustomer(${email})`);
    const result = CUSTOMERS[email];
    console.log(`-> result: ${result}`);
    return result;
  },
});

const getBalance = tool({
  description: "Get the account balance for a customer ID",
  inputSchema: z.object({ customerId: z.string() }),
  execute: async ({ customerId }) => {
    console.log(`-> call: getBalance(${customerId})`);
    const result = BALANCES[customerId];
    console.log(`-> result: ${result}`);
    return result;
  },
});

// ToolLoopAgent handles multi-step automatically (defaults to 20 steps max)
const agent = new ToolLoopAgent({
  model: openai(LLM_MODEL),
  tools: { lookupCustomer, getBalance },
});

const result = await agent.generate({
  prompt: "What's the balance for alice@example.com?",
});
console.log(result.text);
// -> call: lookupCustomer(alice@example.com)
// -> result: CUS_8f3a2b
// -> call: getBalance(CUS_8f3a2b)
// -> result: $1,432.50
// "The balance for alice@example.com is $1,432.50."
Mastra
// Mastra: two dependent tools; maxSteps > 1 enables the multi-step loop.
import { Agent } from "@mastra/core/agent";
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";

const CUSTOMERS: Record<string, string> = { "alice@example.com": "CUS_8f3a2b" };
const BALANCES: Record<string, string> = { CUS_8f3a2b: "$1,432.50" };

const lookupCustomer = createTool({
  id: "lookup-customer",
  description: "Look up a customer by email and return their internal ID",
  inputSchema: z.object({ email: z.string() }),
  execute: async ({ email }) => {
    console.log(`-> call: lookupCustomer(${email})`);
    const result = CUSTOMERS[email];
    console.log(`-> result: ${result}`);
    return result;
  },
});

const getBalance = createTool({
  id: "get-balance",
  description: "Get the account balance for a customer ID",
  inputSchema: z.object({ customerId: z.string() }),
  execute: async ({ customerId }) => {
    console.log(`-> call: getBalance(${customerId})`);
    const result = BALANCES[customerId];
    console.log(`-> result: ${result}`);
    return result;
  },
});

const agent = new Agent({
  name: "multi-step-agent",
  instructions: "You are a helpful assistant.",
  model: LLM_MODEL,
  tools: { lookupCustomer, getBalance },
});

// maxSteps controls the ReAct loop iterations (default: 1)
// without maxSteps > 1, Mastra won't loop back after a tool call
const result = await agent.generate(
  "What's the balance for alice@example.com?",
  { maxSteps: 3 },
);
console.log(result.text);
// -> call: lookupCustomer(alice@example.com)
// -> result: CUS_8f3a2b
// -> call: getBalance(CUS_8f3a2b)
// -> result: $1,432.50
// "The balance for alice@example.com is $1,432.50."

Complex Parameters
OpenAI
# OpenAI Responses API: nested Pydantic models supply a rich tool schema,
# and validate the decoded arguments before dispatch.
import json
from typing import Literal
from openai import OpenAI
from pydantic import BaseModel, Field

LLM_MODEL = "gpt-5.4"
client = OpenAI()


# nested Pydantic models → rich JSON schema for the LLM
class LineItem(BaseModel):
    sku: str = Field(description="Product SKU, e.g. 'SKU_921'")
    quantity: int = Field(ge=1, description="Number of units to order")
    gift_wrap: bool = Field(default=False, description="Wrap item in gift packaging")


class ShippingAddress(BaseModel):
    street: str
    city: str
    zip: str = Field(description="Postal/ZIP code")
    country: str = Field(description="ISO 3166-1 alpha-2 country code, e.g. 'US'")


class PlaceOrderParams(BaseModel):
    items: list[LineItem]
    shipping: ShippingAddress
    shipping_method: Literal["standard", "express", "overnight"] = "standard"
    notes: str | None = Field(default=None, description="Delivery instructions")


# json.loads returns raw dicts — wrapping in PlaceOrderParams
# gives us validated, typed access (params.items[0].sku vs i['sku'])
def place_order(params: PlaceOrderParams) -> str:
    """Place an order with line items and shipping details."""
    print(f"-> call: place_order({len(params.items)} items, {params.shipping_method})")
    summary = ", ".join(
        f"{i.quantity}× {i.sku}" + (" (gift)" if i.gift_wrap else "")
        for i in params.items
    )
    result = (
        f"Order ORD_743 confirmed:\n"
        f" {summary}.\n"
        f" {params.shipping_method} to {params.shipping.city}."
    )
    print(f"-> result: {result}")
    return result


# build tool schema and register for dispatch
registry = {}


def to_tool(fn, params_model):
    # Store (fn, validation model) so the loop can validate then call.
    registry[fn.__name__] = (fn, params_model)
    return {
        "type": "function",
        "name": fn.__name__,
        "description": fn.__doc__,
        "parameters": params_model.model_json_schema(),
    }


tools = [to_tool(place_order, PlaceOrderParams)]
messages = [{
    "role": "user",
    "content": """\
Order 2 of SKU_921 with gift wrap and 1 of SKU_114.
Ship overnight to 100 Main St, San Francisco 94105, US.
Leave at the back door.""",
}]
# ReAct loop: LLM calls tools until it can answer
while True:
    response = client.responses.create(
        model=LLM_MODEL, input=messages, tools=tools,
    )
    tool_calls = [i for i in response.output if i.type == "function_call"]
    if not tool_calls:
        break
    messages += response.output
    for tc in tool_calls:
        fn, params_model = registry[tc.name]
        # decode JSON args, then validate through the Pydantic model
        result = fn(params_model(**json.loads(tc.arguments)))
        messages.append({
            "type": "function_call_output",
            "call_id": tc.call_id,
            "output": result,
        })
print(response.output_text)
# -> call: place_order(2 items, overnight)
# -> result: Order ORD_743 confirmed:
#    2× SKU_921 (gift), 1× SKU_114.
#    overnight to San Francisco.
# "Done - order ORD_743 is confirmed.
#  - 2 × SKU_921, gift wrapped
#  - 1 × SKU_114
#  - Shipping: overnight
#  - Address: 100 Main St, San Francisco, 94105, US
#  - Note: Leave at the back door."
Anthropic
# Anthropic Messages API: nested Pydantic models supply the input_schema
# and validate tool_use inputs before dispatch.
from typing import Literal
import anthropic
from pydantic import BaseModel, Field

LLM_MODEL = "claude-opus-4-6"
client = anthropic.Anthropic()


# nested Pydantic models → rich JSON schema for the LLM
class LineItem(BaseModel):
    sku: str = Field(description="Product SKU, e.g. 'SKU_921'")
    quantity: int = Field(ge=1, description="Number of units to order")
    gift_wrap: bool = Field(default=False, description="Wrap item in gift packaging")


class ShippingAddress(BaseModel):
    street: str
    city: str
    zip: str = Field(description="Postal/ZIP code")
    country: str = Field(description="ISO 3166-1 alpha-2 country code, e.g. 'US'")


class PlaceOrderParams(BaseModel):
    items: list[LineItem]
    shipping: ShippingAddress
    shipping_method: Literal["standard", "express", "overnight"] = "standard"
    notes: str | None = Field(default=None, description="Delivery instructions")


# block.input is already a dict — wrapping in PlaceOrderParams
# gives us validated, typed access (params.items[0].sku vs params['items'][0]['sku'])
def place_order(params: PlaceOrderParams) -> str:
    """Place an order with line items and shipping details."""
    print(f"-> call: place_order({len(params.items)} items, {params.shipping_method})")
    summary = ", ".join(
        f"{i.quantity}× {i.sku}" + (" (gift)" if i.gift_wrap else "")
        for i in params.items
    )
    result = (
        f"Order ORD_743 confirmed:\n"
        f" {summary}.\n"
        f" {params.shipping_method} to {params.shipping.city}."
    )
    print(f"-> result: {result}")
    return result


# build tool schema and register for dispatch
registry = {}


def to_tool(fn, params_model):
    # Store (fn, validation model) so the loop can validate then call.
    registry[fn.__name__] = (fn, params_model)
    return {
        "name": fn.__name__,
        "description": fn.__doc__,
        "input_schema": params_model.model_json_schema(),
    }


tools = [to_tool(place_order, PlaceOrderParams)]
messages = [{"role": "user", "content": """\
Order 2 of SKU_921 with gift wrap and 1 of SKU_114.
Ship overnight to 100 Main St, San Francisco 94105, US.
Leave at the back door.""",
}]
# ReAct loop: LLM calls tools until it can answer
while True:
    response = client.messages.create(
        model=LLM_MODEL, max_tokens=1024, tools=tools, messages=messages,
    )
    if response.stop_reason != "tool_use":
        break
    messages.append({"role": "assistant", "content": response.content})
    tool_results = []
    for block in response.content:
        if block.type == "tool_use":
            fn, params_model = registry[block.name]
            result = fn(params_model(**block.input))
            tool_results.append({
                "type": "tool_result",
                "tool_use_id": block.id,
                "content": result,
            })
    messages.append({"role": "user", "content": tool_results})
print(response.content[0].text)
# -> call: place_order(2 items, overnight)
# -> result: Order ORD_743 confirmed:
#    2× SKU_921 (gift), 1× SKU_114.
#    overnight to San Francisco.
# "Done - order ORD_743 is confirmed.
#  - 2 × SKU_921, gift wrapped
#  - 1 × SKU_114
#  - Shipping: overnight
#  - Address: 100 Main St, San Francisco, 94105, US
#  - Note: Leave at the back door."
Gemini
# Google GenAI SDK: manual function declaration with a Pydantic-generated
# JSON schema, plus a hand-written ReAct loop.
from typing import Literal
from google import genai
from google.genai import types
from pydantic import BaseModel, Field

LLM_MODEL = "gemini-pro-latest"
client = genai.Client()


# nested Pydantic models → rich JSON schema for the LLM
class LineItem(BaseModel):
    sku: str = Field(description="Product SKU, e.g. 'SKU_921'")
    quantity: int = Field(ge=1, description="Number of units to order")
    gift_wrap: bool = Field(default=False, description="Wrap item in gift packaging")


class ShippingAddress(BaseModel):
    street: str
    city: str
    zip: str = Field(description="Postal/ZIP code")
    country: str = Field(description="ISO 3166-1 alpha-2 country code, e.g. 'US'")


class PlaceOrderParams(BaseModel):
    items: list[LineItem]
    shipping: ShippingAddress
    shipping_method: Literal["standard", "express", "overnight"] = "standard"
    notes: str | None = Field(default=None, description="Delivery instructions")


def place_order(params: PlaceOrderParams) -> str:
    """Place an order with line items and shipping details."""
    print(f"-> call: place_order({len(params.items)} items, {params.shipping_method})")
    summary = ", ".join(
        f"{i.quantity}× {i.sku}" + (" (gift)" if i.gift_wrap else "")
        for i in params.items
    )
    result = (
        f"Order ORD_743 confirmed:\n"
        f" {summary}.\n"
        f" {params.shipping_method} to {params.shipping.city}."
    )
    print(f"-> result: {result}")
    return result


# Explicit declaration (instead of passing the function itself) so the
# Pydantic-generated schema can be supplied via parameters_json_schema.
tools = [types.Tool(function_declarations=[
    types.FunctionDeclaration(
        name="place_order",
        description=place_order.__doc__,
        parameters_json_schema=PlaceOrderParams.model_json_schema(),
    ),
])]
config = types.GenerateContentConfig(tools=tools)
contents = [
    types.Content(role="user", parts=[types.Part.from_text(text="""\
Order 2 of SKU_921 with gift wrap and 1 of SKU_114.
Ship overnight to 100 Main St, San Francisco 94105, US.
Leave at the back door.""",
    )]),
]
# ReAct loop: LLM calls tools until it can answer
while True:
    response = client.models.generate_content(
        model=LLM_MODEL, config=config, contents=contents,
    )
    if not response.function_calls:
        break
    # append the model turn, then one tool turn per function call
    contents.append(response.candidates[0].content)
    for tool_call in response.function_calls:
        result = place_order(PlaceOrderParams(**tool_call.args))
        contents.append(types.Content(role="tool", parts=[
            types.Part.from_function_response(
                name=tool_call.name,
                response={"result": result},
            ),
        ]))
print(response.text)
# -> call: place_order(2 items, overnight)
# -> result: Order ORD_743 confirmed:
#    2× SKU_921 (gift), 1× SKU_114.
#    overnight to San Francisco.
# "Done - order ORD_743 is confirmed.
#  - 2 × SKU_921, gift wrapped
#  - 1 × SKU_114
#  - Shipping: overnight
#  - Address: 100 Main St, San Francisco, 94105, US
#  - Note: Leave at the back door."
Pydantic AI
# Pydantic AI: complex nested parameter types come straight from type hints.
from typing import Literal
from pydantic import BaseModel, Field
from pydantic_ai import Agent

LLM_MODEL = "openai:gpt-5.4"
agent = Agent(LLM_MODEL)


# nested Pydantic models as type hints → JSON schema for the LLM
class LineItem(BaseModel):
    sku: str = Field(description="Product SKU, e.g. 'SKU_921'")
    quantity: int = Field(ge=1, description="Number of units to order")
    gift_wrap: bool = Field(default=False, description="Wrap item in gift packaging")


class ShippingAddress(BaseModel):
    street: str
    city: str
    zip: str = Field(description="Postal/ZIP code")
    country: str = Field(description="ISO 3166-1 alpha-2 country code, e.g. 'US'")


@agent.tool_plain
def place_order(
    items: list[LineItem],
    shipping: ShippingAddress,
    shipping_method: Literal["standard", "express", "overnight"] = "standard",
    notes: str | None = None,
) -> str:
    """Place an order with line items and shipping details."""
    print(f"-> call: place_order({len(items)} items, {shipping_method})")
    summary = ", ".join(
        f"{i.quantity}× {i.sku}" + (" (gift)" if i.gift_wrap else "")
        for i in items
    )
    result = (
        f"Order ORD_743 confirmed:\n"
        f" {summary}.\n"
        f" {shipping_method} to {shipping.city}."
    )
    print(f"-> result: {result}")
    return result


result = agent.run_sync("""\
Order 2 of SKU_921 with gift wrap and 1 of SKU_114.
Ship overnight to 100 Main St, San Francisco 94105, US.
Leave at the back door.""")
print(result.output)
# -> call: place_order(2 items, overnight)
# -> result: Order ORD_743 confirmed:
#    2× SKU_921 (gift), 1× SKU_114.
#    overnight to San Francisco.
# "Done - order ORD_743 is confirmed.
#  - 2 × SKU_921, gift wrapped
#  - 1 × SKU_114
#  - Shipping: overnight
#  - Address: 100 Main St, San Francisco, 94105, US
#  - Note: Leave at the back door."
LangGraph
# LangGraph / LangChain: nested Pydantic type hints become the tool schema.
from typing import Literal
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI
from langchain.tools import tool
from langchain.agents import create_agent

LLM_MODEL = "gpt-5.4"
model = ChatOpenAI(model=LLM_MODEL)


# nested Pydantic models as type hints → JSON schema for the LLM
class LineItem(BaseModel):
    sku: str = Field(description="Product SKU, e.g. 'SKU_921'")
    quantity: int = Field(ge=1, description="Number of units to order")
    gift_wrap: bool = Field(default=False, description="Wrap item in gift packaging")


class ShippingAddress(BaseModel):
    street: str
    city: str
    zip: str = Field(description="Postal/ZIP code")
    country: str = Field(description="ISO 3166-1 alpha-2 country code, e.g. 'US'")


@tool
def place_order(
    items: list[LineItem],
    shipping: ShippingAddress,
    shipping_method: Literal["standard", "express", "overnight"] = "standard",
    notes: str | None = None,
) -> str:
    """Place an order with line items and shipping details."""
    print(f"-> call: place_order({len(items)} items, {shipping_method})")
    summary = ", ".join(
        f"{i.quantity}× {i.sku}" + (" (gift)" if i.gift_wrap else "")
        for i in items
    )
    result = (
        f"Order ORD_743 confirmed:\n"
        f" {summary}.\n"
        f" {shipping_method} to {shipping.city}."
    )
    print(f"-> result: {result}")
    return result


agent = create_agent(model, [place_order])
result = agent.invoke({
    "messages": [(
        "user",
        """\
Order 2 of SKU_921 with gift wrap and 1 of SKU_114.
Ship overnight to 100 Main St, San Francisco 94105, US.
Leave at the back door.""",
    )],
})
print(result["messages"][-1].content)
# -> call: place_order(2 items, overnight)
# -> result: Order ORD_743 confirmed:
#    2× SKU_921 (gift), 1× SKU_114.
#    overnight to San Francisco.
# "Done - order ORD_743 is confirmed.
#  - 2 × SKU_921, gift wrapped
#  - 1 × SKU_114
#  - Shipping: overnight
#  - Address: 100 Main St, San Francisco, 94105, US
#  - Note: Leave at the back door."
AI SDK
// Vercel AI SDK: deeply nested Zod inputSchema for one tool.
import { ToolLoopAgent, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const LLM_MODEL = "gpt-5.4";

// nested Zod schemas with enums, constraints, defaults, and descriptions
const placeOrder = tool({
  description: "Place an order with line items and shipping details",
  inputSchema: z.object({
    items: z.array(
      z.object({
        sku: z.string().describe("Product SKU, e.g. 'SKU_921'"),
        quantity: z.number().int().min(1).describe("Number of units to order"),
        giftWrap: z.boolean().default(false).describe("Wrap item in gift packaging"),
      }),
    ),
    shipping: z.object({
      street: z.string(),
      city: z.string(),
      zip: z.string().describe("Postal/ZIP code"),
      country: z.string().describe("ISO 3166-1 alpha-2 country code, e.g. 'US'"),
    }),
    shippingMethod: z.enum(["standard", "express", "overnight"]).default("standard"),
    notes: z.string().optional().describe("Delivery instructions"),
  }),
  execute: async ({ items, shipping, shippingMethod }) => {
    console.log(`-> call: placeOrder(${items.length} items, ${shippingMethod})`);
    const summary = items
      .map((i) => `${i.quantity}× ${i.sku}${i.giftWrap ? " (gift)" : ""}`)
      .join(", ");
    const result = [
      `Order ORD_743 confirmed:`,
      ` ${summary}.`,
      ` ${shippingMethod} to ${shipping.city}.`,
    ].join("\n");
    console.log(`-> result: ${result}`);
    return result;
  },
});

const agent = new ToolLoopAgent({
  model: openai(LLM_MODEL),
  tools: { placeOrder },
});

const result = await agent.generate({
  prompt: `Order 2 of SKU_921 with gift wrap and 1 of SKU_114.
Ship overnight to 100 Main St, San Francisco 94105, US.
Leave at the back door.`,
});
console.log(result.text);
// -> call: placeOrder(2 items, overnight)
// -> result: Order ORD_743 confirmed:
//    2× SKU_921 (gift), 1× SKU_114.
//    overnight to San Francisco.
// "Done - order ORD_743 is confirmed.
//  - 2 × SKU_921, gift wrapped
//  - 1 × SKU_114
//  - Shipping: overnight
//  - Address: 100 Main St, San Francisco, 94105, US
//  - Note: Leave at the back door."
Mastra
import { Agent } from "@mastra/core/agent";
import { createTool } from "@mastra/core/tools";
import { z } from "zod";
// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";
// nested Zod schemas with enums, constraints, defaults, and descriptions
const placeOrder = createTool({
  id: "place-order",
  description: "Place an order with line items and shipping details",
  inputSchema: z.object({
    items: z.array(
      z.object({
        sku: z.string().describe("Product SKU, e.g. 'SKU_921'"),
        quantity: z.number().int().min(1).describe("Number of units to order"),
        giftWrap: z.boolean().default(false).describe("Wrap item in gift packaging"),
      }),
    ),
    shipping: z.object({
      street: z.string(),
      city: z.string(),
      zip: z.string().describe("Postal/ZIP code"),
      country: z.string().describe("ISO 3166-1 alpha-2 country code, e.g. 'US'"),
    }),
    shippingMethod: z.enum(["standard", "express", "overnight"]).default("standard"),
    notes: z.string().optional().describe("Delivery instructions"),
  }),
  // NOTE(review): `notes` is accepted by the schema but never read by execute —
  // the model still sees it in the schema and echoes it in its final answer.
  execute: async ({ items, shipping, shippingMethod }) => {
    console.log(`-> call: placeOrder(${items.length} items, ${shippingMethod})`);
    // One line per item, e.g. "2× SKU_921 (gift)".
    const summary = items
      .map((i) => `${i.quantity}× ${i.sku}${i.giftWrap ? " (gift)" : ""}`)
      .join(", ");
    const result = [
      `Order ORD_743 confirmed:`,
      ` ${summary}.`,
      ` ${shippingMethod} to ${shipping.city}.`,
    ].join("\n");
    console.log(`-> result: ${result}`);
    return result;
  },
});
// Mastra agent: name + instructions + "provider/model" string + tool map.
const agent = new Agent({
  name: "order-agent",
  instructions: "You are a helpful assistant.",
  model: LLM_MODEL,
  tools: { placeOrder },
});
const result = await agent.generate(
  `Order 2 of SKU_921 with gift wrap and 1 of SKU_114.
Ship overnight to 100 Main St, San Francisco 94105, US.
Leave at the back door.`,
);
console.log(result.text);
// -> call: placeOrder(2 items, overnight)
// -> result: Order ORD_743 confirmed:
// 2× SKU_921 (gift), 1× SKU_114.
// overnight to San Francisco.
// "Done - order ORD_743 is confirmed.
// - 2 × SKU_921, gift wrapped
// - 1 × SKU_114
// - Shipping: overnight
// - Address: 100 Main St, San Francisco, 94105, US
// - Note: Leave at the back door."

Tool Error Handling
OpenAI
import json
from openai import OpenAI
from pydantic import BaseModel
LLM_MODEL = "gpt-5.4"
client = OpenAI()
# Mock calendar. In production this would query Google Calendar, Outlook, etc.
CALENDAR = {
    "09:00": "Team standup",
    "14:00": "Design review",
    "16:00": "1:1 with manager",
}
def schedule_meeting(time: str, title: str) -> str:
    """Schedule a meeting at a given time."""
    print(f"-> call: schedule_meeting(time={time!r}, title={title!r})")
    if time not in CALENDAR:
        # Free slot: build the confirmation, book it, and return.
        booked = f"Scheduled '{title}' at {time}."
        CALENDAR[time] = title
        print(f"-> result: {booked}")
        return booked
    # Zero-padded HH:MM strings sort chronologically, so `>` compares times.
    workday = [f"{hour:02d}:00" for hour in range(9, 18)]
    available = [slot for slot in workday if slot not in CALENDAR and slot > time]
    print(f"-> error: {time} is already booked ({CALENDAR[time]})")
    # The error message carries the free slots so the model can repair the call.
    raise ValueError(
        f"{time} is already booked ({CALENDAR[time]}). "
        f"Next available slots after {time}: {', '.join(available)}"
    )
# Pydantic model describing schedule_meeting's arguments; its generated
# JSON schema becomes the tool's "parameters" spec via to_tool below.
class ScheduleMeetingParams(BaseModel):
    time: str
    title: str
# build tool schema and register for dispatch
registry = {}
def to_tool(fn, params):
    """Register *fn* for dispatch and return its OpenAI tool spec.

    The function's name and docstring become the tool name and description;
    *params* (a Pydantic model class) supplies the JSON-schema parameters.
    """
    name = fn.__name__
    registry[name] = fn
    return {
        "type": "function",
        "name": name,
        "description": fn.__doc__,
        "parameters": params.model_json_schema(),
    }
tools = [to_tool(schedule_meeting, ScheduleMeetingParams)]
conversation = [{
    "role": "user",
    "content": "Schedule a meeting with Sarah at 14:00 to discuss Q3 roadmap.",
}]
# ReAct loop: LLM calls tools until it can answer
while True:
    response = client.responses.create(
        model=LLM_MODEL,
        instructions="""\
You schedule meetings with the schedule_meeting tool.
If a slot is unavailable, pick the next available slot
automatically and retry.
""",
        input=conversation,
        tools=tools,
    )
    # No tool calls in the output means the model produced its final answer.
    tool_calls = [i for i in response.output if i.type == "function_call"]
    if not tool_calls:
        break
    # Keep the assistant turn (including its tool-call items) in the transcript
    # before appending the matching function_call_output items.
    conversation += response.output
    for tc in tool_calls:
        try:
            result = registry[tc.name](**json.loads(tc.arguments))
        except Exception as e:
            # send the error back so the LLM can repair the tool call
            result = f"Error: {e}"
        conversation.append({
            "type": "function_call_output",
            "call_id": tc.call_id,
            "output": result,
        })
print(response.output_text)
# -> call: schedule_meeting(time='14:00', title='Meeting with Sarah: discuss Q3 roadmap')
# -> error: 14:00 is already booked (Design review)
# -> call: schedule_meeting(time='15:00', title='Meeting with Sarah: discuss Q3 roadmap')
# -> result: Scheduled 'Meeting with Sarah: discuss Q3 roadmap' at 15:00.
# "Scheduled for 15:00: Meeting with Sarah to discuss Q3 roadmap."
Anthropic
import anthropic
LLM_MODEL = "claude-opus-4-6"
client = anthropic.Anthropic()
# Mock calendar. In production this would query Google Calendar, Outlook, etc.
CALENDAR = {
    "09:00": "Team standup",
    "14:00": "Design review",
    "16:00": "1:1 with manager",
}
def schedule_meeting(time: str, title: str) -> str:
    """Schedule a meeting at a given time."""
    print(f"-> call: schedule_meeting(time={time!r}, title={title!r})")
    existing = CALENDAR.get(time)
    if existing is None:
        # Slot is free: book it and confirm.
        CALENDAR[time] = title
        confirmation = f"Scheduled '{title}' at {time}."
        print(f"-> result: {confirmation}")
        return confirmation
    # Zero-padded HH:MM strings compare chronologically as plain strings.
    later_free = ", ".join(
        slot
        for slot in (f"{hour:02d}:00" for hour in range(9, 18))
        if slot not in CALENDAR and slot > time
    )
    print(f"-> error: {time} is already booked ({existing})")
    # Surface the conflict plus the free slots so the model can retry.
    raise ValueError(
        f"{time} is already booked ({existing}). "
        f"Next available slots after {time}: {later_free}"
    )
# Hand-written JSON Schema tool definition; the Anthropic API takes the
# parameter schema under "input_schema".
tools = [{
    "name": "schedule_meeting",
    "description": "Schedule a meeting at a given time.",
    "input_schema": {
        "type": "object",
        "properties": {
            "time": {"type": "string"},
            "title": {"type": "string"},
        },
        "required": ["time", "title"],
    },
}]
messages = [{
    "role": "user",
    "content": "Schedule a meeting with Sarah at 14:00 to discuss Q3 roadmap.",
}]
# ReAct loop: LLM calls tools until it can answer
while True:
    response = client.messages.create(
        model=LLM_MODEL,
        max_tokens=1024,
        system="""\
You schedule meetings with the schedule_meeting tool.
If a slot is unavailable, pick the next available slot
automatically and retry.
""",
        tools=tools,
        messages=messages,
    )
    # Any stop reason other than tool_use means the model gave its final answer.
    if response.stop_reason != "tool_use":
        break
    # Record the assistant turn before answering its tool calls.
    messages.append({"role": "assistant", "content": response.content})
    tool_results = []
    for block in response.content:
        if block.type == "tool_use":
            try:
                result = schedule_meeting(**block.input)
                tool_results.append({
                    "type": "tool_result",
                    "tool_use_id": block.id,
                    "content": result,
                })
            except Exception as e:
                # is_error tells the LLM this tool call failed and should be repaired
                tool_results.append({
                    "type": "tool_result",
                    "tool_use_id": block.id,
                    "content": str(e),
                    "is_error": True,
                })
    # Tool results are sent back inside a user-role message.
    messages.append({"role": "user", "content": tool_results})
print(response.content[0].text)
# -> call: schedule_meeting(time='14:00', title='Meeting with Sarah - Discuss Q3 Roadmap')
# -> error: 14:00 is already booked (Design review)
# -> call: schedule_meeting(time='15:00', title='Meeting with Sarah - Discuss Q3 Roadmap')
# -> result: Scheduled 'Meeting with Sarah - Discuss Q3 Roadmap' at 15:00.
# "Your meeting has been scheduled! Here's a summary:
# - Title: Meeting with Sarah - Discuss Q3 Roadmap
# - Time: 15:00 (moved from 14:00, which was occupied by a Design Review)
# Let me know if you'd like to adjust anything!"
Gemini
from google import genai
from google.genai import types
LLM_MODEL = "gemini-pro-latest"
client = genai.Client()
# Mock calendar. In production this would query Google Calendar, Outlook, etc.
CALENDAR = {
    "09:00": "Team standup",
    "14:00": "Design review",
    "16:00": "1:1 with manager",
}
# with automatic calling, return error info instead of raising —
# the SDK sends it back and the model can retry with a better call
def schedule_meeting(time: str, title: str) -> str:
    """Schedule a meeting at a given time."""
    print(f"-> call: schedule_meeting(time={time!r}, title={title!r})")
    if time in CALENDAR:
        # Zero-padded HH:MM strings sort chronologically under plain comparison.
        hours = [f"{hour:02d}:00" for hour in range(9, 18)]
        open_later = [slot for slot in hours if slot not in CALENDAR and slot > time]
        print(f"-> error: {time} is already booked ({CALENDAR[time]})")
        # Returned (not raised) so the SDK forwards it to the model as a result.
        return (
            f"Error: {time} is already booked ({CALENDAR[time]}). "
            f"Next available slots after {time}: {', '.join(open_later)}"
        )
    CALENDAR[time] = title
    confirmation = f"Scheduled '{title}' at {time}."
    print(f"-> result: {confirmation}")
    return confirmation
# Passing a plain Python function as a tool enables automatic function
# calling: the SDK executes it and feeds the result back (see comment above).
config = types.GenerateContentConfig(
    tools=[schedule_meeting],
    system_instruction="""\
You schedule meetings with the schedule_meeting tool.
If a slot is unavailable, pick the next available slot
automatically and retry.
""",
)
response = client.models.generate_content(
    model=LLM_MODEL,
    config=config,
    contents="Schedule a meeting with Sarah at 14:00 to discuss Q3 roadmap.",
)
print(response.text)
# -> call: schedule_meeting(time='14:00', title='Discuss Q3 roadmap with Sarah')
# -> error: 14:00 is already booked (Design review)
# -> call: schedule_meeting(time='15:00', title='Discuss Q3 roadmap with Sarah')
# -> result: Scheduled 'Discuss Q3 roadmap with Sarah' at 15:00.
# "The 14:00 slot was already booked, so I automatically scheduled the
# meeting with Sarah to discuss the Q3 roadmap for the next available
# slot at 15:00."
Pydantic AI
from pydantic_ai import Agent, ModelRetry
LLM_MODEL = "openai:gpt-5.4"
# Mock calendar. In production this would query Google Calendar, Outlook, etc.
CALENDAR = {
    "09:00": "Team standup",
    "14:00": "Design review",
    "16:00": "1:1 with manager",
}
# These instructions make the model repair the tool call and retry automatically.
# Tools are attached to this agent via the @agent.tool_plain decorator below.
agent = Agent(
    LLM_MODEL,
    instructions="""\
You schedule meetings with the schedule_meeting tool.
If a slot is unavailable, pick the next available slot
automatically and retry.
""",
)
@agent.tool_plain(retries=2)
def schedule_meeting(time: str, title: str) -> str:
    """Schedule a meeting at a given time.
    Args:
        time: Time in HH:MM 24-hour format, e.g. '09:00', '14:30'.
        title: Meeting title.
    """
    print(f"-> call: schedule_meeting(time={time!r}, title={title!r})")
    if time not in CALENDAR:
        # Slot is free: book it and confirm.
        CALENDAR[time] = title
        print(f"-> result: Scheduled '{title}' at {time}")
        return f"Scheduled '{title}' at {time}."
    # Zero-padded HH:MM strings compare chronologically as plain strings.
    open_slots = [
        candidate
        for candidate in (f"{hour:02d}:00" for hour in range(9, 18))
        if candidate not in CALENDAR and candidate > time
    ]
    print(f"-> error: {time} is already booked ({CALENDAR[time]})")
    # ModelRetry sends the tool failure back so the model can fix the input.
    raise ModelRetry(
        f"{time} is already booked ({CALENDAR[time]}). "
        f"Next available slots after {time}: {', '.join(open_slots)}"
    )
# run_sync drives the tool-call/retry loop to completion and returns the final output.
result = agent.run_sync("Schedule a meeting with Sarah at 14:00 to discuss Q3 roadmap.")
print(result.output)
# -> call: schedule_meeting(time='14:00', title='Discuss Q3 roadmap with Sarah')
# -> error: 14:00 is already booked (Design review)
# -> call: schedule_meeting(time='15:00', title='Discuss Q3 roadmap with Sarah')
# -> result: Scheduled 'Discuss Q3 roadmap with Sarah' at 15:00
# "Scheduled with Sarah at 15:00: Discuss Q3 roadmap."
LangGraph
from langchain.tools import tool
from langchain.messages import ToolMessage
from langchain.agents import create_agent
from langchain.agents.middleware import wrap_tool_call
from langchain_openai import ChatOpenAI
LLM_MODEL = "gpt-5.4"
model = ChatOpenAI(model=LLM_MODEL)
# Mock calendar. In production this would query Google Calendar, Outlook, etc.
CALENDAR = {
    "09:00": "Team standup",
    "14:00": "Design review",
    "16:00": "1:1 with manager",
}
@tool
def schedule_meeting(time: str, title: str) -> str:
    """Schedule a meeting at a given time."""
    print(f"-> call: schedule_meeting(time={time!r}, title={title!r})")
    if time not in CALENDAR:
        # Slot is free: build the confirmation and book it.
        confirmation = f"Scheduled '{title}' at {time}."
        CALENDAR[time] = title
        print(f"-> result: {confirmation}")
        return confirmation
    # Zero-padded HH:MM strings stay chronological under string comparison.
    workday = [f"{hour:02d}:00" for hour in range(9, 18)]
    still_free = [slot for slot in workday if slot not in CALENDAR and slot > time]
    print(f"-> error: {time} is already booked ({CALENDAR[time]})")
    # Raising is deliberate: the handle_errors middleware turns it into a ToolMessage.
    raise ValueError(
        f"{time} is already booked ({CALENDAR[time]}). "
        f"Next available slots after {time}: {', '.join(still_free)}"
    )
# wrap_tool_call catches errors and surfaces them to the LLM
@wrap_tool_call
def handle_errors(request, handler):
    """Run the tool call; on failure, report the error to the model instead of crashing."""
    try:
        outcome = handler(request)
    except Exception as exc:
        # Replace the failed call's result with an error ToolMessage the model can read.
        return ToolMessage(
            content=f"Error: {exc}",
            tool_call_id=request.tool_call["id"],
        )
    return outcome
agent = create_agent(
    model,
    [schedule_meeting],
    system_prompt="""\
You schedule meetings with the schedule_meeting tool.
If a slot is unavailable, pick the next available slot
automatically and retry.
""",
    # Middleware wraps every tool call (see handle_errors above).
    middleware=[handle_errors],
)
result = agent.invoke({
    "messages": [("user", "Schedule a meeting with Sarah at 14:00 to discuss Q3 roadmap.")]
})
# The last message in the state holds the model's final answer.
print(result["messages"][-1].content)
# -> call: schedule_meeting(time='14:00', title='Discuss Q3 roadmap with Sarah')
# -> error: 14:00 is already booked (Design review)
# -> call: schedule_meeting(time='15:00', title='Discuss Q3 roadmap with Sarah')
# -> result: Scheduled 'Discuss Q3 roadmap with Sarah' at 15:00.
# "Scheduled with Sarah for 15:00: Discuss Q3 roadmap."
AI SDK
import { ToolLoopAgent, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";
const LLM_MODEL = "gpt-5.4";
// Mock calendar of booked slots (HH:MM -> meeting title).
const CALENDAR = {
  "09:00": "Team standup",
  "14:00": "Design review",
  "16:00": "1:1 with manager",
};
const scheduleMeeting = tool({
  description: "Schedule a meeting at a given time",
  inputSchema: z.object({
    time: z.string(),
    title: z.string(),
  }),
  execute: async ({ time, title }) => {
    console.log(`-> call: scheduleMeeting(time=${JSON.stringify(time)}, title=${JSON.stringify(title)})`);
    if (time in CALENDAR) {
      // Times are zero-padded HH:MM strings, so plain string comparison
      // keeps them in chronological order for this example.
      // Candidate slots are the 9 hourly times 09:00-17:00.
      const available = Array.from({ length: 9 }, (_, index) => `${String(index + 9).padStart(2, "0")}:00`)
        .filter((slot) => !(slot in CALENDAR))
        .filter((slot) => slot > time);
      // thrown errors are surfaced back to the LLM automatically
      console.log(`-> error: ${time} is already booked (${CALENDAR[time as keyof typeof CALENDAR]})`);
      throw new Error(
        `${time} is already booked (${CALENDAR[time as keyof typeof CALENDAR]}). `
        + `Next available slots after ${time}: ${available.join(", ")}`
      );
    }
    const result = `Scheduled '${title}' at ${time}.`;
    CALENDAR[time as keyof typeof CALENDAR] = title;
    console.log(`-> result: ${result}`);
    return result;
  },
});
// ToolLoopAgent keeps calling tools (retrying on errors) until the model answers.
const agent = new ToolLoopAgent({
  model: openai(LLM_MODEL),
  instructions: `\
You schedule meetings with the scheduleMeeting tool.
If a slot is unavailable, pick the next available slot
automatically and retry.
`,
  tools: { scheduleMeeting },
});
const result = await agent.generate({
  prompt: "Schedule a meeting with Sarah at 14:00 to discuss Q3 roadmap.",
});
console.log(result.text);
// -> call: scheduleMeeting(time="14:00", title="Discuss Q3 roadmap with Sarah")
// -> error: 14:00 is already booked (Design review)
// -> call: scheduleMeeting(time="15:00", title="Discuss Q3 roadmap with Sarah")
// -> result: Scheduled 'Discuss Q3 roadmap with Sarah' at 15:00.
// "Scheduled with Sarah at 15:00: Discuss Q3 roadmap."
Mastra
import { Agent } from "@mastra/core/agent";
import { createTool } from "@mastra/core/tools";
import { z } from "zod";
// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";
// Mock calendar of booked slots (HH:MM -> meeting title).
const CALENDAR = {
  "09:00": "Team standup",
  "14:00": "Design review",
  "16:00": "1:1 with manager",
};
const scheduleMeeting = createTool({
  id: "schedule-meeting",
  description: "Schedule a meeting at a given time",
  inputSchema: z.object({
    time: z.string(),
    title: z.string(),
  }),
  execute: async ({ time, title }) => {
    console.log(`-> call: scheduleMeeting(time=${JSON.stringify(time)}, title=${JSON.stringify(title)})`);
    if (time in CALENDAR) {
      // Times are zero-padded HH:MM strings, so plain string comparison
      // keeps them in chronological order for this example.
      // Candidate slots are the 9 hourly times 09:00-17:00.
      const available = Array.from({ length: 9 }, (_, index) => `${String(index + 9).padStart(2, "0")}:00`)
        .filter((slot) => !(slot in CALENDAR))
        .filter((slot) => slot > time);
      // Returning the error text keeps the retry behavior without noisy logs.
      console.log(`-> error: ${time} is already booked (${CALENDAR[time as keyof typeof CALENDAR]})`);
      return `Error: ${time} is already booked (${CALENDAR[time as keyof typeof CALENDAR]}). `
        + `Next available slots after ${time}: ${available.join(", ")}`;
    }
    const result = `Scheduled '${title}' at ${time}.`;
    CALENDAR[time as keyof typeof CALENDAR] = title;
    console.log(`-> result: ${result}`);
    return result;
  },
});
// The instructions make the model retry with the next free slot when the
// tool returns an "Error: ..." string.
const agent = new Agent({
  name: "error-handling-agent",
  instructions: `\
You schedule meetings with the scheduleMeeting tool.
If a slot is unavailable, pick the next available slot
automatically and retry.
`,
  model: LLM_MODEL,
  tools: { scheduleMeeting },
});
const result = await agent.generate("Schedule a meeting with Sarah at 14:00 to discuss Q3 roadmap.");
console.log(result.text);
// -> call: scheduleMeeting(time="14:00", title="Meeting with Sarah: discuss Q3 roadmap")
// -> error: 14:00 is already booked (Design review)
// -> call: scheduleMeeting(time="15:00", title="Meeting with Sarah: discuss Q3 roadmap")
// -> result: Scheduled 'Meeting with Sarah: discuss Q3 roadmap' at 15:00.
// "Scheduled with Sarah at 15:00 to discuss Q3 roadmap."