🤖
Agent Party

Output

Select frameworks to compare

Pick one or more frameworks from the bar above

Structured Output

OpenAI

from typing import Literal
from pydantic import BaseModel, Field
from openai import OpenAI

LLM_MODEL = "gpt-5.4"

client = OpenAI()


class TicketAnalysis(BaseModel):
    category: Literal["billing", "technical", "account", "product"]
    priority: Literal["low", "medium", "high", "critical"]
    sentiment: Literal["positive", "neutral", "negative"]
    requires_escalation: bool
    summary: str = Field(description="One-sentence summary of the issue")
    suggested_tags: list[str] = Field(description="1-4 short labels for routing")


# Single-turn conversation; the ticket text is embedded verbatim in the prompt.
user_turn = {
    "role": "user",
    "content": """\
            Analyze this support ticket:
            'I've been charged twice for my Pro subscription this month.
            I contacted support 3 days ago and haven't heard back.
            If this isn't resolved by Friday I'm switching to a competitor.'""",
}

# Passing the Pydantic class via text_format makes the SDK enforce the
# schema on the LLM response and parse it back into a TicketAnalysis.
parsed = client.responses.parse(
    model=LLM_MODEL,
    text_format=TicketAnalysis,
    input=[user_turn],
)
print(parsed.output_parsed)
# category='billing' priority='high' sentiment='negative' requires_escalation=True
# summary='Customer was double-charged for Pro subscription and hasn't received support.'
# suggested_tags=['billing', 'double-charge', 'escalation', 'churn-risk']

Anthropic

from typing import Literal
from pydantic import BaseModel, Field
import anthropic

LLM_MODEL = "claude-opus-4-6"

client = anthropic.Anthropic()


class TicketAnalysis(BaseModel):
    category: Literal["billing", "technical", "account", "product"]
    priority: Literal["low", "medium", "high", "critical"]
    sentiment: Literal["positive", "neutral", "negative"]
    requires_escalation: bool
    summary: str = Field(description="One-sentence summary of the issue")
    suggested_tags: list[str] = Field(description="1-4 short labels for routing")


# Single-turn conversation; the ticket text is embedded verbatim in the prompt.
user_turn = {
    "role": "user",
    "content": """\
            Analyze this support ticket:
            'I've been charged twice for my Pro subscription this month.
            I contacted support 3 days ago and haven't heard back.
            If this isn't resolved by Friday I'm switching to a competitor.'""",
}

# Passing the Pydantic class via output_format makes the SDK enforce the
# schema on the LLM response and parse it back into a TicketAnalysis.
completion = client.messages.parse(
    model=LLM_MODEL,
    max_tokens=1024,
    output_format=TicketAnalysis,
    messages=[user_turn],
)
print(completion.parsed_output)
# category='billing' priority='high' sentiment='negative' requires_escalation=True
# summary='Customer was double-charged for Pro subscription and hasn't received support.'
# suggested_tags=['billing', 'double-charge', 'escalation', 'churn-risk']

Gemini

from typing import Literal
from pydantic import BaseModel, Field
from google import genai
from google.genai import types

LLM_MODEL = "gemini-pro-latest"

client = genai.Client()


class TicketAnalysis(BaseModel):
    category: Literal["billing", "technical", "account", "product"]
    priority: Literal["low", "medium", "high", "critical"]
    sentiment: Literal["positive", "neutral", "negative"]
    requires_escalation: bool
    summary: str = Field(description="One-sentence summary of the issue")
    suggested_tags: list[str] = Field(description="1-4 short labels for routing")


TICKET_PROMPT = """\
        Analyze this support ticket:
        'I've been charged twice for my Pro subscription this month.
        I contacted support 3 days ago and haven't heard back.
        If this isn't resolved by Friday I'm switching to a competitor.'"""

# response_schema takes the Pydantic class directly, and response_mime_type
# forces the model to emit JSON that the SDK parses for us.
result = client.models.generate_content(
    model=LLM_MODEL,
    config=types.GenerateContentConfig(
        response_mime_type="application/json",
        response_schema=TicketAnalysis,
    ),
    contents=TICKET_PROMPT,
)
print(result.parsed)
# category='billing' priority='high' sentiment='negative' requires_escalation=True
# summary='Customer was double-charged for Pro subscription and hasn't received support.'
# suggested_tags=['billing', 'double-charge', 'escalation', 'churn-risk']

Pydantic AI

from typing import Literal
from pydantic import BaseModel, Field
from pydantic_ai import Agent

LLM_MODEL = "openai:gpt-5.4"


class TicketAnalysis(BaseModel):
    category: Literal["billing", "technical", "account", "product"]
    priority: Literal["low", "medium", "high", "critical"]
    sentiment: Literal["positive", "neutral", "negative"]
    requires_escalation: bool
    summary: str = Field(description="One-sentence summary of the issue")
    suggested_tags: list[str] = Field(description="1-4 short labels for routing")


TICKET_PROMPT = """\
    Analyze this support ticket:
    'I've been charged twice for my Pro subscription this month.
    I contacted support 3 days ago and haven't heard back.
    If this isn't resolved by Friday I'm switching to a competitor.'"""

# output_type makes the agent validate its reply against TicketAnalysis
agent = Agent(LLM_MODEL, output_type=TicketAnalysis)

run = agent.run_sync(TICKET_PROMPT)
print(run.output)
# category='billing' priority='high' sentiment='negative' requires_escalation=True
# summary='Customer was double-charged for Pro subscription and hasn't received support.'
# suggested_tags=['billing', 'double-charge', 'escalation', 'churn-risk']

LangGraph

from typing import Literal
from langchain.agents import create_agent
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI

LLM_MODEL = "gpt-5.4"

model = ChatOpenAI(model=LLM_MODEL)


class TicketAnalysis(BaseModel):
    category: Literal["billing", "technical", "account", "product"]
    priority: Literal["low", "medium", "high", "critical"]
    sentiment: Literal["positive", "neutral", "negative"]
    requires_escalation: bool
    summary: str = Field(description="One-sentence summary of the issue")
    suggested_tags: list[str] = Field(description="1-4 short labels for routing")


TICKET_PROMPT = """\
        Analyze this support ticket:
        'I've been charged twice for my Pro subscription this month.
        I contacted support 3 days ago and haven't heard back.
        If this isn't resolved by Friday I'm switching to a competitor.'"""

# response_format routes structured output through the agent API
agent = create_agent(model, tools=[], response_format=TicketAnalysis)

state = agent.invoke({"messages": [("user", TICKET_PROMPT)]})
print(state["structured_response"])
# category='billing' priority='high' sentiment='negative' requires_escalation=True
# summary='Customer was double-charged for Pro subscription and hasn't received support.'
# suggested_tags=['billing', 'double-charge', 'escalation', 'churn-risk']

AI SDK

import { generateText, Output } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const LLM_MODEL = "gpt-5.4";

const ticketSchema = z.object({
  category: z.enum(["billing", "technical", "account", "product"]),
  priority: z.enum(["low", "medium", "high", "critical"]),
  sentiment: z.enum(["positive", "neutral", "negative"]),
  requires_escalation: z.boolean(),
  summary: z.string().describe("One-sentence summary of the issue"),
  suggested_tags: z.array(z.string()).describe("1-4 short labels for routing"),
});

const prompt = `Analyze this support ticket:
  'I've been charged twice for my Pro subscription this month.
  I contacted support 3 days ago and haven't heard back.
  If this isn't resolved by Friday I'm switching to a competitor.'`;

// Output.object() constrains the reply to match the Zod schema
const result = await generateText({
  model: openai(LLM_MODEL),
  output: Output.object({ schema: ticketSchema }),
  prompt,
});
console.log(result.output);
// { category: 'billing', priority: 'high', sentiment: 'negative',
//   requires_escalation: true, summary: 'Customer was double-charged...',
//   suggested_tags: ['billing', 'double-charge', 'escalation', 'churn-risk'] }

Mastra

import { Agent } from "@mastra/core/agent";
import { z } from "zod";

// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";

const ticketAnalysis = z.object({
  category: z.enum(["billing", "technical", "account", "product"]),
  priority: z.enum(["low", "medium", "high", "critical"]),
  sentiment: z.enum(["positive", "neutral", "negative"]),
  requires_escalation: z.boolean(),
  summary: z.string().describe("One-sentence summary of the issue"),
  suggested_tags: z.array(z.string()).describe("1-4 short labels for routing"),
});

const prompt = `Analyze this support ticket:
  'I've been charged twice for my Pro subscription this month.
  I contacted support 3 days ago and haven't heard back.
  If this isn't resolved by Friday I'm switching to a competitor.'`;

const agent = new Agent({
  name: "ticket-analyzer",
  instructions: "You are a helpful assistant.",
  model: LLM_MODEL,
});

// structuredOutput constrains the reply to match the Zod schema
const analysis = await agent.generate(prompt, {
  structuredOutput: { schema: ticketAnalysis },
});
console.log(analysis.object);
// { category: 'billing', priority: 'high', sentiment: 'negative',
//   requires_escalation: true, summary: 'Customer was double-charged...',
//   suggested_tags: ['billing', 'double-charge', 'escalation', 'churn-risk'] }

Tool as Output

OpenAI

import json
from openai import OpenAI

LLM_MODEL = "gpt-5.4"

client = OpenAI()

ARTICLE = """\
BrightHome Inc. of Austin, Texas, has recalled 142,000 SmartHeat Pro
space heaters due to fire and burn hazards. The company received 23
reports of overheating, including 4 fires and 2 minor burn injuries.
No deaths have been reported. The heaters were sold at HomeBase,
WarmthPlus, and Amazon.com from September 2023 through February 2024
for between $89 and $149."""

# Rather than text_format, expose a single function tool: the LLM "calls"
# it, and the structured data arrives as the call's arguments.
RECALL_SCHEMA = {
    "type": "object",
    "properties": {
        "product": {"type": "string"},
        "company": {"type": "string"},
        "units_affected": {"type": "integer"},
        "hazard": {"type": "string", "description": "Primary hazard"},
        "injuries_reported": {"type": "integer"},
        "fatalities": {"type": "boolean"},
        "retailers": {
            "type": "array",
            "items": {"type": "string"},
            "description": "Stores that sold the product",
        },
    },
    "required": [
        "product", "company", "units_affected", "hazard",
        "injuries_reported", "fatalities", "retailers",
    ],
}

tools = [{
    "type": "function",
    "name": "extract_recall",
    "description": "Extract product recall information from an article.",
    "parameters": RECALL_SCHEMA,
}]

response = client.responses.create(
    model=LLM_MODEL,
    tools=tools,
    input=[{
        "role": "user",
        "content": "Extract recall data from this article:\n\n" + ARTICLE,
    }],
)

# arguments is a JSON string, so decode it before use
call = next(item for item in response.output if item.type == "function_call")
print(json.loads(call.arguments))
# {'product': 'SmartHeat Pro', 'company': 'BrightHome Inc.', 'units_affected': 142000,
#  'hazard': 'fire and burn', 'injuries_reported': 2, 'fatalities': False,
#  'retailers': ['HomeBase', 'WarmthPlus', 'Amazon.com']}

Anthropic

import anthropic

LLM_MODEL = "claude-opus-4-6"

client = anthropic.Anthropic()

ARTICLE = """\
BrightHome Inc. of Austin, Texas, has recalled 142,000 SmartHeat Pro
space heaters due to fire and burn hazards. The company received 23
reports of overheating, including 4 fires and 2 minor burn injuries.
No deaths have been reported. The heaters were sold at HomeBase,
WarmthPlus, and Amazon.com from September 2023 through February 2024
for between $89 and $149."""

# Rather than output_format, expose a single tool: the LLM "calls" it,
# and the structured data arrives as the call's input.
RECALL_SCHEMA = {
    "type": "object",
    "properties": {
        "product": {"type": "string"},
        "company": {"type": "string"},
        "units_affected": {"type": "integer"},
        "hazard": {"type": "string", "description": "Primary hazard"},
        "injuries_reported": {"type": "integer"},
        "fatalities": {"type": "boolean"},
        "retailers": {
            "type": "array",
            "items": {"type": "string"},
            "description": "Stores that sold the product",
        },
    },
    "required": [
        "product", "company", "units_affected", "hazard",
        "injuries_reported", "fatalities", "retailers",
    ],
}

tools = [{
    "name": "extract_recall",
    "description": "Extract product recall information from an article.",
    "input_schema": RECALL_SCHEMA,
}]

message = client.messages.create(
    model=LLM_MODEL,
    max_tokens=1024,
    tools=tools,
    messages=[{
        "role": "user",
        "content": "Extract recall data from this article:\n\n" + ARTICLE,
    }],
)

# the tool-use block's .input is already a dict — no json.loads needed
tool_use = next(b for b in message.content if b.type == "tool_use")
print(tool_use.input)
# {'product': 'SmartHeat Pro', 'company': 'BrightHome Inc.', 'units_affected': 142000,
#  'hazard': 'fire and burn', 'injuries_reported': 2, 'fatalities': False,
#  'retailers': ['HomeBase', 'WarmthPlus', 'Amazon.com']}

Gemini

from google import genai
from google.genai import types

LLM_MODEL = "gemini-pro-latest"

client = genai.Client()

ARTICLE = """\
BrightHome Inc. of Austin, Texas, has recalled 142,000 SmartHeat Pro
space heaters due to fire and burn hazards. The company received 23
reports of overheating, including 4 fires and 2 minor burn injuries.
No deaths have been reported. The heaters were sold at HomeBase,
WarmthPlus, and Amazon.com from September 2023 through February 2024
for between $89 and $149."""

# Rather than response_schema, declare a single function: the LLM "calls"
# it, and the structured data arrives as the call's args.
EXTRACT_RECALL = {
    "name": "extract_recall",
    "description": "Extract product recall information from an article.",
    "parameters": {
        "type": "object",
        "properties": {
            "product": {"type": "string"},
            "company": {"type": "string"},
            "units_affected": {"type": "integer"},
            "hazard": {"type": "string", "description": "Primary hazard"},
            "injuries_reported": {"type": "integer"},
            "fatalities": {"type": "boolean"},
            "retailers": {
                "type": "array",
                "items": {"type": "string"},
                "description": "Stores that sold the product",
            },
        },
        "required": [
            "product", "company", "units_affected", "hazard",
            "injuries_reported", "fatalities", "retailers",
        ],
    },
}

response = client.models.generate_content(
    model=LLM_MODEL,
    config=types.GenerateContentConfig(
        tools=[types.Tool(function_declarations=[EXTRACT_RECALL])],
    ),
    contents="Extract recall data from this article:\n\n" + ARTICLE,
)

first_call = response.function_calls[0]
print(dict(first_call.args))
# {'product': 'SmartHeat Pro', 'company': 'BrightHome Inc.', 'units_affected': 142000,
#  'hazard': 'fire and burn', 'injuries_reported': 2, 'fatalities': False,
#  'retailers': ['HomeBase', 'WarmthPlus', 'Amazon.com']}

Pydantic AI

from pydantic_ai import Agent

LLM_MODEL = "openai:gpt-5.4"

agent = Agent(LLM_MODEL)

ARTICLE = """\
BrightHome Inc. of Austin, Texas, has recalled 142,000 SmartHeat Pro
space heaters due to fire and burn hazards. The company received 23
reports of overheating, including 4 fires and 2 minor burn injuries.
No deaths have been reported. The heaters were sold at HomeBase,
WarmthPlus, and Amazon.com from September 2023 through February 2024
for between $89 and $149."""

# Rather than output_type, register a single tool: the LLM "calls" it,
# and the structured data arrives as the tool's arguments.

@agent.tool_plain
def extract_recall(
    product: str,
    company: str,
    units_affected: int,
    hazard: str,
    injuries_reported: int,
    fatalities: bool,
    retailers: list[str],
) -> str:
    """Extract product recall information from an article."""
    extracted = {
        "product": product,
        "company": company,
        "units_affected": units_affected,
        "hazard": hazard,
        "injuries_reported": injuries_reported,
        "fatalities": fatalities,
        "retailers": retailers,
    }
    print(extracted)
    return "Extracted."

agent.run_sync(
    "Extract recall data from this article:\n\n" + ARTICLE
)
# {'product': 'SmartHeat Pro', 'company': 'BrightHome Inc.', 'units_affected': 142000,
#  'hazard': 'fire and burn', 'injuries_reported': 2, 'fatalities': False,
#  'retailers': ['HomeBase', 'WarmthPlus', 'Amazon.com']}

LangGraph

from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI

LLM_MODEL = "gpt-5.4"

model = ChatOpenAI(model=LLM_MODEL)

ARTICLE = """\
BrightHome Inc. of Austin, Texas, has recalled 142,000 SmartHeat Pro
space heaters due to fire and burn hazards. The company received 23
reports of overheating, including 4 fires and 2 minor burn injuries.
No deaths have been reported. The heaters were sold at HomeBase,
WarmthPlus, and Amazon.com from September 2023 through February 2024
for between $89 and $149."""


# Rather than with_structured_output(), bind a tool schema: the LLM
# "calls" the tool, and the structured data arrives as the arguments.
class RecallData(BaseModel):
    """Extract product recall information from an article."""
    product: str
    company: str
    units_affected: int
    hazard: str = Field(description="Primary hazard")
    injuries_reported: int
    fatalities: bool
    retailers: list[str] = Field(description="Stores that sold the product")


# We want the tool-call arguments without ever running a tool, so the
# schema is bound directly on the chat model rather than via an agent.
extractor = model.bind_tools([RecallData])

ai_message = extractor.invoke(
    "Extract recall data from this article:\n\n" + ARTICLE
)
print(ai_message.tool_calls[0]["args"])
# {'product': 'SmartHeat Pro', 'company': 'BrightHome Inc.', 'units_affected': 142000,
#  'hazard': 'fire and burn', 'injuries_reported': 2, 'fatalities': False,
#  'retailers': ['HomeBase', 'WarmthPlus', 'Amazon.com']}

AI SDK

import { generateText, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const LLM_MODEL = "gpt-5.4";

const ARTICLE = `\
BrightHome Inc. of Austin, Texas, has recalled 142,000 SmartHeat Pro
space heaters due to fire and burn hazards. The company received 23
reports of overheating, including 4 fires and 2 minor burn injuries.
No deaths have been reported. The heaters were sold at HomeBase,
WarmthPlus, and Amazon.com from September 2023 through February 2024
for between $89 and $149.`;

// Rather than Output.object(), expose a single tool: the LLM "calls" it,
// and the structured data arrives as the call's input.
const recallSchema = z.object({
  product: z.string(),
  company: z.string(),
  units_affected: z.number().int(),
  hazard: z.string().describe("Primary hazard"),
  injuries_reported: z.number().int(),
  fatalities: z.boolean(),
  retailers: z.array(z.string()).describe("Stores that sold the product"),
});

const extractRecall = tool({
  description: "Extract product recall information from an article.",
  inputSchema: recallSchema,
});

// toolChoice forces the model to call extractRecall
const result = await generateText({
  model: openai(LLM_MODEL),
  tools: { extractRecall },
  toolChoice: { type: "tool", toolName: "extractRecall" },
  prompt: "Extract recall data from this article:\n\n" + ARTICLE,
});
console.log(result.toolCalls[0].input);
// { product: 'SmartHeat Pro', company: 'BrightHome Inc.', units_affected: 142000,
//   hazard: 'fire and burn', injuries_reported: 2, fatalities: false,
//   retailers: ['HomeBase', 'WarmthPlus', 'Amazon.com'] }

Mastra

import { Agent } from "@mastra/core/agent";
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";

const ARTICLE = `\
BrightHome Inc. of Austin, Texas, has recalled 142,000 SmartHeat Pro
space heaters due to fire and burn hazards. The company received 23
reports of overheating, including 4 fires and 2 minor burn injuries.
No deaths have been reported. The heaters were sold at HomeBase,
WarmthPlus, and Amazon.com from September 2023 through February 2024
for between $89 and $149.`;

// Rather than structuredOutput, register a single tool: the LLM "calls"
// it, and the structured data arrives as the tool's arguments.
const recallInput = z.object({
  product: z.string(),
  company: z.string(),
  units_affected: z.number().int(),
  hazard: z.string().describe("Primary hazard"),
  injuries_reported: z.number().int(),
  fatalities: z.boolean(),
  retailers: z.array(z.string()).describe("Stores that sold the product"),
});

const extractRecall = createTool({
  id: "extract-recall",
  description: "Extract product recall information from an article.",
  inputSchema: recallInput,
  execute: async (args) => {
    console.log(args);
    return "Extracted.";
  },
});

const agent = new Agent({
  name: "extractor",
  instructions: "You are a helpful assistant.",
  model: LLM_MODEL,
  tools: { extractRecall },
});

await agent.generate(
  "Extract recall data from this article:\n\n" + ARTICLE,
);
// { product: 'SmartHeat Pro', company: 'BrightHome Inc.', units_affected: 142000,
//   hazard: 'fire and burn', injuries_reported: 2, fatalities: false,
//   retailers: ['HomeBase', 'WarmthPlus', 'Amazon.com'] }