Select frameworks to compare
Pick one or more frameworks from the bar above
Text Streaming
OpenAI
from openai import OpenAI

LLM_MODEL = "gpt-5.4"

client = OpenAI()

# stream=True returns typed server-sent events
stream = client.responses.create(
    model=LLM_MODEL,
    input="Explain what an API is in a few sentences.",
    stream=True,
)
for event in stream:
    # only text-delta events carry printable output; all other event types are skipped
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
print()
# An API (Application Programming Interface) is a set of rules and protocols
# that allows different software applications to communicate with each other...
Anthropic
import anthropic

LLM_MODEL = "claude-opus-4-6"

client = anthropic.Anthropic()

# .messages.stream() returns a context manager with convenience iterators
with client.messages.stream(
    model=LLM_MODEL,
    max_tokens=1024,
    messages=[{"role": "user", "content": "Explain what an API is in a few sentences."}],
) as stream:
    # text_stream yields only the plain text deltas, skipping non-text events
    for text in stream.text_stream:
        print(text, end="", flush=True)
print()
# An API (Application Programming Interface) is a set of rules and protocols
# that allows different software applications to communicate with each other...
Gemini
from google import genai

LLM_MODEL = "gemini-pro-latest"

client = genai.Client()

# generate_content_stream instead of generate_content — that's it
for chunk in client.models.generate_content_stream(
    model=LLM_MODEL,
    contents="Explain what an API is in a few sentences.",
):
    print(chunk.text, end="", flush=True)
print()
# An API (Application Programming Interface) is a set of rules and protocols
# that allows different software applications to communicate with each other...
Pydantic AI
from pydantic_ai import Agent

# provider-prefixed model string ("provider:model-name")
LLM_MODEL = "openai:gpt-5.4"

agent = Agent(LLM_MODEL)

# stream_text(delta=True) yields each token as it arrives
result = agent.run_stream_sync(
    "Explain what an API is in a few sentences.",
)
for text in result.stream_text(delta=True):
    print(text, end="", flush=True)
print()
# An API (Application Programming Interface) is a set of rules and protocols
# that allows different software applications to communicate with each other...
LangGraph
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI

LLM_MODEL = "gpt-5.4"

model = ChatOpenAI(model=LLM_MODEL)
agent = create_agent(model, tools=[])

# stream_mode="messages" gives token-level deltas (not state updates)
for chunk, metadata in agent.stream(
    {"messages": [("user", "Explain what an API is in a few sentences.")]},
    stream_mode="messages",
):
    # guard: non-text chunks may lack a .content attribute or have empty content
    if hasattr(chunk, "content") and chunk.content:
        print(chunk.content, end="", flush=True)
print()
# An API (Application Programming Interface) is a set of rules and protocols
# that allows different software applications to communicate with each other...
AI SDK
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const LLM_MODEL = "gpt-5.4";

// streamText returns an object with async iterable stream properties
const result = streamText({
  model: openai(LLM_MODEL),
  prompt: "Explain what an API is in a few sentences.",
});
// textStream yields plain text chunks as they arrive
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
console.log();
// An API (Application Programming Interface) is a set of rules and protocols
// that allows different software applications to communicate with each other...
Mastra
import { Agent } from "@mastra/core/agent";

// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";

const agent = new Agent({
  name: "streaming-agent",
  instructions: "You are a helpful assistant.",
  model: LLM_MODEL,
});

// .stream() returns an object with typed stream properties
const stream = await agent.stream(
  "Explain what an API is in a few sentences.",
);
// textStream yields plain text chunks as they arrive
for await (const chunk of stream.textStream) {
  process.stdout.write(chunk);
}
console.log();
// An API (Application Programming Interface) is a set of rules and protocols
// that allows different software applications to communicate with each other...

Stream Events
OpenAI
import json
from openai import OpenAI
from pydantic import BaseModel

LLM_MODEL = "gpt-5.4"

client = OpenAI()

# same multi-step tools: lookup_customer -> get_balance
CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    return CUSTOMERS[email]


def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    return BALANCES[customer_id]


class LookupCustomerParams(BaseModel):
    email: str


class GetBalanceParams(BaseModel):
    customer_id: str


# tool name -> callable, populated by to_tool() below
registry = {}


def to_tool(fn, params):
    """Register *fn* for dispatch and build its tool schema from *params*."""
    registry[fn.__name__] = fn
    return {
        "type": "function",
        "name": fn.__name__,
        "description": fn.__doc__,
        "parameters": params.model_json_schema(),
    }


tools = [
    to_tool(lookup_customer, LookupCustomerParams),
    to_tool(get_balance, GetBalanceParams),
]

# named input_items rather than `input` to avoid shadowing the builtin
input_items = [{"role": "user", "content": "What's the balance for alice@example.com?"}]

# stream the full agent lifecycle: tool calls, results, and final text
while True:
    stream = client.responses.create(
        model=LLM_MODEL, input=input_items, tools=tools, stream=True,
    )
    tool_calls = []
    output_items = []
    for event in stream:
        # completed output item — collect for next round
        if event.type == "response.output_item.done":
            output_items.append(event.item)
            if event.item.type == "function_call":
                tc = event.item
                print(f"-> call: {tc.name}({tc.arguments})")
                result = registry[tc.name](**json.loads(tc.arguments))
                print(f"-> result: {result}")
                tool_calls.append((tc, result))
        # text delta — stream to stdout
        elif event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)
    # no tool calls in this round means the model produced its final answer
    if not tool_calls:
        break
    # feed the model's output plus each tool result back for the next round
    input_items += output_items
    for tc, result in tool_calls:
        input_items.append({
            "type": "function_call_output",
            "call_id": tc.call_id,
            "output": result,
        })
print()
# -> call: lookup_customer({"email":"alice@example.com"})
# -> result: CUS_8f3a2b
# -> call: get_balance({"customer_id":"CUS_8f3a2b"})
# -> result: $1,432.50
# The balance for alice@example.com is $1,432.50.
Anthropic
import json
import anthropic

LLM_MODEL = "claude-opus-4-6"

client = anthropic.Anthropic()

# same multi-step tools: lookup_customer -> get_balance
CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    return CUSTOMERS[email]


def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    return BALANCES[customer_id]


# tool name -> callable, for dispatching tool_use blocks
registry = {"lookup_customer": lookup_customer, "get_balance": get_balance}

tools = [
    {
        "name": "lookup_customer",
        "description": "Look up a customer by email and return their internal ID",
        "input_schema": {
            "type": "object",
            "properties": {"email": {"type": "string"}},
            "required": ["email"],
        },
    },
    {
        "name": "get_balance",
        "description": "Get the account balance for a customer ID",
        "input_schema": {
            "type": "object",
            "properties": {"customer_id": {"type": "string"}},
            "required": ["customer_id"],
        },
    },
]

messages = [{"role": "user", "content": "What's the balance for alice@example.com?"}]

# stream the full agent lifecycle: tool calls, results, and final text
while True:
    with client.messages.stream(
        model=LLM_MODEL, max_tokens=1024, tools=tools, messages=messages,
    ) as stream:
        for event in stream:
            # completed tool_use block — print name and args
            if event.type == "content_block_stop" and event.content_block.type == "tool_use":
                tb = event.content_block
                print(f"-> call: {tb.name}({json.dumps(tb.input)})")
            # text delta — stream to stdout
            elif event.type == "text":
                print(event.text, end="", flush=True)
        response = stream.get_final_message()
    # anything other than tool_use means the model produced its final answer
    if response.stop_reason != "tool_use":
        break
    # echo the assistant turn, then answer each tool_use with a tool_result
    messages.append({"role": "assistant", "content": response.content})
    tool_results = []
    for block in response.content:
        if block.type == "tool_use":
            result = registry[block.name](**block.input)
            print(f"-> result: {result}")
            tool_results.append({
                "type": "tool_result",
                "tool_use_id": block.id,
                "content": result,
            })
    messages.append({"role": "user", "content": tool_results})
print()
# -> call: lookup_customer({"email": "alice@example.com"})
# -> result: CUS_8f3a2b
# -> call: get_balance({"customer_id": "CUS_8f3a2b"})
# -> result: $1,432.50
# The balance for alice@example.com is $1,432.50.
Gemini
import json
from google import genai
from google.genai import types

LLM_MODEL = "gemini-pro-latest"

client = genai.Client()

# same multi-step tools: lookup_customer -> get_balance
CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    return CUSTOMERS[email]


def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    return BALANCES[customer_id]


# tool name -> callable, for dispatching function_call parts
registry = {"lookup_customer": lookup_customer, "get_balance": get_balance}

# disable automatic function calling to observe events manually
# (AFC would execute tools internally, hiding them from the stream)
config = types.GenerateContentConfig(
    tools=[lookup_customer, get_balance],
    automatic_function_calling=types.AutomaticFunctionCallingConfig(
        disable=True,
    ),
)

contents = ["What's the balance for alice@example.com?"]

# stream the full agent lifecycle: tool calls, results, and final text
while True:
    func_calls = []
    model_parts = []
    for chunk in client.models.generate_content_stream(
        model=LLM_MODEL, config=config, contents=contents,
    ):
        # some chunks carry no candidate content/parts — treat them as empty
        candidate = chunk.candidates[0] if chunk.candidates else None
        parts = candidate.content.parts if candidate and candidate.content else None
        for part in parts or []:
            if part.function_call:
                fc = part.function_call
                print(f"-> call: {fc.name}({json.dumps(dict(fc.args))})")
                func_calls.append(fc)
                model_parts.append(part)
            elif part.text:
                print(part.text, end="", flush=True)
    # no function calls in this round means the model produced its final answer
    if not func_calls:
        break
    # preserve original parts (includes thought_signature required by API)
    contents.append(types.Content(role="model", parts=model_parts))
    tool_parts = []
    for fc in func_calls:
        result = registry[fc.name](**dict(fc.args))
        print(f"-> result: {result}")
        tool_parts.append(types.Part.from_function_response(
            name=fc.name, response={"result": result},
        ))
    contents.append(types.Content(role="tool", parts=tool_parts))
print()
# -> call: lookup_customer({"email": "alice@example.com"})
# -> result: CUS_8f3a2b
# -> call: get_balance({"customer_id": "CUS_8f3a2b"})
# -> result: $1,432.50
# The balance for alice@example.com is $1,432.50.
Pydantic AI
import asyncio
import json
from pydantic_ai import Agent
from pydantic_ai.messages import (
    FunctionToolCallEvent,
    FunctionToolResultEvent,
    PartStartEvent,
    PartDeltaEvent,
    TextPart,
    TextPartDelta,
)

LLM_MODEL = "openai:gpt-5.4"

agent = Agent(LLM_MODEL)

CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


@agent.tool_plain
def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    return CUSTOMERS[email]


@agent.tool_plain
def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    return BALANCES[customer_id]


# run_stream_events yields every lifecycle event — no manual loop needed
async def main():
    async for event in agent.run_stream_events(
        "What's the balance for alice@example.com?",
    ):
        if isinstance(event, FunctionToolCallEvent):
            # args may arrive as a dict or as a pre-serialized JSON string
            args = json.dumps(event.part.args) if isinstance(event.part.args, dict) else event.part.args
            print(f"-> call: {event.part.tool_name}({args})")
        elif isinstance(event, FunctionToolResultEvent):
            print(f"-> result: {event.result.content}")
        elif isinstance(event, PartStartEvent) and isinstance(event.part, TextPart):
            # first fragment of a text part arrives on the start event itself
            print(event.part.content, end="", flush=True)
        elif isinstance(event, PartDeltaEvent) and isinstance(event.delta, TextPartDelta):
            print(event.delta.content_delta, end="", flush=True)
    print()


asyncio.run(main())
# -> call: lookup_customer({"email": "alice@example.com"})
# -> result: CUS_8f3a2b
# -> call: get_balance({"customer_id": "CUS_8f3a2b"})
# -> result: $1,432.50
# The balance for alice@example.com is $1,432.50.
LangGraph
from langchain.tools import tool
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
from langchain_core.messages import AIMessageChunk, ToolMessage

LLM_MODEL = "gpt-5.4"

model = ChatOpenAI(model=LLM_MODEL)

CUSTOMERS = {"alice@example.com": "CUS_8f3a2b"}
BALANCES = {"CUS_8f3a2b": "$1,432.50"}


@tool
def lookup_customer(email: str) -> str:
    """Look up a customer by email and return their internal ID."""
    return CUSTOMERS[email]


@tool
def get_balance(customer_id: str) -> str:
    """Get the account balance for a customer ID."""
    return BALANCES[customer_id]


agent = create_agent(model, [lookup_customer, get_balance])

# stream_mode="messages" emits tool calls, tool results, and text deltas
tool_name = ""
tool_args = ""
for chunk, metadata in agent.stream(
    {"messages": [("user", "What's the balance for alice@example.com?")]},
    stream_mode="messages",
):
    if isinstance(chunk, AIMessageChunk):
        # tool call args stream incrementally — accumulate them
        for tc in chunk.tool_call_chunks:
            if tc["name"]:
                # a named chunk starts a new tool call; reset the accumulator
                tool_name = tc["name"]
                tool_args = tc.get("args", "") or ""
            else:
                tool_args += tc.get("args", "") or ""
        # text delta — stream to stdout
        if chunk.content and metadata["langgraph_node"] == "model":
            print(chunk.content, end="", flush=True)
    elif isinstance(chunk, ToolMessage):
        # tool finished: flush the accumulated call, then print its result
        if tool_name:
            print(f"-> call: {tool_name}({tool_args})")
            tool_name = ""
            tool_args = ""
        print(f"-> result: {chunk.content}")
print()
# -> call: lookup_customer({"email": "alice@example.com"})
# -> result: CUS_8f3a2b
# -> call: get_balance({"customer_id": "CUS_8f3a2b"})
# -> result: $1,432.50
# The balance for alice@example.com is $1,432.50.
AI SDK
import { ToolLoopAgent, tool } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";

const LLM_MODEL = "gpt-5.4";

const CUSTOMERS: Record<string, string> = { "alice@example.com": "CUS_8f3a2b" };
const BALANCES: Record<string, string> = { CUS_8f3a2b: "$1,432.50" };

const lookupCustomer = tool({
  description: "Look up a customer by email and return their internal ID",
  inputSchema: z.object({ email: z.string() }),
  execute: async ({ email }) => CUSTOMERS[email],
});

const getBalance = tool({
  description: "Get the account balance for a customer ID",
  inputSchema: z.object({ customerId: z.string() }),
  execute: async ({ customerId }) => BALANCES[customerId],
});

const agent = new ToolLoopAgent({
  model: openai(LLM_MODEL),
  tools: { lookupCustomer, getBalance },
});

// agent.stream() is async — fullStream yields typed events
const result = await agent.stream({
  prompt: "What's the balance for alice@example.com?",
});
for await (const chunk of result.fullStream) {
  if (chunk.type === "tool-call") {
    console.log(`-> call: ${chunk.toolName}(${JSON.stringify(chunk.input)})`);
  } else if (chunk.type === "tool-result") {
    console.log(`-> result: ${chunk.output}`);
  } else if (chunk.type === "text-delta") {
    process.stdout.write(chunk.text);
  }
}
console.log();
// -> call: lookupCustomer({"email":"alice@example.com"})
// -> result: CUS_8f3a2b
// -> call: getBalance({"customerId":"CUS_8f3a2b"})
// -> result: $1,432.50
// The balance for alice@example.com is $1,432.50.
Mastra
import { Agent } from "@mastra/core/agent";
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";

const CUSTOMERS: Record<string, string> = { "alice@example.com": "CUS_8f3a2b" };
const BALANCES: Record<string, string> = { CUS_8f3a2b: "$1,432.50" };

const lookupCustomer = createTool({
  id: "lookup-customer",
  description: "Look up a customer by email and return their internal ID",
  inputSchema: z.object({ email: z.string() }),
  execute: async ({ email }) => CUSTOMERS[email],
});

const getBalance = createTool({
  id: "get-balance",
  description: "Get the account balance for a customer ID",
  inputSchema: z.object({ customerId: z.string() }),
  execute: async ({ customerId }) => BALANCES[customerId],
});

const agent = new Agent({
  name: "stream-events-agent",
  instructions: "You are a helpful assistant.",
  model: LLM_MODEL,
  tools: { lookupCustomer, getBalance },
});

// fullStream yields typed events for the entire agent lifecycle
const stream = await agent.stream(
  "What's the balance for alice@example.com?",
  { maxSteps: 3 },
);
for await (const chunk of stream.fullStream) {
  if (chunk.type === "tool-call") {
    console.log(`-> call: ${chunk.payload.toolName}(${JSON.stringify(chunk.payload.args)})`);
  } else if (chunk.type === "tool-result") {
    console.log(`-> result: ${chunk.payload.result}`);
  } else if (chunk.type === "text-delta") {
    process.stdout.write(chunk.payload.text);
  }
}
console.log();
// -> call: lookupCustomer({"email":"alice@example.com"})
// -> result: CUS_8f3a2b
// -> call: getBalance({"customerId":"CUS_8f3a2b"})
// -> result: $1,432.50
// The balance for alice@example.com is $1,432.50.