Select frameworks to compare
Pick one or more frameworks from the bar above
Setup
OpenAI
# install the OpenAI Python SDK
uv add openai
# the SDK reads OPENAI_API_KEY from the environment automatically;
# use python-dotenv or set directly
export OPENAI_API_KEY="sk-..."
Anthropic
# install the Anthropic Python SDK
uv add anthropic
# the SDK reads ANTHROPIC_API_KEY from the environment automatically;
# use python-dotenv or set directly
export ANTHROPIC_API_KEY="sk-ant-..."
Gemini
# install the Google Gen AI SDK
uv add google-genai
# genai.Client() reads GEMINI_API_KEY from the environment automatically;
# use python-dotenv or set directly
export GEMINI_API_KEY="AIza..."
Pydantic AI
# install Pydantic AI with OpenAI support
# quote the extra: unquoted [openai] is glob-expanded by zsh and fails
uv add "pydantic-ai[openai]"
# python-dotenv is included — load .env with: from dotenv import load_dotenv; load_dotenv()
# alternative: set your API key directly in the shell
export OPENAI_API_KEY="sk-..."
LangGraph
# install langchain + langgraph + the OpenAI chat model
uv add langchain langgraph langchain-openai
# langchain-openai reads OPENAI_API_KEY from the environment;
# no built-in .env loading — use python-dotenv or set directly
export OPENAI_API_KEY="sk-..."
AI SDK
# install the AI SDK core, the OpenAI provider, zod, and the tsx runner
npm install ai @ai-sdk/openai zod tsx
# the OpenAI provider reads this variable automatically
export OPENAI_API_KEY="sk-..."
# run a script
npx tsx agent.ts
Mastra
# install mastra (core package only; bun is the package manager here)
bun add @mastra/core
# .env is auto-loaded when using Mastra CLI (mastra dev)
# alternative: set your API key directly in the shell
export OPENAI_API_KEY="sk-..."
Hello World
OpenAI
# one-shot text generation via the OpenAI Responses API
from openai import OpenAI
LLM_MODEL = "gpt-5.4"
# the client picks up OPENAI_API_KEY from the environment
client = OpenAI()
response = client.responses.create(
model=LLM_MODEL,
input="What is the capital of France?",
)
# output_text joins all text segments of the model's reply
print(response.output_text)
# "Paris."
Anthropic
# one-shot text generation via the Anthropic Messages API
import anthropic
LLM_MODEL = "claude-opus-4-6"
# the client picks up ANTHROPIC_API_KEY from the environment
client = anthropic.Anthropic()
response = client.messages.create(
model=LLM_MODEL,
# max_tokens is a required parameter of the Messages API
max_tokens=1024,
messages=[{"role": "user", "content": "What is the capital of France?"}],
)
# content is a list of blocks; the first block carries the text reply
print(response.content[0].text)
# "Paris."
Gemini
from google import genai

# one-shot text generation with the Google Gen AI SDK;
# the client picks up GEMINI_API_KEY from the environment
LLM_MODEL = "gemini-pro-latest"

gemini_client = genai.Client()
reply = gemini_client.models.generate_content(
    model=LLM_MODEL, contents="What is the capital of France?"
)
print(reply.text)
# "Paris."
Pydantic AI
from pydantic_ai import Agent

# Pydantic AI selects the provider from a "provider:model" string
LLM_MODEL = "openai:gpt-5.4"

hello_agent = Agent(LLM_MODEL)
answer = hello_agent.run_sync("What is the capital of France?")
print(answer.output)
# "Paris."
LangGraph
# minimal LangGraph agent: an LLM node and an empty tool list
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
LLM_MODEL = "gpt-5.4"
model = ChatOpenAI(model=LLM_MODEL)
# create_agent builds a graph that loops: LLM → tools → LLM
# with no tools, it's a simple prompt → response
agent = create_agent(model, tools=[])
result = agent.invoke({
"messages": [("user", "What is the capital of France?")]
})
# the last message in the returned state is the model's reply
print(result["messages"][-1].content)
# "Paris."
AI SDK
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// one-shot text generation with the AI SDK
const LLM_MODEL = "gpt-5.4";

const result = await generateText({
  model: openai(LLM_MODEL),
  prompt: "What is the capital of France?",
});
console.log(result.text);
// "Paris."
Mastra
// minimal Mastra agent: a name, system instructions, and a model string
import { Agent } from "@mastra/core/agent";
// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";
const agent = new Agent({
name: "hello-world",
instructions: "You are a helpful assistant.",
model: LLM_MODEL,
});
const result = await agent.generate("What is the capital of France?");
console.log(result.text);
// "Paris."
Instructions
OpenAI
# steering the model with a system-style prompt (Responses API)
from openai import OpenAI
LLM_MODEL = "gpt-5.4"
client = OpenAI()
# instructions is sent as a system message before the input
response = client.responses.create(
model=LLM_MODEL,
instructions="You are a pirate. Always respond in pirate speak.",
input="What is the capital of France?",
)
print(response.output_text)
# "Arrr, Paris be the capital, matey!"
Anthropic
# steering the model with a system prompt (Messages API)
import anthropic
LLM_MODEL = "claude-opus-4-6"
client = anthropic.Anthropic()
# system prompt is a top-level parameter, not a message
response = client.messages.create(
model=LLM_MODEL,
max_tokens=1024,
system="You are a pirate. Always respond in pirate speak.",
messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.content[0].text)
# "Arrr, Paris be the capital, matey!"
Gemini
# steering the model with a system instruction (Gen AI SDK)
from google import genai
from google.genai import types
LLM_MODEL = "gemini-pro-latest"
client = genai.Client()
# system_instruction is passed via GenerateContentConfig
response = client.models.generate_content(
model=LLM_MODEL,
config=types.GenerateContentConfig(
system_instruction="You are a pirate. Always respond in pirate speak.",
),
contents="What is the capital of France?",
)
print(response.text)
# "Arrr, Paris be the capital, matey!"
Pydantic AI
# steering the model with agent-level instructions (Pydantic AI)
from pydantic_ai import Agent
LLM_MODEL = "openai:gpt-5.4"
# instructions is the preferred prompt surface for most Pydantic AI agents
agent = Agent(
LLM_MODEL,
instructions="You are a pirate. Always respond in pirate speak.",
)
result = agent.run_sync("What is the capital of France?")
print(result.output)
# "Arrr, Paris be the capital, matey!"
LangGraph
# steering the model with an agent-level system prompt (LangGraph)
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
LLM_MODEL = "gpt-5.4"
model = ChatOpenAI(model=LLM_MODEL)
# system_prompt is prepended to every conversation automatically
agent = create_agent(
model,
tools=[],
system_prompt="You are a pirate. Always respond in pirate speak.",
)
result = agent.invoke({
"messages": [("user", "What is the capital of France?")]
})
print(result["messages"][-1].content)
# "Arrr, Paris be the capital, matey!"
AI SDK
// steering the model with a system prompt (AI SDK)
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const LLM_MODEL = "gpt-5.4";
// system is sent as a system message before the user prompt
const { text } = await generateText({
model: openai(LLM_MODEL),
system: "You are a pirate. Always respond in pirate speak.",
prompt: "What is the capital of France?",
});
console.log(text);
// "Arrr, Paris be the capital, matey!"
Mastra
// steering the model with agent-level instructions (Mastra)
import { Agent } from "@mastra/core/agent";
// model specified as "provider/model-name" string
const LLM_MODEL = "openai/gpt-5.4";
// instructions is sent as a system message before every user message
const agent = new Agent({
name: "pirate-agent",
instructions: "You are a pirate. Always respond in pirate speak.",
model: LLM_MODEL,
});
const result = await agent.generate("What is the capital of France?");
console.log(result.text);
// "Arrr, Paris be the capital, matey!"
Message History
OpenAI
# multi-turn conversation with the stateless Responses API
from openai import OpenAI
LLM_MODEL = "gpt-5.4"
client = OpenAI()
# turn 1
messages = [{"role": "user", "content": "What is the capital of France?"}]
response = client.responses.create(model=LLM_MODEL, input=messages)
print(response.output_text)
# "The capital of France is Paris."
# without history, the model can't resolve "its"
no_context = client.responses.create(model=LLM_MODEL, input="What is its population?")
print(no_context.output_text)
# "Could you clarify what 'its' refers to?"
# the API is stateless — pass the full conversation each call
# response.output is a list of output items, appended to the history as-is
messages += response.output
messages.append({"role": "user", "content": "What is its population?"})
response = client.responses.create(model=LLM_MODEL, input=messages)
print(response.output_text)
# "The population of Paris is approximately 2.1 million..."
Anthropic
# multi-turn conversation with the stateless Messages API
import anthropic
LLM_MODEL = "claude-opus-4-6"
client = anthropic.Anthropic()
# turn 1
messages = [{"role": "user", "content": "What is the capital of France?"}]
response = client.messages.create(model=LLM_MODEL, max_tokens=1024, messages=messages)
print(response.content[0].text)
# "The capital of France is Paris."
# without history, the model can't resolve "its"
no_context = client.messages.create(
model=LLM_MODEL, max_tokens=1024,
messages=[{"role": "user", "content": "What is its population?"}],
)
print(no_context.content[0].text)
# "Could you clarify what 'its' refers to?"
# the API is stateless — pass the full conversation each call
# response.content (a list of content blocks) is valid assistant-message content
messages.append({"role": "assistant", "content": response.content})
messages.append({"role": "user", "content": "What is its population?"})
response = client.messages.create(model=LLM_MODEL, max_tokens=1024, messages=messages)
print(response.content[0].text)
# "The population of Paris is approximately 2.1 million..."
Gemini
# multi-turn conversation with the stateless Gen AI SDK
from google import genai
from google.genai import types
LLM_MODEL = "gemini-pro-latest"
client = genai.Client()
# turn 1
contents = [types.Content(role="user", parts=[types.Part.from_text(text="What is the capital of France?")])]
response = client.models.generate_content(model=LLM_MODEL, contents=contents)
print(response.text)
# "The capital of France is Paris."
# without history, the model can't resolve "its"
no_context = client.models.generate_content(model=LLM_MODEL, contents="What is its population?")
print(no_context.text)
# "Could you clarify what 'its' refers to?"
# the API is stateless — pass the full conversation each call
# the first candidate's Content is the model turn, appended verbatim
contents.append(response.candidates[0].content)
contents.append(types.Content(role="user", parts=[types.Part.from_text(text="What is its population?")]))
response = client.models.generate_content(model=LLM_MODEL, contents=contents)
print(response.text)
# "The population of Paris is approximately 2.1 million..."
Pydantic AI
# multi-turn conversation via Pydantic AI's message_history parameter
from pydantic_ai import Agent
LLM_MODEL = "openai:gpt-5.4"
agent = Agent(LLM_MODEL)
# turn 1
result = agent.run_sync("What is the capital of France?")
print(result.output)
# "The capital of France is Paris."
# without history, the model can't resolve "its"
no_context = agent.run_sync("What is its population?")
print(no_context.output)
# "Could you clarify what 'its' refers to?"
# new_messages() carries the full exchange so "its" resolves to France
result = agent.run_sync("What is its population?", message_history=result.new_messages())
print(result.output)
# "The population of Paris is approximately 2.1 million..."
LangGraph
# multi-turn conversation by re-submitting the accumulated message state
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
LLM_MODEL = "gpt-5.4"
model = ChatOpenAI(model=LLM_MODEL)
agent = create_agent(model, tools=[])
# turn 1
result = agent.invoke({"messages": [("user", "What is the capital of France?")]})
print(result["messages"][-1].content)
# "The capital of France is Paris."
# without history, the model can't resolve "its"
no_context = agent.invoke({"messages": [("user", "What is its population?")]})
print(no_context["messages"][-1].content)
# "Could you clarify what 'its' refers to?"
# the agent is stateless — pass the full conversation each call
result = agent.invoke({"messages": result["messages"] + [("user", "What is its population?")]})
print(result["messages"][-1].content)
# "The population of Paris is approximately 2.1 million..."
AI SDK
// multi-turn conversation by growing a ModelMessage array
import { generateText, type ModelMessage } from "ai";
import { openai } from "@ai-sdk/openai";
const LLM_MODEL = "gpt-5.4";
// turn 1
const messages: ModelMessage[] = [
{ role: "user", content: "What is the capital of France?" },
];
const result1 = await generateText({ model: openai(LLM_MODEL), messages });
console.log(result1.text);
// "The capital of France is Paris."
// without history, the model can't resolve "its"
const noContext = await generateText({
model: openai(LLM_MODEL),
prompt: "What is its population?",
});
console.log(noContext.text);
// "Could you clarify what 'its' refers to?"
// the API is stateless — pass the full conversation each call
messages.push({ role: "assistant", content: result1.text });
messages.push({ role: "user", content: "What is its population?" });
const result2 = await generateText({ model: openai(LLM_MODEL), messages });
console.log(result2.text);
// "The population of Paris is approximately 2.1 million..."
Mastra
// multi-turn conversation by passing the full message array to generate()
import { Agent } from "@mastra/core/agent";
const LLM_MODEL = "openai/gpt-5.4";
const agent = new Agent({
name: "assistant",
instructions: "You are a helpful assistant.",
model: LLM_MODEL,
});
// turn 1
const messages: { role: "user" | "assistant"; content: string }[] = [
{ role: "user", content: "What is the capital of France?" },
];
const result1 = await agent.generate(messages);
console.log(result1.text);
// "The capital of France is Paris."
// without history, the model can't resolve "its"
const noContext = await agent.generate("What is its population?");
console.log(noContext.text);
// "Could you clarify what 'its' refers to?"
// the API is stateless — pass the full conversation each call
messages.push({ role: "assistant", content: result1.text });
messages.push({ role: "user", content: "What is its population?" });
const result2 = await agent.generate(messages);
console.log(result2.text);
// "The population of Paris is approximately 2.1 million..."