typescript
// Minimal LangChain agent: one tool + a model id, invoked once.
import { createAgent } from "langchain";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
// A tool is an async handler plus name/description/schema metadata;
// the model reads the metadata to decide when and how to call it.
const getWeather = tool(
async ({ location }) => `Weather in ${location}: Sunny, 72F`,
{
name: "get_weather",
description: "Get current weather for a location.",
schema: z.object({ location: z.string().describe("City name") }),
}
);
// Create the agent from a model identifier, the tool list, and a system prompt.
const agent = createAgent({
model: "anthropic:claude-sonnet-4-5",
tools: [getWeather],
systemPrompt: "You are a helpful assistant.",
});
// Invoke with a message list; the agent loops model <-> tools until it finishes.
const result = await agent.invoke({
messages: [{ role: "user", content: "What's the weather in Paris?" }],
});
// The final assistant reply is the last message in the returned array.
console.log(result.messages[result.messages.length - 1].content);
# In-memory checkpointer: persists conversation state keyed by thread_id.
checkpointer = MemorySaver()
agent = create_agent(
model="anthropic:claude-sonnet-4-5",
tools=[search],
checkpointer=checkpointer,
)
# thread_id scopes the saved state; reuse it to continue the same conversation.
config = {"configurable": {"thread_id": "user-123"}}
agent.invoke({"messages": [{"role": "user", "content": "My name is Alice"}]}, config=config)
result = agent.invoke({"messages": [{"role": "user", "content": "What's my name?"}]}, config=config)
# Agent remembers: "Your name is Alice"
</python>
<typescript>
Add a MemorySaver checkpointer to maintain conversation state across invocations.
import { createAgent } from "langchain";
import { MemorySaver } from "@langchain/langgraph";
// In-memory checkpointer: persists conversation state keyed by thread_id.
const checkpointer = new MemorySaver();
const agent = createAgent({
model: "anthropic:claude-sonnet-4-5",
tools: [search],
checkpointer,
});
// thread_id scopes the saved state; reuse it to continue the same conversation.
const config = { configurable: { thread_id: "user-123" } };
await agent.invoke({ messages: [{ role: "user", content: "My name is Alice" }] }, config);
const result = await agent.invoke({ messages: [{ role: "user", content: "What's my name?" }] }, config);
// Agent remembers: "Your name is Alice"
Tools are functions that agents can call. Use the `@tool` decorator (Python) or the `tool()` function (TypeScript).
@tool
def calculate(expression: str) -> str:
    """Evaluate a mathematical expression.

    Args:
        expression: Math expression like "2 + 2" or "10 * 5"
    """
    # WARNING: eval() executes arbitrary Python — never expose this to
    # untrusted input; use ast.literal_eval or a math parser in production.
    return str(eval(expression))
typescript
import { tool } from "@langchain/core/tools";
import { z } from "zod";
// WARNING: eval() executes arbitrary JavaScript — never expose this to
// untrusted input; use a math-expression parser in production.
const calculate = tool(
async ({ expression }) => String(eval(expression)),
{
name: "calculate",
description: "Evaluate a mathematical expression.",
schema: z.object({
expression: z.string().describe("Math expression like '2 + 2' or '10 * 5'"),
}),
}
);
Middleware intercepts the agent loop to add human approval, error handling, logging, and more. A deep understanding of middleware is essential for production agents — use `HumanInTheLoopMiddleware` (Python) / `humanInTheLoopMiddleware` (TypeScript) for approval workflows, and `@wrap_tool_call` (Python) / `createMiddleware` (TypeScript) for custom hooks.
Key imports:
Python: from langchain.agents.middleware import HumanInTheLoopMiddleware, wrap_tool_call
TypeScript: import { humanInTheLoopMiddleware, createMiddleware } from "langchain";
Key patterns:
- HITL: middleware=[HumanInTheLoopMiddleware(interrupt_on={"dangerous_tool": True})] — requires a checkpointer + thread_id
- Resume after interrupt: agent.invoke(Command(resume={"decisions": [{"type": "approve"}]}), config=config)
- Custom middleware: the @wrap_tool_call decorator (Python) or createMiddleware({ wrapToolCall: ... }) (TypeScript)
<structured_output>
Structured Output
Get typed, validated responses from agents using `response_format` or `with_structured_output()`.
class ContactInfo(BaseModel):
    """Schema for contact details extracted by the model."""
    name: str
    email: str
    phone: str = Field(description="Phone number with area code")

# Option 1: Agent with structured output — the agent validates its final
# answer against the schema and exposes it as structured_response.
agent = create_agent(model="gpt-4.1", tools=[search], response_format=ContactInfo)
result = agent.invoke({"messages": [{"role": "user", "content": "Find contact for John"}]})
print(result["structured_response"])  # ContactInfo(name='John', ...)

# Option 2: Model-level structured output (no agent needed)
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4.1")
structured_model = model.with_structured_output(ContactInfo)
response = structured_model.invoke("Extract: John, john@example.com, 555-1234")
# ContactInfo(name='John', email='john@example.com', phone='555-1234')
</python>
<typescript>
import { ChatOpenAI } from "@langchain/openai";
import { z } from "zod";
// Zod schema doubles as the validation contract and the TypeScript type.
const ContactInfo = z.object({
name: z.string(),
email: z.string().email(),
phone: z.string().describe("Phone number with area code"),
});
// Model-level structured output
const model = new ChatOpenAI({ model: "gpt-4.1" });
const structuredModel = model.withStructuredOutput(ContactInfo);
const response = await structuredModel.invoke("Extract: John, john@example.com, 555-1234");
// { name: 'John', email: 'john@example.com', phone: '555-1234' }
typescript
// WRONG: Vague description — the model cannot tell when to call this tool.
const badTool = tool(async ({ input }) => "result", {
name: "bad_tool",
description: "Does stuff.", // Too vague!
schema: z.object({ input: z.string() }),
});
// CORRECT: Clear, specific description — says what the tool does AND when to use it.
const search = tool(async ({ query }) => webSearch(query), {
name: "search",
description: "Search the web for current information about a topic. Use this when you need recent data or facts.",
schema: z.object({
query: z.string().describe("The search query (2-10 words recommended)"),
}),
});
# CORRECT: Add checkpointer and thread_id
from langgraph.checkpoint.memory import MemorySaver

agent = create_agent(
    model="anthropic:claude-sonnet-4-5",
    tools=[search],
    checkpointer=MemorySaver(),  # in-memory; use a durable saver in production
)
# thread_id scopes the saved state; reuse it to continue the conversation.
config = {"configurable": {"thread_id": "session-1"}}
agent.invoke({"messages": [{"role": "user", "content": "I'm Bob"}]}, config=config)
agent.invoke({"messages": [{"role": "user", "content": "What's my name?"}]}, config=config)
# Agent remembers: "Your name is Bob"
</python>
<typescript>
Add a checkpointer and thread_id for conversation memory across invocations.
// WRONG: No persistence — the second invoke cannot see the first.
// (Braces scope this example so it doesn't clash with the fix below.)
{
  const agent = createAgent({ model: "anthropic:claude-sonnet-4-5", tools: [search] });
  await agent.invoke({ messages: [{ role: "user", content: "I'm Bob" }] });
  await agent.invoke({ messages: [{ role: "user", content: "What's my name?" }] });
  // Agent doesn't remember!
}

// CORRECT: Add checkpointer and thread_id
import { MemorySaver } from "@langchain/langgraph";
const agent = createAgent({
  model: "anthropic:claude-sonnet-4-5",
  tools: [search],
  checkpointer: new MemorySaver(), // in-memory; use a durable saver in production
});
const config = { configurable: { thread_id: "session-1" } };
await agent.invoke({ messages: [{ role: "user", content: "I'm Bob" }] }, config);
await agent.invoke({ messages: [{ role: "user", content: "What's my name?" }] }, config);
// Agent remembers: "Your name is Bob"
# CORRECT: Set recursion_limit in config
result = agent.invoke(
    {"messages": [("user", "Do research")]},
    config={"recursion_limit": 10},  # Stop after 10 steps
)
typescript
// WRONG: No iteration limit — a looping agent can run indefinitely.
// (Braces scope this example so it doesn't clash with the fix below.)
{
  const result = await agent.invoke({ messages: [["user", "Do research"]] });
}

// CORRECT: Set recursionLimit in config
const result = await agent.invoke(
  { messages: [["user", "Do research"]] },
  { recursionLimit: 10 }, // Stop after 10 steps
);
# CORRECT: Access messages from the result dict
result = agent.invoke({"messages": [{"role": "user", "content": "Hello"}]})
print(result["messages"][-1].content)  # Last message content
</python>
<typescript>
Access the messages array from the result, not result.content directly.
// WRONG: Trying to access result.content directly
// (Braces scope this example so it doesn't clash with the fix below.)
{
  const result = await agent.invoke({ messages: [{ role: "user", content: "Hello" }] });
  console.log(result.content); // undefined!
}

// CORRECT: Access the messages array on the result object
const result = await agent.invoke({ messages: [{ role: "user", content: "Hello" }] });
console.log(result.messages[result.messages.length - 1].content); // Last message content
langchain-fundamentals
Install:
npx skills add https://github.com/langchain-ai/langchain-skills --skill langchain-fundamentals