LangChain
Add Sekuire governance to LangChain agents. Every LLM call goes through policy enforcement, and tools are automatically filtered by policy.
Available in both TypeScript and Python.
What You Get
- Every `invoke()` and `ainvoke()` call is governed by your `sekuire.yml` policy
- Blocked tools are filtered out at creation time and re-enforced at execution time (defense in depth)
- Full message history flows through to the LLM - not just the last message
- Model allowlists, rate limits, and token tracking on every call
Prerequisites
- TypeScript
- Python
- Node.js 18+
`@sekuire/sdk`, `@langchain/core`, `langchain`
- Python 3.10+
`sekuire-sdk`, `langchain`, `langchain-core`
Installation
- TypeScript
- Python
pnpm add @sekuire/sdk @langchain/core langchain zod
pip install sekuire-sdk langchain langchain-core
Configure Governance
Both languages use the same sekuire.yml:
# sekuire.yml — governance policy shared by the TypeScript and Python examples.
# Nesting reconstructed from how the SDK reads it: llm/models/toolsets live
# under `agent` (see agentConfig.llm / .models / .toolsets in the code),
# while rate_limits is read from the top-level config (config.rate_limits).
project:
  name: langchain-integration
  version: 1.0.0

agent:
  name: Research Assistant
  system_prompt: ./system_prompt.md
  tools: ./tools.json
  llm:
    provider: openai
    model: gpt-4o-mini
    api_key_env: OPENAI_API_KEY  # key is read from the environment, never stored here
    temperature: 0.7
    max_tokens: 1024
  models:
    allowed_models:
      - gpt-4o-mini
      - gpt-4o
    blocked_models:
      - gpt-3.5-turbo
  toolsets:
    allowed_tools:
      - name: web_search
      - name: "files:read"
      - name: calculator
    blocked_tools:
      - file_delete
      - env_set

# NOTE(review): permissions assumed top-level alongside agent — confirm
# against the SDK's config schema.
permissions:
  network:
    enabled: true
    require_tls: true
    allowed_domains:
      - api.openai.com
      - "*.wikipedia.org"
    blocked_domains:
      - "*.malware.net"
  filesystem:
    enabled: true
    allowed_paths:
      - "./data/*"
      - "/tmp/*"
    blocked_paths:
      - "/etc/*"
      - "~/.ssh/*"

rate_limits:
  per_agent:
    requests_per_minute: 10
Create the ChatModel
The governed ChatModel extends LangChain's BaseChatModel. It loads the Sekuire provider and enforcer from config and routes every _generate() call through policy enforcement.
- TypeScript
- Python
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import type { BaseMessage } from "@langchain/core/messages";
import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
import type { ChatResult } from "@langchain/core/outputs";
import type {
LLMProvider,
ChatOptions,
LLMMessage as SekuireMessage,
LLMToolDefinition as ToolDefinition,
} from "@sekuire/sdk";
import { PolicyEnforcer, loadConfig, getAgentConfig, createLLMProvider } from "@sekuire/sdk";
/**
 * Extracts plain text from a LangChain message. `BaseMessage.content` may be
 * a string or an array of content parts; non-string content is serialized.
 * Added because the original example called this helper without defining it.
 * NOTE(review): JSON-serializing multi-part content is a pragmatic fallback —
 * confirm against how the provider expects multimodal input.
 */
function extractText(msg: BaseMessage): string {
  return typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
}

/**
 * Converts LangChain messages into Sekuire's wire format.
 *
 * Mapping: SystemMessage → "system", HumanMessage → "user", AIMessage →
 * "assistant" (tool_calls carried over), ToolMessage → "tool" (tool_call_id
 * carried over). Unknown message types fall back to "user".
 */
function convertMessages(messages: BaseMessage[]): SekuireMessage[] {
  return messages.map((msg) => {
    if (msg instanceof SystemMessage) {
      return { role: "system" as const, content: extractText(msg) };
    }
    if (msg instanceof HumanMessage) {
      return { role: "user" as const, content: extractText(msg) };
    }
    if (msg instanceof AIMessage) {
      const result: SekuireMessage = { role: "assistant" as const, content: extractText(msg) };
      if (msg.tool_calls?.length) {
        result.tool_calls = msg.tool_calls.map((tc) => ({
          // Providers require an id per tool call; synthesize one if absent.
          id: tc.id || crypto.randomUUID(),
          type: "function" as const,
          function: { name: tc.name, arguments: JSON.stringify(tc.args) },
        }));
        // OpenAI-style APIs expect null (not "") content alongside tool calls.
        if (!result.content) result.content = null;
      }
      return result;
    }
    if (msg instanceof ToolMessage) {
      return { role: "tool" as const, content: extractText(msg), tool_call_id: msg.tool_call_id };
    }
    // Fallback for message types without a dedicated mapping.
    return { role: "user" as const, content: extractText(msg) };
  });
}
/**
 * A LangChain chat model whose every generation passes through Sekuire
 * policy enforcement (model allowlists, rate limits, token tracking).
 *
 * Construct via `SekuireChatModel.fromConfig()` rather than `new` so the
 * provider and enforcer are wired up from sekuire.yml.
 */
export class SekuireChatModel extends BaseChatModel {
  private provider: LLMProvider;
  private enforcer?: PolicyEnforcer;
  private boundTools?: ToolDefinition[];

  // The original example declared these fields but never assigned them:
  // BaseChatModel's constructor does not know about subclass fields, so
  // `new SekuireChatModel({ provider, enforcer })` left them undefined.
  constructor(fields: { provider: LLMProvider; enforcer?: PolicyEnforcer }) {
    super({});
    this.provider = fields.provider;
    this.enforcer = fields.enforcer;
  }

  /**
   * Loads sekuire.yml (from `configPath` or the default location), creates
   * the LLM provider, and builds a local PolicyEnforcer from any model,
   * tool, or rate-limit sections present in the config.
   */
  static async fromConfig(configPath?: string): Promise<SekuireChatModel> {
    const config = await loadConfig(configPath);
    const agentConfig = getAgentConfig(config);
    const provider = await createLLMProvider(agentConfig.llm.provider, {
      apiKey: process.env[agentConfig.llm.api_key_env] || "",
      model: agentConfig.llm.model,
    });

    // Assemble a local policy document from whichever sections exist;
    // if none exist, no enforcer is created and calls are ungoverned.
    let enforcer: PolicyEnforcer | undefined;
    const policyContent: Record<string, unknown> = {};
    if (agentConfig.models) policyContent.agent = { models: agentConfig.models };
    if (agentConfig.toolsets) {
      policyContent.tools = {
        allowed_tools: agentConfig.toolsets.allowed_tools,
        blocked_tools: agentConfig.toolsets.blocked_tools,
      };
    }
    if (config.rate_limits) policyContent.rate_limits = config.rate_limits;

    if (Object.keys(policyContent).length > 0) {
      enforcer = new PolicyEnforcer({
        policy_id: "local",
        workspace_id: "local",
        version: "1.0",
        status: "active",
        hash: "",
        content: policyContent,
      });
      // Let the provider enforce policy on its own calls when supported.
      if (provider.setPolicyEnforcer) provider.setPolicyEnforcer(enforcer);
    }
    return new SekuireChatModel({ provider, enforcer });
  }

  /**
   * Core generation hook behind invoke()/ainvoke(). Converts the full
   * message history, forwards any bound tools, and maps the response
   * (including tool calls) back into a LangChain AIMessage.
   */
  async _generate(messages: BaseMessage[]): Promise<ChatResult> {
    const sekuireMessages = convertMessages(messages);
    const chatOptions: ChatOptions = {};
    if (this.boundTools?.length) chatOptions.tools = this.boundTools;

    const response = await this.provider.chat(sekuireMessages, chatOptions);

    // Tool-call responses may carry null content; normalize to "" so the
    // AIMessage/generation text is always a string.
    const aiMessage = response.tool_calls?.length
      ? new AIMessage({
          content: response.content || "",
          tool_calls: response.tool_calls.map((tc) => ({
            name: tc.function.name,
            args: JSON.parse(tc.function.arguments),
            id: tc.id,
            type: "tool_call" as const,
          })),
        })
      : new AIMessage(response.content ?? "");
    return {
      generations: [{ text: response.content ?? "", message: aiMessage }],
      llmOutput: { model: response.model, usage: response.usage },
    };
  }

  _llmType(): string {
    return "sekuire";
  }

  /** Exposes the enforcer so governed tools can share the same policy. */
  getEnforcer(): PolicyEnforcer | undefined {
    return this.enforcer;
  }
}
The TypeScript version handles full tool call round-trips: AIMessage.tool_calls are converted to Sekuire's tool_calls format, and ToolMessage results carry the tool_call_id through.
import os
from typing import Any, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pydantic import Field
from sekuire_sdk.config import load_config, get_agent_config
from sekuire_sdk.new_llm import LLMProvider, Message, create_llm_provider
from sekuire_sdk.policy import ActivePolicy, PolicyEnforcer
def _convert_messages(messages: List[BaseMessage]) -> List[Message]:
    """Map LangChain messages onto Sekuire ``Message(role, content)`` pairs.

    Tool-call metadata is not carried over (the Sekuire Message dataclass
    has only role and content); unknown message types default to "user".
    """
    role_table = (
        (SystemMessage, "system"),
        (HumanMessage, "user"),
        (AIMessage, "assistant"),
        (ToolMessage, "tool"),
    )

    def role_of(message: BaseMessage) -> str:
        for cls, role in role_table:
            if isinstance(message, cls):
                return role
        return "user"

    def text_of(message: BaseMessage) -> str:
        content = message.content
        return content if isinstance(content, str) else str(content)

    return [Message(role=role_of(m), content=text_of(m)) for m in messages]
class SekuireChatModel(BaseChatModel):
    """LangChain chat model governed by Sekuire policy.

    Every ``_generate()`` call checks the model allowlist and request rate
    limit before the provider is invoked, and records token usage against
    the token rate limit afterwards. Build instances with ``from_config()``
    rather than the constructor.
    """

    # Runtime objects, excluded from pydantic serialization.
    provider: Any = Field(default=None, exclude=True)
    enforcer: Any = Field(default=None, exclude=True)
    model_name: str = Field(default="")

    class Config:
        # Allow the non-pydantic provider/enforcer objects as field values.
        arbitrary_types_allowed = True

    @classmethod
    def from_config(
        cls,
        config_path: str = "./sekuire.yml",
        policy_path: str = "./policy.json",
    ) -> "SekuireChatModel":
        """Build a governed model from sekuire.yml plus an optional policy file.

        NOTE(review): relies on ``_run_sync()`` and ``_load_policy()``
        helpers defined elsewhere in the example source — confirm they are
        in scope before copying this snippet.
        """
        config = load_config(config_path)
        agent_config = get_agent_config(config)
        # API key comes from the environment variable named in the config.
        api_key = os.environ.get(agent_config.llm.api_key_env, "")
        provider = _run_sync(create_llm_provider(
            provider=agent_config.llm.provider,
            api_key=api_key,
            model=agent_config.llm.model,
        ))
        # No policy file path -> ungoverned model (enforcer is None).
        enforcer = _load_policy(policy_path) if policy_path else None
        return cls(provider=provider, enforcer=enforcer, model_name=agent_config.llm.model)

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs,
    ) -> ChatResult:
        """Run one governed generation over the full message history.

        Tool-call metadata is dropped by ``_convert_messages`` (see the
        Known Limitations section of this page).
        """
        if self.enforcer:
            # Enforce before spending tokens: model allowlist, then request rate.
            self.enforcer.enforce_model(self.model_name)
            self.enforcer.enforce_rate_limit("request")
        sekuire_messages = _convert_messages(messages)
        response = _run_sync(self.provider.chat(sekuire_messages))
        if response.usage and self.enforcer:
            # Account the actual token spend against the token rate limit.
            self.enforcer.enforce_rate_limit("token", response.usage.total_tokens)
        generation = ChatGeneration(
            message=AIMessage(content=response.content),
            generation_info={"usage": response.usage} if response.usage else {},
        )
        return ChatResult(generations=[generation])

    @property
    def _llm_type(self) -> str:
        return "sekuire"
Both versions use a fromConfig() / from_config() factory that loads the provider from sekuire.yml. The TypeScript version also derives the enforcer from sekuire.yml; the Python version loads it from a separate policy.json (the policy_path argument). You do not need to construct them manually.
Wrap Tools with Governance
Sekuire tools are wrapped as LangChain tools with policy enforcement at both creation time (filtering) and execution time (defense in depth).
- TypeScript
- Python
import { DynamicStructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import type { PolicyEnforcer, Tool } from "@sekuire/sdk";
import { createDefaultToolRegistry } from "@sekuire/sdk";
export function wrapSekuireTools(options?: {
enforcer?: PolicyEnforcer;
filter?: string[];
}): DynamicStructuredTool[] {
const registry = createDefaultToolRegistry();
const enforcer = options?.enforcer;
let tools: Tool[] = registry.list();
if (options?.filter) {
tools = tools.filter((t) => options.filter!.includes(t.metadata.name));
}
return tools
.filter((tool) => {
if (!enforcer) return true;
try {
enforcer.enforceTool(tool.metadata.name);
return true;
} catch {
return false;
}
})
.map((tool) => {
const schema = tool.toSchema();
const zodSchema = buildZodSchema(
schema.parameters?.properties || {},
schema.parameters?.required || []
);
return new DynamicStructuredTool({
name: tool.metadata.name,
description: tool.metadata.description,
schema: zodSchema,
func: async (args) => {
if (enforcer) enforcer.enforceTool(tool.metadata.name);
const result = await tool.execute(args);
return typeof result === "string" ? result : JSON.stringify(result);
},
});
});
}
from typing import Any, List, Optional

from langchain_core.tools import BaseTool
from pydantic import Field

from sekuire_sdk import create_default_tool_registry
from sekuire_sdk.policy import PolicyEnforcer, PolicyViolation
class SekuireTool(BaseTool):
    """LangChain tool wrapping a Sekuire registry tool.

    Policy is re-checked on every ``_run()`` call (defense in depth),
    even though blocked tools were already filtered out at creation time
    by ``create_governed_tools``.
    """

    name: str = ""
    description: str = ""
    # tool_impl is the underlying Sekuire tool; enforcer is the shared policy.
    # Both are runtime objects, excluded from pydantic serialization.
    tool_impl: Any = Field(default=None, exclude=True)
    enforcer: Any = Field(default=None, exclude=True)

    def _run(self, **kwargs) -> str:
        # Execution-time enforcement: raises PolicyViolation if blocked.
        if self.enforcer:
            self.enforcer.enforce_tool(self.name)
        # NOTE(review): _run_sync is a helper defined elsewhere in the
        # example source (bridges the async execute call) — confirm in scope.
        result = _run_sync(self.tool_impl.execute(kwargs))
        return str(result)
def create_governed_tools(
    enforcer: Optional[PolicyEnforcer] = None,
) -> List[SekuireTool]:
    """Build LangChain tools from the Sekuire registry, skipping any the
    policy blocks. Each surviving tool keeps the enforcer so policy is
    re-checked at execution time as well."""

    def permitted(tool_name: str) -> bool:
        # Without an enforcer every registry tool is allowed through.
        if enforcer is None:
            return True
        try:
            enforcer.enforce_tool(tool_name)
        except PolicyViolation:
            return False
        return True

    registry = create_default_tool_registry()
    return [
        SekuireTool(
            name=impl.metadata.name,
            description=impl.metadata.description,
            tool_impl=impl,
            enforcer=enforcer,
        )
        for impl in registry.list()
        if permitted(impl.metadata.name)
    ]
Usage
- TypeScript
- Python
Single message
// Single message: invoke() is routed through the sekuire.yml policy checks.
import { HumanMessage } from "@langchain/core/messages";
import { SekuireChatModel } from "./sekuire-chat-model";

// fromConfig() with no argument loads sekuire.yml from the default path.
const model = await SekuireChatModel.fromConfig();
const result = await model.invoke([
  new HumanMessage("What is the capital of France?"),
]);
console.log(result.content);
Multi-turn conversation
// Multi-turn: the full history (system prompt + prior turns) reaches the LLM,
// not just the last message.
import { HumanMessage, SystemMessage, AIMessage } from "@langchain/core/messages";

const result = await model.invoke([
  new SystemMessage("You are a geography expert. Be concise."),
  new HumanMessage("What is the capital of France?"),
  new AIMessage("The capital of France is Paris."),
  new HumanMessage("And what is its population?"),
]);
console.log(result.content);
Tool call round-trip
// Tool round-trip: prior AI tool calls and their ToolMessage results are
// preserved through the conversion layer (ids matched via tool_call_id).
import { AIMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";

const result = await model.invoke([
  new SystemMessage("You are a helpful assistant with tools."),
  new HumanMessage("What is 2 + 2?"),
  new AIMessage({
    content: "",
    tool_calls: [{ id: "call_1", name: "calculator", args: { expression: "2 + 2" } }],
  }),
  new ToolMessage({ content: "4", tool_call_id: "call_1" }),
  new HumanMessage("Now multiply that by 10."),
]);
console.log(result.content);
Single message
# Single message: ainvoke() is routed through policy enforcement.
# NOTE: top-level await requires an async context (e.g. asyncio.run or an
# async-enabled REPL/notebook).
from langchain_core.messages import HumanMessage
from sekuire_chat_model import SekuireChatModel

model = SekuireChatModel.from_config()
result = await model.ainvoke([
    HumanMessage(content="What is the capital of France?"),
])
print(result.content)
Multi-turn conversation
# Multi-turn: the whole history is forwarded to the LLM, not just the last message.
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

result = await model.ainvoke([
    SystemMessage(content="You are a geography expert. Be concise."),
    HumanMessage(content="What is the capital of France?"),
    AIMessage(content="The capital of France is Paris."),
    HumanMessage(content="And what is its population?"),
])
print(result.content)
Using governed tools
# Tools blocked by policy are filtered out before the agent ever sees them.
from sekuire_tools import create_governed_tools

tools = create_governed_tools(enforcer=model.enforcer)
print(f"Allowed tools: {[t.name for t in tools]}")
Policy Enforcement
Blocked tool
// NOTE(review): getEnforcer() returns undefined when the config had no
// policy sections — guard before calling in real code.
const enforcer = model.getEnforcer();
try {
  enforcer.enforceTool("file_delete");
} catch (err) {
  // PolicyViolationError: Tool file_delete is blocked by policy
}
Blocked model
// Models in blocked_models (or missing from allowed_models) are rejected.
try {
  enforcer.enforceModel("gpt-3.5-turbo");
} catch (err) {
  // PolicyViolationError: Model gpt-3.5-turbo is not allowed
}
Allowed tools pass through:
enforcer.enforceTool("web_search"); // No error - tool is in allowed list
Known Limitations
- Python: The Sekuire
- Python: The Sekuire `Message` dataclass has `role` and `content` fields only. Tool call metadata (`tool_calls` on `AIMessage`, `tool_call_id` on `ToolMessage`) is dropped during conversion. Full tool call round-trips require the TypeScript SDK.
- TypeScript: Handles full tool call metadata through the conversion layer.
Next Steps
- CrewAI Integration - Multi-agent governance with CrewAI
- Policy Enforcement - Full `PolicyEnforcer` API reference
- TypeScript example source
- Python example source