Framework Guides
LangChain Integration
Integrate Saf3AI with LangChain for automatic tracing and security.
LangChain Integration
Saf3AI provides deep integration with LangChain, automatically capturing all chain executions, LLM calls, and tool usage while providing real-time security scanning.
Installation
pip install saf3ai-sdk langchain langchain-openai
Quick Setup
Step 1: Initialize the SDK
import os
from dotenv import load_dotenv
from saf3ai_sdk import init

# Load environment variables from a local .env file, if present.
load_dotenv()

# Initialize SDK for LangChain.
# NOTE(review): all secrets come from the environment; only the service
# name and API-key header name have fallback defaults here.
init(
    service_name=os.getenv("SAF3AI_SERVICE_NAME", "langchain-agent"),
    framework="langchain",  # enables LangChain-specific instrumentation
    agent_id="my-langchain-agent",
    api_key=os.getenv("SAF3AI_API_KEY"),
    api_key_header_name=os.getenv("SAF3AI_API_KEY_HEADER", "X-API-Key"),
    safeai_collector_agent=os.getenv("SAF3AI_COLLECTOR_AGENT"),
)
Step 2: Define Security Policy
def security_policy(text: str, scan_results: dict, text_type: str) -> bool:
    """
    Decide whether scanned text may proceed.

    Args:
        text: The text that was scanned
        scan_results: Dict containing detection results
        text_type: Either "prompt" or "response"

    Returns:
        True to allow the text, False to block it.
    """
    detections = scan_results.get("detection_results", {})
    for outcome in detections.values():
        if outcome.get("result") == "MATCH_FOUND":
            return False
    return True
Step 3: Create Security Callback
import uuid
from saf3ai_sdk.langchain_callbacks import create_security_callback

# Generate or retrieve conversation ID for session tracking.
# Reusing the same ID across turns stitches them into one conversation.
conversation_id = str(uuid.uuid4())

# Create security callback; scans prompts and (scan_responses=True)
# model responses, invoking security_policy on each scan result.
security_callback = create_security_callback(
    api_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
    on_scan_complete=security_policy,  # returning False blocks the request
    scan_responses=True,
    conversation_id=conversation_id,
)
Step 4: Use with LangChain
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain

# Create LangChain chain with the security callback attached to the LLM.
chat = ChatOpenAI(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    callbacks=[security_callback],
)
chain = ConversationChain(llm=chat)

# Use chain with error handling: a request blocked by the security policy
# surfaces as a ValueError whose message contains "cannot assist".
try:
    response = chain.run("Hello, how are you?")
    print(response)
except ValueError as e:
    if "cannot assist" in str(e).lower():
        print("Request blocked by security policy")
    else:
        raise  # unrelated ValueError: propagate
Complete Example
import os
import uuid
from dotenv import load_dotenv
from saf3ai_sdk import init
from saf3ai_sdk.langchain_callbacks import create_security_callback
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain

# Step 1: Load environment variables from a local .env file.
load_dotenv()

# Step 2: Initialize SDK (must run before any callbacks are created).
init(
    service_name=os.getenv("SAF3AI_SERVICE_NAME", "langchain-agent"),
    framework="langchain",
    agent_id="my-langchain-agent",
    api_key=os.getenv("SAF3AI_API_KEY"),
    api_key_header_name=os.getenv("SAF3AI_API_KEY_HEADER", "X-API-Key"),
    safeai_collector_agent=os.getenv("SAF3AI_COLLECTOR_AGENT"),
)
# Step 3: Define security policy
def security_policy(text: str, scan_results: dict, text_type: str) -> bool:
    """Allow the text unless any detector reports a match."""
    results = scan_results.get("detection_results", {})
    return all(
        entry.get("result") != "MATCH_FOUND"
        for entry in results.values()
    )
# Step 4: Create security callback tied to a single conversation ID.
conversation_id = str(uuid.uuid4())
security_callback = create_security_callback(
    api_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
    on_scan_complete=security_policy,
    scan_responses=True,
    conversation_id=conversation_id,
)

# Step 5: Create LangChain chain with the callback on the LLM.
chat = ChatOpenAI(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    callbacks=[security_callback],
)
chain = ConversationChain(llm=chat)

# Step 6: Use chain with error handling; blocked requests raise a
# ValueError whose message contains "cannot assist".
try:
    response = chain.run("Hello, how are you?")
    print(response)
except ValueError as e:
    if "cannot assist" in str(e).lower():
        print("Request blocked by security policy")
    else:
        raise
Agent Executor Integration
For LangChain agents with tools:
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain.tools import Tool
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Create chat model with security callback so each LLM call is scanned.
chat = ChatOpenAI(
    model="gpt-4",
    callbacks=[security_callback],
)

# Define your tools (stub implementation shown for the example).
tools = [
    Tool(
        name="search",
        func=lambda x: "Search results...",
        description="Search the web for information"
    )
]

# Create prompt; the agent_scratchpad placeholder carries intermediate
# tool-call messages between agent steps.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
])

# Create agent with security callback.
agent = create_openai_tools_agent(
    llm=chat,
    tools=tools,
    prompt=prompt,
)
# Attach the callback to the executor as well so chain and tool events
# are traced, not just the LLM calls.
executor = AgentExecutor(
    agent=agent,
    tools=tools,
    callbacks=[security_callback],
    verbose=True,
)

# Run agent; a blocked request raises ValueError containing "cannot assist".
try:
    response = executor.invoke({"input": "Search for AI news"})
    print(response)
except ValueError as e:
    if "cannot assist" in str(e).lower():
        print("Request blocked by security policy")
    else:
        raise
LCEL (LangChain Expression Language)
Works seamlessly with LCEL chains:
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

prompt = ChatPromptTemplate.from_template("Explain {concept} simply")
model = ChatOpenAI(model="gpt-4")
parser = StrOutputParser()
chain = prompt | model | parser

# Pass callbacks at runtime through the config dict instead of at
# construction time; the callback then applies to every step of the chain.
result = chain.invoke(
    {"concept": "quantum computing"},
    config={"callbacks": [security_callback]}
)
Security Policy Examples
Basic Policy (Block All Threats)
def basic_policy(text: str, scan_results: dict, text_type: str) -> bool:
    """Block any detected threats."""
    for finding in scan_results.get("detection_results", {}).values():
        if finding.get("result") == "MATCH_FOUND":
            return False
    return True
Selective Policy (Block Specific Threats)
def selective_policy(text: str, scan_results: dict, text_type: str) -> bool:
    """Block only specific threat types."""
    blocked_types = {"CSAM", "Dangerous", "HateSpeech"}
    detections = scan_results.get("detection_results", {})
    # Only findings in the blocked set can cause a block; everything
    # else is logged by the scanner but allowed through.
    return all(
        result.get("result") != "MATCH_FOUND"
        for threat_type, result in detections.items()
        if threat_type in blocked_types
    )
Logging Policy (Log but Don’t Block)
import logging

logger = logging.getLogger(__name__)


def logging_policy(text: str, scan_results: dict, text_type: str) -> bool:
    """Log all threats but allow them through."""
    detections = scan_results.get("detection_results", {})
    matched = [
        threat_type
        for threat_type, result in detections.items()
        if result.get("result") == "MATCH_FOUND"
    ]
    for threat_type in matched:
        logger.warning(f"Threat detected: {threat_type} in {text_type}")
    # Monitoring-only policy: never block.
    return True
Automatic Tracing
With the callback handler, Saf3AI automatically captures:
- Chain executions: Start/end times, inputs, outputs
- LLM calls: Model, tokens, latency, messages
- Tool usage: Tool name, inputs, outputs
- Retriever operations: Query, documents retrieved
- Errors: Full exception details
Conversation Stitching
To track multi-turn conversations, pass the same conversation_id across interactions:
# Store conversation_id in session (Flask example)
@app.route('/chat', methods=['POST'])
def chat():
    # Get or create conversation ID; persisting it in the Flask session
    # keeps multi-turn requests stitched into one conversation trace.
    conversation_id = session.get('conversation_id') or str(uuid.uuid4())
    session['conversation_id'] = conversation_id
    # Create callback with conversation ID
    callback = create_security_callback(
        api_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
        conversation_id=conversation_id,
        on_scan_complete=security_policy,
    )
    # ... use with LangChain
Troubleshooting
Callbacks not executing
- Ensure SDK is initialized before creating callbacks
- Verify callbacks are passed to both the LLM and chain/executor
- Check that `framework="langchain"` is set in `init()`
Conversation ID not stitching
- Pass `conversation_id` to `create_security_callback()`
- Use the same `conversation_id` across multiple calls
Requests not being blocked
- Verify your `on_scan_complete` function returns `False` for blocked content
- Check that `scan_results` contains the expected `detection_results` format