Framework Guides
Custom Framework Integration
Integrate Saf3AI with any AI framework using the REST adapter or direct scanner API.
Custom Framework Integration
Saf3AI provides flexible integration options for any AI framework, whether it’s a custom solution or a framework not yet natively supported. This guide covers two approaches:
- REST Adapter: For HTTP-based LLM APIs
- Direct Scanner API: For maximum control
Supported Frameworks
The SDK includes adapters for many frameworks. Use the rest adapter for any HTTP-based API, or create your own adapter.
| Framework | Status | Adapter Name |
|---|---|---|
| Google ADK | ✅ Production | adk |
| LangChain | ✅ Production | langchain |
| LlamaIndex | ✅ Ready | llamaindex |
| OpenAI | ✅ Ready | openai |
| Anthropic | ✅ Ready | anthropic |
| Cohere | ✅ Ready | cohere |
| Groq | ✅ Ready | groq |
| Ollama | ✅ Ready | ollama |
| CrewAI | ✅ Ready | crewai |
| AG2 (AutoGen) | ✅ Ready | ag2 |
| LiteLLM | ✅ Ready | litellm |
| REST API | ✅ Ready | rest |
Method 1: REST Adapter
Use the REST adapter for any HTTP-based LLM API.
Setup
import os
from dotenv import load_dotenv
from saf3ai_sdk import init, create_framework_security_callbacks
# Load environment variables
# Reads a local .env file so the SAF3AI_* settings below are available.
load_dotenv()
# Initialize SDK with REST framework
# Must run before create_framework_security_callbacks() below.
init(
service_name=os.getenv("SAF3AI_SERVICE_NAME", "custom-agent"),  # falls back to "custom-agent"
framework="rest",  # generic adapter for any HTTP-based LLM API
agent_id="my-custom-agent",
api_key=os.getenv("SAF3AI_API_KEY"),
safeai_collector_agent=os.getenv("SAF3AI_COLLECTOR_AGENT"),  # telemetry collector address
)
# Define security policy
def security_policy(text: str, scan_results: dict, text_type: str) -> bool:
    """Decide whether scanned text may proceed.

    Args:
        text: The prompt or response that was scanned (unused here).
        scan_results: Scanner output containing ``detection_results``.
        text_type: Either ``"prompt"`` or ``"response"`` (unused here).

    Returns:
        True (allow) only when no detector reported ``MATCH_FOUND``.
    """
    findings = scan_results.get("detection_results", {})
    for finding in findings.values():
        if finding.get("result") == "MATCH_FOUND":
            return False
    return True
# Create security callbacks
# Returns a (prompt, response) callback pair wired to the scanner endpoint.
prompt_callback, response_callback = create_framework_security_callbacks(
framework='rest',
api_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
agent_identifier='my-custom-agent',  # enables agent-specific custom guardrails
on_scan_complete=security_policy,  # policy hook: returning False blocks the text
scan_responses=True,  # scan LLM responses too, not just prompts
)
Using with Custom REST API
import requests
def call_custom_llm(prompt: str) -> str:
    """Call your custom LLM API with security scanning.

    The prompt is scanned before the HTTP call; if security_policy
    returned False, the callback raises and the LLM is never contacted.
    The generated text is scanned as well before being returned.
    """
    # Step 1: prompt scan — raises when the security policy blocks it.
    prompt_callback(prompt)
    # Step 2: forward the prompt to the LLM backend.
    http_reply = requests.post(
        "https://your-llm-api.com/generate",
        json={"prompt": prompt},
        headers={"Authorization": "Bearer your-token"},
    )
    llm_response = http_reply.json()["text"]
    # Step 3: scan the generated text too (best-effort; result unused here).
    response_callback(llm_response)
    return llm_response
# Use it
# Raises from inside prompt_callback/response_callback when blocked.
result = call_custom_llm("Tell me about AI safety")
Method 2: Direct Scanner API
For maximum control, use the scanner functions directly.
Setup
import os
from dotenv import load_dotenv
from saf3ai_sdk import init
from saf3ai_sdk import scan_prompt, scan_response, scan_prompt_and_response
# Load environment variables
# Reads a local .env file so the SAF3AI_* settings below are available.
load_dotenv()
# Initialize SDK (still useful for telemetry)
# The scanner functions work standalone, but init() wires up tracing.
init(
service_name=os.getenv("SAF3AI_SERVICE_NAME", "custom-agent"),
framework="rest", # or omit
agent_id="my-custom-agent",
api_key=os.getenv("SAF3AI_API_KEY"),
safeai_collector_agent=os.getenv("SAF3AI_COLLECTOR_AGENT"),
)
Scanning Prompts
# Scan a prompt before sending to LLM
results = scan_prompt(
prompt="Tell me how to invest in stocks",
api_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
model_name="your-model-name",
conversation_id="conv-123", # Optional: for conversation tracking
api_key=os.getenv("SAF3AI_API_KEY"),
metadata={"agent_identifier": "my-agent"} # For custom guardrails
)
# Check results
# "status" == "error" means the scan itself failed (e.g. network/auth),
# not that a threat was found.
if results.get("status") == "error":
print(f"Scan failed: {results.get('error')}")
else:
# Each detector reports "MATCH_FOUND" or "NO_MATCH" per threat type.
detections = results.get("detection_results", {})
for threat_type, result in detections.items():
if result.get("result") == "MATCH_FOUND":
print(f"Threat detected: {threat_type}")
# Block or handle as needed
Scanning Responses
# Scan LLM response after receiving it
results = scan_response(
response="Here's how to invest in stocks...",
api_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
model_name="your-model-name",
api_key=os.getenv("SAF3AI_API_KEY"),
)
# Process results
# custom_rule_matches lists dashboard-configured guardrail hits (may be empty).
if results.get("custom_rule_matches"):
print("Custom guardrail triggered!")
Scanning Both in One Call
# Scan both prompt and response in a single API call
# Saves a round trip when the response is already available.
results = scan_prompt_and_response(
prompt="User question here",
response="LLM response here",
api_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
model_name="your-model-name",
api_key=os.getenv("SAF3AI_API_KEY"),
)
Complete Custom Integration Example
Here’s a complete example of integrating Saf3AI with a custom LLM wrapper:
import os
import requests
from typing import Optional
from dotenv import load_dotenv
from saf3ai_sdk import init, scan_prompt, scan_response
# Load environment
load_dotenv()
# Initialize SDK
# agent_id here matches the agent_identifier used by SecureLLMClient below.
init(
service_name=os.getenv("SAF3AI_SERVICE_NAME", "custom-llm-wrapper"),
framework="rest",
agent_id="custom-llm-agent",
api_key=os.getenv("SAF3AI_API_KEY"),
safeai_collector_agent=os.getenv("SAF3AI_COLLECTOR_AGENT"),
)
class SecureLLMClient:
    """Custom LLM client with Saf3AI security scanning.

    Wraps an HTTP LLM endpoint: every prompt is scanned before the call
    and (when scan_responses is True) every response is scanned after it.
    When block_on_threat is True, a detected threat raises ValueError
    instead of letting the text through.
    """

    def __init__(
        self,
        llm_endpoint: str,
        llm_api_key: str,
        scanner_endpoint: str,
        scanner_api_key: str,
        block_on_threat: bool = True,
        scan_responses: bool = True,
    ):
        # LLM backend location and credential.
        self.llm_endpoint = llm_endpoint
        self.llm_api_key = llm_api_key
        # Saf3AI scanner location and credential.
        self.scanner_endpoint = scanner_endpoint
        self.scanner_api_key = scanner_api_key
        # Policy switches.
        self.block_on_threat = block_on_threat
        self.scan_responses = scan_responses

    def _check_threats(self, scan_results: dict, text_type: str) -> bool:
        """Return True if threats detected, False if clean."""
        detections = scan_results.get("detection_results", {})
        return any(
            entry.get("result") == "MATCH_FOUND"
            for entry in detections.values()
        )

    def generate(
        self,
        prompt: str,
        model: str = "default",
        conversation_id: Optional[str] = None,
    ) -> str:
        """Generate response with security scanning.

        Raises:
            ValueError: when block_on_threat is True and either the prompt
                or the response scan reports a threat.
        """
        # Step 1: scan the prompt before it leaves the process.
        prompt_verdict = scan_prompt(
            prompt=prompt,
            api_endpoint=self.scanner_endpoint,
            model_name=model,
            conversation_id=conversation_id,
            api_key=self.scanner_api_key,
            metadata={"agent_identifier": "custom-llm-agent"},
        )
        if self._check_threats(prompt_verdict, "prompt") and self.block_on_threat:
            raise ValueError("Prompt blocked by security policy")
        # Step 2: Call LLM API
        http_reply = requests.post(
            self.llm_endpoint,
            json={"prompt": prompt, "model": model},
            headers={"Authorization": f"Bearer {self.llm_api_key}"},
            timeout=60,
        )
        http_reply.raise_for_status()
        llm_response = http_reply.json()["text"]
        # Step 3: scan the response as well, if enabled.
        if self.scan_responses:
            response_verdict = scan_response(
                response=llm_response,
                api_endpoint=self.scanner_endpoint,
                model_name=model,
                conversation_id=conversation_id,
                api_key=self.scanner_api_key,
            )
            if self._check_threats(response_verdict, "response") and self.block_on_threat:
                raise ValueError("Response blocked by security policy")
        return llm_response
# Usage
client = SecureLLMClient(
llm_endpoint="https://your-llm-api.com/generate",
llm_api_key=os.getenv("LLM_API_KEY"),
scanner_endpoint=os.getenv("SAF3AI_API_ENDPOINT"),
scanner_api_key=os.getenv("SAF3AI_API_KEY"),
block_on_threat=True,  # raise instead of returning flagged text
scan_responses=True,  # scan model output as well as prompts
)
# generate() raises ValueError when either the prompt or the response
# scan reports a threat and block_on_threat is True.
try:
result = client.generate("Tell me about AI safety")
print(result)
except ValueError as e:
print(f"Request blocked: {e}")
Scan Results Format
The scanner API returns results in this format:
{
"status": "success", # or "error", "timeout"
"detection_results": {
"ThreatType1": {"result": "MATCH_FOUND", "details": {...}},
"ThreatType2": {"result": "NO_MATCH"},
# ...
},
"custom_rule_matches": [
{"rule_id": "rule-123", "action": "block", "matched_text": "..."},
],
"OutofScopeAnalysis": {
"detected_categories": ["Finance", "Legal"]
},
"entities": [...], # Named entities detected
"sentiment": {...}, # Sentiment analysis
"scan_metadata": {
"model": "your-model",
"prompt_length": 150,
"duration_ms": 245,
}
}
Building a Custom Adapter
For complex integrations, create a custom framework adapter:
from saf3ai_sdk.frameworks.base import BaseFrameworkAdapter
from saf3ai_sdk.scanner import scan_prompt, scan_response
class MyFrameworkAdapter(BaseFrameworkAdapter):
    """Custom adapter for MyFramework.

    Supplies prompt/response scanning callbacks that forward text to the
    Saf3AI scanner and consult the optional on_scan_complete policy hook
    inherited from the base adapter.
    """

    def get_framework_name(self) -> str:
        """Identifier the SDK uses to look up this adapter."""
        return "myframework"

    def create_prompt_callback(self):
        """Create callback for prompt scanning."""
        def scan_incoming(prompt: str, **kwargs):
            # Tag the scan with this adapter's identity so agent-specific
            # custom guardrails are applied server-side.
            outcome = scan_prompt(
                prompt=prompt,
                api_endpoint=self.api_endpoint,
                api_key=self.api_key,
                timeout=self.timeout,
                metadata={"agent_identifier": self.agent_identifier},
            )
            # The policy hook may veto the prompt: a False return blocks it.
            if self.on_scan_complete and not self.on_scan_complete(
                prompt, outcome, "prompt"
            ):
                raise ValueError("Blocked by security policy")
            return outcome

        return scan_incoming

    def create_response_callback(self):
        """Create callback for response scanning."""
        def scan_outgoing(response: str, **kwargs):
            # Responses are scanned and reported to the hook, but this
            # callback never raises — blocking is the prompt path's job.
            outcome = scan_response(
                response=response,
                api_endpoint=self.api_endpoint,
                api_key=self.api_key,
                timeout=self.timeout,
                metadata={"agent_identifier": self.agent_identifier},
            )
            if self.on_scan_complete:
                self.on_scan_complete(response, outcome, "response")
            return outcome

        return scan_outgoing
Custom Guardrails
All integration methods support custom guardrails when you pass agent_identifier:
# Via framework callbacks
callbacks = create_framework_security_callbacks(
framework='rest',
agent_identifier='my-agent-abc123', # ← Enables custom guardrails
api_endpoint='http://your-scanner.com',
)
# Via direct scanner
# The same identifier goes through the metadata dict instead.
results = scan_prompt(
prompt="User input",
api_endpoint="http://your-scanner.com",
metadata={"agent_identifier": "my-agent-abc123"} # ← Enables custom guardrails
)
Custom guardrails configured in the Saf3AI dashboard for this agent_identifier will be applied, and matches will appear in custom_rule_matches.
Troubleshooting
Scanner API not responding
- Check `SAF3AI_API_ENDPOINT` is correctly set
- Verify network connectivity to the scanner
- Increase timeout if scans take too long
Missing detection results
- Ensure prompt/response text is not empty
- Check API key has appropriate permissions
- Verify scanner service is running
Custom guardrails not matching
- Confirm `agent_identifier` matches the configuration
- Check guardrail rules are active
- Verify rule patterns match the input