Comprehensive SDK Guide
A complete guide to the Cognitora SDKs for Python and JavaScript/TypeScript.
Cognitora provides powerful SDKs for Python and JavaScript/TypeScript to interact with the Cognitora API. This guide covers installation, configuration, and usage for both SDKs.
Python SDK
The Python SDK for Cognitora provides a user-friendly interface to the Cognitora API, with support for both synchronous and asynchronous operations.
Installation
pip install cognitora
Authentication
You can authenticate by passing your API key directly to the client or by setting it in an environment variable.
from cognitora import Cognitora
import os
# Method 1: Direct API key
client = Cognitora(
api_key="your_api_key_here",
base_url="https://api.cognitora.dev"
)
# Method 2: Environment variable
# Set the COGNITORA_API_KEY environment variable before creating the client,
# e.g. os.environ['COGNITORA_API_KEY'] = "your_api_key_here"
client = Cognitora(base_url="https://api.cognitora.dev")
Code Interpreter
The Code Interpreter API allows you to execute code in a sandboxed environment.
Basic Execution
result = client.code_interpreter.execute(
code="print('Hello from Cognitora!')",
language="python"
)
print(f"Status: {result.data.status}")
for output in result.data.outputs:
print(f"{output.type}: {output.data}")
Sessions
For stateful executions, you can use sessions.
# Create a session
session = client.code_interpreter.create_session(
language="python",
timeout_minutes=30
)
# Execute code in the session
client.code_interpreter.execute(
code="x = 10",
session_id=session.session_id
)
result = client.code_interpreter.execute(
code="print(x)",
session_id=session.session_id
)
# The output will be "10"
print(result.data.outputs[0].data)
Compute
The Compute API allows you to run containerized workloads.
execution = client.compute.create_execution(
image="docker.io/library/python:3.11-slim",
command=["python", "-c", "print('Hello from a container!')"],
cpu_cores=1.0,
memory_mb=512,
max_cost_credits=10,
storage_gb=5
)
print(f"Execution ID: {execution.id}")
print(f"Status: {execution.status}")
JavaScript/TypeScript SDK
The JavaScript/TypeScript SDK provides a modern, type-safe interface to the Cognitora API.
Installation
npm install @cognitora/sdk
Authentication
You can authenticate by passing your API key directly to the client or by setting it in an environment variable.
import { Cognitora } from '@cognitora/sdk';
// Method 1: Direct API key
const client = new Cognitora({
apiKey: 'your_api_key_here',
baseURL: 'https://api.cognitora.dev'
});
// Method 2: Environment variable (an alternative to Method 1 — use one or the other,
// since redeclaring `const client` in the same scope is a syntax error)
const client = new Cognitora({
apiKey: process.env.COGNITORA_API_KEY!,
baseURL: 'https://api.cognitora.dev'
});
Code Interpreter
Basic Execution
const result = await client.codeInterpreter.execute({
code: "console.log('Hello from Cognitora!')",
language: 'javascript'
});
console.log(`Status: ${result.data.status}`);
result.data.outputs.forEach(output => {
console.log(`${output.type}: ${output.data}`);
});
Sessions
// Create a session
const session = await client.codeInterpreter.createSession({
language: 'javascript',
timeout_minutes: 30
});
// Execute code in the session
await client.codeInterpreter.execute({
code: "const x = 10;",
session_id: session.data.session_id
});
const result = await client.codeInterpreter.execute({
code: "console.log(x);",
session_id: session.data.session_id
});
// The output will be "10"
console.log(result.data.outputs[0].data);
Compute
const execution = await client.compute.createExecution({
image: 'docker.io/library/node:18-alpine',
command: ['node', '-e', "console.log('Hello from a container!')"],
cpu_cores: 1.0,
memory_mb: 512,
max_cost_credits: 10,
storage_gb: 5
});
console.log(`Execution ID: ${execution.id}`);
console.log(`Status: ${execution.status}`);
Framework Integrations
LangChain Integration
from cognitora.integrations.langchain import CognitoraTool
from langchain.agents import initialize_agent
from langchain.llms import OpenAI
# Create Cognitora tool for LangChain
cognitora_tool = CognitoraTool(
api_key="your_api_key",
default_config={
"image": "python:3.11",
"resource_limits": {
"cpu_cores": 2,
"memory_gb": 4
}
}
)
# Initialize LangChain agent with Cognitora tool
llm = OpenAI(temperature=0)
tools = [cognitora_tool]
agent = initialize_agent(
tools,
llm,
agent="zero-shot-react-description",
verbose=True
)
# Agent can now execute code securely
response = agent.run("""
Write and execute a Python script that:
1. Downloads the latest stock prices for AAPL
2. Calculates the 50-day moving average
3. Creates a visualization and saves it as a PNG
""")
print(response)
AutoGPT Integration
from cognitora.integrations.autogpt import CognitoraExecutor
from autogpt.config import Config
from autogpt.main import run_auto_gpt
# Configure AutoGPT with Cognitora executor
config = Config()
config.cognitora_api_key = "your_api_key"
# Custom executor for secure code execution
executor = CognitoraExecutor(
config=config,
execution_config={
"image": "autogpt/runtime:latest",
"resource_limits": {
"cpu_cores": 4,
"memory_gb": 8,
"storage_gb": 20
},
"timeout_seconds": 3600,
"networking": {
"internet_access": True,
"a2a_enabled": True
}
}
)
# Run AutoGPT with secure execution
await run_auto_gpt(
config=config,
executor=executor,
goal="Analyze market data and generate investment recommendations"
)
CrewAI Integration
from crewai import Agent, Task, Crew
from cognitora.integrations.crewai import CognitoraTaskExecutor
# Configure Cognitora-powered agents
executor = CognitoraTaskExecutor(
api_key="your_api_key",
default_config={
"image": "python:3.11-data",
"resource_limits": {
"cpu_cores": 2,
"memory_gb": 6
}
}
)
# Define specialized agents
data_analyst = Agent(
role='Data Analyst',
goal='Analyze large datasets efficiently',
backstory="Expert in data processing and statistical analysis",
tools=[executor.create_tool("data_analysis")],
verbose=True
)
ml_engineer = Agent(
role='ML Engineer',
goal='Build and train machine learning models',
backstory="Specialist in MLOps and model deployment",
tools=[executor.create_tool("ml_training")],
verbose=True
)
# Define collaborative tasks
analysis_task = Task(
description="""
Analyze the provided dataset for patterns and insights.
Generate statistical summaries and visualizations.
""",
agent=data_analyst
)
modeling_task = Task(
description="""
Based on the analysis results, build and train a predictive model.
Optimize hyperparameters and evaluate performance.
""",
agent=ml_engineer
)
# Create crew with Cognitora-powered execution
crew = Crew(
agents=[data_analyst, ml_engineer],
tasks=[analysis_task, modeling_task],
verbose=2
)
# Execute the crew workflow
result = crew.kickoff()
print(result)
A2A Protocol Implementation
Direct Agent Communication
# Initialize A2A connection
a2a_client = await client.a2a.connect(
agent_id="data-processor-001",
capabilities=["data_processing", "file_io", "computation"],
discovery_config={
"auto_discovery": True,
"heartbeat_interval": 30,
"timeout": 300
}
)
# Discover other agents
available_agents = await a2a_client.discover_agents(
required_capabilities=["machine_learning", "gpu_compute"]
)
# Send task to ML agent
ml_agent = available_agents[0]
task_response = await a2a_client.send_task(
target_agent=ml_agent.id,
task={
"type": "train_model",
"data_location": "s3://bucket/training_data.parquet",
"model_config": {
"algorithm": "xgboost",
"max_depth": 10,
"learning_rate": 0.1
},
"output_location": "s3://bucket/models/"
},
timeout=3600,
priority="high"
)
# Handle task completion
if task_response.status == "completed":
model_path = task_response.result["model_path"]
metrics = task_response.result["metrics"]
print(f"Model trained successfully: {model_path}")
print(f"Accuracy: {metrics['accuracy']}")
Multi-Agent Workflows
# Orchestrate complex multi-agent workflow
workflow = await client.workflows.create({
"name": "data_pipeline_workflow",
"agents": [
{
"id": "extractor",
"image": "data-extractor:latest",
"role": "data_extraction"
},
{
"id": "processor",
"image": "data-processor:latest",
"role": "data_processing"
},
{
"id": "analyzer",
"image": "data-analyzer:latest",
"role": "analysis"
}
],
"tasks": [
{
"name": "extract_data",
"agent": "extractor",
"config": {
"sources": ["api", "database", "files"],
"output_format": "parquet"
}
},
{
"name": "process_data",
"agent": "processor",
"depends_on": ["extract_data"],
"config": {
"transformations": ["clean", "normalize", "feature_engineering"]
}
},
{
"name": "analyze_data",
"agent": "analyzer",
"depends_on": ["process_data"],
"config": {
"analysis_type": "predictive_modeling"
}
}
]
})
# Execute workflow
execution = await workflow.execute()
# Monitor progress
async for status in execution.stream_status():
print(f"Task {status.task_name}: {status.status}")
if status.status == "completed":
print(f"Output: {status.output}")
Performance Optimization
Resource Right-Sizing
# Auto-optimization based on historical data
optimizer = client.optimization.create_advisor(
execution_history_days=30,
optimization_target="cost_performance_balanced"
)
# Get recommendations
recommendations = await optimizer.analyze_workload(
workload_pattern="ml_training",
typical_runtime_hours=2.5,
data_size_gb=50
)
# Apply optimized configuration
optimized_config = ExecutionConfig(
**recommendations.recommended_config
)
print(f"Estimated cost savings: {recommendations.cost_savings_percent}%")
print(f"Performance impact: {recommendations.performance_impact}")
Caching and Optimization
# Enable intelligent caching
config = ExecutionConfig(
# ... other config
caching={
"enabled": True,
"cache_key_strategy": "content_hash",
"ttl_hours": 24,
"cache_layers": ["filesystem", "memory", "network"]
},
optimization={
"pre_warm_containers": True,
"optimize_image_layers": True,
"use_spot_instances": True,
"auto_scaling": {
"metric": "cpu_utilization",
"target": 70,
"min_instances": 1,
"max_instances": 10
}
}
)
Error Handling & Debugging
Comprehensive Error Handling
from cognitora.exceptions import (
CognitoraError,
ExecutionError,
ResourceLimitError,
CheckpointError,
NetworkError
)
async def robust_execution(config):
try:
execution = await client.execute(config)
result = await execution.wait_for_completion()
return result
except ResourceLimitError as e:
print(f"Resource limit exceeded: {e.resource_type}")
print(f"Limit: {e.limit}, Usage: {e.current_usage}")
# Auto-retry with reduced resources
reduced_config = config.copy()
reduced_config.resource_limits.memory_gb *= 0.8
return await robust_execution(reduced_config)
except ExecutionError as e:
print(f"Execution failed with exit code: {e.exit_code}")
print(f"Last 100 log lines:\n{e.logs[-100:]}")
# Create debug checkpoint for investigation
if e.execution_id:
debug_checkpoint = await client.create_debug_checkpoint(
execution_id=e.execution_id,
include_core_dump=True
)
print(f"Debug checkpoint created: {debug_checkpoint.id}")
except NetworkError as e:
print(f"Network error: {e.message}")
# Implement exponential backoff retry
await asyncio.sleep(2 ** e.retry_count)
return await robust_execution(config)
except CognitoraError as e:
print(f"API error: {e.message}")
print(f"Request ID: {e.request_id}")
print(f"Status code: {e.status_code}")
raise # Re-raise for unknown errors
Debugging Tools
# Enable debug mode
client.set_debug_mode(True)
# Advanced logging
execution = await client.execute(config, debug_options={
"capture_system_calls": True,
"profile_performance": True,
"trace_network_calls": True,
"monitor_resource_usage": True
})
# Access debug information
debug_info = await execution.get_debug_info()
print(f"System calls: {len(debug_info.syscalls)}")
print(f"Performance profile: {debug_info.performance_profile}")
print(f"Network trace: {debug_info.network_trace}")
Best Practices
Security Best Practices
# Secure credential management
from cognitora.security import SecretManager
secret_manager = SecretManager(client)
# Store secrets securely
await secret_manager.create_secret(
name="database_password",
value="super_secure_password",
description="Database access credential"
)
# Use secrets in execution
config = ExecutionConfig(
# ... other config
secrets=[
{
"name": "DATABASE_PASSWORD",
"secret_ref": "database_password"
}
]
)
Cost Optimization
# Implement cost controls
cost_controls = {
"max_cost_per_execution": 5.0, # USD
"max_cost_per_hour": 50.0, # USD
"budget_alerts": [
{"threshold": 0.8, "action": "notify"},
{"threshold": 0.95, "action": "throttle"},
{"threshold": 1.0, "action": "terminate"}
]
}
client.set_cost_controls(cost_controls)
# Monitor spending
spending = await client.billing.get_current_usage()
print(f"Current month spending: ${spending.current_month_usd}")
print(f"Projected month spending: ${spending.projected_month_usd}")