Run local LLMs with Ollama and execute code in Cognitora sandboxes.
```bash
pip install ollama cognitora
```
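The example below assumes a local Ollama server with the `llama3.2` model already available. If you have not downloaded it yet, pull it once:

```bash
ollama pull llama3.2
```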
````python
import re

import ollama
from cognitora import Cognitora

cognitora = Cognitora(api_key="your-cognitora-api-key")

def extract_code_block(llm_response):
    """Pull the first ```python fenced block out of the model's reply."""
    pattern = re.compile(r'```python\n(.*?)\n```', re.DOTALL)
    match = pattern.search(llm_response)
    if match:
        return match.group(1)
    return ""

async def run_ollama_code_interpreter(user_query: str):
    # Start a persistent sandbox session (Python 3.11, 5-minute timeout)
    session = cognitora.sessions.create(
        image="docker.io/library/python:3.11-slim",
        timeout=300,
        persistent=True
    )

    system_prompt = """You are a helpful coding assistant that can execute Python code.
You are given tasks to complete and you run Python code to solve them.
ALWAYS FORMAT YOUR RESPONSE IN MARKDOWN
ALWAYS RESPOND ONLY WITH CODE IN A CODE BLOCK LIKE THIS:
```python
{code}
```"""

    # Ask the local model to generate code for the task
    response = ollama.chat(
        model="llama3.2",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_query}
        ]
    )

    content = response['message']['content']
    code = extract_code_block(content)

    if code:
        # Run the generated code inside the sandbox session
        execution = cognitora.compute.execute(
            session_id=session.id,
            command=["python", "-c", code]
        )

        return {
            "code": code,
            "result": execution.stdout,
            "error": execution.stderr,
            "exit_code": execution.exit_code
        }

    return {"error": "No code block found in response"}
````
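To try the interpreter end to end, drive the coroutine with `asyncio.run`. The query string below is illustrative; the printed fields are the keys of the dict built above, so a missing `"code"` key means the model did not return a code block.

```python
import asyncio

# Hypothetical example query; any task the model can solve with Python works.
result = asyncio.run(
    run_ollama_code_interpreter("Compute the first 10 Fibonacci numbers.")
)

if "code" in result:
    print(result["code"])        # the generated Python source
    print(result["result"])      # stdout captured from the sandbox
    print(result["exit_code"])   # 0 on success
else:
    print(result["error"])       # no code block found in the response
```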
Get started with Ollama and Cognitora in minutes. Secure, scalable, and ready for anything.