"""
Agent Workforce Orchestrator
Manages autonomous agents using local Ollama models
Model Strategy:
- gemma3:12b: General purpose, Romanian language
- qwen2.5-coder:7b: Code generation and debugging
- llama3.2:3b: Fast responses, simple tasks
"""
import subprocess
import json
import time
import requests
from datetime import datetime, timedelta
from pathlib import Path
class OllamaAgent:
    """Base class for all Ollama-powered agents.

    Each agent wraps one local Ollama model and appends a record of every
    query it makes to ``agents/<name>_log.txt``.
    """

    # Recommended model tag per task category (tags must be pulled into the
    # local Ollama installation before use).
    MODELS = {
        "general": "gemma3:12b",
        "coding": "qwen2.5-coder:7b",
        "fast": "llama3.2:3b",
        "romanian": "gemma3:12b",
    }

    def __init__(self, name, model=None, role="", base_url="http://localhost:11434"):
        """Create an agent.

        Args:
            name: Agent name; also used for the per-agent log file.
            model: Ollama model tag; defaults to ``MODELS["general"]``.
            role: Free-text description of the agent's responsibility.
            base_url: Ollama server URL (generalized from the previously
                hard-coded localhost endpoint; default keeps old behavior).
        """
        self.name = name
        self.model = model or self.MODELS["general"]
        self.role = role
        self.base_url = base_url
        self.log_file = Path(f"agents/{name}_log.txt")
        # Make sure the log directory exists before the first write.
        self.log_file.parent.mkdir(parents=True, exist_ok=True)

    def query(self, prompt, system_prompt=None, model_override=None):
        """Send a query via the Ollama HTTP API (faster than the CLI).

        Falls back to the ``ollama`` CLI when the HTTP call fails.

        Args:
            prompt: User prompt text.
            system_prompt: Optional system prompt ("" is sent when absent).
            model_override: Use this model instead of ``self.model``.

        Returns:
            The model's response text, or None if both transports fail.
        """
        model = model_override or self.model
        payload = {
            "model": model,
            "prompt": prompt,
            "system": system_prompt or "",
            "stream": False,  # request a single JSON body, not a token stream
        }
        try:
            response = requests.post(
                f"{self.base_url}/api/generate",
                json=payload,
                timeout=120,
            )
            response.raise_for_status()
            result = response.json()
        except (requests.RequestException, ValueError) as e:
            # Narrowed from bare Exception: only network/HTTP errors and a
            # malformed JSON body should trigger the CLI fallback.
            self.log(f"ERROR: {e}")
            return self._query_cli(prompt, system_prompt, model)
        # Logging happens outside the try so a logging failure does not
        # masquerade as an API failure and re-run the query via the CLI.
        answer = result.get("response", "").strip()
        self.log(f"Model: {model}\nQuery: {prompt[:100]}...\nResponse: {answer[:200]}...")
        return answer

    def _query_cli(self, prompt, system_prompt, model):
        """Fallback: run ``ollama run <model> <prompt>`` as a subprocess.

        Returns:
            Trimmed stdout on success, or None on timeout/launch failure or
            a non-zero exit status.
        """
        full_prompt = f"{system_prompt}\n\n{prompt}" if system_prompt else prompt
        cmd = ["ollama", "run", model, full_prompt]
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
        except (OSError, subprocess.SubprocessError) as e:
            # Covers missing binary (FileNotFoundError) and TimeoutExpired.
            self.log(f"CLI ERROR: {e}")
            return None
        if result.returncode != 0:
            # Fix: the original returned stdout even when the CLI failed,
            # silently handing callers empty or partial output.
            self.log(f"CLI ERROR (exit {result.returncode}): {result.stderr.strip()}")
            return None
        return result.stdout.strip()

    def log(self, message):
        """Append a timestamped message to this agent's log file."""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(f"[{timestamp}] {message}\n")
class OrchestratorAgent(OllamaAgent):
    """Meta-level agent for orchestration and task delegation.

    Runs a larger model (gpt-oss:20b) to decompose a task into a
    structured, multi-step plan assigned to other agents.
    """

    def __init__(self):
        super().__init__(
            name="OrchestratorAgent",
            model="gpt-oss:20b",
            role="Task orchestration and agent coordination",
        )

    def plan_workflow(self, task, available_agents):
        """Ask the model to plan a multi-step workflow for *task*.

        Args:
            task: Description of the overall goal.
            available_agents: Iterable of agent names steps may be assigned to.

        Returns:
            The model's raw text response (expected to be JSON-shaped).
        """
        system_prompt = (
            "You are a workflow orchestrator.\n"
            "Break down complex tasks into steps.\n"
            "Assign each step to the most appropriate agent.\n"
            "Output a structured plan in JSON format."
        )
        roster = ", ".join(available_agents)
        # str.format template: {{ / }} render as literal braces in the
        # JSON example shown to the model.
        template = (
            "Task: {task}\n"
            "Available agents: {agents}\n"
            "Create a workflow plan that:\n"
            "1. Breaks the task into logical steps\n"
            "2. Assigns each step to the best agent\n"
            "3. Defines dependencies between steps\n"
            "Output format:\n"
            "{{\n"
            '"steps": [\n'
            '{{"step": 1, "description": "...", "agent": "...", "depends_on": []}}\n'
            "]\n"
            "}}"
        )
        return self.query(template.format(task=task, agents=roster), system_prompt)
# Usage example: plan a sample workflow and print the raw plan.
if __name__ == "__main__":
    sample_task = "Create a tutorial about AI and post it on the forum"
    roster = ["WriterAgent", "CoderAgent", "ForumAgent"]
    planner = OrchestratorAgent()
    print(planner.plan_workflow(sample_task, roster))