refactor: Extract agent logic into separate Agent class and simplify agentic_search.py

Author: Willem van den Ende (aider)
Date: 2025-05-04 09:36:38 +01:00
parent 527492326a
commit 0cde86a0c7
2 changed files with 71 additions and 71 deletions

agent.py (new file, 68 additions)

@@ -0,0 +1,68 @@
import json
from dataclasses import dataclass
from typing import Optional, List, Dict, Any

from rich.console import Console

from qwen_agent.agents import Assistant


@dataclass
class Agent:
    model: str
    server: str
    api_key: str
    max_tokens: int = 30000
    enable_thinking: bool = True
    tools: Optional[List[Dict[str, Any]]] = None
    console: Console = Console()

    def __post_init__(self):
        if self.tools is None:
            self.tools = [
                {'mcpServers': {
                    'time': {
                        'command': 'uvx',
                        'args': ['mcp-server-time', '--local-timezone=Europe/London']
                    },
                    "fetch": {
                        "command": "uvx",
                        "args": ["mcp-server-fetch"]
                    },
                    "ddg-search": {
                        "command": "npx",
                        "args": ["-y", "duckduckgo-mcp-server"]
                    },
                }},
                'code_interpreter',
            ]

    def run(self, prompt: str) -> None:
        """Run the agent with the given prompt"""
        llm_cfg = {
            'model': self.model,
            'model_server': self.server,
            'api_key': self.api_key,
        }

        # Define Agent
        bot = Assistant(llm=llm_cfg, function_list=self.tools)

        # Streaming generation
        messages = [{'role': 'user', 'content': prompt}]
        final_responses = None
        try:
            with self.console.status("[bold blue]Thinking...", spinner="dots") as status:
                for responses in bot.run(messages=messages,
                                         enable_thinking=self.enable_thinking,
                                         max_tokens=self.max_tokens):
                    final_responses = responses.pop()
        except Exception as e:
            self.console.print(f"[bold red]An error occurred during agent execution:[/] {e}")

        # Pretty-print the final response object
        if final_responses:
            self.console.print("\n[bold green]--- Full Response Object ---[/]")
            self.console.print(json.dumps(final_responses, indent=2))

            self.console.print("\n[bold green]--- Extracted Content ---[/]")
            self.console.print(final_responses.get('content', 'No content found in response.'))
        else:
            self.console.print("[bold red]No final response received from the agent.[/]")

agentic_search.py

@@ -1,41 +1,6 @@
-import json
 import sys
 import argparse
-from typing import Optional, List, Dict, Any
-from dataclasses import dataclass
-from rich.console import Console
-from rich.spinner import Spinner
-from qwen_agent.agents import Assistant
-from transformers import pipeline
+from agent import Agent
 
-@dataclass
-class AgentConfig:
-    model: str
-    server: str
-    api_key: str
-    max_tokens: int = 30000
-    enable_thinking: bool = True
-    tools: Optional[List[Dict[str, Any]]] = None
-
-    def __post_init__(self):
-        if self.tools is None:
-            self.tools = [
-                {'mcpServers': {
-                    'time': {
-                        'command': 'uvx',
-                        'args': ['mcp-server-time', '--local-timezone=Europe/London']
-                    },
-                    "fetch": {
-                        "command": "uvx",
-                        "args": ["mcp-server-fetch"]
-                    },
-                    "ddg-search": {
-                        "command": "npx",
-                        "args": ["-y", "duckduckgo-mcp-server"]
-                    },
-                }},
-                'code_interpreter',
-            ]
-
 def setup_argparse():
     parser = argparse.ArgumentParser(description='Qwen3 Agent CLI')
@@ -61,39 +26,6 @@ def read_prompt(text: str) -> str:
         return sys.stdin.read().strip()
     return text
 
-def run_agent(config: AgentConfig, prompt: str) -> None:
-    """Run the agent with the given configuration and prompt"""
-    llm_cfg = {
-        'model': config.model,
-        'model_server': config.server,
-        'api_key': config.api_key,
-    }
-
-    # Define Agent
-    bot = Assistant(llm=llm_cfg, function_list=config.tools)
-    console = Console()
-
-    # Streaming generation
-    messages = [{'role': 'user', 'content': prompt}]
-    final_responses = None
-    try:
-        with console.status("[bold blue]Thinking...", spinner="dots") as status:
-            for responses in bot.run(messages=messages,
-                                     enable_thinking=config.enable_thinking,
-                                     max_tokens=config.max_tokens):
-                final_responses = responses.pop()
-    except Exception as e:
-        console.print(f"[bold red]An error occurred during agent execution:[/] {e}")
-
-    # Pretty-print the final response object
-    if final_responses:
-        console.print("\n[bold green]--- Full Response Object ---[/]")
-        console.print(json.dumps(final_responses, indent=2))
-
-        console.print("\n[bold green]--- Extracted Content ---[/]")
-        console.print(final_responses.get('content', 'No content found in response.'))
-    else:
-        console.print("[bold red]No final response received from the agent.[/]")
-
 def main():
     parser = setup_argparse()
@@ -101,12 +33,12 @@ def main():
     if args.command == 'prompt':
         prompt_text = read_prompt(args.text)
 
-        config = AgentConfig(
+        agent = Agent(
            model=args.model,
            server=args.server,
            api_key=args.api_key
        )
-        run_agent(config, prompt_text)
+        agent.run(prompt_text)
     else:
         parser.print_help()
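Since tools is an optional field whose default is the MCP server configuration shown in agent.py, a caller can pass a narrower tool list instead. A sketch under that assumption, again with placeholder connection values; it restricts the agent to the built-in code_interpreter tool that already appears in the default list.

    from agent import Agent

    # Skip the uvx/npx-based MCP servers and keep only the code interpreter.
    agent = Agent(
        model="qwen3",                        # placeholder
        server="http://localhost:8000/v1",    # placeholder
        api_key="EMPTY",                      # placeholder
        tools=["code_interpreter"],
    )
    agent.run("Use the code interpreter to compute 2 ** 10.")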