refactor: introduce AgentConfig dataclass for cleaner configuration management

This commit is contained in:
Willem van den Ende (aider) 2025-05-04 09:33:19 +01:00
parent 49f785cf8d
commit 527492326a

View File

@@ -1,12 +1,42 @@
import json import json
import sys import sys
import argparse import argparse
from typing import Optional from typing import Optional, List, Dict, Any
from dataclasses import dataclass
from rich.console import Console from rich.console import Console
from rich.spinner import Spinner from rich.spinner import Spinner
from qwen_agent.agents import Assistant from qwen_agent.agents import Assistant
from transformers import pipeline from transformers import pipeline
@dataclass
class AgentConfig:
    """Configuration bundle for running the Qwen agent.

    Carries the LLM connection settings (model name, server URL, API key)
    together with generation options and the tool list handed to the
    Assistant. Leaving ``tools`` as ``None`` installs the default tool set
    in ``__post_init__``; an explicit list (even an empty one) is kept as-is.
    """

    model: str                     # model identifier passed to the LLM config
    server: str                    # model server URL
    api_key: str                   # API key for the model server
    max_tokens: int = 30000        # generation token budget
    enable_thinking: bool = True   # enable the model's "thinking" mode
    tools: Optional[List[Dict[str, Any]]] = None  # None -> defaults below

    def __post_init__(self) -> None:
        # Only substitute the defaults when the caller supplied nothing;
        # a caller-provided list is never overridden.
        if self.tools is None:
            self.tools = self._default_tools()

    @staticmethod
    def _default_tools() -> List[Any]:
        """Return the built-in tool set: MCP servers plus the code interpreter."""
        mcp_servers = {
            'time': {
                'command': 'uvx',
                'args': ['mcp-server-time', '--local-timezone=Europe/London']
            },
            "fetch": {
                "command": "uvx",
                "args": ["mcp-server-fetch"]
            },
            "ddg-search": {
                "command": "npx",
                "args": ["-y", "duckduckgo-mcp-server"]
            },
        }
        return [{'mcpServers': mcp_servers}, 'code_interpreter']
def setup_argparse(): def setup_argparse():
parser = argparse.ArgumentParser(description='Qwen3 Agent CLI') parser = argparse.ArgumentParser(description='Qwen3 Agent CLI')
parser.add_argument('--model', default='qwen3:32b', parser.add_argument('--model', default='qwen3:32b',
@@ -31,36 +61,16 @@ def read_prompt(text: str) -> str:
return sys.stdin.read().strip() return sys.stdin.read().strip()
return text return text
def run_agent(model: str, server: str, api_key: str, prompt: str) -> None: def run_agent(config: AgentConfig, prompt: str) -> None:
"""Run the agent with the given configuration and prompt""" """Run the agent with the given configuration and prompt"""
llm_cfg = { llm_cfg = {
'model': model, 'model': config.model,
'model_server': server, 'model_server': config.server,
'api_key': api_key, 'api_key': config.api_key,
} }
# Define Tools
tools = [
{'mcpServers': { # You can specify the MCP configuration file
'time': {
'command': 'uvx',
'args': ['mcp-server-time', '--local-timezone=Europe/London']
},
"fetch": {
"command": "uvx",
"args": ["mcp-server-fetch"]
},
"ddg-search": {
"command": "npx",
"args": ["-y", "duckduckgo-mcp-server"]
},
}
},
'code_interpreter', # Built-in tools
]
# Define Agent # Define Agent
bot = Assistant(llm=llm_cfg, function_list=tools) bot = Assistant(llm=llm_cfg, function_list=config.tools)
console = Console() console = Console()
# Streaming generation # Streaming generation
@@ -69,7 +79,9 @@ def run_agent(model: str, server: str, api_key: str, prompt: str) -> None:
final_responses = None final_responses = None
try: try:
with console.status("[bold blue]Thinking...", spinner="dots") as status: with console.status("[bold blue]Thinking...", spinner="dots") as status:
for responses in bot.run(messages=messages, enable_thinking=True, max_tokens=30000): for responses in bot.run(messages=messages,
enable_thinking=config.enable_thinking,
max_tokens=config.max_tokens):
final_responses = responses.pop() final_responses = responses.pop()
except Exception as e: except Exception as e:
console.print(f"[bold red]An error occurred during agent execution:[/] {e}") console.print(f"[bold red]An error occurred during agent execution:[/] {e}")
@@ -89,7 +101,12 @@ def main():
if args.command == 'prompt': if args.command == 'prompt':
prompt_text = read_prompt(args.text) prompt_text = read_prompt(args.text)
run_agent(args.model, args.server, args.api_key, prompt_text) config = AgentConfig(
model=args.model,
server=args.server,
api_key=args.api_key
)
run_agent(config, prompt_text)
else: else:
parser.print_help() parser.print_help()