From 0cde86a0c77da91fe758e60cc67e5b46a5769298 Mon Sep 17 00:00:00 2001
From: "Willem van den Ende (aider)" <willem@qwan.eu>
Date: Sun, 4 May 2025 09:36:38 +0100
Subject: [PATCH] refactor: Extract agent logic into separate Agent class and
 simplify agentic_search.py

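Move the agent configuration and run loop out of agentic_search.py into a
new Agent dataclass in agent.py, so the CLI module only handles argument
parsing and prompt reading.

Minimal usage sketch of the extracted class (the model name, server URL
and API key below are placeholders, not values shipped with this change):

    from agent import Agent

    agent = Agent(
        model='qwen3',                      # placeholder model id
        server='http://localhost:8000/v1',  # placeholder OpenAI-compatible endpoint
        api_key='EMPTY',                    # placeholder key
    )
    agent.run('What time is it in London?')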
---
 agent.py          | 68 +++++++++++++++++++++++++++++++++++++++++++
 agentic_search.py | 74 ++---------------------------------------------
 2 files changed, 71 insertions(+), 71 deletions(-)
 create mode 100644 agent.py

diff --git a/agent.py b/agent.py
new file mode 100644
index 0000000..8ba834a
--- /dev/null
+++ b/agent.py
@@ -0,0 +1,68 @@
+import json
+from dataclasses import dataclass, field
+from typing import Optional, List, Dict, Any
+from rich.console import Console
+from qwen_agent.agents import Assistant
+
+@dataclass
+class Agent:
+    model: str
+    server: str
+    api_key: str
+    max_tokens: int = 30000
+    enable_thinking: bool = True
+    tools: Optional[List[Dict[str, Any]]] = None
+    console: Console = field(default_factory=Console)  # fresh Console per instance, not a shared class-level default
+
+    def __post_init__(self):
+        if self.tools is None:
+            self.tools = [
+                {'mcpServers': {
+                    'time': {
+                        'command': 'uvx',
+                        'args': ['mcp-server-time', '--local-timezone=Europe/London']
+                    },
+                    'fetch': {
+                        'command': 'uvx',
+                        'args': ['mcp-server-fetch']
+                    },
+                    'ddg-search': {
+                        'command': 'npx',
+                        'args': ['-y', 'duckduckgo-mcp-server']
+                    },
+                }},
+                'code_interpreter',
+            ]
+
+    def run(self, prompt: str) -> None:
+        """Run the agent with the given prompt"""
+        llm_cfg = {
+            'model': self.model,
+            'model_server': self.server,
+            'api_key': self.api_key,
+        }
+
+        # Build the underlying qwen-agent Assistant
+        bot = Assistant(llm=llm_cfg, function_list=self.tools)
+
+        # Streaming generation
+        messages = [{'role': 'user', 'content': prompt}]
+
+        final_responses = None
+        try:
+            with self.console.status("[bold blue]Thinking...", spinner="dots"):
+                for responses in bot.run(messages=messages,
+                                         enable_thinking=self.enable_thinking,
+                                         max_tokens=self.max_tokens):
+                    final_responses = responses[-1]
+        except Exception as e:
+            self.console.print(f"[bold red]An error occurred during agent execution:[/] {e}")
+
+        # Pretty-print the final response object
+        if final_responses:
+            self.console.print("\n[bold green]--- Full Response Object ---[/]")
+            self.console.print(json.dumps(final_responses, indent=2))
+            self.console.print("\n[bold green]--- Extracted Content ---[/]")
+            self.console.print(final_responses.get('content', 'No content found in response.'))
+        else:
+            self.console.print("[bold red]No final response received from the agent.[/]")
diff --git a/agentic_search.py b/agentic_search.py
index 1ee99c4..250da5c 100644
--- a/agentic_search.py
+++ b/agentic_search.py
@@ -1,41 +1,6 @@
-import json
 import sys
 import argparse
-from typing import Optional, List, Dict, Any
-from dataclasses import dataclass
-from rich.console import Console
-from rich.spinner import Spinner
-from qwen_agent.agents import Assistant
-from transformers import pipeline
-
-@dataclass
-class AgentConfig:
-    model: str
-    server: str
-    api_key: str
-    max_tokens: int = 30000
-    enable_thinking: bool = True
-    tools: Optional[List[Dict[str, Any]]] = None
-
-    def __post_init__(self):
-        if self.tools is None:
-            self.tools = [
-                {'mcpServers': {
-                    'time': {
-                        'command': 'uvx',
-                        'args': ['mcp-server-time', '--local-timezone=Europe/London']
-                    },
-                    "fetch": {
-                        "command": "uvx",
-                        "args": ["mcp-server-fetch"]
-                    },
-                    "ddg-search": {
-                        "command": "npx",
-                        "args": ["-y", "duckduckgo-mcp-server"]
-                    },
-                }},
-                'code_interpreter',
-            ]
+from agent import Agent
 
 def setup_argparse():
     parser = argparse.ArgumentParser(description='Qwen3 Agent CLI')
@@ -61,39 +26,6 @@ def read_prompt(text: str) -> str:
         return sys.stdin.read().strip()
     return text
 
-def run_agent(config: AgentConfig, prompt: str) -> None:
-    """Run the agent with the given configuration and prompt"""
-    llm_cfg = {
-        'model': config.model,
-        'model_server': config.server,
-        'api_key': config.api_key,
-    }
-
-    # Define Agent
-    bot = Assistant(llm=llm_cfg, function_list=config.tools)
-    console = Console()
-
-    # Streaming generation
-    messages = [{'role': 'user', 'content': prompt}]
-
-    final_responses = None
-    try:
-        with console.status("[bold blue]Thinking...", spinner="dots") as status:
-            for responses in bot.run(messages=messages, 
-                                   enable_thinking=config.enable_thinking, 
-                                   max_tokens=config.max_tokens):
-                final_responses = responses.pop()
-    except Exception as e:
-        console.print(f"[bold red]An error occurred during agent execution:[/] {e}")
-
-    # Pretty-print the final response object
-    if final_responses:
-        console.print("\n[bold green]--- Full Response Object ---[/]")
-        console.print(json.dumps(final_responses, indent=2))
-        console.print("\n[bold green]--- Extracted Content ---[/]")
-        console.print(final_responses.get('content', 'No content found in response.'))
-    else:
-        console.print("[bold red]No final response received from the agent.[/]")
 
 def main():
     parser = setup_argparse()
@@ -101,12 +33,12 @@ def main():
     
     if args.command == 'prompt':
         prompt_text = read_prompt(args.text)
-        config = AgentConfig(
+        agent = Agent(
             model=args.model,
             server=args.server,
             api_key=args.api_key
         )
-        run_agent(config, prompt_text)
+        agent.run(prompt_text)
     else:
         parser.print_help()