Compare commits
No commits in common. "0cde86a0c77da91fe758e60cc67e5b46a5769298" and "49f785cf8d81f694d49160c4c14b2a48680754c0" have entirely different histories.
0cde86a0c7
...
49f785cf8d
68
agent.py
68
agent.py
@ -1,68 +0,0 @@
|
|||||||
import json
|
|
||||||
from dataclasses import dataclass
|
|
||||||
from typing import Optional, List, Dict, Any
|
|
||||||
from rich.console import Console
|
|
||||||
from qwen_agent.agents import Assistant
|
|
||||||
|
|
||||||
@dataclass
class Agent:
    """Thin CLI agent around a qwen-agent ``Assistant``.

    Holds the LLM endpoint configuration plus a set of MCP tools, streams
    a response for a single prompt, and pretty-prints the result with rich.
    """

    model: str                      # model name forwarded to qwen-agent
    server: str                     # model server (base URL) for the LLM
    api_key: str                    # API key for the model server
    max_tokens: int = 30000         # generation budget forwarded to bot.run
    enable_thinking: bool = True    # forwarded to bot.run
    # Tool spec for Assistant(function_list=...); populated with the default
    # MCP servers + code_interpreter in __post_init__ when left as None.
    tools: Optional[List[Dict[str, Any]]] = None
    # Per-instance rich console, created in __post_init__.  A plain
    # ``console: Console = Console()`` default would be evaluated once at
    # class-definition time and shared by every Agent instance (the same
    # pitfall as a mutable default argument), so default to None instead.
    console: Optional[Console] = None

    def __post_init__(self) -> None:
        if self.console is None:
            self.console = Console()
        if self.tools is None:
            self.tools = [
                {'mcpServers': {
                    'time': {
                        'command': 'uvx',
                        'args': ['mcp-server-time', '--local-timezone=Europe/London']
                    },
                    "fetch": {
                        "command": "uvx",
                        "args": ["mcp-server-fetch"]
                    },
                    "ddg-search": {
                        "command": "npx",
                        "args": ["-y", "duckduckgo-mcp-server"]
                    },
                }},
                'code_interpreter',
            ]

    def run(self, prompt: str) -> None:
        """Run the agent on *prompt* and print the final response.

        Streams generations from the Assistant under a spinner, keeps the
        last message of the last yielded batch, then prints the full
        response object and its extracted content.  Errors during
        execution are reported on the console rather than raised.
        """
        llm_cfg = {
            'model': self.model,
            'model_server': self.server,
            'api_key': self.api_key,
        }

        # Define Agent
        bot = Assistant(llm=llm_cfg, function_list=self.tools)

        # Streaming generation
        messages = [{'role': 'user', 'content': prompt}]

        final_responses = None
        try:
            with self.console.status("[bold blue]Thinking...", spinner="dots"):
                for responses in bot.run(messages=messages,
                                         enable_thinking=self.enable_thinking,
                                         max_tokens=self.max_tokens):
                    # Peek at the last message instead of pop(): pop() mutates
                    # the list the generator yielded (which can corrupt later
                    # iterations of the stream) and raises IndexError on an
                    # empty batch.
                    if responses:
                        final_responses = responses[-1]
        except Exception as e:
            # Best-effort CLI: report the failure and fall through to the
            # "no response" branch instead of crashing the process.
            self.console.print(f"[bold red]An error occurred during agent execution:[/] {e}")

        # Pretty-print the final response object
        if final_responses:
            self.console.print("\n[bold green]--- Full Response Object ---[/]")
            self.console.print(json.dumps(final_responses, indent=2))
            self.console.print("\n[bold green]--- Extracted Content ---[/]")
            self.console.print(final_responses.get('content', 'No content found in response.'))
        else:
            self.console.print("[bold red]No final response received from the agent.[/]")
|
|
@ -1,6 +1,11 @@
|
|||||||
|
import json
|
||||||
import sys
|
import sys
|
||||||
import argparse
|
import argparse
|
||||||
from agent import Agent
|
from typing import Optional
|
||||||
|
from rich.console import Console
|
||||||
|
from rich.spinner import Spinner
|
||||||
|
from qwen_agent.agents import Assistant
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
def setup_argparse():
|
def setup_argparse():
|
||||||
parser = argparse.ArgumentParser(description='Qwen3 Agent CLI')
|
parser = argparse.ArgumentParser(description='Qwen3 Agent CLI')
|
||||||
@ -26,6 +31,57 @@ def read_prompt(text: str) -> str:
|
|||||||
return sys.stdin.read().strip()
|
return sys.stdin.read().strip()
|
||||||
return text
|
return text
|
||||||
|
|
||||||
|
def run_agent(model: str, server: str, api_key: str, prompt: str) -> None:
    """Drive one prompt through a qwen-agent Assistant and print the result.

    Builds the LLM configuration and MCP tool list, streams the
    assistant's output under a spinner, then pretty-prints the final
    message (or an error) on a rich console.
    """
    console = Console()

    # MCP servers plus the built-in code interpreter.
    tool_list = [
        {
            'mcpServers': {
                'time': {
                    'command': 'uvx',
                    'args': ['mcp-server-time', '--local-timezone=Europe/London'],
                },
                "fetch": {
                    "command": "uvx",
                    "args": ["mcp-server-fetch"],
                },
                "ddg-search": {
                    "command": "npx",
                    "args": ["-y", "duckduckgo-mcp-server"],
                },
            }
        },
        'code_interpreter',
    ]

    assistant = Assistant(
        llm={'model': model, 'model_server': server, 'api_key': api_key},
        function_list=tool_list,
    )

    # Stream generations, remembering only the last message of the last batch.
    final_responses = None
    try:
        with console.status("[bold blue]Thinking...", spinner="dots") as status:
            for batch in assistant.run(messages=[{'role': 'user', 'content': prompt}],
                                       enable_thinking=True,
                                       max_tokens=30000):
                final_responses = batch.pop()
    except Exception as e:
        console.print(f"[bold red]An error occurred during agent execution:[/] {e}")

    # Pretty-print whatever we ended up with.
    if final_responses:
        console.print("\n[bold green]--- Full Response Object ---[/]")
        console.print(json.dumps(final_responses, indent=2))
        console.print("\n[bold green]--- Extracted Content ---[/]")
        console.print(final_responses.get('content', 'No content found in response.'))
    else:
        console.print("[bold red]No final response received from the agent.[/]")
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = setup_argparse()
|
parser = setup_argparse()
|
||||||
@ -33,12 +89,7 @@ def main():
|
|||||||
|
|
||||||
if args.command == 'prompt':
|
if args.command == 'prompt':
|
||||||
prompt_text = read_prompt(args.text)
|
prompt_text = read_prompt(args.text)
|
||||||
agent = Agent(
|
run_agent(args.model, args.server, args.api_key, prompt_text)
|
||||||
model=args.model,
|
|
||||||
server=args.server,
|
|
||||||
api_key=args.api_key
|
|
||||||
)
|
|
||||||
agent.run(prompt_text)
|
|
||||||
else:
|
else:
|
||||||
parser.print_help()
|
parser.print_help()
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user