Compare commits

..

No commits in common. "49f785cf8d81f694d49160c4c14b2a48680754c0" and "6f7735f70f86c60da0243f744def4904e64a2e24" have entirely different histories.

2 changed files with 60 additions and 96 deletions

View File

@@ -56,19 +56,9 @@ Event working with tools, when used with `agentic_search.py` worked, up to a poi
2. **Run the agent script:** 2. **Run the agent script:**
```bash ```bash
# Run with direct prompt python agentic_search.py
python agentic_search.py --model "qwen3:32b" prompt "Your prompt here"
# Run with prompt from stdin
echo "Your prompt" | python agentic_search.py prompt -
# Run with custom server and API key
python agentic_search.py \
--model "hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q5_K_M" \
--server "https://api.example.com/v1" \
--api-key "your-key" \
prompt "Your prompt"
``` ```
This will execute the predefined query in the script, run the agent, print progress dots (`.`) for each response chunk, and finally output the full structured response and the extracted content.
## Dependencies ## Dependencies

View File

@@ -1,97 +1,71 @@
import json import json
import sys
import argparse
from typing import Optional
from rich.console import Console from rich.console import Console
from rich.spinner import Spinner from rich.spinner import Spinner
from qwen_agent.agents import Assistant from qwen_agent.agents import Assistant
from transformers import pipeline
def setup_argparse(): # Define LLM
parser = argparse.ArgumentParser(description='Qwen3 Agent CLI') llm_cfg = {
parser.add_argument('--model', default='qwen3:32b', 'model': 'hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q5_K_M',
help='Model identifier (default: qwen3:32b)') # 'model': 'qwen3:32b',
parser.add_argument('--server', default='http://localhost:11434/v1',
help='Model server URL (default: http://localhost:11434/v1)')
parser.add_argument('--api-key', default='EMPTY',
help='API key for the model server (default: EMPTY)')
subparsers = parser.add_subparsers(dest='command', help='Available commands')
# Prompt command
prompt_parser = subparsers.add_parser('prompt', help='Run agent with a prompt')
prompt_parser.add_argument('text', nargs='?', default='-',
help='Prompt text or "-" for stdin (default: -)')
return parser
def read_prompt(text: str) -> str: # Use a custom endpoint compatible with OpenAI API:
"""Read prompt from argument or stdin if text is '-'""" 'model_server': 'http://localhost:11434/v1', # api_base
if text == '-': 'api_key': 'EMPTY',
return sys.stdin.read().strip()
return text
def run_agent(model: str, server: str, api_key: str, prompt: str) -> None: # Other parameters:
"""Run the agent with the given configuration and prompt""" # 'generate_cfg': {
llm_cfg = { # # Add: When the response content is `<think>this is the thought</think>this is the answer;
'model': model, # # Do not add: When the response has been separated by reasoning_content and content.
'model_server': server, # 'thought_in_content': True,
'api_key': api_key, # },
} }
# Define Tools # Define Tools
tools = [ tools = [
{'mcpServers': { # You can specify the MCP configuration file {'mcpServers': { # You can specify the MCP configuration file
'time': { 'time': {
'command': 'uvx', 'command': 'uvx',
'args': ['mcp-server-time', '--local-timezone=Europe/London'] 'args': ['mcp-server-time', '--local-timezone=Europe/London']
},
"fetch": {
"command": "uvx",
"args": ["mcp-server-fetch"]
},
"ddg-search": {
"command": "npx",
"args": ["-y", "duckduckgo-mcp-server"]
},
}
}, },
'code_interpreter', # Built-in tools "fetch": {
] "command": "uvx",
"args": ["mcp-server-fetch"]
},
"ddg-search": {
"command": "npx",
"args": ["-y", "duckduckgo-mcp-server"]
},
}
},
'code_interpreter', # Built-in tools
]
# Define Agent # Define Agent
bot = Assistant(llm=llm_cfg, function_list=tools) bot = Assistant(llm=llm_cfg, function_list=tools)
console = Console() console = Console()
# Streaming generation # Streaming generation
messages = [{'role': 'user', 'content': prompt}] messages = [{'role': 'user',
'content':
""""
- ***Research** Parsing CLI commands and options in python code.
-- **Analyze** Clean separation of concerns between parsing commands and options and execution of the commands.
-- **Answer** What is the best way to parse CLI commands and options in python code?"""}]
final_responses = None final_responses = None
try: # Consider adding error handling around bot.run
with console.status("[bold blue]Thinking...", spinner="dots") as status: try:
for responses in bot.run(messages=messages, enable_thinking=True, max_tokens=30000): with console.status("[bold blue]Thinking...", spinner="dots") as status:
final_responses = responses.pop() for responses in bot.run(messages=messages, enable_thinking=True, max_tokens=30000):
except Exception as e: final_responses = responses.pop()
console.print(f"[bold red]An error occurred during agent execution:[/] {e}") except Exception as e:
console.print(f"[bold red]An error occurred during agent execution:[/] {e}")
# Pretty-print the final response object # Pretty-print the final response object
if final_responses: if final_responses:
console.print("\n[bold green]--- Full Response Object ---[/]") console.print("\n[bold green]--- Full Response Object ---[/]")
console.print(json.dumps(final_responses, indent=2)) console.print(json.dumps(final_responses, indent=2))
console.print("\n[bold green]--- Extracted Content ---[/]") console.print("\n[bold green]--- Extracted Content ---[/]")
console.print(final_responses.get('content', 'No content found in response.')) console.print(final_responses.get('content', 'No content found in response.'))
else: else:
console.print("[bold red]No final response received from the agent.[/]") console.print("[bold red]No final response received from the agent.[/]")
def main():
parser = setup_argparse()
args = parser.parse_args()
if args.command == 'prompt':
prompt_text = read_prompt(args.text)
run_agent(args.model, args.server, args.api_key, prompt_text)
else:
parser.print_help()
if __name__ == '__main__':
main()