# agentic-search/agentic_search.py
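#
# Prerequisites (inferred from the config below; adjust to your setup):
# - A local Ollama server exposing an OpenAI-compatible API at
#   http://localhost:11434/v1 with the `qwen3:32b` model pulled.
# - `uvx` (ships with the uv package manager) to launch the time and fetch MCP servers.
# - `npx` (ships with Node.js) to launch the DuckDuckGo search MCP server.
# - qwen-agent installed with MCP and code-interpreter support, e.g.
#   `pip install -U "qwen-agent[mcp,code_interpreter]"` (extras names assumed
#   from the qwen-agent README).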

import json  # used below to pretty-print the final response
from qwen_agent.agents import Assistant
# Define LLM
llm_cfg = {
    # 'model': 'hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q5_K_M',
    'model': 'qwen3:32b',
    # Use a custom endpoint compatible with the OpenAI API:
    'model_server': 'http://localhost:11434/v1',  # api_base
    'api_key': 'EMPTY',
    # Other parameters:
    # 'generate_cfg': {
    #     # Add: when the response content is `<think>this is the thought</think>this is the answer`;
    #     # Do not add: when the response is already separated into reasoning_content and content.
    #     'thought_in_content': True,
    # },
}
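
# A minimal sketch of extra sampling controls (assumption: qwen-agent forwards
# 'generate_cfg' keys such as temperature/top_p to the OpenAI-compatible
# backend; the values here are illustrative, not tuned):
# llm_cfg['generate_cfg'] = {
#     'temperature': 0.6,
#     'top_p': 0.95,
# }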
# Define Tools
tools = [
    {
        'mcpServers': {  # You can specify the MCP configuration file
            'time': {
                'command': 'uvx',
                'args': ['mcp-server-time', '--local-timezone=Europe/London'],
            },
            'fetch': {
                'command': 'uvx',
                'args': ['mcp-server-fetch'],
            },
            'ddg-search': {
                'command': 'npx',
                'args': ['-y', 'duckduckgo-mcp-server'],
            },
        }
    },
    'code_interpreter',  # Built-in tool
]
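
# The MCP servers above give the agent a clock (mcp-server-time), a URL
# fetcher (mcp-server-fetch), and DuckDuckGo web search; 'code_interpreter'
# is qwen-agent's built-in Python execution tool.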
# Define Agent
bot = Assistant(llm=llm_cfg, function_list=tools)
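
# Optional one-shot sanity check before the full research task (assumption:
# Agent.run_nonstream exists in your qwen-agent version and returns the final
# response list directly instead of streaming):
# print(bot.run_nonstream(messages=[{'role': 'user', 'content': 'What time is it in London?'}]))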
# Streaming generation
messages = [{'role': 'user',
             'content': """
- **Research** Kagi search, its privacy stance, and the company's investors and financials.
- **Analyze** recent developments around Kagi.
- **Answer** Is it a company that I can trust with my money and data?"""}]
final_responses = None
# Error handling around bot.run: the model endpoint or an MCP server can fail at runtime
try:
    # bot.run streams: each iteration yields the full list of response messages so far
    for responses in bot.run(messages=messages, enable_thinking=True, max_tokens=30000):
        print(".", end="", flush=True)  # progress indicator per streamed chunk
        final_responses = responses[-1]  # take the latest message without mutating the list
except Exception as e:
    print(f"An error occurred during agent execution: {e}")
# Pretty-print the final response object
if final_responses:
    print("\n--- Full Response Object ---")  # leading newline ends the progress-dot line
    print(json.dumps(final_responses, indent=2))  # indent=2 (or 4) for pretty printing
    print("\n--- Extracted Content ---")
    print(final_responses.get('content', 'No content found in response.'))  # .get for safer access
else:
    print("\nNo final response received from the agent.")