This commit is contained in:
Willem van den Ende 2025-04-30 15:23:29 +01:00
parent 7fa0c78298
commit acfd344f88

View File

@@ -1,4 +1,5 @@
import json # Import the json module
from qwen_agent.agents import Assistant
# Define LLM
@@ -45,18 +46,18 @@ tools = [
bot = Assistant(llm=llm_cfg, function_list=tools)
# Streaming generation
messages = [{'role': 'user', 'content': 'Write a 500 word blog post about the latest qwen 3 model. Use the search tool, and fetch the top 3 articles before you write the post. Write in a casual, but factual style - no hyperbole. Provide references to the webpages in the output.'}]
messages = [{'role': 'user',
'content': 'Write a 500 word blog post about the latest qwen 3 model. Use the search tool, and fetch the top 3 articles before you write the post. Write in a casual, but factual style - no hyperbole. Provide references to the webpages in the output.'}]
final_responses = None
# Consider adding error handling around bot.run
try:
for responses in bot.run(messages=messages, enable_thinking=True, max_tokens=30000):
print(".",end="", flush=True)
print(".", end="", flush=True)
final_responses = responses.pop()
except Exception as e:
print(f"An error occurred during agent execution: {e}")
# Pretty-print the final response object
if final_responses:
print("--- Full Response Object ---")