import json  # Used to pretty-print the final response object

from qwen_agent.agents import Assistant

# Define LLM
llm_cfg = {
    'model': 'hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q5_K_M',
    # 'model': 'qwen3:32b',

    # Use a custom endpoint compatible with OpenAI API:
    'model_server': 'http://localhost:11434/v1',  # api_base
    'api_key': 'EMPTY',

    # Other parameters:
    # 'generate_cfg': {
    #     # Add: When the response content is `<think>this is the thought</think>this is the answer`;
    #     # Do not add: When the response has been separated by reasoning_content and content.
    #     'thought_in_content': True,
    # },
}

# Define Tools
tools = [
    {'mcpServers': {  # You can specify the MCP configuration file
        'time': {
            'command': 'uvx',
            'args': ['mcp-server-time', '--local-timezone=Asia/Shanghai']
        },
        'fetch': {
            'command': 'uvx',
            'args': ['mcp-server-fetch']
        },
        'ddg-search': {
            'command': 'npx',
            'args': ['-y', 'duckduckgo-mcp-server']
        },
    }},
    'code_interpreter',  # Built-in tools
]

# Define Agent
bot = Assistant(llm=llm_cfg, function_list=tools)

# Streaming generation
messages = [{'role': 'user', 'content': """- **Research** What is enshittification, and who came up with it?
- **Analyze** Developments around enshittification in the last five years, and related concepts.
- **Answer** What is enshittification, and what does it mean for society?"""}]

final_responses = None
# Error handling around bot.run, so a failed tool call or connection error
# does not crash the script mid-stream.
try:
    for responses in bot.run(messages=messages, enable_thinking=True, max_tokens=30000):
        print('.', end='', flush=True)
        final_responses = responses[-1]  # Keep the latest message without mutating the streamed list
except Exception as e:
    print(f'An error occurred during agent execution: {e}')

# Pretty-print the final response object
if final_responses:
    print('\n--- Full Response Object ---')
    print(json.dumps(final_responses, indent=2))  # Use indent=2 (or 4) for pretty printing

    print('\n--- Extracted Content ---')
    print(final_responses.get('content', 'No content found in response.'))  # Use .get for safer access
else:
    print('No final response received from the agent.')
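
# A minimal sketch for surfacing the model's reasoning separately, assuming the
# final message is a dict-like object that carries a 'reasoning_content' field
# when thinking is not merged into 'content' (see the 'thought_in_content'
# note in generate_cfg above); the field name is taken from that comment and
# may differ across qwen_agent versions.
if final_responses and final_responses.get('reasoning_content'):
    print('\n--- Reasoning ---')
    print(final_responses['reasoning_content'])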