Compare commits
24 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
167b1acd5c | ||
|
8f3a60f52b | ||
|
0952caf526 | ||
|
ea98fe569e | ||
|
f6b2250e95 | ||
|
0072174023 | ||
|
fc5e25343c | ||
|
395d5a3add | ||
|
f380372a07 | ||
|
bc3149e983 | ||
|
f22b225156 | ||
|
ecc84306e8 | ||
|
7baab6ad95 | ||
|
792dc664a7 | ||
|
4d02defd3b | ||
|
a8fc3e9709 | ||
|
de5ca60cf7 | ||
|
e25cfa2cb3 | ||
|
3f6e515970 | ||
|
5ae32c91e5 | ||
|
6e45490412 | ||
|
463bc0fe75 | ||
|
099074eec1 | ||
|
e08a6b313a |
254
app.py
Normal file
254
app.py
Normal file
@ -0,0 +1,254 @@
|
|||||||
|
import asyncio
|
||||||
|
import uuid
|
||||||
|
from datetime import datetime
|
||||||
|
from json import dumps
|
||||||
|
|
||||||
|
from fastapi import Body, FastAPI, HTTPException, Request
|
||||||
|
from fastapi.middleware.cors import CORSMiddleware
|
||||||
|
from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse
|
||||||
|
from fastapi.staticfiles import StaticFiles
|
||||||
|
from fastapi.templating import Jinja2Templates
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
|
||||||
|
# FastAPI application serving the OpenManus web UI and its task API.
app = FastAPI()

# Static assets and Jinja2 templates for the browser front end.
app.mount("/static", StaticFiles(directory="static"), name="static")

templates = Jinja2Templates(directory="templates")

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers per the CORS spec and is unsafe in production —
# restrict allow_origins before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
||||||
|
|
||||||
|
|
||||||
|
class Task(BaseModel):
    """A user-submitted job tracked by the server.

    Carries a unique id, the originating prompt, its creation time, a
    free-form status string, and the list of recorded progress steps.
    """

    id: str
    prompt: str
    created_at: datetime
    status: str
    steps: list = []

    def model_dump(self, *args, **kwargs):
        """Serialize like the base model, with created_at as ISO-8601 text."""
        serialized = super().model_dump(*args, **kwargs)
        return {**serialized, "created_at": self.created_at.isoformat()}
|
||||||
|
|
||||||
|
|
||||||
|
class TaskManager:
    """In-memory registry of tasks plus one asyncio.Queue per task.

    Each queue carries event dicts that the SSE endpoint streams to the
    browser; producers are the agent-side hooks in run_task.
    """

    def __init__(self):
        # task_id -> Task
        self.tasks = {}
        # task_id -> asyncio.Queue of event dicts for the SSE stream
        self.queues = {}

    def create_task(self, prompt: str) -> Task:
        """Create a new pending Task with a fresh UUID and an empty event queue."""
        task_id = str(uuid.uuid4())
        task = Task(
            id=task_id, prompt=prompt, created_at=datetime.now(), status="pending"
        )
        self.tasks[task_id] = task
        self.queues[task_id] = asyncio.Queue()
        return task

    async def update_task_step(
        self, task_id: str, step: int, result: str, step_type: str = "step"
    ):
        """Append a step to the task and publish it to the task's queue.

        Unknown task ids are silently ignored.
        """
        if task_id in self.tasks:
            task = self.tasks[task_id]
            task.steps.append({"step": step, "result": result, "type": step_type})
            await self.queues[task_id].put(
                {"type": step_type, "step": step, "result": result}
            )
            # Follow every step event with a full status snapshot so the
            # client can rebuild state from the stream alone.
            await self.queues[task_id].put(
                {"type": "status", "status": task.status, "steps": task.steps}
            )

    async def complete_task(self, task_id: str):
        """Mark the task completed; emit a final status then a complete event."""
        if task_id in self.tasks:
            task = self.tasks[task_id]
            task.status = "completed"
            await self.queues[task_id].put(
                {"type": "status", "status": task.status, "steps": task.steps}
            )
            await self.queues[task_id].put({"type": "complete"})

    async def fail_task(self, task_id: str, error: str):
        """Record the failure reason in the status and emit an error event."""
        if task_id in self.tasks:
            self.tasks[task_id].status = f"failed: {error}"
            await self.queues[task_id].put({"type": "error", "message": error})
|
||||||
|
|
||||||
|
|
||||||
|
# Process-wide singleton shared by all route handlers below.
task_manager = TaskManager()
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
    """Render the single-page UI."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/tasks")
async def create_task(prompt: str = Body(..., embed=True)):
    """Register a new task and start its agent run in the background."""
    new_task = task_manager.create_task(prompt)
    asyncio.create_task(run_task(new_task.id, prompt))
    return {"task_id": new_task.id}
|
||||||
|
|
||||||
|
|
||||||
|
from app.agent.manus import Manus
|
||||||
|
|
||||||
|
|
||||||
|
async def run_task(task_id: str, prompt: str):
    """Execute the agent for *task_id*, streaming progress via task_manager.

    Progress is captured by attaching a log sink (SSELogHandler) to the
    loguru logger; each log record is classified by the emoji marker the
    agent emits and forwarded to the task's event queue.  Any exception
    marks the task failed.
    """
    from app.logger import logger

    log_handler_id = None
    try:
        task_manager.tasks[task_id].status = "running"

        agent = Manus(
            name="Manus",
            description="A versatile agent that can solve various tasks using multiple tools",
            max_steps=30,
        )

        class SSELogHandler:
            """Loguru sink that routes log records into the task's SSE queue."""

            def __init__(self, task_id):
                self.task_id = task_id

            async def __call__(self, message):
                import re

                # Strip the loguru prefix: keep only the content after " - ".
                cleaned_message = re.sub(r"^.*? - ", "", message)

                # Classify the record by the emoji markers the agent emits.
                event_type = "log"
                if "✨ Manus's thoughts:" in cleaned_message:
                    event_type = "think"
                elif "🛠️ Manus selected" in cleaned_message:
                    event_type = "tool"
                elif "🎯 Tool" in cleaned_message:
                    event_type = "act"
                elif "📝 Oops!" in cleaned_message:
                    event_type = "error"
                elif "🏁 Special tool" in cleaned_message:
                    event_type = "complete"

                await task_manager.update_task_step(
                    self.task_id, 0, cleaned_message, event_type
                )

        sse_handler = SSELogHandler(task_id)
        # Keep the sink id so it can be detached afterwards; previously the
        # handler was never removed, leaking one sink per task run.
        log_handler_id = logger.add(sse_handler)

        result = await agent.run(prompt)
        await task_manager.update_task_step(task_id, 1, result, "result")
        await task_manager.complete_task(task_id)
    except Exception as e:
        await task_manager.fail_task(task_id, str(e))
    finally:
        # Detach the per-task sink so handlers do not accumulate across runs.
        if log_handler_id is not None:
            logger.remove(log_handler_id)
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/tasks/{task_id}/events")
async def task_events(task_id: str):
    """Stream a task's progress to the browser as Server-Sent Events."""

    async def event_generator():
        # Unknown task: emit a single error event and end the stream.
        if task_id not in task_manager.queues:
            yield f"event: error\ndata: {dumps({'message': 'Task not found'})}\n\n"
            return

        queue = task_manager.queues[task_id]

        # Send an initial status snapshot so the client can render
        # already-recorded steps before live events arrive.
        task = task_manager.tasks.get(task_id)
        if task:
            status_data = {"type": "status", "status": task.status, "steps": task.steps}
            yield f"event: status\ndata: {dumps(status_data)}\n\n"

        while True:
            try:
                event = await queue.get()
                formatted_event = dumps(event)

                # NOTE(review): this "heartbeat" is only emitted when an
                # event arrives, so it does not keep idle connections alive.
                yield ": heartbeat\n\n"

                if event["type"] == "complete":
                    yield f"event: complete\ndata: {formatted_event}\n\n"
                    break
                elif event["type"] == "error":
                    yield f"event: error\ndata: {formatted_event}\n\n"
                    break
                elif event["type"] == "step":
                    # For step events, refresh the status snapshot first.
                    task = task_manager.tasks.get(task_id)
                    if task:
                        status_data = {
                            "type": "status",
                            "status": task.status,
                            "steps": task.steps,
                        }
                        yield f"event: status\ndata: {dumps(status_data)}\n\n"
                    yield f"event: {event['type']}\ndata: {formatted_event}\n\n"
                elif event["type"] in ["think", "tool", "act", "run"]:
                    yield f"event: {event['type']}\ndata: {formatted_event}\n\n"
                else:
                    # Any other event type (status, log, ...) is forwarded as-is.
                    yield f"event: {event['type']}\ndata: {formatted_event}\n\n"

            except asyncio.CancelledError:
                # Client closed the connection; stop streaming.
                print(f"Client disconnected for task {task_id}")
                break
            except Exception as e:
                print(f"Error in event stream: {str(e)}")
                yield f"event: error\ndata: {dumps({'message': str(e)})}\n\n"
                break

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            # Disable proxy (nginx) buffering so events flush immediately.
            "X-Accel-Buffering": "no",
        },
    )
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/tasks")
async def get_tasks():
    """Return every known task as JSON, newest first."""
    ordered = sorted(
        task_manager.tasks.values(),
        key=lambda item: item.created_at,
        reverse=True,
    )
    payload = [item.model_dump() for item in ordered]
    return JSONResponse(
        content=payload,
        headers={"Content-Type": "application/json"},
    )
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/tasks/{task_id}")
async def get_task(task_id: str):
    """Fetch a single task by id; respond 404 when it does not exist."""
    task = task_manager.tasks.get(task_id)
    if task is None:
        raise HTTPException(status_code=404, detail="Task not found")
    return task
|
||||||
|
|
||||||
|
|
||||||
|
@app.exception_handler(Exception)
async def generic_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: report any unhandled exception as a 500 JSON body."""
    body = {"message": f"Server error: {str(exc)}"}
    return JSONResponse(status_code=500, content=body)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    import uvicorn

    # Development entry point; serves the app on http://localhost:5172.
    uvicorn.run(app, host="localhost", port=5172)
|
131
mcp/README.md
Normal file
131
mcp/README.md
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
# OpenManus-mcp 🤖
|
||||||
|
|
||||||
|
Implement a server based on [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) that exposes **OpenManus** tool functionalities as standardized APIs and create a simple client to interact with the server.
|
||||||
|
|
||||||
|
## ✨ Features
|
||||||
|
|
||||||
|
This MCP server provides access to the following OpenManus tools:
|
||||||
|
|
||||||
|
1. **Browser Automation** 🌐 - Navigate webpages, click elements, input text, and more
|
||||||
|
2. **Google Search** 🔍 - Execute searches and retrieve result links
|
||||||
|
3. **Python Code Execution** 🐍 - Run Python code in a secure environment
|
||||||
|
4. **File Saving** 💾 - Save content to local files
|
||||||
|
5. **Termination Control** 🛑 - Control program execution flow
|
||||||
|
|
||||||
|
## 🚀 Installation
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
- Python 3.10+
|
||||||
|
- OpenManus project dependencies
|
||||||
|
|
||||||
|
### Installation Steps
|
||||||
|
|
||||||
|
1. First, install the OpenManus project:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/mannaandpoem/OpenManus.git
|
||||||
|
cd OpenManus
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Install dependencies:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Using uv (recommended)
|
||||||
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
|
uv venv
|
||||||
|
source .venv/bin/activate # Unix/macOS
|
||||||
|
# or .venv\Scripts\activate # Windows
|
||||||
|
uv pip install -r requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Install MCP dependencies:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv pip install -r mcp/mcp_requirements.txt
|
||||||
|
playwright install
|
||||||
|
```
|
||||||
|
|
||||||
|
## Demo display
|
||||||
|
https://github.com/user-attachments/assets/177b1f50-422f-4c2e-ab7d-1f3d7ff27679
|
||||||
|
|
||||||
|
## 📖 Usage
|
||||||
|
|
||||||
|
### 1. Testing the server with Claude for Desktop 🖥️
|
||||||
|
|
||||||
|
> ⚠️ **Note**: Claude for Desktop is not yet available on Linux. Linux users can build an MCP client that connects to the server we just built.
|
||||||
|
|
||||||
|
#### Step 1: Installation Check ✅
|
||||||
|
First, make sure you have Claude for Desktop installed. [You can install the latest version here](https://claude.ai/download). If you already have Claude for Desktop, **make sure it's updated to the latest version**.
|
||||||
|
|
||||||
|
#### Step 2: Configuration Setup ⚙️
|
||||||
|
We'll need to configure Claude for Desktop for this server you want to use. To do this, open your Claude for Desktop App configuration at `~/Library/Application Support/Claude/claude_desktop_config.json` in a text editor. Make sure to create the file if it doesn't exist.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
vim ~/Library/Application\ Support/Claude/claude_desktop_config.json
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Step 3: Server Configuration 🔧
|
||||||
|
You'll then add your servers in the `mcpServers` key. The MCP UI elements will only show up in Claude for Desktop if at least one server is properly configured.
|
||||||
|
|
||||||
|
In this case, we'll add our single OpenManus server like so:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"openmanus": {
|
||||||
|
"command": "/ABSOLUTE/PATH/TO/PARENT/FOLDER/uv",
|
||||||
|
"args": [
|
||||||
|
"--directory",
|
||||||
|
"/ABSOLUTE/PATH/TO/OpenManus/mcp/server",
|
||||||
|
"run",
|
||||||
|
"server.py"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
> 💡 **Tip**: You may need to put the full path to the uv executable in the command field. You can get this by running:
|
||||||
|
> - MacOS/Linux: `which uv`
|
||||||
|
> - Windows: `where uv`
|
||||||
|
|
||||||
|
#### Step 4: Understanding the Configuration 📝
|
||||||
|
This tells Claude for Desktop:
|
||||||
|
1. There's an MCP server named "openmanus" 🔌
|
||||||
|
2. To launch it by running `uv --directory /ABSOLUTE/PATH/TO/OpenManus/mcp/server run server.py` 🚀
|
||||||
|
|
||||||
|
#### Step 5: Activation 🔄
|
||||||
|
Save the file, and restart Claude for Desktop.
|
||||||
|
|
||||||
|
#### Step 6: Verification ✨
|
||||||
|
Let's make sure Claude for Desktop is picking up the five tools we've exposed in our `openmanus` server. You can do this by looking for the hammer icon 
|
||||||
|

|
||||||
|
|
||||||
|
After clicking on the hammer icon, you should see tools listed:
|
||||||
|

|
||||||
|
|
||||||
|
#### Ready to Test! 🎉
|
||||||
|
**Now, you can test the openmanus server in Claude for Desktop**:
|
||||||
|
* 🔍 Try to find the recent news about Manus AI agent, and write a post for me!
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### 💻 2. Testing with simple Client Example
|
||||||
|
|
||||||
|
Check out `client.py` to test the openmanus server using the MCP client.
|
||||||
|
|
||||||
|
#### Demo display
|
||||||
|
https://github.com/user-attachments/assets/aeacd93d-9bec-46d1-831b-20e898c7507b
|
||||||
|
```
|
||||||
|
python mcp/client/client.py
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## 🔒 Security Considerations
|
||||||
|
|
||||||
|
- When using in production, ensure proper authentication and authorization mechanisms are in place
|
||||||
|
- The Python execution tool has timeout limits to prevent long-running code
|
||||||
|
|
||||||
|
## 📄 License
|
||||||
|
|
||||||
|
Same license as the OpenManus project
|
BIN
mcp/assets/1.jpg
Normal file
BIN
mcp/assets/1.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 97 KiB |
BIN
mcp/assets/2.png
Normal file
BIN
mcp/assets/2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 274 KiB |
3
mcp/assets/claude-desktop-mcp-hammer-icon.svg
Normal file
3
mcp/assets/claude-desktop-mcp-hammer-icon.svg
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<path d="M31.4175 14L22.985 5.51002C20.7329 3.26243 17.6811 2.00012 14.4993 2.00012C11.3176 2.00012 8.26581 3.26243 6.01372 5.51002L6.00247 5.52127L4.28122 7.30002C4.10292 7.49163 4.00685 7.74552 4.01364 8.00717C4.02043 8.26883 4.12954 8.51739 4.31754 8.6995C4.50554 8.88161 4.75745 8.98276 5.01919 8.98122C5.28092 8.97968 5.53163 8.87558 5.71747 8.69127L7.43372 6.91877C8.12421 6.22842 8.91217 5.64303 9.77247 5.18127L15.585 11L3.58497 23C3.39921 23.1857 3.25185 23.4062 3.15131 23.6489C3.05077 23.8916 2.99902 24.1517 2.99902 24.4144C2.99902 24.6771 3.05077 24.9372 3.15131 25.1799C3.25185 25.4225 3.39921 25.643 3.58497 25.8288L6.17122 28.415C6.35694 28.6008 6.57744 28.7481 6.82012 28.8487C7.06281 28.9492 7.32291 29.001 7.5856 29.001C7.84828 29.001 8.10839 28.9492 8.35107 28.8487C8.59375 28.7481 8.81425 28.6008 8.99997 28.415L21 16.415L22.7925 18.2075L25 20.4125C25.1857 20.5983 25.4062 20.7456 25.6489 20.8462C25.8916 20.9467 26.1517 20.9985 26.4143 20.9985C26.677 20.9985 26.9371 20.9467 27.1798 20.8462C27.4225 20.7456 27.643 20.5983 27.8287 20.4125L31.415 16.8263C31.7897 16.4516 32.0005 15.9436 32.0009 15.4137C32.0014 14.8838 31.7915 14.3753 31.4175 14ZM7.58497 27L4.99997 24.4138L13.5 15.9138L16.085 18.5L7.58497 27ZM20.2925 14.29L17.5 17.0838L14.9137 14.5L17.7075 11.7063C17.8004 11.6134 17.8742 11.5031 17.9245 11.3817C17.9749 11.2603 18.0008 11.1302 18.0008 10.9988C18.0008 10.8673 17.9749 10.7372 17.9245 10.6158C17.8742 10.4944 17.8004 10.3841 17.7075 10.2913L11.79 4.37502C13.4996 3.89351 15.3067 3.87606 17.0253 4.32445C18.744 4.77284 20.3122 5.67089 21.5687 6.92627L27.0962 12.49L23.5 16.0825L21.7075 14.29C21.6146 14.197 21.5043 14.1233 21.3829 14.073C21.2615 14.0226 21.1314 13.9967 21 13.9967C20.8686 13.9967 20.7384 14.0226 20.617 14.073C20.4956 14.1233 20.3853 14.197 20.2925 14.29ZM26.4175 18.9975L24.9175 17.4975L28.5 13.9063L30 15.4063L26.4175 18.9975Z" fill="#343330"/>
|
||||||
|
</svg>
|
After Width: | Height: | Size: 2.0 KiB |
BIN
mcp/assets/demo.mp4
Normal file
BIN
mcp/assets/demo.mp4
Normal file
Binary file not shown.
217
mcp/client/client.py
Normal file
217
mcp/client/client.py
Normal file
@ -0,0 +1,217 @@
|
|||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from contextlib import AsyncExitStack
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from colorama import Fore, init
|
||||||
|
from openai import AsyncOpenAI
|
||||||
|
|
||||||
|
from mcp import ClientSession, StdioServerParameters
|
||||||
|
from mcp.client.stdio import stdio_client
|
||||||
|
|
||||||
|
|
||||||
|
# Add current directory to Python path
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
sys.path.insert(0, current_dir)

# Add root directory to Python path so `app.*` imports resolve when this
# script is run directly rather than as an installed package.
root_dir = os.path.dirname(parent_dir)
sys.path.insert(0, root_dir)
|
||||||
|
from app.config import config
|
||||||
|
|
||||||
|
|
||||||
|
# Initialize colorama
def init_colorama():
    """Enable colorama with autoreset so each print reverts to default style."""
    init(autoreset=True)
|
||||||
|
|
||||||
|
|
||||||
|
class OpenManusClient:
    """MCP client that bridges an OpenAI chat model to OpenManus tools.

    Connects to the OpenManus MCP server over stdio, advertises the
    server's tools to the LLM in OpenAI function-calling format, and loops
    tool calls until the model produces a final answer.
    """

    def __init__(self):
        # Load configuration
        # self.config = load_config()

        # Initialize session and client objects
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()

        # Initialize AsyncOpenAI client with config; fall back to the
        # OPENAI_API_KEY environment variable when config has no key.
        self.llm_config = config.llm["default"]
        api_key = self.llm_config.api_key or os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError(
                "OpenAI API key not found in config.toml or environment variables"
            )

        self.openai_client = AsyncOpenAI(
            api_key=api_key, base_url=self.llm_config.base_url
        )

    async def connect_to_server(self, server_script_path: str = None):
        """Connect to the openmanus MCP server"""
        # Use provided path or default from config
        script_path = server_script_path

        # Launch the server as a child process speaking MCP over stdio.
        # NOTE(review): script_path may be None here if no path was given —
        # presumably callers always pass one; verify against main().
        server_params = StdioServerParameters(
            command="python", args=[script_path], env=None
        )

        stdio_transport = await self.exit_stack.enter_async_context(
            stdio_client(server_params)
        )
        self.stdio, self.write = stdio_transport
        self.session = await self.exit_stack.enter_async_context(
            ClientSession(self.stdio, self.write)
        )

        await self.session.initialize()

        # List available tools
        response = await self.session.list_tools()
        tools = response.tools
        print("\nConnected to server with tools:", [tool.name for tool in tools])

    async def chat_loop(self):
        """Run an interactive chat loop for testing tools"""
        print(Fore.CYAN + "\n🚀 OpenManus MCP Client Started!")
        print(Fore.GREEN + "Type your queries or 'quit' to exit.")
        print(
            Fore.YELLOW
            + "Example query: 'What is the recent news about the stock market?'\n"
        )

        while True:
            try:
                query = input(Fore.BLUE + "🔍 Query: ").strip()

                if query.lower() == "quit":
                    print(Fore.RED + "👋 Exiting... Goodbye!")
                    break

                response = await self.process_query(query)
                print(Fore.MAGENTA + "\n💬 Response: " + response)

            except Exception as e:
                # Keep the REPL alive across per-query failures.
                print(Fore.RED + f"\n❌ Error: {str(e)}")

    async def cleanup(self):
        """Clean up resources"""
        await self.exit_stack.aclose()
        await self.openai_client.close()  # Close the OpenAI client

    async def process_query(self, query: str) -> str:
        """Process a query using LLM and available tools"""
        # Add a system message to set the context for the model
        messages = [
            {
                "role": "system",
                "content": "You are a general-purpose AI assistant called OpenManus. You can help users complete a wide range of tasks, providing detailed information and assistance as needed. Please include emojis in your responses to make them more engaging.",
            },
            {"role": "user", "content": query},
        ]

        # Translate the server's MCP tool schemas into OpenAI tool specs.
        response = await self.session.list_tools()
        available_tools = [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.inputSchema,
                },
            }
            for tool in response.tools
        ]
        # Initial LLM API call
        response = await self.openai_client.chat.completions.create(
            model=self.llm_config.model,
            messages=messages,
            tools=available_tools,
            tool_choice="auto",
        )

        # Process response and handle tool calls
        final_text = []

        # Tool-call loop: execute requested tools, feed results back, and
        # re-query until the model answers without tool calls.
        # NOTE(review): there is no iteration cap — a model that keeps
        # requesting tools would loop indefinitely.
        while True:
            message = response.choices[0].message

            # Add assistant's message to conversation
            messages.append(
                {
                    "role": "assistant",
                    "content": message.content if message.content else None,
                    "tool_calls": message.tool_calls
                    if hasattr(message, "tool_calls")
                    else None,
                }
            )

            # If no tool calls, we're done
            if not hasattr(message, "tool_calls") or not message.tool_calls:
                if message.content:
                    final_text.append(message.content)
                break

            # Handle tool calls
            for tool_call in message.tool_calls:
                tool_name = tool_call.function.name
                tool_args = tool_call.function.arguments

                # Convert tool_args from string to dictionary if necessary
                if isinstance(tool_args, str):
                    try:
                        tool_args = json.loads(tool_args)
                    # NOTE(review): json.loads never raises SyntaxError;
                    # ValueError (JSONDecodeError) is the only real case here.
                    except (ValueError, SyntaxError) as e:
                        print(f"Error converting tool_args to dict: {e}")
                        tool_args = {}

                # Ensure tool_args is a dictionary
                if not isinstance(tool_args, dict):
                    tool_args = {}

                # Execute tool call
                print(f"Calling tool {tool_name} with args: {tool_args}")
                result = await self.session.call_tool(tool_name, tool_args)
                final_text.append(f"[Calling tool {tool_name}]")
                # final_text.append(f"Result: {result.content}")

                # Add tool result to messages
                messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": str(result.content),
                    }
                )

            # Get next response from LLM
            response = await self.openai_client.chat.completions.create(
                model=self.llm_config.model,
                messages=messages,
                tools=available_tools,
                tool_choice="auto",
            )

        return "\n".join(final_text)
|
||||||
|
|
||||||
|
|
||||||
|
async def main():
    """Entry point: connect to the MCP server and run the interactive loop."""
    default_script = "mcp/server/server.py"
    server_script = sys.argv[1] if len(sys.argv) > 1 else default_script

    client = OpenManusClient()
    try:
        await client.connect_to_server(server_script)
        await client.chat_loop()
    finally:
        # Always release transport and HTTP resources, even on error.
        await client.cleanup()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Run the async client entry point.
    asyncio.run(main())
|
4
mcp/mcp_requirements.txt
Normal file
4
mcp/mcp_requirements.txt
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Core dependencies
|
||||||
|
mcp
|
||||||
|
httpx>=0.27.0
|
||||||
|
tomli>=2.0.0
|
182
mcp/server/server.py
Normal file
182
mcp/server/server.py
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
import argparse
|
||||||
|
import asyncio
|
||||||
|
import atexit
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from inspect import Parameter, Signature
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
from mcp.server.fastmcp import FastMCP
|
||||||
|
|
||||||
|
|
||||||
|
# Add directories to Python path
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
root_dir = os.path.dirname(parent_dir)
sys.path.insert(0, parent_dir)
sys.path.insert(0, current_dir)
# Root is inserted last so it takes lookup priority; makes `app.*` resolve.
sys.path.insert(0, root_dir)
|
||||||
|
|
||||||
|
# Configure logging
# Timestamped INFO-level logging for the whole server process.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("mcp-server")
|
||||||
|
|
||||||
|
from app.tool.base import BaseTool
|
||||||
|
from app.tool.bash import Bash
|
||||||
|
|
||||||
|
# Import OpenManus tools
|
||||||
|
from app.tool.browser_use_tool import BrowserUseTool
|
||||||
|
from app.tool.str_replace_editor import StrReplaceEditor
|
||||||
|
from app.tool.terminate import Terminate
|
||||||
|
|
||||||
|
|
||||||
|
# Initialize FastMCP server
openmanus = FastMCP("openmanus")


# Initialize tool instances
# One shared instance per tool; register_tool() below wraps each one in an
# MCP-visible async function.
bash_tool = Bash()
browser_tool = BrowserUseTool()
str_replace_editor_tool = StrReplaceEditor()
terminate_tool = Terminate()
|
||||||
|
|
||||||
|
|
||||||
|
def register_tool(tool: BaseTool, method_name: Optional[str] = None) -> None:
    """Register a tool with the OpenManus server.

    Builds an async wrapper around ``tool.execute``, synthesizes a real
    keyword-only signature and docstring from the tool's JSON schema (so
    FastMCP can introspect it), then registers the wrapper with FastMCP.

    Args:
        tool: The tool instance to register
        method_name: Optional custom name for the tool method
    """
    tool_name = method_name or tool.name

    # Get tool information using its own methods
    tool_param = tool.to_param()
    tool_function = tool_param["function"]

    # Define the async function to be registered
    async def tool_method(**kwargs):
        logger.info(f"Executing {tool_name}: {kwargs}")
        result = await tool.execute(**kwargs)

        # Handle different types of results: model objects and dicts are
        # serialized to JSON strings; anything else is passed through.
        if hasattr(result, "model_dump"):
            return json.dumps(result.model_dump())
        elif isinstance(result, dict):
            return json.dumps(result)
        return result

    # Set the function name
    tool_method.__name__ = tool_name

    # Set the function docstring
    description = tool_function.get("description", "")
    param_props = tool_function.get("parameters", {}).get("properties", {})
    required_params = tool_function.get("parameters", {}).get("required", [])

    # Build a proper docstring with parameter descriptions
    docstring = description

    # Create parameter list separately for the signature
    parameters = []

    # Add parameters to both docstring and signature
    if param_props:
        docstring += "\n\nParameters:\n"
        for param_name, param_details in param_props.items():
            required_str = (
                "(required)" if param_name in required_params else "(optional)"
            )
            param_type = param_details.get("type", "any")
            param_desc = param_details.get("description", "")

            # Add to docstring
            docstring += (
                f" {param_name} ({param_type}) {required_str}: {param_desc}\n"
            )

            # Create parameter for signature
            # Required params have no default (Parameter.empty); optional
            # ones default to None.
            default = Parameter.empty if param_name in required_params else None
            annotation = Any

            # Try to get a better type annotation based on the parameter type
            if param_type == "string":
                annotation = str
            elif param_type == "integer":
                annotation = int
            elif param_type == "number":
                annotation = float
            elif param_type == "boolean":
                annotation = bool
            elif param_type == "object":
                annotation = dict
            elif param_type == "array":
                annotation = list

            # Create parameter
            param = Parameter(
                name=param_name,
                kind=Parameter.KEYWORD_ONLY,
                default=default,
                annotation=annotation,
            )
            parameters.append(param)

    # Store the full docstring
    tool_method.__doc__ = docstring

    # Create and set the signature so introspection sees real keyword params
    tool_method.__signature__ = Signature(parameters=parameters)

    # Store the complete parameter schema for tools that need to access it programmatically
    tool_method._parameter_schema = {
        param_name: {
            "description": param_details.get("description", ""),
            "type": param_details.get("type", "any"),
            "required": param_name in required_params,
        }
        for param_name, param_details in param_props.items()
    }

    # Register the tool with FastMCP
    openmanus.tool()(tool_method)
    logger.info(f"Registered tool: {tool_name}")
|
||||||
|
|
||||||
|
|
||||||
|
# Register all tools
# Each call wraps the instance created above and exposes it via FastMCP.
register_tool(bash_tool)
register_tool(browser_tool)
register_tool(str_replace_editor_tool)
register_tool(terminate_tool)
|
||||||
|
|
||||||
|
|
||||||
|
# Clean up resources
async def cleanup():
    """Clean up all tool resources"""
    # Only the browser tool holds external resources here.
    logger.info("Cleaning up resources")
    await browser_tool.cleanup()
|
||||||
|
|
||||||
|
|
||||||
|
# Register cleanup function
# NOTE(review): asyncio.run here assumes no event loop is still running at
# interpreter shutdown — confirm this holds under FastMCP's stdio runner.
atexit.register(lambda: asyncio.run(cleanup()))
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
    """Parse command line arguments.

    Returns:
        argparse.Namespace with a single ``transport`` attribute
        (currently always "stdio").
    """
    parser = argparse.ArgumentParser(description="OpenManus MCP Server")
    parser.add_argument(
        "--transport",
        choices=["stdio"],
        default="stdio",
        # The previous help text advertised "http", but choices rejects it;
        # the help now matches the accepted values.
        help="Communication method: stdio (default: stdio)",
    )
    return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    args = parse_args()
    # args currently only selects the transport; stdio is the sole option.
    logger.info("Starting OpenManus server (stdio mode)")
    openmanus.run(transport="stdio")
|
Loading…
x
Reference in New Issue
Block a user