refactor mcp folder

This commit is contained in:
gantnocap 2025-03-18 00:40:29 +08:00
parent 395d5a3add
commit fc5e25343c
8 changed files with 35 additions and 57 deletions

View File

@ -1,6 +1,6 @@
# OpenManus-server 🤖
# OpenManus-mcp 🤖
This project provides a server based on [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) that exposes **OpenManus** tool functionalities as standardized APIs.
Implement a server based on [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) that exposes **OpenManus** tool functionalities as standardized APIs and create a simple client to interact with the server.
## ✨ Features
@ -42,7 +42,8 @@ uv pip install -r requirements.txt
3. Install MCP dependencies:
```bash
uv pip install -r openmanus_server/mcp_requirements.txt
uv pip install -r mcp/mcp_requirements.txt
playwright install
```
## Demo display
@ -50,7 +51,7 @@ https://github.com/user-attachments/assets/177b1f50-422f-4c2e-ab7d-1f3d7ff27679
## 📖 Usage
### 1. Testing your server with Claude for Desktop 🖥️
### 1. Testing the server with Claude for Desktop 🖥️
> ⚠️ **Note**: Claude for Desktop is not yet available on Linux. Linux users can build an MCP client that connects to the server we just built.
@ -75,9 +76,9 @@ In this case, we'll add our single Openmanus server like so:
"command": "/ABSOLUTE/PATH/TO/PARENT/FOLDER/uv",
"args": [
"--directory",
"/ABSOLUTE/PATH/TO/OpenManus/openmanus_server",
"/ABSOLUTE/PATH/TO/OpenManus/mcp/server",
"run",
"openmanus_server.py"
"server.py"
]
}
}
@ -91,13 +92,13 @@ In this case, we'll add our single Openmanus server like so:
#### Step 4: Understanding the Configuration 📝
This tells Claude for Desktop:
1. There's an MCP server named "openmanus" 🔌
2. To launch it by running `uv --directory /ABSOLUTE/PATH/TO/OpenManus/openmanus_server run openmanus_server.py` 🚀
2. To launch it by running `uv --directory /ABSOLUTE/PATH/TO/OpenManus/mcp/server run server.py` 🚀
#### Step 5: Activation 🔄
Save the file, and restart Claude for Desktop.
#### Step 6: Verification ✨
Let's make sure Claude for Desktop is picking up the six tools we've exposed in our `openmanus` server. You can do this by looking for the hammer icon ![hammer icon](./assets/claude-desktop-mcp-hammer-icon.svg)
Let's make sure Claude for Desktop is picking up the five tools we've exposed in our `openmanus` server. You can do this by looking for the hammer icon ![hammer icon](./assets/claude-desktop-mcp-hammer-icon.svg)
![tools_in_claude](./assets/1.jpg)
After clicking on the hammer icon, you should see tools listed:
@ -111,12 +112,12 @@ After clicking on the hammer icon, you should see tools listed:
### 💻 2. Testing with a simple Client Example
Check out `openmanus_client.py` to test the openmanus server using the MCP client.
Check out `client.py` to test the openmanus server using the MCP client.
#### Demo display
https://github.com/user-attachments/assets/aeacd93d-9bec-46d1-831b-20e898c7507b
```
python openmanus_server/openmanus_client.py
python mcp/client/client.py
```

View File

Before

Width:  |  Height:  |  Size: 97 KiB

After

Width:  |  Height:  |  Size: 97 KiB

View File

Before

Width:  |  Height:  |  Size: 274 KiB

After

Width:  |  Height:  |  Size: 274 KiB

View File

Before

Width:  |  Height:  |  Size: 2.0 KiB

After

Width:  |  Height:  |  Size: 2.0 KiB

View File

@ -1,66 +1,55 @@
import ast
import asyncio
import json
import os
import sys
from contextlib import AsyncExitStack
from pathlib import Path
from typing import Optional
import tomli
from colorama import Fore, init
from dotenv import load_dotenv
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from openai import AsyncOpenAI
# Add current directory to Python path
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
sys.path.insert(0, current_dir)
# Add root directory to Python path
root_dir = os.path.dirname(parent_dir)
sys.path.insert(0, root_dir)
from app.config import config
# Initialize colorama
def init_colorama():
    """Initialize colorama so ANSI color codes reset automatically after each print."""
    init(autoreset=True)
# Load config
def load_config():
    """Parse ``config/config.toml`` (sibling of this package's parent directory).

    Returns:
        dict: The parsed TOML configuration.

    Exits the process with status 1 (after printing a message) when the file
    is missing or contains invalid TOML.
    """
    config_path = Path(__file__).parent.parent / "config" / "config.toml"
    try:
        with config_path.open("rb") as fh:
            parsed = tomli.load(fh)
    except FileNotFoundError:
        print(f"Error: config.toml not found at {config_path}")
        sys.exit(1)
    except tomli.TOMLDecodeError as e:
        print(f"Error: Invalid TOML in config.toml: {e}")
        sys.exit(1)
    return parsed
# Load environment variables (as fallback)
load_dotenv()
class OpenManusClient:
def __init__(self):
# Load configuration
self.config = load_config()
# self.config = load_config()
# Initialize session and client objects
self.session: Optional[ClientSession] = None
self.exit_stack = AsyncExitStack()
# Initialize AsyncOpenAI client with config
api_key = self.config["llm"]["api_key"] or os.getenv("OPENAI_API_KEY")
self.llm_config = config.llm["default"]
api_key = self.llm_config.api_key or os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError(
"OpenAI API key not found in config.toml or environment variables"
)
self.openai_client = AsyncOpenAI(
api_key=api_key, base_url=self.config["llm"]["base_url"]
api_key=api_key, base_url=self.llm_config.base_url
)
async def connect_to_server(self, server_script_path: str = None):
"""Connect to the openmanus MCP server"""
# Use provided path or default from config
script_path = server_script_path or self.config["server"]["default_script"]
script_path = server_script_path
server_params = StdioServerParameters(
command="python", args=[script_path], env=None
@ -134,7 +123,7 @@ class OpenManusClient:
]
# Initial LLM API call
response = await self.openai_client.chat.completions.create(
model=self.config["llm"]["model"],
model=self.llm_config.model,
messages=messages,
tools=available_tools,
tool_choice="auto",
@ -171,7 +160,7 @@ class OpenManusClient:
# Convert tool_args from string to dictionary if necessary
if isinstance(tool_args, str):
try:
tool_args = ast.literal_eval(tool_args)
tool_args = json.loads(tool_args)
except (ValueError, SyntaxError) as e:
print(f"Error converting tool_args to dict: {e}")
tool_args = {}
@ -197,7 +186,7 @@ class OpenManusClient:
# Get next response from LLM
response = await self.openai_client.chat.completions.create(
model=self.config["llm"]["model"],
model=self.llm_config.model,
messages=messages,
tools=available_tools,
tool_choice="auto",
@ -210,7 +199,7 @@ async def main():
if len(sys.argv) > 1:
server_script = sys.argv[1]
else:
server_script = "./openmanus_server/openmanus_server.py"
server_script = "mcp/server/server.py"
client = OpenManusClient()
try:

View File

@ -14,6 +14,10 @@ parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
sys.path.insert(0, current_dir)
# Add root directory to Python path
root_dir = os.path.dirname(parent_dir)
sys.path.insert(0, root_dir)
# Configure logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
@ -23,7 +27,6 @@ logger = logging.getLogger("mcp-server")
# Import OpenManus tools
from app.tool.browser_use_tool import BrowserUseTool
from app.tool.file_saver import FileSaver
from app.tool.google_search import GoogleSearch
from app.tool.python_execute import PythonExecute
from app.tool.terminate import Terminate
@ -33,7 +36,6 @@ openmanus = FastMCP("openmanus")
# Initialize tool instances
browser_tool = BrowserUseTool()
google_search_tool = GoogleSearch()
python_execute_tool = PythonExecute()
file_saver_tool = FileSaver()
terminate_tool = Terminate()
@ -94,20 +96,6 @@ async def get_browser_state() -> str:
return json.dumps(result.model_dump())
# Google search tool
# NOTE: the docstring below is surfaced by FastMCP as the tool description,
# so it is part of the runtime contract and is kept verbatim.
@openmanus.tool()
async def google_search(query: str, num_results: int = 10) -> str:
    """Execute Google search and return list of relevant links.

    Args:
        query: Search query
        num_results: Number of results to return (default is 10)
    """
    logger.info(f"Executing Google search: {query}")
    links = await google_search_tool.execute(query=query, num_results=num_results)
    return json.dumps(links)
# Python execution tool
@openmanus.tool()
async def python_execute(code: str, timeout: int = 5) -> str: