refactor mcp folder

commit fc5e25343c (parent 395d5a3add)
README

@@ -1,6 +1,6 @@
-# OpenManus-server 🤖
+# OpenManus-mcp 🤖

-This project provides a server based on [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) that exposes **OpenManus** tool functionalities as standardized APIs.
+Implements a server based on [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) that exposes **OpenManus** tool functionalities as standardized APIs, along with a simple client to interact with the server.

 ## ✨ Features
@@ -42,7 +42,8 @@ uv pip install -r requirements.txt
 3. Install MCP dependencies:

 ```bash
-uv pip install -r openmanus_server/mcp_requirements.txt
+uv pip install -r mcp/mcp_requirements.txt
+playwright install
 ```
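The newly added Playwright step downloads the browser binaries that the browser tool relies on. The commands below are standard Playwright CLI, not OpenManus-specific; limiting the download to Chromium is an optional variant and an assumption about which browser the tool actually drives:

```bash
# Download all Playwright browsers (as in the step above)
playwright install

# Optionally limit the download to Chromium only
playwright install chromium
```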
 ## Demo display
@@ -50,7 +51,7 @@ https://github.com/user-attachments/assets/177b1f50-422f-4c2e-ab7d-1f3d7ff27679

 ## 📖 Usage

-### 1. Testing your server with Claude for Desktop 🖥️
+### 1. Testing the server with Claude for Desktop 🖥️

 > ⚠️ **Note**: Claude for Desktop is not yet available on Linux. Linux users can build an MCP client that connects to the server we just built.
@@ -75,9 +76,9 @@ In this case, we'll add our single Openmanus server like so:
             "command": "/ABSOLUTE/PATH/TO/PARENT/FOLDER/uv",
             "args": [
                 "--directory",
-                "/ABSOLUTE/PATH/TO/OpenManus/openmanus_server",
+                "/ABSOLUTE/PATH/TO/OpenManus/mcp/server",
                 "run",
-                "openmanus_server.py"
+                "server.py"
             ]
         }
     }
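For anyone updating their configuration by hand, here is a minimal sketch of how the complete entry might look after this change. The `command`, `--directory` path, and `server.py` come from the hunk above; the surrounding `mcpServers` key and the `openmanus` server name are assumptions based on the standard Claude for Desktop config layout and the server name mentioned in Step 4 below:

```json
{
  "mcpServers": {
    "openmanus": {
      "command": "/ABSOLUTE/PATH/TO/PARENT/FOLDER/uv",
      "args": [
        "--directory",
        "/ABSOLUTE/PATH/TO/OpenManus/mcp/server",
        "run",
        "server.py"
      ]
    }
  }
}
```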
@@ -91,13 +92,13 @@ In this case, we'll add our single Openmanus server like so:
 #### Step 4: Understanding the Configuration 📝
 This tells Claude for Desktop:
 1. There's an MCP server named "openmanus" 🔌
-2. To launch it by running `uv --directory /ABSOLUTE/PATH/TO/OpenManus/openmanus_server run openmanus_server.py` 🚀
+2. To launch it by running `uv --directory /ABSOLUTE/PATH/TO/OpenManus/mcp/server run server.py` 🚀

 #### Step 5: Activation 🔄
 Save the file, and restart Claude for Desktop.

 #### Step 6: Verification ✨
-Let's make sure Claude for Desktop is picking up the six tools we've exposed in our `openmanus` server. You can do this by looking for the hammer icon
+Let's make sure Claude for Desktop is picking up the five tools we've exposed in our `openmanus` server. You can do this by looking for the hammer icon

 After clicking on the hammer icon, you should see tools listed:
@@ -111,12 +112,12 @@ After clicking on the hammer icon, you should see tools listed:

 ### 💻 2. Testing with simple Client Example

-Check out `openmanus_client.py` to test the openmanus server using the MCP client.
+Check out `client.py` to test the openmanus server using the MCP client.

 #### Demo display
 https://github.com/user-attachments/assets/aeacd93d-9bec-46d1-831b-20e898c7507b

 ```
-python openmanus_server/openmanus_client.py
+python mcp/client/client.py
 ```
openmanus_server/openmanus_client.py → mcp/client/client.py

@@ -1,66 +1,55 @@
-import ast
 import asyncio
+import json
 import os
 import sys
 from contextlib import AsyncExitStack
-from pathlib import Path
 from typing import Optional

-import tomli
 from colorama import Fore, init
-from dotenv import load_dotenv
 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client
 from openai import AsyncOpenAI

+# Add current directory to Python path
+current_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.dirname(current_dir)
+sys.path.insert(0, parent_dir)
+sys.path.insert(0, current_dir)
+
+# Add root directory to Python path
+root_dir = os.path.dirname(parent_dir)
+sys.path.insert(0, root_dir)
+
+from app.config import config

 # Initialize colorama
 def init_colorama():
     init(autoreset=True)


-# Load config
-def load_config():
-    config_path = Path(__file__).parent.parent / "config" / "config.toml"
-    try:
-        with open(config_path, "rb") as f:
-            return tomli.load(f)
-    except FileNotFoundError:
-        print(f"Error: config.toml not found at {config_path}")
-        sys.exit(1)
-    except tomli.TOMLDecodeError as e:
-        print(f"Error: Invalid TOML in config.toml: {e}")
-        sys.exit(1)
-
-
-# Load environment variables (as fallback)
-load_dotenv()


 class OpenManusClient:
     def __init__(self):
         # Load configuration
-        self.config = load_config()
+        # self.config = load_config()

         # Initialize session and client objects
         self.session: Optional[ClientSession] = None
         self.exit_stack = AsyncExitStack()

         # Initialize AsyncOpenAI client with config
-        api_key = self.config["llm"]["api_key"] or os.getenv("OPENAI_API_KEY")
+        self.llm_config = config.llm["default"]
+        api_key = self.llm_config.api_key or os.getenv("OPENAI_API_KEY")
         if not api_key:
             raise ValueError(
                 "OpenAI API key not found in config.toml or environment variables"
             )

         self.openai_client = AsyncOpenAI(
-            api_key=api_key, base_url=self.config["llm"]["base_url"]
+            api_key=api_key, base_url=self.llm_config.base_url
         )

     async def connect_to_server(self, server_script_path: str = None):
         """Connect to the openmanus MCP server"""
         # Use provided path or default from config
-        script_path = server_script_path or self.config["server"]["default_script"]
+        script_path = server_script_path

         server_params = StdioServerParameters(
             command="python", args=[script_path], env=None
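With this hunk the client stops parsing `config.toml` itself and instead reuses OpenManus's shared `app.config` module. Below is a minimal sketch of the settings it now consumes, using only the attributes visible in the diff (`api_key`, `base_url`, `model`); running it assumes the repository root is on `sys.path` and that `config.toml` defines a default LLM section:

```python
# Sketch: inspect the LLM settings the refactored client reads from app.config.
from app.config import config

llm = config.llm["default"]   # same lookup as in __init__ above
print(llm.model)              # model name passed to chat.completions.create
print(llm.base_url)           # base URL handed to AsyncOpenAI
print(bool(llm.api_key))      # must be set here or via OPENAI_API_KEY
```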
@@ -134,7 +123,7 @@ class OpenManusClient:
         ]
         # Initial LLM API call
         response = await self.openai_client.chat.completions.create(
-            model=self.config["llm"]["model"],
+            model=self.llm_config.model,
             messages=messages,
             tools=available_tools,
             tool_choice="auto",
@@ -171,7 +160,7 @@ class OpenManusClient:
                 # Convert tool_args from string to dictionary if necessary
                 if isinstance(tool_args, str):
                     try:
-                        tool_args = ast.literal_eval(tool_args)
+                        tool_args = json.loads(tool_args)
                     except (ValueError, SyntaxError) as e:
                         print(f"Error converting tool_args to dict: {e}")
                         tool_args = {}
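Replacing `ast.literal_eval` with `json.loads` means tool arguments must now be valid JSON (double-quoted strings, `true`/`false`/`null`) rather than arbitrary Python literals. The existing `except (ValueError, SyntaxError)` clause still catches parse failures, because `json.JSONDecodeError` is a subclass of `ValueError`. A small illustration (the argument strings are invented for the example):

```python
import json

# JSON-formatted arguments, as returned by the OpenAI tool-calling API: parses fine.
print(json.loads('{"code": "print(1)", "timeout": 5}'))

# Python-literal formatting with single quotes is no longer accepted.
try:
    json.loads("{'code': 'print(1)'}")
except ValueError as e:  # json.JSONDecodeError subclasses ValueError
    print(f"Error converting tool_args to dict: {e}")
```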
@@ -197,7 +186,7 @@ class OpenManusClient:

                 # Get next response from LLM
                 response = await self.openai_client.chat.completions.create(
-                    model=self.config["llm"]["model"],
+                    model=self.llm_config.model,
                     messages=messages,
                     tools=available_tools,
                     tool_choice="auto",
@@ -210,7 +199,7 @@ async def main():
     if len(sys.argv) > 1:
         server_script = sys.argv[1]
     else:
-        server_script = "./openmanus_server/openmanus_server.py"
+        server_script = "mcp/server/server.py"

     client = OpenManusClient()
     try:
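Given the `main()` logic above, the refactored client can be started with the new default server path or pointed at a different server script; a usage sketch, assuming the commands are run from the repository root:

```bash
# Launch against the default server script (mcp/server/server.py)
python mcp/client/client.py

# Or pass an explicit server script as the first argument
python mcp/client/client.py path/to/another_server.py
```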
openmanus_server/openmanus_server.py → mcp/server/server.py

@@ -14,6 +14,10 @@ parent_dir = os.path.dirname(current_dir)
 sys.path.insert(0, parent_dir)
 sys.path.insert(0, current_dir)

+# Add root directory to Python path
+root_dir = os.path.dirname(parent_dir)
+sys.path.insert(0, root_dir)
+
 # Configure logging
 logging.basicConfig(
     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
@@ -23,7 +27,6 @@ logger = logging.getLogger("mcp-server")
 # Import OpenManus tools
 from app.tool.browser_use_tool import BrowserUseTool
 from app.tool.file_saver import FileSaver
-from app.tool.google_search import GoogleSearch
 from app.tool.python_execute import PythonExecute
 from app.tool.terminate import Terminate
@@ -33,7 +36,6 @@ openmanus = FastMCP("openmanus")

 # Initialize tool instances
 browser_tool = BrowserUseTool()
-google_search_tool = GoogleSearch()
 python_execute_tool = PythonExecute()
 file_saver_tool = FileSaver()
 terminate_tool = Terminate()
@@ -94,20 +96,6 @@ async def get_browser_state() -> str:
     return json.dumps(result.model_dump())


-# Google search tool
-@openmanus.tool()
-async def google_search(query: str, num_results: int = 10) -> str:
-    """Execute Google search and return list of relevant links.
-
-    Args:
-        query: Search query
-        num_results: Number of results to return (default is 10)
-    """
-    logger.info(f"Executing Google search: {query}")
-    results = await google_search_tool.execute(query=query, num_results=num_results)
-    return json.dumps(results)
-
-
 # Python execution tool
 @openmanus.tool()
 async def python_execute(code: str, timeout: int = 5) -> str: