Merge pull request #882 from fred913/patch/mcp-server

Patch/mcp server
Sheng Fan authored on 2025-03-21 11:28:03 +08:00; committed by GitHub
commit e218c0655f
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
4 changed files with 198 additions and 151 deletions

View File

@@ -1,15 +1,18 @@
-from typing import Dict, List, Literal, Optional, Union
-import boto3
 import json
+import sys
 import time
 import uuid
 from datetime import datetime
-import sys
+from typing import Dict, List, Literal, Optional
+
+import boto3
+
 
 # Global variables to track the current tool use ID across function calls
 # Tmp solution
 CURRENT_TOOLUSE_ID = None
 
+
 # Class to handle OpenAI-style response formatting
 class OpenAIResponse:
     def __init__(self, data):
@@ -18,31 +21,37 @@ class OpenAIResponse:
             if isinstance(value, dict):
                 value = OpenAIResponse(value)
             elif isinstance(value, list):
-                value = [OpenAIResponse(item) if isinstance(item, dict) else item for item in value]
+                value = [
+                    OpenAIResponse(item) if isinstance(item, dict) else item
+                    for item in value
+                ]
             setattr(self, key, value)
 
     def model_dump(self, *args, **kwargs):
         # Convert object to dict and add timestamp
         data = self.__dict__
-        data['created_at'] = datetime.now().isoformat()
+        data["created_at"] = datetime.now().isoformat()
         return data
 
+
 # Main client class for interacting with Amazon Bedrock
 class BedrockClient:
     def __init__(self):
         # Initialize Bedrock client, you need to configure AWS env first
         try:
-            self.client = boto3.client('bedrock-runtime')
+            self.client = boto3.client("bedrock-runtime")
             self.chat = Chat(self.client)
         except Exception as e:
             print(f"Error initializing Bedrock client: {e}")
             sys.exit(1)
 
+
 # Chat interface class
 class Chat:
     def __init__(self, client):
         self.completions = ChatCompletions(client)
 
+
 # Core class handling chat completions functionality
 class ChatCompletions:
     def __init__(self, client):
@@ -52,19 +61,23 @@ class ChatCompletions:
         # Convert OpenAI function calling format to Bedrock tool format
         bedrock_tools = []
         for tool in tools:
-            if tool.get('type') == 'function':
-                function = tool.get('function', {})
+            if tool.get("type") == "function":
+                function = tool.get("function", {})
                 bedrock_tool = {
                     "toolSpec": {
-                        "name": function.get('name', ''),
-                        "description": function.get('description', ''),
+                        "name": function.get("name", ""),
+                        "description": function.get("description", ""),
                         "inputSchema": {
                             "json": {
                                 "type": "object",
-                                "properties": function.get('parameters', {}).get('properties', {}),
-                                "required": function.get('parameters', {}).get('required', [])
+                                "properties": function.get("parameters", {}).get(
+                                    "properties", {}
+                                ),
+                                "required": function.get("parameters", {}).get(
+                                    "required", []
+                                ),
                             }
-                        }
+                        },
                     }
                 }
                 bedrock_tools.append(bedrock_tool)
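
For reference, this is roughly the translation the loop above performs. The weather tool below is a hypothetical payload for illustration, not something from this PR:

    # OpenAI function-calling tool (hypothetical input):
    openai_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }

    # Bedrock toolSpec equivalent produced by the converter:
    bedrock_tool = {
        "toolSpec": {
            "name": "get_weather",
            "description": "Get the current weather for a city",
            "inputSchema": {
                "json": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                }
            },
        }
    }
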
@@ -75,41 +88,43 @@ class ChatCompletions:
         bedrock_messages = []
         system_prompt = []
         for message in messages:
-            if message.get('role') == 'system':
-                system_prompt = [{"text": message.get('content')}]
-            elif message.get('role') == 'user':
+            if message.get("role") == "system":
+                system_prompt = [{"text": message.get("content")}]
+            elif message.get("role") == "user":
                 bedrock_message = {
-                    "role": message.get('role', 'user'),
-                    "content": [{"text": message.get('content')}]
+                    "role": message.get("role", "user"),
+                    "content": [{"text": message.get("content")}],
                 }
                 bedrock_messages.append(bedrock_message)
-            elif message.get('role') == 'assistant':
+            elif message.get("role") == "assistant":
                 bedrock_message = {
                     "role": "assistant",
-                    "content": [{"text": message.get('content')}]
+                    "content": [{"text": message.get("content")}],
                 }
-                openai_tool_calls = message.get('tool_calls', [])
+                openai_tool_calls = message.get("tool_calls", [])
                 if openai_tool_calls:
                     bedrock_tool_use = {
-                        "toolUseId": openai_tool_calls[0]['id'],
-                        "name": openai_tool_calls[0]['function']['name'],
-                        "input": json.loads(openai_tool_calls[0]['function']['arguments'])
+                        "toolUseId": openai_tool_calls[0]["id"],
+                        "name": openai_tool_calls[0]["function"]["name"],
+                        "input": json.loads(
+                            openai_tool_calls[0]["function"]["arguments"]
+                        ),
                     }
-                    bedrock_message['content'].append({"toolUse": bedrock_tool_use})
+                    bedrock_message["content"].append({"toolUse": bedrock_tool_use})
                     global CURRENT_TOOLUSE_ID
-                    CURRENT_TOOLUSE_ID = openai_tool_calls[0]['id']
+                    CURRENT_TOOLUSE_ID = openai_tool_calls[0]["id"]
                 bedrock_messages.append(bedrock_message)
-            elif message.get('role') == 'tool':
+            elif message.get("role") == "tool":
                 bedrock_message = {
                     "role": "user",
                     "content": [
                         {
                             "toolResult": {
                                 "toolUseId": CURRENT_TOOLUSE_ID,
-                                "content": [{"text":message.get('content')}]
+                                "content": [{"text": message.get("content")}],
                             }
                         }
-                    ]
+                    ],
                 }
                 bedrock_messages.append(bedrock_message)
             else:
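
As a sketch of the mapping this method implements: a system message becomes the Converse API's separate system block, user and assistant messages keep their role with content wrapped in text blocks, and a tool message is rewritten as a user message carrying a toolResult tied to the globally tracked CURRENT_TOOLUSE_ID. The message texts here are invented for illustration:

    # Hypothetical OpenAI-style history:
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 2 + 2?"},
    ]

    # What the converter hands to Bedrock:
    system_prompt = [{"text": "You are a helpful assistant."}]
    bedrock_messages = [
        {"role": "user", "content": [{"text": "What is 2 + 2?"}]},
    ]
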
@@ -119,26 +134,27 @@ class ChatCompletions:
     def _convert_bedrock_response_to_openai_format(self, bedrock_response):
         # Convert Bedrock response format to OpenAI format
         content = ""
-        if bedrock_response.get('output', {}).get('message', {}).get('content'):
-            content_array = bedrock_response['output']['message']['content']
-            content = "".join(item.get('text', '') for item in content_array)
-        if content == "": content = "."
+        if bedrock_response.get("output", {}).get("message", {}).get("content"):
+            content_array = bedrock_response["output"]["message"]["content"]
+            content = "".join(item.get("text", "") for item in content_array)
+        if content == "":
+            content = "."
 
         # Handle tool calls in response
         openai_tool_calls = []
-        if bedrock_response.get('output', {}).get('message', {}).get('content'):
-            for content_item in bedrock_response['output']['message']['content']:
-                if content_item.get('toolUse'):
-                    bedrock_tool_use = content_item['toolUse']
+        if bedrock_response.get("output", {}).get("message", {}).get("content"):
+            for content_item in bedrock_response["output"]["message"]["content"]:
+                if content_item.get("toolUse"):
+                    bedrock_tool_use = content_item["toolUse"]
                     global CURRENT_TOOLUSE_ID
-                    CURRENT_TOOLUSE_ID = bedrock_tool_use['toolUseId']
+                    CURRENT_TOOLUSE_ID = bedrock_tool_use["toolUseId"]
                     openai_tool_call = {
-                        'id': CURRENT_TOOLUSE_ID,
-                        'type': 'function',
-                        'function': {
-                            'name': bedrock_tool_use['name'],
-                            'arguments': json.dumps(bedrock_tool_use['input'])
-                        }
+                        "id": CURRENT_TOOLUSE_ID,
+                        "type": "function",
+                        "function": {
+                            "name": bedrock_tool_use["name"],
+                            "arguments": json.dumps(bedrock_tool_use["input"]),
+                        },
                     }
                     openai_tool_calls.append(openai_tool_call)
@@ -150,126 +166,169 @@ class ChatCompletions:
             "system_fingerprint": None,
             "choices": [
                 {
-                    "finish_reason": bedrock_response.get('stopReason', 'end_turn'),
+                    "finish_reason": bedrock_response.get("stopReason", "end_turn"),
                     "index": 0,
                     "message": {
                         "content": content,
-                        "role": bedrock_response.get('output', {}).get('message', {}).get('role', 'assistant'),
-                        "tool_calls": openai_tool_calls if openai_tool_calls != [] else None,
-                        "function_call": None
-                    }
+                        "role": bedrock_response.get("output", {})
+                        .get("message", {})
+                        .get("role", "assistant"),
+                        "tool_calls": openai_tool_calls
+                        if openai_tool_calls != []
+                        else None,
+                        "function_call": None,
+                    },
                 }
             ],
             "usage": {
-                "completion_tokens": bedrock_response.get('usage', {}).get('outputTokens', 0),
-                "prompt_tokens": bedrock_response.get('usage', {}).get('inputTokens', 0),
-                "total_tokens": bedrock_response.get('usage', {}).get('totalTokens', 0)
-            }
+                "completion_tokens": bedrock_response.get("usage", {}).get(
+                    "outputTokens", 0
+                ),
+                "prompt_tokens": bedrock_response.get("usage", {}).get(
+                    "inputTokens", 0
+                ),
+                "total_tokens": bedrock_response.get("usage", {}).get("totalTokens", 0),
+            },
         }
         return OpenAIResponse(openai_format)
 
     async def _invoke_bedrock(
         self,
         model: str,
         messages: List[Dict[str, str]],
         max_tokens: int,
         temperature: float,
         tools: Optional[List[dict]] = None,
         tool_choice: Literal["none", "auto", "required"] = "auto",
-        **kwargs
+        **kwargs,
     ) -> OpenAIResponse:
         # Non-streaming invocation of Bedrock model
-        system_prompt, bedrock_messages = self._convert_openai_messages_to_bedrock_format(messages)
+        (
+            system_prompt,
+            bedrock_messages,
+        ) = self._convert_openai_messages_to_bedrock_format(messages)
         response = self.client.converse(
-            modelId = model,
-            system = system_prompt,
-            messages = bedrock_messages,
-            inferenceConfig = {"temperature": temperature, "maxTokens": max_tokens},
-            toolConfig = {"tools": tools} if tools else None,
+            modelId=model,
+            system=system_prompt,
+            messages=bedrock_messages,
+            inferenceConfig={"temperature": temperature, "maxTokens": max_tokens},
+            toolConfig={"tools": tools} if tools else None,
         )
         openai_response = self._convert_bedrock_response_to_openai_format(response)
         return openai_response
 
     async def _invoke_bedrock_stream(
         self,
         model: str,
         messages: List[Dict[str, str]],
         max_tokens: int,
         temperature: float,
         tools: Optional[List[dict]] = None,
         tool_choice: Literal["none", "auto", "required"] = "auto",
-        **kwargs
+        **kwargs,
    ) -> OpenAIResponse:
         # Streaming invocation of Bedrock model
-        system_prompt, bedrock_messages = self._convert_openai_messages_to_bedrock_format(messages)
+        (
+            system_prompt,
+            bedrock_messages,
+        ) = self._convert_openai_messages_to_bedrock_format(messages)
         response = self.client.converse_stream(
-            modelId = model,
-            system = system_prompt,
-            messages = bedrock_messages,
-            inferenceConfig = {"temperature": temperature, "maxTokens": max_tokens},
-            toolConfig = {"tools": tools} if tools else None,
+            modelId=model,
+            system=system_prompt,
+            messages=bedrock_messages,
+            inferenceConfig={"temperature": temperature, "maxTokens": max_tokens},
+            toolConfig={"tools": tools} if tools else None,
        )
 
         # Initialize response structure
         bedrock_response = {
-            'output': {
-                'message': {
-                    'role': '',
-                    'content': []
-                }
-            },
-            'stopReason': '',
-            'usage': {},
-            'metrics': {}
+            "output": {"message": {"role": "", "content": []}},
+            "stopReason": "",
+            "usage": {},
+            "metrics": {},
         }
         bedrock_response_text = ""
         bedrock_response_tool_input = ""
 
         # Process streaming response
-        stream = response.get('stream')
+        stream = response.get("stream")
         if stream:
             for event in stream:
-                if event.get('messageStart', {}).get('role'):
-                    bedrock_response['output']['message']['role'] = event['messageStart']['role']
-                if event.get('contentBlockDelta', {}).get('delta', {}).get('text'):
-                    bedrock_response_text += event['contentBlockDelta']['delta']['text']
-                    print(event['contentBlockDelta']['delta']['text'], end='', flush=True)
-                if event.get('contentBlockStop', {}).get('contentBlockIndex') == 0:
-                    bedrock_response['output']['message']['content'].append({"text": bedrock_response_text})
-                if event.get('contentBlockStart', {}).get('start', {}).get('toolUse'):
-                    bedrock_tool_use = event['contentBlockStart']['start']['toolUse']
+                if event.get("messageStart", {}).get("role"):
+                    bedrock_response["output"]["message"]["role"] = event[
+                        "messageStart"
+                    ]["role"]
+                if event.get("contentBlockDelta", {}).get("delta", {}).get("text"):
+                    bedrock_response_text += event["contentBlockDelta"]["delta"]["text"]
+                    print(
+                        event["contentBlockDelta"]["delta"]["text"], end="", flush=True
+                    )
+                if event.get("contentBlockStop", {}).get("contentBlockIndex") == 0:
+                    bedrock_response["output"]["message"]["content"].append(
+                        {"text": bedrock_response_text}
+                    )
+                if event.get("contentBlockStart", {}).get("start", {}).get("toolUse"):
+                    bedrock_tool_use = event["contentBlockStart"]["start"]["toolUse"]
                     tool_use = {
-                        "toolUseId": bedrock_tool_use['toolUseId'],
-                        "name": bedrock_tool_use['name'],
+                        "toolUseId": bedrock_tool_use["toolUseId"],
+                        "name": bedrock_tool_use["name"],
                     }
-                    bedrock_response['output']['message']['content'].append({"toolUse": tool_use})
+                    bedrock_response["output"]["message"]["content"].append(
+                        {"toolUse": tool_use}
+                    )
                     global CURRENT_TOOLUSE_ID
-                    CURRENT_TOOLUSE_ID = bedrock_tool_use['toolUseId']
-                if event.get('contentBlockDelta', {}).get('delta', {}).get('toolUse'):
-                    bedrock_response_tool_input += event['contentBlockDelta']['delta']['toolUse']['input']
-                    print(event['contentBlockDelta']['delta']['toolUse']['input'], end='', flush=True)
-                if event.get('contentBlockStop', {}).get('contentBlockIndex') == 1:
-                    bedrock_response['output']['message']['content'][1]['toolUse']['input'] = json.loads(bedrock_response_tool_input)
+                    CURRENT_TOOLUSE_ID = bedrock_tool_use["toolUseId"]
+                if event.get("contentBlockDelta", {}).get("delta", {}).get("toolUse"):
+                    bedrock_response_tool_input += event["contentBlockDelta"]["delta"][
+                        "toolUse"
+                    ]["input"]
+                    print(
+                        event["contentBlockDelta"]["delta"]["toolUse"]["input"],
+                        end="",
+                        flush=True,
+                    )
+                if event.get("contentBlockStop", {}).get("contentBlockIndex") == 1:
+                    bedrock_response["output"]["message"]["content"][1]["toolUse"][
+                        "input"
+                    ] = json.loads(bedrock_response_tool_input)
         print()
-        openai_response = self._convert_bedrock_response_to_openai_format(bedrock_response)
+        openai_response = self._convert_bedrock_response_to_openai_format(
+            bedrock_response
+        )
         return openai_response
 
     def create(
         self,
         model: str,
         messages: List[Dict[str, str]],
         max_tokens: int,
         temperature: float,
         stream: Optional[bool] = True,
         tools: Optional[List[dict]] = None,
         tool_choice: Literal["none", "auto", "required"] = "auto",
-        **kwargs
+        **kwargs,
     ) -> OpenAIResponse:
         # Main entry point for chat completion
         bedrock_tools = []
         if tools is not None:
             bedrock_tools = self._convert_openai_tools_to_bedrock_format(tools)
         if stream:
-            return self._invoke_bedrock_stream(model, messages, max_tokens, temperature, bedrock_tools, tool_choice, **kwargs)
+            return self._invoke_bedrock_stream(
+                model,
+                messages,
+                max_tokens,
+                temperature,
+                bedrock_tools,
+                tool_choice,
+                **kwargs,
+            )
         else:
-            return self._invoke_bedrock(model, messages, max_tokens, temperature, bedrock_tools, tool_choice, **kwargs)
+            return self._invoke_bedrock(
+                model,
+                messages,
+                max_tokens,
+                temperature,
+                bedrock_tools,
+                tool_choice,
+                **kwargs,
+            )
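
Taken together, BedrockClient mirrors the OpenAI SDK surface (client.chat.completions.create), so call sites written against OpenAI need not change. A minimal usage sketch, assuming AWS credentials are already configured in the environment and using a placeholder model ID:

    import asyncio

    async def main():
        client = BedrockClient()
        response = await client.chat.completions.create(
            model="anthropic.claude-3-5-sonnet-20240620-v1:0",  # placeholder model ID
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=1024,
            temperature=0.5,
            stream=False,
        )
        print(response.choices[0].message.content)

    asyncio.run(main())

Note that create() itself is synchronous but returns the coroutine from the async _invoke_bedrock* helpers, so the result must be awaited either way.
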

View File

@@ -18,6 +18,7 @@ from tenacity import (
     wait_random_exponential,
 )
 
+from app.bedrock import BedrockClient
 from app.config import LLMSettings, config
 from app.exceptions import TokenLimitExceeded
 from app.logger import logger  # Assuming a logger is set up in your app
@@ -28,7 +29,6 @@ from app.schema import (
     Message,
     ToolChoice,
 )
-from app.bedrock import BedrockClient
 
 
 REASONING_MODELS = ["o1", "o3-mini"]

View File

@@ -1,30 +1,19 @@
+import logging
+import sys
+
+logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])
+
 import argparse
 import asyncio
 import atexit
 import json
-import logging
-import os
-import sys
 from inspect import Parameter, Signature
 from typing import Any, Dict, Optional
 
 from mcp.server.fastmcp import FastMCP
 
-# Add directories to Python path (needed for proper importing)
-current_dir = os.path.dirname(os.path.abspath(__file__))
-parent_dir = os.path.dirname(current_dir)
-root_dir = os.path.dirname(parent_dir)
-sys.path.insert(0, parent_dir)
-sys.path.insert(0, current_dir)
-sys.path.insert(0, root_dir)
-
-# Configure logging (using the same format as original)
-logging.basicConfig(
-    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-)
-logger = logging.getLogger("mcp-server")
-
+from app.logger import logger
 from app.tool.base import BaseTool
 from app.tool.bash import Bash
 from app.tool.browser_use_tool import BrowserUseTool
@@ -45,11 +34,6 @@ class MCPServer:
         self.tools["editor"] = StrReplaceEditor()
         self.tools["terminate"] = Terminate()
 
-        from app.logger import logger as app_logger
-
-        global logger
-        logger = app_logger
-
     def register_tool(self, tool: BaseTool, method_name: Optional[str] = None) -> None:
         """Register a tool with parameter validation and documentation.""" 
         tool_name = method_name or tool.name
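
The ordering in the new version appears deliberate: an MCP stdio server speaks JSON-RPC over stdout, so any import-time logging that lands on stdout would corrupt the protocol stream. Routing the root logger to stderr before the app.* imports run, and reusing app.logger's shared logger instead of a module-local one, keeps stdout reserved for protocol traffic and removes the logger rebinding in MCPServer.__init__. A minimal demonstration of the pattern, independent of this repo:

    import logging
    import sys

    # Keep stdout clean for the JSON-RPC stream; diagnostics go to stderr.
    logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])
    logging.getLogger(__name__).info("this lands on stderr, not stdout")
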

View File

@@ -13,10 +13,14 @@ class MCPRunner:
     def __init__(self):
         self.root_path = config.root_path
-        self.server_script = self.root_path / "app" / "mcp" / "server.py"
+        self.server_reference = "app.mcp.server"
         self.agent = MCPAgent()
 
-    async def initialize(self, connection_type: str, server_url: str = None) -> None:
+    async def initialize(
+        self,
+        connection_type: str,
+        server_url: str | None = None,
+    ) -> None:
         """Initialize the MCP agent with the appropriate connection."""
 
         logger.info(f"Initializing MCPAgent with {connection_type} connection...")
@@ -24,7 +28,7 @@ class MCPRunner:
             await self.agent.initialize(
                 connection_type="stdio",
                 command=sys.executable,
-                args=[str(self.server_script)],
+                args=["-m", self.server_reference],
             )
         else:  # sse
             await self.agent.initialize(connection_type="sse", server_url=server_url)
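
Switching from a script path to a module reference pairs with the import cleanup in server.py above: python -m puts the process's working directory on sys.path, so the app package resolves without the deleted sys.path.insert calls. A sketch of the stdio launch this now produces, assuming the runner is started from the repository root:

    import subprocess
    import sys

    # Illustrative equivalent of the runner's stdio connection:
    proc = subprocess.Popen(
        [sys.executable, "-m", "app.mcp.server"],
        stdin=subprocess.PIPE,   # JSON-RPC requests in
        stdout=subprocess.PIPE,  # JSON-RPC responses out
    )
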