From 1086a9788a05f6cbcb985422fa84576ffe82fe54 Mon Sep 17 00:00:00 2001 From: Wuzi Date: Fri, 7 Mar 2025 14:40:03 +0800 Subject: [PATCH 01/77] place the generated file in the workspace directory --- app/tool/file_saver.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/app/tool/file_saver.py b/app/tool/file_saver.py index 9f4d3cb..8b44a69 100644 --- a/app/tool/file_saver.py +++ b/app/tool/file_saver.py @@ -4,6 +4,7 @@ import os import aiofiles from app.tool.base import BaseTool +from app.config import WORKSPACE_ROOT class FileSaver(BaseTool): @@ -46,16 +47,23 @@ The tool accepts content and a file path, and saves the content to that location str: A message indicating the result of the operation. """ try: + # Place the generated file in the workspace directory + if os.path.isabs(file_path): + file_name = os.path.basename(file_path) + full_path = os.path.join(WORKSPACE_ROOT, file_name) + else: + full_path = os.path.join(WORKSPACE_ROOT, file_path) + # Ensure the directory exists - directory = os.path.dirname(file_path) + directory = os.path.dirname(full_path) if directory and not os.path.exists(directory): os.makedirs(directory) # Write directly to the file - async with aiofiles.open(file_path, mode, encoding="utf-8") as file: + async with aiofiles.open(full_path, mode, encoding="utf-8") as file: await file.write(content) - return f"Content successfully saved to {file_path}" + return f"Content successfully saved to {full_path}" except Exception as e: return f"Error saving file: {str(e)}" From 15024e320a03d7afc39778985ebbb27b279b7e39 Mon Sep 17 00:00:00 2001 From: seeker Date: Fri, 7 Mar 2025 18:47:53 +0800 Subject: [PATCH 02/77] add: Added a sandbox for executing commands within docker containers --- .gitignore | 1 + app/config.py | 22 +- app/sandbox/__init__.py | 30 ++ app/sandbox/client.py | 201 +++++++++++ app/sandbox/core/exceptions.py | 17 + app/sandbox/core/manager.py | 313 +++++++++++++++++ app/sandbox/core/sandbox.py | 462 ++++++++++++++++++++++++++ app/sandbox/core/terminal.py | 346 +++++++++++++++++++ app/tool/file_operators.py | 156 +++++++++ app/tool/run.py | 43 --- app/tool/str_replace_editor.py | 272 ++++++++++----- config/config.example.toml | 10 + requirements.txt | 4 + tests/sandbox/test_client.py | 110 ++++++ tests/sandbox/test_docker_terminal.py | 104 ++++++ tests/sandbox/test_sandbox.py | 152 +++++++++ tests/sandbox/test_sandbox_manager.py | 138 ++++++++ 17 files changed, 2248 insertions(+), 133 deletions(-) create mode 100644 app/sandbox/__init__.py create mode 100644 app/sandbox/client.py create mode 100644 app/sandbox/core/exceptions.py create mode 100644 app/sandbox/core/manager.py create mode 100644 app/sandbox/core/sandbox.py create mode 100644 app/sandbox/core/terminal.py create mode 100644 app/tool/file_operators.py delete mode 100644 app/tool/run.py create mode 100644 tests/sandbox/test_client.py create mode 100644 tests/sandbox/test_docker_terminal.py create mode 100644 tests/sandbox/test_sandbox.py create mode 100644 tests/sandbox/test_sandbox_manager.py diff --git a/.gitignore b/.gitignore index 653fd83..bd23c42 100644 --- a/.gitignore +++ b/.gitignore @@ -178,3 +178,4 @@ data/ # Workspace workspace/ +.DS_Store diff --git a/app/config.py b/app/config.py index 2275fb4..f1968d2 100644 --- a/app/config.py +++ b/app/config.py @@ -23,8 +23,23 @@ class LLMSettings(BaseModel): temperature: float = Field(1.0, description="Sampling temperature") +class SandboxConfig(BaseModel): + """Configuration for the execution sandbox""" + + 
use_sandbox: bool = Field(False, description="Whether to use the sandbox") + image: str = Field("python:3.10-slim", description="Base image") + work_dir: str = Field("/workspace", description="Container working directory") + memory_limit: str = Field("512m", description="Memory limit") + cpu_limit: float = Field(1.0, description="CPU limit") + timeout: int = Field(300, description="Default command timeout (seconds)") + network_enabled: bool = Field( + False, description="Whether network access is allowed" + ) + + class AppConfig(BaseModel): llm: Dict[str, LLMSettings] + sandbox: SandboxConfig class Config: @@ -85,7 +100,8 @@ class Config: name: {**default_settings, **override_config} for name, override_config in llm_overrides.items() }, - } + }, + "sandbox": raw_config.get("sandbox", {}), } self._config = AppConfig(**config_dict) @@ -94,5 +110,9 @@ class Config: def llm(self) -> Dict[str, LLMSettings]: return self._config.llm + @property + def sandbox(self) -> SandboxConfig: + return self._config.sandbox + config = Config() diff --git a/app/sandbox/__init__.py b/app/sandbox/__init__.py new file mode 100644 index 0000000..ccf0df6 --- /dev/null +++ b/app/sandbox/__init__.py @@ -0,0 +1,30 @@ +""" +Docker Sandbox Module + +Provides secure containerized execution environment with resource limits +and isolation for running untrusted code. +""" +from app.sandbox.client import ( + BaseSandboxClient, + LocalSandboxClient, + create_sandbox_client, +) +from app.sandbox.core.exceptions import ( + SandboxError, + SandboxResourceError, + SandboxTimeoutError, +) +from app.sandbox.core.manager import SandboxManager +from app.sandbox.core.sandbox import DockerSandbox + + +__all__ = [ + "DockerSandbox", + "SandboxManager", + "BaseSandboxClient", + "LocalSandboxClient", + "create_sandbox_client", + "SandboxError", + "SandboxTimeoutError", + "SandboxResourceError", +] diff --git a/app/sandbox/client.py b/app/sandbox/client.py new file mode 100644 index 0000000..e2c412f --- /dev/null +++ b/app/sandbox/client.py @@ -0,0 +1,201 @@ +from abc import ABC, abstractmethod +from typing import Dict, Optional, Protocol + +from app.config import SandboxConfig +from app.sandbox.core.sandbox import DockerSandbox + + +class SandboxFileOperations(Protocol): + """Protocol for sandbox file operations.""" + + async def copy_from(self, container_path: str, local_path: str) -> None: + """Copies file from container to local. + + Args: + container_path: File path in container. + local_path: Local destination path. + """ + ... + + async def copy_to(self, local_path: str, container_path: str) -> None: + """Copies file from local to container. + + Args: + local_path: Local source file path. + container_path: Destination path in container. + """ + ... + + async def read_file(self, path: str) -> str: + """Reads file content from container. + + Args: + path: File path in container. + + Returns: + str: File content. + """ + ... + + async def write_file(self, path: str, content: str) -> None: + """Writes content to file in container. + + Args: + path: File path in container. + content: Content to write. + """ + ... 
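Taken together, the pieces this patch adds are meant to be driven through LocalSandboxClient below. A minimal usage sketch (assuming a running local Docker daemon; the file paths are illustrative, not part of the patch):

    import asyncio

    from app.config import SandboxConfig
    from app.sandbox.client import create_sandbox_client


    async def main() -> None:
        client = await create_sandbox_client()
        try:
            await client.create(config=SandboxConfig())
            await client.write_file("/workspace/hello.py", "print('hello sandbox')")
            # Runs inside the container, subject to the configured timeout
            print(await client.run_command("python /workspace/hello.py", timeout=30))
        finally:
            await client.cleanup()


    asyncio.run(main())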
+ + +class BaseSandboxClient(ABC): + """Base sandbox client interface.""" + + @abstractmethod + async def create( + self, + config: Optional[SandboxConfig] = None, + volume_bindings: Optional[Dict[str, str]] = None, + ) -> None: + """Creates sandbox.""" + + @abstractmethod + async def run_command(self, command: str, timeout: Optional[int] = None) -> str: + """Executes command.""" + + @abstractmethod + async def copy_from(self, container_path: str, local_path: str) -> None: + """Copies file from container.""" + + @abstractmethod + async def copy_to(self, local_path: str, container_path: str) -> None: + """Copies file to container.""" + + @abstractmethod + async def read_file(self, path: str) -> str: + """Reads file.""" + + @abstractmethod + async def write_file(self, path: str, content: str) -> None: + """Writes file.""" + + @abstractmethod + async def cleanup(self) -> None: + """Cleans up resources.""" + + +class LocalSandboxClient(BaseSandboxClient): + """Local sandbox client implementation.""" + + def __init__(self): + """Initializes local sandbox client.""" + self.sandbox: Optional[DockerSandbox] = None + + async def create( + self, + config: Optional[SandboxConfig] = None, + volume_bindings: Optional[Dict[str, str]] = None, + ) -> None: + """Creates a sandbox. + + Args: + config: Sandbox configuration. + volume_bindings: Volume mappings. + + Raises: + RuntimeError: If sandbox creation fails. + """ + self.sandbox = DockerSandbox(config, volume_bindings) + await self.sandbox.create() + + async def run_command(self, command: str, timeout: Optional[int] = None) -> str: + """Runs command in sandbox. + + Args: + command: Command to execute. + timeout: Execution timeout in seconds. + + Returns: + Command output. + + Raises: + RuntimeError: If sandbox not initialized. + """ + if not self.sandbox: + raise RuntimeError("Sandbox not initialized") + return await self.sandbox.run_command(command, timeout) + + async def copy_from(self, container_path: str, local_path: str) -> None: + """Copies file from container to local. + + Args: + container_path: File path in container. + local_path: Local destination path. + + Raises: + RuntimeError: If sandbox not initialized. + """ + if not self.sandbox: + raise RuntimeError("Sandbox not initialized") + await self.sandbox.copy_from(container_path, local_path) + + async def copy_to(self, local_path: str, container_path: str) -> None: + """Copies file from local to container. + + Args: + local_path: Local source file path. + container_path: Destination path in container. + + Raises: + RuntimeError: If sandbox not initialized. + """ + if not self.sandbox: + raise RuntimeError("Sandbox not initialized") + await self.sandbox.copy_to(local_path, container_path) + + async def read_file(self, path: str) -> str: + """Reads file from container. + + Args: + path: File path in container. + + Returns: + File content. + + Raises: + RuntimeError: If sandbox not initialized. + """ + if not self.sandbox: + raise RuntimeError("Sandbox not initialized") + return await self.sandbox.read_file(path) + + async def write_file(self, path: str, content: str) -> None: + """Writes file to container. + + Args: + path: File path in container. + content: File content. + + Raises: + RuntimeError: If sandbox not initialized. 
+        """
+        if not self.sandbox:
+            raise RuntimeError("Sandbox not initialized")
+        await self.sandbox.write_file(path, content)
+
+    async def cleanup(self) -> None:
+        """Cleans up resources."""
+        if self.sandbox:
+            await self.sandbox.cleanup()
+            self.sandbox = None
+
+
+async def create_sandbox_client() -> LocalSandboxClient:
+    """Creates a sandbox client.
+
+    Returns:
+        LocalSandboxClient: Sandbox client instance.
+    """
+    return LocalSandboxClient()
+
+
+SANDBOX_CLIENT = LocalSandboxClient()  # module-level instance; create_sandbox_client() is async and would assign a coroutine here
diff --git a/app/sandbox/core/exceptions.py b/app/sandbox/core/exceptions.py
new file mode 100644
index 0000000..5c1f0e8
--- /dev/null
+++ b/app/sandbox/core/exceptions.py
@@ -0,0 +1,17 @@
+"""Exception classes for the sandbox system.
+
+This module defines custom exceptions used throughout the sandbox system to
+handle various error conditions in a structured way.
+"""
+
+
+class SandboxError(Exception):
+    """Base exception for sandbox-related errors."""
+
+
+class SandboxTimeoutError(SandboxError):
+    """Exception raised when a sandbox operation times out."""
+
+
+class SandboxResourceError(SandboxError):
+    """Exception raised for resource-related errors."""
diff --git a/app/sandbox/core/manager.py b/app/sandbox/core/manager.py
new file mode 100644
index 0000000..6269d72
--- /dev/null
+++ b/app/sandbox/core/manager.py
@@ -0,0 +1,313 @@
+import asyncio
+import uuid
+from contextlib import asynccontextmanager
+from typing import Dict, Optional, Set
+
+import docker
+from docker.errors import APIError, ImageNotFound
+
+from app.config import SandboxConfig
+from app.logger import logger
+from app.sandbox.core.sandbox import DockerSandbox
+
+
+class SandboxManager:
+    """Docker sandbox manager.
+
+    Manages multiple DockerSandbox instances lifecycle including creation,
+    monitoring, and cleanup. Provides concurrent access control and automatic
+    cleanup mechanisms for sandbox resources.
+
+    Attributes:
+        max_sandboxes: Maximum allowed number of sandboxes.
+        idle_timeout: Sandbox idle timeout in seconds.
+        cleanup_interval: Cleanup check interval in seconds.
+        _sandboxes: Active sandbox instance mapping.
+        _last_used: Last used time record for sandboxes.
+    """
+
+    def __init__(
+        self,
+        max_sandboxes: int = 100,
+        idle_timeout: int = 3600,
+        cleanup_interval: int = 300,
+    ):
+        """Initializes sandbox manager.
+
+        Args:
+            max_sandboxes: Maximum sandbox count limit.
+            idle_timeout: Idle timeout in seconds.
+            cleanup_interval: Cleanup check interval in seconds.
+        """
+        self.max_sandboxes = max_sandboxes
+        self.idle_timeout = idle_timeout
+        self.cleanup_interval = cleanup_interval
+
+        # Docker client
+        self._client = docker.from_env()
+
+        # Resource mappings
+        self._sandboxes: Dict[str, DockerSandbox] = {}
+        self._last_used: Dict[str, float] = {}
+
+        # Concurrency control
+        self._locks: Dict[str, asyncio.Lock] = {}
+        self._global_lock = asyncio.Lock()
+        self._active_operations: Set[str] = set()
+
+        # Cleanup task
+        self._cleanup_task: Optional[asyncio.Task] = None
+        self._is_shutting_down = False
+
+        # Start automatic cleanup
+        self.start_cleanup_task()
+
+    async def ensure_image(self, image: str) -> bool:
+        """Ensures Docker image is available.
+
+        Args:
+            image: Image name.
+
+        Returns:
+            bool: Whether image is available.
+ """ + try: + self._client.images.get(image) + return True + except ImageNotFound: + try: + logger.info(f"Pulling image {image}...") + await asyncio.get_event_loop().run_in_executor( + None, self._client.images.pull, image + ) + return True + except (APIError, Exception) as e: + logger.error(f"Failed to pull image {image}: {e}") + return False + + @asynccontextmanager + async def sandbox_operation(self, sandbox_id: str): + """Context manager for sandbox operations. + + Provides concurrency control and usage time updates. + + Args: + sandbox_id: Sandbox ID. + + Raises: + KeyError: If sandbox not found. + """ + if sandbox_id not in self._locks: + self._locks[sandbox_id] = asyncio.Lock() + + async with self._locks[sandbox_id]: + if sandbox_id not in self._sandboxes: + raise KeyError(f"Sandbox {sandbox_id} not found") + + self._active_operations.add(sandbox_id) + try: + self._last_used[sandbox_id] = asyncio.get_event_loop().time() + yield self._sandboxes[sandbox_id] + finally: + self._active_operations.remove(sandbox_id) + + async def create_sandbox( + self, + config: Optional[SandboxConfig] = None, + volume_bindings: Optional[Dict[str, str]] = None, + ) -> str: + """Creates a new sandbox instance. + + Args: + config: Sandbox configuration. + volume_bindings: Volume mapping configuration. + + Returns: + str: Sandbox ID. + + Raises: + RuntimeError: If max sandbox count reached or creation fails. + """ + async with self._global_lock: + if len(self._sandboxes) >= self.max_sandboxes: + raise RuntimeError( + f"Maximum number of sandboxes ({self.max_sandboxes}) reached" + ) + + config = config or SandboxConfig() + if not await self.ensure_image(config.image): + raise RuntimeError(f"Failed to ensure Docker image: {config.image}") + + sandbox_id = str(uuid.uuid4()) + try: + sandbox = DockerSandbox(config, volume_bindings) + await sandbox.create() + + self._sandboxes[sandbox_id] = sandbox + self._last_used[sandbox_id] = asyncio.get_event_loop().time() + self._locks[sandbox_id] = asyncio.Lock() + + logger.info(f"Created sandbox {sandbox_id}") + return sandbox_id + + except Exception as e: + logger.error(f"Failed to create sandbox: {e}") + if sandbox_id in self._sandboxes: + await self.delete_sandbox(sandbox_id) + raise RuntimeError(f"Failed to create sandbox: {e}") + + async def get_sandbox(self, sandbox_id: str) -> DockerSandbox: + """Gets a sandbox instance. + + Args: + sandbox_id: Sandbox ID. + + Returns: + DockerSandbox: Sandbox instance. + + Raises: + KeyError: If sandbox does not exist. 
+ """ + async with self.sandbox_operation(sandbox_id) as sandbox: + return sandbox + + def start_cleanup_task(self) -> None: + """Starts automatic cleanup task.""" + + async def cleanup_loop(): + while not self._is_shutting_down: + try: + await self._cleanup_idle_sandboxes() + except Exception as e: + logger.error(f"Error in cleanup loop: {e}") + await asyncio.sleep(self.cleanup_interval) + + self._cleanup_task = asyncio.create_task(cleanup_loop()) + + async def _cleanup_idle_sandboxes(self) -> None: + """Cleans up idle sandboxes.""" + current_time = asyncio.get_event_loop().time() + to_cleanup = [] + + async with self._global_lock: + for sandbox_id, last_used in self._last_used.items(): + if ( + sandbox_id not in self._active_operations + and current_time - last_used > self.idle_timeout + ): + to_cleanup.append(sandbox_id) + + for sandbox_id in to_cleanup: + try: + await self.delete_sandbox(sandbox_id) + except Exception as e: + logger.error(f"Error cleaning up sandbox {sandbox_id}: {e}") + + async def cleanup(self) -> None: + """Cleans up all resources.""" + logger.info("Starting manager cleanup...") + self._is_shutting_down = True + + # Cancel cleanup task + if self._cleanup_task: + self._cleanup_task.cancel() + try: + await asyncio.wait_for(self._cleanup_task, timeout=1.0) + except (asyncio.CancelledError, asyncio.TimeoutError): + pass + + # Get all sandbox IDs to clean up + async with self._global_lock: + sandbox_ids = list(self._sandboxes.keys()) + + # Concurrently clean up all sandboxes + cleanup_tasks = [] + for sandbox_id in sandbox_ids: + task = asyncio.create_task(self._safe_delete_sandbox(sandbox_id)) + cleanup_tasks.append(task) + + if cleanup_tasks: + # Wait for all cleanup tasks to complete, with timeout to avoid infinite waiting + try: + await asyncio.wait(cleanup_tasks, timeout=30.0) + except asyncio.TimeoutError: + logger.error("Sandbox cleanup timed out") + + # Clean up remaining references + self._sandboxes.clear() + self._last_used.clear() + self._locks.clear() + self._active_operations.clear() + + logger.info("Manager cleanup completed") + + async def _safe_delete_sandbox(self, sandbox_id: str) -> None: + """Safely deletes a single sandbox. + + Args: + sandbox_id: Sandbox ID to delete. + """ + try: + if sandbox_id in self._active_operations: + logger.warning( + f"Sandbox {sandbox_id} has active operations, waiting for completion" + ) + for _ in range(10): # Wait at most 10 times + await asyncio.sleep(0.5) + if sandbox_id not in self._active_operations: + break + else: + logger.warning( + f"Timeout waiting for sandbox {sandbox_id} operations to complete" + ) + + # Get reference to sandbox object + sandbox = self._sandboxes.get(sandbox_id) + if sandbox: + await sandbox.cleanup() + + # Remove sandbox record from manager + async with self._global_lock: + self._sandboxes.pop(sandbox_id, None) + self._last_used.pop(sandbox_id, None) + self._locks.pop(sandbox_id, None) + logger.info(f"Deleted sandbox {sandbox_id}") + except Exception as e: + logger.error(f"Error during cleanup of sandbox {sandbox_id}: {e}") + + async def delete_sandbox(self, sandbox_id: str) -> None: + """Deletes specified sandbox. + + Args: + sandbox_id: Sandbox ID. 
+ """ + if sandbox_id not in self._sandboxes: + return + + try: + await self._safe_delete_sandbox(sandbox_id) + except Exception as e: + logger.error(f"Failed to delete sandbox {sandbox_id}: {e}") + + async def __aenter__(self) -> "SandboxManager": + """Async context manager entry.""" + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: + """Async context manager exit.""" + await self.cleanup() + + def get_stats(self) -> Dict: + """Gets manager statistics. + + Returns: + Dict: Statistics information. + """ + return { + "total_sandboxes": len(self._sandboxes), + "active_operations": len(self._active_operations), + "max_sandboxes": self.max_sandboxes, + "idle_timeout": self.idle_timeout, + "cleanup_interval": self.cleanup_interval, + "is_shutting_down": self._is_shutting_down, + } diff --git a/app/sandbox/core/sandbox.py b/app/sandbox/core/sandbox.py new file mode 100644 index 0000000..400b12a --- /dev/null +++ b/app/sandbox/core/sandbox.py @@ -0,0 +1,462 @@ +import asyncio +import io +import os +import tarfile +import tempfile +import uuid +from typing import Dict, Optional + +import docker +from docker.errors import NotFound +from docker.models.containers import Container + +from app.config import SandboxConfig +from app.sandbox.core.exceptions import SandboxTimeoutError +from app.sandbox.core.terminal import AsyncDockerizedTerminal + + +class DockerSandbox: + """Docker sandbox environment. + + Provides a containerized execution environment with resource limits, + file operations, and command execution capabilities. + + Attributes: + config: Sandbox configuration. + volume_bindings: Volume mapping configuration. + client: Docker client. + container: Docker container instance. + terminal: Container terminal interface. + """ + + def __init__( + self, + config: Optional[SandboxConfig] = None, + volume_bindings: Optional[Dict[str, str]] = None, + ): + """Initializes a sandbox instance. + + Args: + config: Sandbox configuration. Default configuration used if None. + volume_bindings: Volume mappings in {host_path: container_path} format. + """ + self.config = config or SandboxConfig() + self.volume_bindings = volume_bindings or {} + self.client = docker.from_env() + self.container: Optional[Container] = None + self.terminal: Optional[AsyncDockerizedTerminal] = None + + async def create(self) -> "DockerSandbox": + """Creates and starts the sandbox container. + + Returns: + Current sandbox instance. + + Raises: + docker.errors.APIError: If Docker API call fails. + RuntimeError: If container creation or startup fails. 
+ """ + try: + # Prepare container config + host_config = self.client.api.create_host_config( + mem_limit=self.config.memory_limit, + cpu_period=100000, + cpu_quota=int(100000 * self.config.cpu_limit), + network_mode="none" if not self.config.network_enabled else "bridge", + binds=self._prepare_volume_bindings(), + ) + + # Generate unique container name with sandbox_ prefix + container_name = f"sandbox_{uuid.uuid4().hex[:8]}" + + # Create container + container = await asyncio.to_thread( + self.client.api.create_container, + image=self.config.image, + command="tail -f /dev/null", + hostname="sandbox", + working_dir=self.config.work_dir, + host_config=host_config, + name=container_name, + tty=True, + detach=True, + ) + + self.container = self.client.containers.get(container["Id"]) + + # Start container + await asyncio.to_thread(self.container.start) + + # Initialize terminal + self.terminal = AsyncDockerizedTerminal( + container["Id"], + self.config.work_dir, + env_vars={"PYTHONUNBUFFERED": "1"} + # Ensure Python output is not buffered + ) + await self.terminal.init() + + return self + + except Exception as e: + await self.cleanup() # Ensure resources are cleaned up + raise RuntimeError(f"Failed to create sandbox: {e}") from e + + def _prepare_volume_bindings(self) -> Dict[str, Dict[str, str]]: + """Prepares volume binding configuration. + + Returns: + Volume binding configuration dictionary. + """ + bindings = {} + + # Create and add working directory mapping + work_dir = self._ensure_host_dir(self.config.work_dir) + bindings[work_dir] = {"bind": self.config.work_dir, "mode": "rw"} + + # Add custom volume bindings + for host_path, container_path in self.volume_bindings.items(): + bindings[host_path] = {"bind": container_path, "mode": "rw"} + + return bindings + + @staticmethod + def _ensure_host_dir(path: str) -> str: + """Ensures directory exists on the host. + + Args: + path: Directory path. + + Returns: + Actual path on the host. + """ + host_path = os.path.join( + tempfile.gettempdir(), + f"sandbox_{os.path.basename(path)}_{os.urandom(4).hex()}", + ) + os.makedirs(host_path, exist_ok=True) + return host_path + + async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str: + """Runs a command in the sandbox. + + Args: + cmd: Command to execute. + timeout: Timeout in seconds. + + Returns: + Command output as string. + + Raises: + RuntimeError: If sandbox not initialized or command execution fails. + TimeoutError: If command execution times out. + """ + if not self.terminal: + raise RuntimeError("Sandbox not initialized") + + try: + return await self.terminal.run_command( + cmd, timeout=timeout or self.config.timeout + ) + except TimeoutError: + raise SandboxTimeoutError( + f"Command execution timed out after {timeout or self.config.timeout} seconds" + ) + + async def read_file(self, path: str) -> str: + """Reads a file from the container. + + Args: + path: File path. + + Returns: + File contents as string. + + Raises: + FileNotFoundError: If file does not exist. + RuntimeError: If read operation fails. 
+ """ + if not self.container: + raise RuntimeError("Sandbox not initialized") + + try: + # Get file archive + resolved_path = self._safe_resolve_path(path) + tar_stream, _ = await asyncio.to_thread( + self.container.get_archive, resolved_path + ) + + # Read file content from tar stream + content = await self._read_from_tar(tar_stream) + return content.decode("utf-8") + + except NotFound: + raise FileNotFoundError(f"File not found: {path}") + except Exception as e: + raise RuntimeError(f"Failed to read file: {e}") + + async def write_file(self, path: str, content: str) -> None: + """Writes content to a file in the container. + + Args: + path: Target path. + content: File content. + + Raises: + RuntimeError: If write operation fails. + """ + if not self.container: + raise RuntimeError("Sandbox not initialized") + + try: + resolved_path = self._safe_resolve_path(path) + parent_dir = os.path.dirname(resolved_path) + + # Create parent directory + if parent_dir: + await self.run_command(f"mkdir -p {parent_dir}") + + # Prepare file data + tar_stream = await self._create_tar_stream( + os.path.basename(path), content.encode("utf-8") + ) + + # Write file + await asyncio.to_thread( + self.container.put_archive, parent_dir or "/", tar_stream + ) + + except Exception as e: + raise RuntimeError(f"Failed to write file: {e}") + + def _safe_resolve_path(self, path: str) -> str: + """Safely resolves container path, preventing path traversal. + + Args: + path: Original path. + + Returns: + Resolved absolute path. + + Raises: + ValueError: If path contains potentially unsafe patterns. + """ + # Check for path traversal attempts + if ".." in path.split("/"): + raise ValueError("Path contains potentially unsafe patterns") + + resolved = ( + os.path.join(self.config.work_dir, path) + if not os.path.isabs(path) + else path + ) + return resolved + + async def copy_from(self, src_path: str, dst_path: str) -> None: + """Copies a file from the container. + + Args: + src_path: Source file path (container). + dst_path: Destination path (host). + + Raises: + FileNotFoundError: If source file does not exist. + RuntimeError: If copy operation fails. 
+        """
+        try:
+            # Ensure destination file's parent directory exists
+            parent_dir = os.path.dirname(dst_path)
+            if parent_dir:
+                os.makedirs(parent_dir, exist_ok=True)
+
+            # Get file stream
+            resolved_src = self._safe_resolve_path(src_path)
+            stream, stat = await asyncio.to_thread(
+                self.container.get_archive, resolved_src
+            )
+
+            # Create temporary directory to extract file
+            with tempfile.TemporaryDirectory() as tmp_dir:
+                # Write stream to temporary file
+                tar_path = os.path.join(tmp_dir, "temp.tar")
+                with open(tar_path, "wb") as f:
+                    for chunk in stream:
+                        f.write(chunk)
+
+                # Extract file
+                with tarfile.open(tar_path) as tar:
+                    members = tar.getmembers()
+                    if not members:
+                        raise FileNotFoundError(f"Source file is empty: {src_path}")
+
+                    # If destination is a directory, we should preserve relative path structure
+                    if os.path.isdir(dst_path):
+                        tar.extractall(dst_path)
+                    else:
+                        # If destination is a file, we only extract the source file's content
+                        if len(members) > 1:
+                            raise RuntimeError(
+                                f"Source path is a directory but destination is a file: {src_path}"
+                            )
+
+                        with open(dst_path, "wb") as dst:
+                            src_file = tar.extractfile(members[0])
+                            if src_file is None:
+                                raise RuntimeError(
+                                    f"Failed to extract file: {src_path}"
+                                )
+                            dst.write(src_file.read())
+
+        except docker.errors.NotFound:
+            raise FileNotFoundError(f"Source file not found: {src_path}")
+        except Exception as e:
+            raise RuntimeError(f"Failed to copy file: {e}")
+
+    async def copy_to(self, src_path: str, dst_path: str) -> None:
+        """Copies a file to the container.
+
+        Args:
+            src_path: Source file path (host).
+            dst_path: Destination path (container).
+
+        Raises:
+            FileNotFoundError: If source file does not exist.
+            RuntimeError: If copy operation fails.
+        """
+        try:
+            if not os.path.exists(src_path):
+                raise FileNotFoundError(f"Source file not found: {src_path}")
+
+            # Create destination directory in container
+            resolved_dst = self._safe_resolve_path(dst_path)
+            container_dir = os.path.dirname(resolved_dst)
+            if container_dir:
+                await self.run_command(f"mkdir -p {container_dir}")
+
+            # Create tar file to upload
+            with tempfile.TemporaryDirectory() as tmp_dir:
+                tar_path = os.path.join(tmp_dir, "temp.tar")
+                with tarfile.open(tar_path, "w") as tar:
+                    # Handle directory source path
+                    if os.path.isdir(src_path):
+                        for root, _, files in os.walk(src_path):
+                            for file in files:
+                                file_path = os.path.join(root, file)
+                                arcname = os.path.join(
+                                    os.path.basename(dst_path),
+                                    os.path.relpath(file_path, src_path),
+                                )
+                                tar.add(file_path, arcname=arcname)
+                    else:
+                        # Add single file to tar
+                        tar.add(src_path, arcname=os.path.basename(dst_path))
+
+                # Read tar file content
+                with open(tar_path, "rb") as f:
+                    data = f.read()
+
+                # Upload to container
+                await asyncio.to_thread(
+                    self.container.put_archive,
+                    os.path.dirname(resolved_dst) or "/",
+                    data,
+                )
+
+                # Verify file was created successfully
+                try:
+                    await self.run_command(f"test -e {resolved_dst}")
+                except Exception:
+                    raise RuntimeError(f"Failed to verify file creation: {dst_path}")
+
+        except FileNotFoundError:
+            raise
+        except Exception as e:
+            raise RuntimeError(f"Failed to copy file: {e}")
+
+    @staticmethod
+    async def _create_tar_stream(name: str, content: bytes) -> io.BytesIO:
+        """Creates a tar file stream.
+
+        Args:
+            name: Filename.
+            content: File content.
+
+        Returns:
+            Tar file stream.
+ """ + tar_stream = io.BytesIO() + with tarfile.open(fileobj=tar_stream, mode="w") as tar: + tarinfo = tarfile.TarInfo(name=name) + tarinfo.size = len(content) + tar.addfile(tarinfo, io.BytesIO(content)) + tar_stream.seek(0) + return tar_stream + + @staticmethod + async def _read_from_tar(tar_stream) -> bytes: + """Reads file content from a tar stream. + + Args: + tar_stream: Tar file stream. + + Returns: + File content. + + Raises: + RuntimeError: If read operation fails. + """ + with tempfile.NamedTemporaryFile() as tmp: + for chunk in tar_stream: + tmp.write(chunk) + tmp.seek(0) + + with tarfile.open(fileobj=tmp) as tar: + member = tar.next() + if not member: + raise RuntimeError("Empty tar archive") + + file_content = tar.extractfile(member) + if not file_content: + raise RuntimeError("Failed to extract file content") + + return file_content.read() + + async def cleanup(self) -> None: + """Cleans up sandbox resources.""" + errors = [] + try: + if self.terminal: + try: + await self.terminal.close() + except Exception as e: + errors.append(f"Terminal cleanup error: {e}") + finally: + self.terminal = None + + if self.container: + try: + await asyncio.to_thread(self.container.stop, timeout=5) + except Exception as e: + errors.append(f"Container stop error: {e}") + + try: + await asyncio.to_thread(self.container.remove, force=True) + except Exception as e: + errors.append(f"Container remove error: {e}") + finally: + self.container = None + + except Exception as e: + errors.append(f"General cleanup error: {e}") + + if errors: + print(f"Warning: Errors during cleanup: {', '.join(errors)}") + + async def __aenter__(self) -> "DockerSandbox": + """Async context manager entry.""" + return await self.create() + + async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: + """Async context manager exit.""" + await self.cleanup() diff --git a/app/sandbox/core/terminal.py b/app/sandbox/core/terminal.py new file mode 100644 index 0000000..aee5184 --- /dev/null +++ b/app/sandbox/core/terminal.py @@ -0,0 +1,346 @@ +""" +Asynchronous Docker Terminal + +This module provides asynchronous terminal functionality for Docker containers, +allowing interactive command execution with timeout control. +""" + +import asyncio +import re +import socket +from typing import Dict, Optional, Tuple, Union + +import docker +from docker import APIClient +from docker.errors import APIError +from docker.models.containers import Container + + +class DockerSession: + def __init__(self, container_id: str) -> None: + """Initializes a Docker session. + + Args: + container_id: ID of the Docker container. + """ + self.api = APIClient() + self.container_id = container_id + self.exec_id = None + self.socket = None + + async def create(self, working_dir: str, env_vars: Dict[str, str]) -> None: + """Creates an interactive session with the container. + + Args: + working_dir: Working directory inside the container. + env_vars: Environment variables to set. + + Raises: + RuntimeError: If socket connection fails. 
+        """
+        startup_command = [
+            "bash",
+            "-c",
+            f"cd {working_dir} && "
+            "PROMPT_COMMAND='' "
+            "PS1='$ ' "
+            "exec bash --norc --noprofile",
+        ]
+
+        exec_data = self.api.exec_create(
+            self.container_id,
+            startup_command,
+            stdin=True,
+            tty=True,
+            stdout=True,
+            stderr=True,
+            privileged=True,
+            user="root",
+            environment={**env_vars, "TERM": "dumb", "PS1": "$ ", "PROMPT_COMMAND": ""},
+        )
+        self.exec_id = exec_data["Id"]
+
+        socket_data = self.api.exec_start(
+            self.exec_id, socket=True, tty=True, stream=True, demux=True
+        )
+
+        if hasattr(socket_data, "_sock"):
+            self.socket = socket_data._sock
+            self.socket.setblocking(False)
+        else:
+            raise RuntimeError("Failed to get socket connection")
+
+        await self._read_until_prompt()
+
+    async def close(self) -> None:
+        """Cleans up session resources.
+
+        1. Sends exit command
+        2. Closes socket connection
+        3. Checks and cleans up exec instance
+        """
+        try:
+            if self.socket:
+                # Send exit command to close bash session
+                try:
+                    self.socket.sendall(b"exit\n")
+                    # Allow time for command execution
+                    await asyncio.sleep(0.1)
+                except Exception:
+                    pass  # Ignore sending errors, continue cleanup
+
+                # Close socket connection
+                try:
+                    self.socket.shutdown(socket.SHUT_RDWR)
+                except Exception:
+                    pass  # Some platforms may not support shutdown
+
+                self.socket.close()
+                self.socket = None
+
+            if self.exec_id:
+                try:
+                    # Check exec instance status
+                    exec_inspect = self.api.exec_inspect(self.exec_id)
+                    if exec_inspect.get("Running", False):
+                        # If still running, wait for it to complete
+                        await asyncio.sleep(0.5)
+                except Exception:
+                    pass  # Ignore inspection errors, continue cleanup
+
+                self.exec_id = None
+
+        except Exception as e:
+            # Log error but don't raise, ensure cleanup continues
+            print(f"Warning: Error during session cleanup: {e}")
+
+    async def _read_until_prompt(self) -> str:
+        """Reads output until prompt is found.
+
+        Returns:
+            String containing output up to the prompt.
+
+        Raises:
+            OSError: If socket communication fails.
+        """
+        buffer = b""
+        while b"$ " not in buffer:
+            try:
+                chunk = self.socket.recv(4096)
+                if chunk:
+                    buffer += chunk
+            except BlockingIOError:
+                # Non-blocking socket has no data available yet; retry shortly
+                await asyncio.sleep(0.1)
+                continue
+        return buffer.decode("utf-8")
+
+    async def execute(self, command: str, timeout: Optional[int] = None) -> str:
+        """Executes a command and returns cleaned output.
+
+        Args:
+            command: Shell command to execute.
+            timeout: Maximum execution time in seconds.
+
+        Returns:
+            Command output as string with prompt markers removed.
+
+        Raises:
+            RuntimeError: If session not initialized or execution fails.
+            TimeoutError: If command execution exceeds timeout.
+        """
+        if not self.socket:
+            raise RuntimeError("Session not initialized")
+
+        try:
+            # Reject known-dangerous commands before sending anything to the shell
+            sanitized_command = self._sanitize_command(command)
+            full_command = f"{sanitized_command}\necho $?\n"
+            self.socket.sendall(full_command.encode())
+
+            async def read_output() -> str:
+                buffer = b""
+                result_lines = []
+                command_sent = False
+
+                while True:
+                    try:
+                        chunk = self.socket.recv(4096)
+                        if not chunk:
+                            break
+
+                        buffer += chunk
+                        lines = buffer.split(b"\n")
+
+                        buffer = lines[-1]
+                        lines = lines[:-1]
+
+                        for line in lines:
+                            line = line.rstrip(b"\r")
+
+                            if not command_sent:
+                                command_sent = True
+                                continue
+
+                            if line.strip() == b"echo $?" or line.strip().isdigit():
+                                continue
+
+                            if line.strip():
+                                result_lines.append(line)
+
+                        if buffer.endswith(b"$ "):
+                            break
+
+                    except BlockingIOError:
+                        # Non-blocking socket has no data available yet; retry shortly
+                        await asyncio.sleep(0.1)
+                        continue
+
+                output = b"\n".join(result_lines).decode("utf-8")
+                # Strip the echoed "echo $?" helper command from the tail of the output
+                output = re.sub(r"\n\$ echo \$\?.*$", "", output)
+
+                return output
+
+            if timeout:
+                result = await asyncio.wait_for(read_output(), timeout)
+            else:
+                result = await read_output()
+
+            return result.strip()
+
+        except asyncio.TimeoutError:
+            raise TimeoutError(f"Command execution timed out after {timeout} seconds")
+        except Exception as e:
+            raise RuntimeError(f"Failed to execute command: {e}")
+
+    def _sanitize_command(self, command: str) -> str:
+        """Checks the command against a blocklist of dangerous operations.
+
+        Args:
+            command: Raw command string.
+
+        Returns:
+            The unmodified command if it passes the checks.
+
+        Raises:
+            ValueError: If command contains potentially dangerous patterns.
+        """
+
+        # Additional checks for specific risky commands
+        risky_commands = [
+            "rm -rf /",
+            "rm -rf /*",
+            "mkfs",
+            "dd if=/dev/zero",
+            ":(){:|:&};:",
+            "chmod -R 777 /",
+            "chown -R",
+        ]
+
+        for risky in risky_commands:
+            if risky in command.lower():
+                raise ValueError(
+                    f"Command contains potentially dangerous operation: {risky}"
+                )
+
+        return command
+
+
+class AsyncDockerizedTerminal:
+    def __init__(
+        self,
+        container: Union[str, Container],
+        working_dir: str = "/workspace",
+        env_vars: Optional[Dict[str, str]] = None,
+        default_timeout: int = 60,
+    ) -> None:
+        """Initializes an asynchronous terminal for Docker containers.
+
+        Args:
+            container: Docker container ID or Container object.
+            working_dir: Working directory inside the container.
+            env_vars: Environment variables to set.
+            default_timeout: Default command execution timeout in seconds.
+        """
+        self.client = docker.from_env()
+        self.container = (
+            container
+            if isinstance(container, Container)
+            else self.client.containers.get(container)
+        )
+        self.working_dir = working_dir
+        self.env_vars = env_vars or {}
+        self.default_timeout = default_timeout
+        self.session = None
+
+    async def init(self) -> None:
+        """Initializes the terminal environment.
+
+        Ensures working directory exists and creates an interactive session.
+
+        Raises:
+            RuntimeError: If initialization fails.
+        """
+        await self._ensure_workdir()
+
+        self.session = DockerSession(self.container.id)
+        await self.session.create(self.working_dir, self.env_vars)
+
+    async def _ensure_workdir(self) -> None:
+        """Ensures working directory exists in container.
+
+        Raises:
+            RuntimeError: If directory creation fails.
+        """
+        try:
+            await self._exec_simple(f"mkdir -p {self.working_dir}")
+        except APIError as e:
+            raise RuntimeError(f"Failed to create working directory: {e}")
+
+    async def _exec_simple(self, cmd: str) -> Tuple[int, str]:
+        """Executes a simple command using Docker's exec_run.
+
+        Args:
+            cmd: Command to execute.
+
+        Returns:
+            Tuple of (exit_code, output).
+        """
+        result = await asyncio.to_thread(
+            self.container.exec_run, cmd, environment=self.env_vars
+        )
+        return result.exit_code, result.output.decode("utf-8")
+
+    async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:
+        """Runs a command in the container with timeout.
+
+        Args:
+            cmd: Shell command to execute.
+            timeout: Maximum execution time in seconds.
+
+        Returns:
+            Command output as string.
+
+        Raises:
+            RuntimeError: If terminal not initialized.
+ """ + if not self.session: + raise RuntimeError("Terminal not initialized") + + return await self.session.execute(cmd, timeout=timeout or self.default_timeout) + + async def close(self) -> None: + """Closes the terminal session.""" + if self.session: + await self.session.close() + + async def __aenter__(self) -> "AsyncDockerizedTerminal": + """Async context manager entry.""" + await self.init() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: + """Async context manager exit.""" + await self.close() diff --git a/app/tool/file_operators.py b/app/tool/file_operators.py new file mode 100644 index 0000000..5f38d30 --- /dev/null +++ b/app/tool/file_operators.py @@ -0,0 +1,156 @@ +"""File operation interfaces and implementations for local and sandbox environments.""" + +import asyncio +from pathlib import Path +from typing import Optional, Protocol, Tuple, Union, runtime_checkable + +from app.config import SandboxConfig +from app.exceptions import ToolError +from app.sandbox.client import SANDBOX_CLIENT + + +PathLike = Union[str, Path] + + +@runtime_checkable +class FileOperator(Protocol): + """Interface for file operations in different environments.""" + + async def read_file(self, path: PathLike) -> str: + """Read content from a file.""" + ... + + async def write_file(self, path: PathLike, content: str) -> None: + """Write content to a file.""" + ... + + async def is_directory(self, path: PathLike) -> bool: + """Check if path points to a directory.""" + ... + + async def exists(self, path: PathLike) -> bool: + """Check if path exists.""" + ... + + async def run_command( + self, cmd: str, timeout: Optional[float] = 120.0 + ) -> Tuple[int, str, str]: + """Run a shell command and return (return_code, stdout, stderr).""" + ... 
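Because FileOperator is a runtime-checkable Protocol, tool code can be written once against this interface and reused unchanged across the local and sandbox backends. An illustrative helper (not part of the patch; the name append_line is hypothetical):

    from app.tool.file_operators import FileOperator


    async def append_line(op: FileOperator, path: str, line: str) -> None:
        # Same code path whether op is a LocalFileOperator or a SandboxFileOperator
        existing = (await op.read_file(path)) if await op.exists(path) else ""
        await op.write_file(path, existing + line + "\n")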
+ + +class LocalFileOperator(FileOperator): + """File operations implementation for local filesystem.""" + + async def read_file(self, path: PathLike) -> str: + """Read content from a local file.""" + try: + return Path(path).read_text() + except Exception as e: + raise ToolError(f"Failed to read {path}: {str(e)}") from None + + async def write_file(self, path: PathLike, content: str) -> None: + """Write content to a local file.""" + try: + Path(path).write_text(content) + except Exception as e: + raise ToolError(f"Failed to write to {path}: {str(e)}") from None + + async def is_directory(self, path: PathLike) -> bool: + """Check if path points to a directory.""" + return Path(path).is_dir() + + async def exists(self, path: PathLike) -> bool: + """Check if path exists.""" + return Path(path).exists() + + async def run_command( + self, cmd: str, timeout: Optional[float] = 120.0 + ) -> Tuple[int, str, str]: + """Run a shell command locally.""" + process = await asyncio.create_subprocess_shell( + cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) + + try: + stdout, stderr = await asyncio.wait_for( + process.communicate(), timeout=timeout + ) + return ( + process.returncode or 0, + stdout.decode(), + stderr.decode(), + ) + except asyncio.TimeoutError as exc: + try: + process.kill() + except ProcessLookupError: + pass + raise TimeoutError( + f"Command '{cmd}' timed out after {timeout} seconds" + ) from exc + + +class SandboxFileOperator(FileOperator): + """File operations implementation for sandbox environment.""" + + def __init__(self): + self.sandbox_client = SANDBOX_CLIENT + + async def _ensure_sandbox_initialized(self): + """Ensure sandbox is initialized.""" + if not self.sandbox_client.sandbox: + await self.sandbox_client.create(config=SandboxConfig()) + + async def read_file(self, path: PathLike) -> str: + """Read content from a file in sandbox.""" + await self._ensure_sandbox_initialized() + try: + return await self.sandbox_client.read_file(str(path)) + except Exception as e: + raise ToolError(f"Failed to read {path} in sandbox: {str(e)}") from None + + async def write_file(self, path: PathLike, content: str) -> None: + """Write content to a file in sandbox.""" + await self._ensure_sandbox_initialized() + try: + await self.sandbox_client.write_file(str(path), content) + except Exception as e: + raise ToolError(f"Failed to write to {path} in sandbox: {str(e)}") from None + + async def is_directory(self, path: PathLike) -> bool: + """Check if path points to a directory in sandbox.""" + await self._ensure_sandbox_initialized() + result = await self.sandbox_client.run_command( + f"test -d {path} && echo 'true' || echo 'false'" + ) + return result.strip() == "true" + + async def exists(self, path: PathLike) -> bool: + """Check if path exists in sandbox.""" + await self._ensure_sandbox_initialized() + result = await self.sandbox_client.run_command( + f"test -e {path} && echo 'true' || echo 'false'" + ) + return result.strip() == "true" + + async def run_command( + self, cmd: str, timeout: Optional[float] = 120.0 + ) -> Tuple[int, str, str]: + """Run a command in sandbox environment.""" + await self._ensure_sandbox_initialized() + try: + stdout = await self.sandbox_client.run_command( + cmd, timeout=int(timeout) if timeout else None + ) + return ( + 0, # Always return 0 since we don't have explicit return code from sandbox + stdout, + "", # No stderr capture in the current sandbox implementation + ) + except TimeoutError as exc: + raise TimeoutError( + f"Command '{cmd}' 
timed out after {timeout} seconds in sandbox" + ) from exc + except Exception as exc: + return 1, "", f"Error executing command in sandbox: {str(exc)}" diff --git a/app/tool/run.py b/app/tool/run.py deleted file mode 100644 index 8896c58..0000000 --- a/app/tool/run.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Utility to run shell commands asynchronously with a timeout.""" - -import asyncio - - -TRUNCATED_MESSAGE: str = "To save on context only part of this file has been shown to you. You should retry this tool after you have searched inside the file with `grep -n` in order to find the line numbers of what you are looking for." -MAX_RESPONSE_LEN: int = 16000 - - -def maybe_truncate(content: str, truncate_after: int | None = MAX_RESPONSE_LEN): - """Truncate content and append a notice if content exceeds the specified length.""" - return ( - content - if not truncate_after or len(content) <= truncate_after - else content[:truncate_after] + TRUNCATED_MESSAGE - ) - - -async def run( - cmd: str, - timeout: float | None = 120.0, # seconds - truncate_after: int | None = MAX_RESPONSE_LEN, -): - """Run a shell command asynchronously with a timeout.""" - process = await asyncio.create_subprocess_shell( - cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE - ) - - try: - stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout) - return ( - process.returncode or 0, - maybe_truncate(stdout.decode(), truncate_after=truncate_after), - maybe_truncate(stderr.decode(), truncate_after=truncate_after), - ) - except asyncio.TimeoutError as exc: - try: - process.kill() - except ProcessLookupError: - pass - raise TimeoutError( - f"Command '{cmd}' timed out after {timeout} seconds" - ) from exc diff --git a/app/tool/str_replace_editor.py b/app/tool/str_replace_editor.py index 4094565..47e3d38 100644 --- a/app/tool/str_replace_editor.py +++ b/app/tool/str_replace_editor.py @@ -1,11 +1,18 @@ -from collections import defaultdict -from pathlib import Path -from typing import Literal, get_args +"""File and directory manipulation tool with sandbox support.""" +from collections import defaultdict +from typing import Any, DefaultDict, List, Literal, Optional, get_args + +from app.config import config from app.exceptions import ToolError from app.tool import BaseTool from app.tool.base import CLIResult, ToolResult -from app.tool.run import run +from app.tool.file_operators import ( + FileOperator, + LocalFileOperator, + PathLike, + SandboxFileOperator, +) Command = Literal[ @@ -15,12 +22,17 @@ Command = Literal[ "insert", "undo_edit", ] + +# Constants SNIPPET_LINES: int = 4 - MAX_RESPONSE_LEN: int = 16000 +TRUNCATED_MESSAGE: str = ( + "To save on context only part of this file has been shown to you. " + "You should retry this tool after you have searched inside the file with `grep -n` " + "in order to find the line numbers of what you are looking for." +) -TRUNCATED_MESSAGE: str = "To save on context only part of this file has been shown to you. You should retry this tool after you have searched inside the file with `grep -n` in order to find the line numbers of what you are looking for." - +# Tool description _STR_REPLACE_EDITOR_DESCRIPTION = """Custom editing tool for viewing, creating and editing files * State is persistent across command calls and discussions with the user * If `path` is a file, `view` displays the result of applying `cat -n`. 
If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep @@ -35,17 +47,17 @@ Notes for using the `str_replace` command: """ -def maybe_truncate(content: str, truncate_after: int | None = MAX_RESPONSE_LEN): +def maybe_truncate( + content: str, truncate_after: Optional[int] = MAX_RESPONSE_LEN +) -> str: """Truncate content and append a notice if content exceeds the specified length.""" - return ( - content - if not truncate_after or len(content) <= truncate_after - else content[:truncate_after] + TRUNCATED_MESSAGE - ) + if not truncate_after or len(content) <= truncate_after: + return content + return content[:truncate_after] + TRUNCATED_MESSAGE class StrReplaceEditor(BaseTool): - """A tool for executing bash commands""" + """A tool for viewing, creating, and editing files with sandbox support.""" name: str = "str_replace_editor" description: str = _STR_REPLACE_EDITOR_DESCRIPTION @@ -85,8 +97,19 @@ class StrReplaceEditor(BaseTool): }, "required": ["command", "path"], } + _file_history: DefaultDict[PathLike, List[str]] = defaultdict(list) + _local_operator: LocalFileOperator = LocalFileOperator() + # todo: Sandbox resources need to be destroyed at the appropriate time. + _sandbox_operator: SandboxFileOperator = SandboxFileOperator() - _file_history: list = defaultdict(list) + # def _get_operator(self, use_sandbox: bool) -> FileOperator: + def _get_operator(self) -> FileOperator: + """Get the appropriate file operator based on execution mode.""" + return ( + self._sandbox_operator + if config.sandbox.use_sandbox + else self._local_operator + ) async def execute( self, @@ -98,24 +121,30 @@ class StrReplaceEditor(BaseTool): old_str: str | None = None, new_str: str | None = None, insert_line: int | None = None, - **kwargs, + **kwargs: Any, ) -> str: - _path = Path(path) - self.validate_path(command, _path) + """Execute a file operation command.""" + # Get the appropriate file operator + operator = self._get_operator() + + # Validate path and command combination + await self.validate_path(command, path, operator) + + # Execute the appropriate command if command == "view": - result = await self.view(_path, view_range) + result = await self.view(path, view_range, operator) elif command == "create": if file_text is None: raise ToolError("Parameter `file_text` is required for command: create") - self.write_file(_path, file_text) - self._file_history[_path].append(file_text) - result = ToolResult(output=f"File created successfully at: {_path}") + await operator.write_file(path, file_text) + self._file_history[path].append(file_text) + result = ToolResult(output=f"File created successfully at: {path}") elif command == "str_replace": if old_str is None: raise ToolError( "Parameter `old_str` is required for command: str_replace" ) - result = self.str_replace(_path, old_str, new_str) + result = await self.str_replace(path, old_str, new_str, operator) elif command == "insert": if insert_line is None: raise ToolError( @@ -123,92 +152,149 @@ class StrReplaceEditor(BaseTool): ) if new_str is None: raise ToolError("Parameter `new_str` is required for command: insert") - result = self.insert(_path, insert_line, new_str) + result = await self.insert(path, insert_line, new_str, operator) elif command == "undo_edit": - result = self.undo_edit(_path) + result = await self.undo_edit(path, operator) else: + # This should be caught by type checking, but we include it for safety raise ToolError( f'Unrecognized command {command}. 
The allowed commands for the {self.name} tool are: {", ".join(get_args(Command))}' ) + return str(result) - def validate_path(self, command: str, path: Path): - """ - Check that the path/command combination is valid. - """ - # Check if its an absolute path - if not path.is_absolute(): - suggested_path = Path("") / path + async def validate_path( + self, command: str, path: str, operator: FileOperator + ) -> None: + """Validate path and command combination based on execution environment.""" + # Check if path is absolute + if not path.startswith("/"): + suggested_path = f"/{path}" raise ToolError( - f"The path {path} is not an absolute path, it should start with `/`. Maybe you meant {suggested_path}?" + f"The path {path} is not an absolute path, it should start with `/`. " + f"Maybe you meant {suggested_path}?" ) - # Check if path exists - if not path.exists() and command != "create": - raise ToolError( - f"The path {path} does not exist. Please provide a valid path." - ) - if path.exists() and command == "create": - raise ToolError( - f"File already exists at: {path}. Cannot overwrite files using command `create`." - ) - # Check if the path points to a directory - if path.is_dir(): - if command != "view": + + # Only check if path exists for non-create commands + if command != "create": + if not await operator.exists(path): + raise ToolError( + f"The path {path} does not exist. Please provide a valid path." + ) + + # Check if path is a directory + is_dir = await operator.is_directory(path) + if is_dir and command != "view": raise ToolError( f"The path {path} is a directory and only the `view` command can be used on directories" ) - async def view(self, path: Path, view_range: list[int] | None = None): - """Implement the view command""" - if path.is_dir(): + # Check if file exists for create command + elif command == "create": + exists = await operator.exists(path) + if exists: + raise ToolError( + f"File already exists at: {path}. Cannot overwrite files using command `create`." + ) + + async def view( + self, + path: PathLike, + view_range: Optional[List[int]] = None, + operator: FileOperator = None, + ) -> CLIResult: + """Display file or directory content.""" + # Determine if path is a directory + is_dir = await operator.is_directory(path) + + if is_dir: + # Directory handling if view_range: raise ToolError( "The `view_range` parameter is not allowed when `path` points to a directory." 
) - _, stdout, stderr = await run( - rf"find {path} -maxdepth 2 -not -path '*/\.*'" - ) - if not stderr: - stdout = f"Here's the files and directories up to 2 levels deep in {path}, excluding hidden items:\n{stdout}\n" - return CLIResult(output=stdout, error=stderr) + return await self._view_directory(path, operator) + else: + # File handling + return await self._view_file(path, operator, view_range) - file_content = self.read_file(path) + @staticmethod + async def _view_directory(path: PathLike, operator: FileOperator) -> CLIResult: + """Display directory contents.""" + find_cmd = f"find {path} -maxdepth 2 -not -path '*/\\.*'" + + # Execute command using the operator + returncode, stdout, stderr = await operator.run_command(find_cmd) + + if not stderr: + stdout = ( + f"Here's the files and directories up to 2 levels deep in {path}, " + f"excluding hidden items:\n{stdout}\n" + ) + + return CLIResult(output=stdout, error=stderr) + + async def _view_file( + self, + path: PathLike, + operator: FileOperator, + view_range: Optional[List[int]] = None, + ) -> CLIResult: + """Display file content, optionally within a specified line range.""" + # Read file content + file_content = await operator.read_file(path) init_line = 1 + + # Apply view range if specified if view_range: if len(view_range) != 2 or not all(isinstance(i, int) for i in view_range): raise ToolError( "Invalid `view_range`. It should be a list of two integers." ) + file_lines = file_content.split("\n") n_lines_file = len(file_lines) init_line, final_line = view_range + + # Validate view range if init_line < 1 or init_line > n_lines_file: raise ToolError( - f"Invalid `view_range`: {view_range}. Its first element `{init_line}` should be within the range of lines of the file: {[1, n_lines_file]}" + f"Invalid `view_range`: {view_range}. Its first element `{init_line}` should be " + f"within the range of lines of the file: {[1, n_lines_file]}" ) if final_line > n_lines_file: raise ToolError( - f"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be smaller than the number of lines in the file: `{n_lines_file}`" + f"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be " + f"smaller than the number of lines in the file: `{n_lines_file}`" ) if final_line != -1 and final_line < init_line: raise ToolError( - f"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be larger or equal than its first `{init_line}`" + f"Invalid `view_range`: {view_range}. 
Its second element `{final_line}` should be " + f"larger or equal than its first `{init_line}`" ) + # Apply range if final_line == -1: file_content = "\n".join(file_lines[init_line - 1 :]) else: file_content = "\n".join(file_lines[init_line - 1 : final_line]) + # Format and return result return CLIResult( output=self._make_output(file_content, str(path), init_line=init_line) ) - def str_replace(self, path: Path, old_str: str, new_str: str | None): - """Implement the str_replace command, which replaces old_str with new_str in the file content""" - # Read the file content - file_content = self.read_file(path).expandtabs() + async def str_replace( + self, + path: PathLike, + old_str: str, + new_str: Optional[str] = None, + operator: FileOperator = None, + ) -> CLIResult: + """Replace a unique string in a file with a new string.""" + # Read file content and expand tabs + file_content = (await operator.read_file(path)).expandtabs() old_str = old_str.expandtabs() new_str = new_str.expandtabs() if new_str is not None else "" @@ -219,6 +305,7 @@ class StrReplaceEditor(BaseTool): f"No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}." ) elif occurrences > 1: + # Find line numbers of occurrences file_content_lines = file_content.split("\n") lines = [ idx + 1 @@ -226,16 +313,17 @@ class StrReplaceEditor(BaseTool): if old_str in line ] raise ToolError( - f"No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {lines}. Please ensure it is unique" + f"No replacement was performed. Multiple occurrences of old_str `{old_str}` " + f"in lines {lines}. Please ensure it is unique" ) # Replace old_str with new_str new_file_content = file_content.replace(old_str, new_str) # Write the new content to the file - self.write_file(path, new_file_content) + await operator.write_file(path, new_file_content) - # Save the content to history + # Save the original content to history self._file_history[path].append(file_content) # Create a snippet of the edited section @@ -253,36 +341,50 @@ class StrReplaceEditor(BaseTool): return CLIResult(output=success_msg) - def insert(self, path: Path, insert_line: int, new_str: str): - """Implement the insert command, which inserts new_str at the specified line in the file content.""" - file_text = self.read_file(path).expandtabs() + async def insert( + self, + path: PathLike, + insert_line: int, + new_str: str, + operator: FileOperator = None, + ) -> CLIResult: + """Insert text at a specific line in a file.""" + # Read and prepare content + file_text = (await operator.read_file(path)).expandtabs() new_str = new_str.expandtabs() file_text_lines = file_text.split("\n") n_lines_file = len(file_text_lines) + # Validate insert_line if insert_line < 0 or insert_line > n_lines_file: raise ToolError( - f"Invalid `insert_line` parameter: {insert_line}. It should be within the range of lines of the file: {[0, n_lines_file]}" + f"Invalid `insert_line` parameter: {insert_line}. 
It should be within " + f"the range of lines of the file: {[0, n_lines_file]}" ) + # Perform insertion new_str_lines = new_str.split("\n") new_file_text_lines = ( file_text_lines[:insert_line] + new_str_lines + file_text_lines[insert_line:] ) + + # Create a snippet for preview snippet_lines = ( file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line] + new_str_lines + file_text_lines[insert_line : insert_line + SNIPPET_LINES] ) + # Join lines and write to file new_file_text = "\n".join(new_file_text_lines) snippet = "\n".join(snippet_lines) - self.write_file(path, new_file_text) + await operator.write_file(path, new_file_text) self._file_history[path].append(file_text) + # Prepare success message success_msg = f"The file {path} has been edited. " success_msg += self._make_output( snippet, @@ -290,51 +392,43 @@ class StrReplaceEditor(BaseTool): max(1, insert_line - SNIPPET_LINES + 1), ) success_msg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary." + return CLIResult(output=success_msg) - def undo_edit(self, path: Path): - """Implement the undo_edit command.""" + async def undo_edit( + self, path: PathLike, operator: FileOperator = None + ) -> CLIResult: + """Revert the last edit made to a file.""" if not self._file_history[path]: raise ToolError(f"No edit history found for {path}.") old_text = self._file_history[path].pop() - self.write_file(path, old_text) + await operator.write_file(path, old_text) return CLIResult( output=f"Last edit to {path} undone successfully. {self._make_output(old_text, str(path))}" ) - def read_file(self, path: Path): - """Read the content of a file from a given path; raise a ToolError if an error occurs.""" - try: - return path.read_text() - except Exception as e: - raise ToolError(f"Ran into {e} while trying to read {path}") from None - - def write_file(self, path: Path, file: str): - """Write the content of a file to a given path; raise a ToolError if an error occurs.""" - try: - path.write_text(file) - except Exception as e: - raise ToolError(f"Ran into {e} while trying to write to {path}") from None - def _make_output( self, file_content: str, file_descriptor: str, init_line: int = 1, expand_tabs: bool = True, - ): - """Generate output for the CLI based on the content of a file.""" + ) -> str: + """Format file content for display with line numbers.""" file_content = maybe_truncate(file_content) if expand_tabs: file_content = file_content.expandtabs() + + # Add line numbers to each line file_content = "\n".join( [ f"{i + init_line:6}\t{line}" for i, line in enumerate(file_content.split("\n")) ] ) + return ( f"Here's the result of running `cat -n` on {file_descriptor}:\n" + file_content diff --git a/config/config.example.toml b/config/config.example.toml index de71832..68d6a62 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -11,3 +11,13 @@ temperature = 0.0 model = "claude-3-5-sonnet" base_url = "https://api.openai.com/v1" api_key = "sk-..." 
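(A minimal usage sketch for the sandbox this patch introduces, placed here next to the new `[sandbox]` configuration; it assumes only the `SandboxConfig` fields and the `create_sandbox_client` API exercised by the tests further down, so treat the file names and values as illustrative rather than definitive.)

```python
import asyncio

from app.config import SandboxConfig
from app.sandbox.client import create_sandbox_client


async def main() -> None:
    # Mirror the [sandbox] section added to config.example.toml below
    config = SandboxConfig(
        image="python:3.10-slim",
        work_dir="/workspace",
        memory_limit="1g",
        cpu_limit=2.0,
        timeout=300,
        network_enabled=False,
    )
    client = await create_sandbox_client()
    try:
        await client.create(config)
        # Commands run inside the container; output comes back as a string
        print(await client.run_command("python3 --version"))
        await client.write_file("/workspace/hello.txt", "Hello from the sandbox!")
        print(await client.read_file("/workspace/hello.txt"))
    finally:
        await client.cleanup()


asyncio.run(main())
```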
+ +# Sandbox configuration +[sandbox] +use_sandbox = false +image = "python:3.10-slim" +work_dir = "/workspace" +memory_limit = "1g" # 512m +cpu_limit = 2.0 +timeout = 300 +network_enabled = false diff --git a/requirements.txt b/requirements.txt index a4128d2..eae7eff 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,3 +19,7 @@ aiofiles~=24.1.0 pydantic_core~=2.27.2 colorama~=0.4.6 playwright~=1.49.1 + +docker~=7.1.0 +pytest~=8.3.5 +pytest-asyncio~=0.25.3 diff --git a/tests/sandbox/test_client.py b/tests/sandbox/test_client.py new file mode 100644 index 0000000..5b05fed --- /dev/null +++ b/tests/sandbox/test_client.py @@ -0,0 +1,110 @@ +import tempfile +from pathlib import Path +from typing import AsyncGenerator + +import pytest +import pytest_asyncio + +from app.config import SandboxConfig +from app.sandbox.client import LocalSandboxClient, create_sandbox_client + + +@pytest_asyncio.fixture(scope="function") +async def local_client() -> AsyncGenerator[LocalSandboxClient, None]: + """Creates a local sandbox client for testing.""" + client = await create_sandbox_client() + try: + yield client + finally: + await client.cleanup() + + +@pytest.fixture(scope="function") +def temp_dir() -> Path: + """Creates a temporary directory for testing.""" + with tempfile.TemporaryDirectory() as tmp_dir: + yield Path(tmp_dir) + + +@pytest.mark.asyncio +async def test_sandbox_creation(local_client: LocalSandboxClient): + """Tests sandbox creation with specific configuration.""" + config = SandboxConfig( + image="python:3.10-slim", + work_dir="/workspace", + memory_limit="512m", + cpu_limit=0.5, + ) + + await local_client.create(config) + result = await local_client.run_command("python3 --version") + assert "Python 3.10" in result + + +@pytest.mark.asyncio +async def test_local_command_execution(local_client: LocalSandboxClient): + """Tests command execution in local sandbox.""" + await local_client.create() + + result = await local_client.run_command("echo 'test'") + assert result.strip() == "test" + + with pytest.raises(Exception): + await local_client.run_command("sleep 10", timeout=1) + + +@pytest.mark.asyncio +async def test_local_file_operations(local_client: LocalSandboxClient, temp_dir: Path): + """Tests file operations in local sandbox.""" + await local_client.create() + + # Test write and read operations + test_content = "Hello, World!" 
+ await local_client.write_file("/workspace/test.txt", test_content) + content = await local_client.read_file("/workspace/test.txt") + assert content.strip() == test_content + + # Test copying file to container + src_file = temp_dir / "src.txt" + src_file.write_text("Copy to container") + await local_client.copy_to(str(src_file), "/workspace/copied.txt") + content = await local_client.read_file("/workspace/copied.txt") + assert content.strip() == "Copy to container" + + # Test copying file from container + dst_file = temp_dir / "dst.txt" + await local_client.copy_from("/workspace/test.txt", str(dst_file)) + assert dst_file.read_text().strip() == test_content + + +@pytest.mark.asyncio +async def test_local_volume_binding(local_client: LocalSandboxClient, temp_dir: Path): + """Tests volume binding in local sandbox.""" + bind_path = str(temp_dir) + volume_bindings = {bind_path: "/data"} + + await local_client.create(volume_bindings=volume_bindings) + + test_file = temp_dir / "test.txt" + test_file.write_text("Volume test") + + content = await local_client.read_file("/data/test.txt") + assert "Volume test" in content + + +@pytest.mark.asyncio +async def test_local_error_handling(local_client: LocalSandboxClient): + """Tests error handling in local sandbox.""" + await local_client.create() + + with pytest.raises(Exception) as exc: + await local_client.read_file("/nonexistent.txt") + assert "not found" in str(exc.value).lower() + + with pytest.raises(Exception) as exc: + await local_client.copy_from("/nonexistent.txt", "local.txt") + assert "not found" in str(exc.value).lower() + + +if __name__ == "__main__": + pytest.main(["-v", __file__]) diff --git a/tests/sandbox/test_docker_terminal.py b/tests/sandbox/test_docker_terminal.py new file mode 100644 index 0000000..7903d95 --- /dev/null +++ b/tests/sandbox/test_docker_terminal.py @@ -0,0 +1,104 @@ +"""Tests for the AsyncDockerizedTerminal implementation.""" + +import docker +import pytest +import pytest_asyncio + +from app.sandbox.core.terminal import AsyncDockerizedTerminal + + +@pytest.fixture(scope="module") +def docker_client(): + """Fixture providing a Docker client.""" + return docker.from_env() + + +@pytest_asyncio.fixture(scope="module") +async def docker_container(docker_client): + """Fixture providing a test Docker container.""" + container = docker_client.containers.run( + "python:3.10-slim", + "tail -f /dev/null", + name="test_container", + detach=True, + remove=True, + ) + yield container + container.stop() + + +@pytest_asyncio.fixture +async def terminal(docker_container): + """Fixture providing an initialized AsyncDockerizedTerminal instance.""" + terminal = AsyncDockerizedTerminal( + docker_container, + working_dir="/workspace", + env_vars={"TEST_VAR": "test_value"}, + default_timeout=30, + ) + await terminal.init() + yield terminal + await terminal.close() + + +class TestAsyncDockerizedTerminal: + """Test cases for AsyncDockerizedTerminal.""" + + @pytest.mark.asyncio + async def test_basic_command_execution(self, terminal): + """Test basic command execution functionality.""" + result = await terminal.run_command("echo 'Hello World'") + assert "Hello World" in result + + @pytest.mark.asyncio + async def test_environment_variables(self, terminal): + """Test environment variable setting and access.""" + result = await terminal.run_command("echo $TEST_VAR") + assert "test_value" in result + + @pytest.mark.asyncio + async def test_working_directory(self, terminal): + """Test working directory setup.""" + result = await 
terminal.run_command("pwd") + assert "/workspace" == result + + @pytest.mark.asyncio + async def test_command_timeout(self, docker_container): + """Test command timeout functionality.""" + terminal = AsyncDockerizedTerminal(docker_container, default_timeout=1) + await terminal.init() + try: + with pytest.raises(TimeoutError): + await terminal.run_command("sleep 5") + finally: + await terminal.close() + + @pytest.mark.asyncio + async def test_multiple_commands(self, terminal): + """Test execution of multiple commands in sequence.""" + cmd1 = await terminal.run_command("echo 'First'") + cmd2 = await terminal.run_command("echo 'Second'") + assert "First" in cmd1 + assert "Second" in cmd2 + + @pytest.mark.asyncio + async def test_session_cleanup(self, docker_container): + """Test proper cleanup of resources.""" + terminal = AsyncDockerizedTerminal(docker_container) + await terminal.init() + assert terminal.session is not None + await terminal.close() + # Verify session is properly cleaned up + # Note: session object still exists, but internal connection is closed + assert terminal.session is not None + + +# Configure pytest-asyncio +def pytest_configure(config): + """Configure pytest-asyncio.""" + config.addinivalue_line("asyncio_mode", "strict") + config.addinivalue_line("asyncio_default_fixture_loop_scope", "function") + + +if __name__ == "__main__": + pytest.main(["-v", __file__]) diff --git a/tests/sandbox/test_sandbox.py b/tests/sandbox/test_sandbox.py new file mode 100644 index 0000000..a690c98 --- /dev/null +++ b/tests/sandbox/test_sandbox.py @@ -0,0 +1,152 @@ +import pytest +import pytest_asyncio + +from app.sandbox.core.sandbox import DockerSandbox, SandboxConfig + + +@pytest.fixture(scope="module") +def sandbox_config(): + """Creates sandbox configuration for testing.""" + return SandboxConfig( + image="python:3.10-slim", + work_dir="/workspace", + memory_limit="1g", + cpu_limit=0.5, + network_enabled=True, + ) + + +@pytest_asyncio.fixture(scope="module") +async def sandbox(sandbox_config): + """Creates and manages a test sandbox instance.""" + sandbox = DockerSandbox(sandbox_config) + await sandbox.create() + try: + yield sandbox + finally: + await sandbox.cleanup() + + +@pytest.mark.asyncio +async def test_sandbox_working_directory(sandbox): + """Tests sandbox working directory configuration.""" + result = await sandbox.terminal.run_command("pwd") + assert result.strip() == "/workspace" + + +@pytest.mark.asyncio +async def test_sandbox_file_operations(sandbox): + """Tests sandbox file read/write operations.""" + # Test file writing + test_content = "Hello from sandbox!" + await sandbox.write_file("/workspace/test.txt", test_content) + + # Test file reading + content = await sandbox.read_file("/workspace/test.txt") + assert content.strip() == test_content + + +@pytest.mark.asyncio +async def test_sandbox_python_execution(sandbox): + """Tests Python code execution in sandbox.""" + # Write test file + await sandbox.write_file("/workspace/test.txt", "Hello from file!") + + # Write Python script + python_code = """ +print("Hello from Python!") +with open('/workspace/test.txt') as f: + print(f.read()) +""" + await sandbox.write_file("/workspace/test.py", python_code) + + # Execute script and verify output + result = await sandbox.terminal.run_command("python3 /workspace/test.py") + assert "Hello from Python!" in result + assert "Hello from file!" 
in result + + +@pytest.mark.asyncio +async def test_sandbox_file_persistence(sandbox): + """Tests file persistence in sandbox.""" + # Create multiple files + files = { + "file1.txt": "Content 1", + "file2.txt": "Content 2", + "nested/file3.txt": "Content 3", + } + + # Write files + for path, content in files.items(): + await sandbox.write_file(f"/workspace/{path}", content) + + # Verify file contents + for path, expected_content in files.items(): + content = await sandbox.read_file(f"/workspace/{path}") + assert content.strip() == expected_content + + +@pytest.mark.asyncio +async def test_sandbox_python_environment(sandbox): + """Tests Python environment configuration.""" + # Test Python version + result = await sandbox.terminal.run_command("python3 --version") + assert "Python 3.10" in result + + # Test basic module imports + python_code = """ +import sys +import os +import json +print("Python is working!") +""" + await sandbox.write_file("/workspace/env_test.py", python_code) + result = await sandbox.terminal.run_command("python3 /workspace/env_test.py") + assert "Python is working!" in result + + +@pytest.mark.asyncio +async def test_sandbox_network_access(sandbox): + """Tests sandbox network access.""" + if not sandbox.config.network_enabled: + pytest.skip("Network access is disabled") + + # Test network connectivity + await sandbox.terminal.run_command("apt update && apt install curl -y") + result = await sandbox.terminal.run_command("curl -I https://www.example.com") + assert "HTTP/2 200" in result + + +@pytest.mark.asyncio +async def test_sandbox_cleanup(sandbox_config): + """Tests sandbox cleanup process.""" + sandbox = DockerSandbox(sandbox_config) + await sandbox.create() + + # Create test files + await sandbox.write_file("/workspace/test.txt", "test") + container_id = sandbox.terminal.container.id + # Perform cleanup + await sandbox.cleanup() + + # Verify container has been removed + import docker + + client = docker.from_env() + containers = client.containers.list(all=True) + assert not any(c.id == container_id for c in containers) + + +@pytest.mark.asyncio +async def test_sandbox_error_handling(): + """Tests error handling with invalid configuration.""" + # Test invalid configuration + invalid_config = SandboxConfig(image="nonexistent:latest", work_dir="/invalid") + + sandbox = DockerSandbox(invalid_config) + with pytest.raises(Exception): + await sandbox.create() + + +if __name__ == "__main__": + pytest.main(["-v", __file__]) diff --git a/tests/sandbox/test_sandbox_manager.py b/tests/sandbox/test_sandbox_manager.py new file mode 100644 index 0000000..09f498d --- /dev/null +++ b/tests/sandbox/test_sandbox_manager.py @@ -0,0 +1,138 @@ +import asyncio +import os +import tempfile +from typing import AsyncGenerator + +import pytest +import pytest_asyncio + +from app.sandbox.core.manager import SandboxManager + + +@pytest_asyncio.fixture(scope="function") +async def manager() -> AsyncGenerator[SandboxManager, None]: + """Creates a sandbox manager instance. + + Uses function scope to ensure each test case has its own manager instance. 
+ """ + manager = SandboxManager(max_sandboxes=2, idle_timeout=60, cleanup_interval=30) + try: + yield manager + finally: + # Ensure all resources are cleaned up + await manager.cleanup() + + +@pytest.fixture +def temp_file(): + """Creates a temporary test file.""" + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as f: + f.write("test content") + path = f.name + try: + yield path + finally: + if os.path.exists(path): + os.unlink(path) + + +@pytest.mark.asyncio +async def test_create_sandbox(manager): + """Tests sandbox creation.""" + # Create default sandbox + sandbox_id = await manager.create_sandbox() + assert sandbox_id in manager._sandboxes + assert sandbox_id in manager._last_used + + # Verify sandbox functionality + sandbox = await manager.get_sandbox(sandbox_id) + result = await sandbox.run_command("echo 'test'") + assert result.strip() == "test" + + +@pytest.mark.asyncio +async def test_max_sandboxes_limit(manager): + """Tests maximum sandbox limit enforcement.""" + created_sandboxes = [] + try: + # Create maximum number of sandboxes + for _ in range(manager.max_sandboxes): + sandbox_id = await manager.create_sandbox() + created_sandboxes.append(sandbox_id) + + # Verify created sandbox count + assert len(manager._sandboxes) == manager.max_sandboxes + + # Attempting to create additional sandbox should fail + with pytest.raises(RuntimeError) as exc_info: + await manager.create_sandbox() + + # Verify error message + expected_message = ( + f"Maximum number of sandboxes ({manager.max_sandboxes}) reached" + ) + assert str(exc_info.value) == expected_message + + finally: + # Clean up all created sandboxes + for sandbox_id in created_sandboxes: + try: + await manager.delete_sandbox(sandbox_id) + except Exception as e: + print(f"Failed to cleanup sandbox {sandbox_id}: {e}") + + +@pytest.mark.asyncio +async def test_get_nonexistent_sandbox(manager): + """Tests retrieving a non-existent sandbox.""" + with pytest.raises(KeyError, match="Sandbox .* not found"): + await manager.get_sandbox("nonexistent-id") + + +@pytest.mark.asyncio +async def test_sandbox_cleanup(manager): + """Tests sandbox cleanup functionality.""" + sandbox_id = await manager.create_sandbox() + assert sandbox_id in manager._sandboxes + + await manager.delete_sandbox(sandbox_id) + assert sandbox_id not in manager._sandboxes + assert sandbox_id not in manager._last_used + + +@pytest.mark.asyncio +async def test_idle_sandbox_cleanup(manager): + """Tests automatic cleanup of idle sandboxes.""" + # Set short idle timeout + manager.idle_timeout = 0.1 + + sandbox_id = await manager.create_sandbox() + assert sandbox_id in manager._sandboxes + + # Wait longer than idle timeout + await asyncio.sleep(0.2) + + # Trigger cleanup + await manager._cleanup_idle_sandboxes() + assert sandbox_id not in manager._sandboxes + + +@pytest.mark.asyncio +async def test_manager_cleanup(manager): + """Tests manager cleanup functionality.""" + # Create multiple sandboxes + sandbox_ids = [] + for _ in range(2): + sandbox_id = await manager.create_sandbox() + sandbox_ids.append(sandbox_id) + + # Clean up all resources + await manager.cleanup() + + # Verify all sandboxes have been cleaned up + assert not manager._sandboxes + assert not manager._last_used + + +if __name__ == "__main__": + pytest.main(["-v", __file__]) From 983e8f0d4b4a9cf628c81e554cdeab836a6b02bf Mon Sep 17 00:00:00 2001 From: the0807 Date: Wed, 12 Mar 2025 14:33:32 +0900 Subject: [PATCH 03/77] Support OpenAI Reasoning Models (o1, o3-mini) --- app/llm.py | 56 
++++++++++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/app/llm.py b/app/llm.py index 8f62782..90e6f03 100644 --- a/app/llm.py +++ b/app/llm.py @@ -14,6 +14,7 @@ from app.config import LLMSettings, config from app.logger import logger # Assuming a logger is set up in your app from app.schema import Message +REASONING_MODELS = ["o1", "o3-mini"] class LLM: _instances: Dict[str, "LLM"] = {} @@ -133,27 +134,30 @@ class LLM: else: messages = self.format_messages(messages) + params = { + "model": self.model, + "messages": messages, + } + + if self.model in REASONING_MODELS: + params["max_completion_tokens"] = self.max_tokens + else: + params["max_tokens"] = self.max_tokens + params["temperature"] = temperature or self.temperature + if not stream: # Non-streaming request - response = await self.client.chat.completions.create( - model=self.model, - messages=messages, - max_tokens=self.max_tokens, - temperature=temperature or self.temperature, - stream=False, - ) + params["stream"] = False + + response = await self.client.chat.completions.create(**params) + if not response.choices or not response.choices[0].message.content: raise ValueError("Empty or invalid response from LLM") return response.choices[0].message.content # Streaming request - response = await self.client.chat.completions.create( - model=self.model, - messages=messages, - max_tokens=self.max_tokens, - temperature=temperature or self.temperature, - stream=True, - ) + params["stream"] = True + response = await self.client.chat.completions.create(**params) collected_messages = [] async for chunk in response: @@ -230,16 +234,22 @@ class LLM: raise ValueError("Each tool must be a dict with 'type' field") # Set up the completion request - response = await self.client.chat.completions.create( - model=self.model, - messages=messages, - temperature=temperature or self.temperature, - max_tokens=self.max_tokens, - tools=tools, - tool_choice=tool_choice, - timeout=timeout, + params = { + "model": self.model, + "messages": messages, + "tools": tools, + "tool_choice": tool_choice, + "timeout": timeout, **kwargs, - ) + } + + if self.model in REASONING_MODELS: + params["max_completion_tokens"] = self.max_tokens + else: + params["max_tokens"] = self.max_tokens + params["temperature"] = temperature or self.temperature + + response = await self.client.chat.completions.create(**params) # Check if response is valid if not response.choices or not response.choices[0].message: From ed4b78dc37bd31a59d0831f2054527a1845e2e9d Mon Sep 17 00:00:00 2001 From: 836304831 <836304831@qq.com> Date: Wed, 12 Mar 2025 23:33:37 +0800 Subject: [PATCH 04/77] update python_execute safe --- app/tool/python_execute.py | 73 +++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 32 deletions(-) diff --git a/app/tool/python_execute.py b/app/tool/python_execute.py index 88e1aab..e9c8140 100644 --- a/app/tool/python_execute.py +++ b/app/tool/python_execute.py @@ -1,4 +1,6 @@ -import threading +import sys +from io import StringIO +import multiprocessing from typing import Dict from app.tool.base import BaseTool @@ -20,6 +22,20 @@ class PythonExecute(BaseTool): "required": ["code"], } + def _run_code(self, code: str, result_dict: dict, safe_globals: dict) -> None: + original_stdout = sys.stdout + try: + output_buffer = StringIO() + sys.stdout = output_buffer + exec(code, safe_globals, safe_globals) + result_dict["observation"] = output_buffer.getvalue() + result_dict["success"] = True + except Exception as 
e: + result_dict["observation"] = str(e) + result_dict["success"] = False + finally: + sys.stdout = original_stdout + async def execute( self, code: str, @@ -35,36 +51,29 @@ class PythonExecute(BaseTool): Returns: Dict: Contains 'output' with execution output or error message and 'success' status. """ - result = {"observation": ""} - def run_code(): - try: - safe_globals = {"__builtins__": dict(__builtins__)} + with multiprocessing.Manager() as manager: + result = manager.dict({ + "observation": "", + "success": False + }) + if isinstance(__builtins__, dict): + safe_globals = {"__builtins__": __builtins__} + else: + safe_globals = {"__builtins__": __builtins__.__dict__.copy()} + proc = multiprocessing.Process( + target=self._run_code, + args=(code, result, safe_globals) + ) + proc.start() + proc.join(timeout) - import sys - from io import StringIO - - output_buffer = StringIO() - sys.stdout = output_buffer - - exec(code, safe_globals, {}) - - sys.stdout = sys.__stdout__ - - result["observation"] = output_buffer.getvalue() - - except Exception as e: - result["observation"] = str(e) - result["success"] = False - - thread = threading.Thread(target=run_code) - thread.start() - thread.join(timeout) - - if thread.is_alive(): - return { - "observation": f"Execution timeout after {timeout} seconds", - "success": False, - } - - return result + # timeout process + if proc.is_alive(): + proc.terminate() + proc.join(1) + return { + "observation": f"Execution timeout after {timeout} seconds", + "success": False, + } + return dict(result) From bbaff4f095b402fae3ffc659e8dfb7eb5d9b4c39 Mon Sep 17 00:00:00 2001 From: Kingtous Date: Thu, 13 Mar 2025 00:27:48 +0800 Subject: [PATCH 05/77] feat: add baidu search tool and optional config --- app/agent/manus.py | 17 +++++++++++++- app/config.py | 16 +++++++++++++ app/tool/baidu_search.py | 48 ++++++++++++++++++++++++++++++++++++++ config/config.example.toml | 5 ++++ requirements.txt | 1 + 5 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 app/tool/baidu_search.py diff --git a/app/agent/manus.py b/app/agent/manus.py index e11ca45..7cd012c 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -8,7 +8,9 @@ from app.tool import Terminate, ToolCollection from app.tool.browser_use_tool import BrowserUseTool from app.tool.file_saver import FileSaver from app.tool.google_search import GoogleSearch +from app.tool.baidu_search import BaiduSearch from app.tool.python_execute import PythonExecute +from app.config import config class Manus(ToolCallAgent): @@ -34,9 +36,22 @@ class Manus(ToolCallAgent): # Add general-purpose tools to the tool collection available_tools: ToolCollection = Field( default_factory=lambda: ToolCollection( - PythonExecute(), GoogleSearch(), BrowserUseTool(), FileSaver(), Terminate() + PythonExecute(), Manus.get_search_tool(), BrowserUseTool(), FileSaver(), Terminate() ) ) + + @staticmethod + def get_search_tool(): + """Determines the search tool to use based on the configuration.""" + print(config.search_config) + if config.search_config is None: + return GoogleSearch() + else: + # Check search engine + engine = config.search_config.engine.lower() + if engine == "baidu": + return BaiduSearch() + return GoogleSearch() async def _handle_special_tool(self, name: str, result: Any, **kwargs): await self.available_tools.get_tool(BrowserUseTool().name).cleanup() diff --git a/app/config.py b/app/config.py index 64f478d..81e1e81 100644 --- a/app/config.py +++ b/app/config.py @@ -30,6 +30,8 @@ class ProxySettings(BaseModel): username: 
Optional[str] = Field(None, description="Proxy username") password: Optional[str] = Field(None, description="Proxy password") +class SearchSettings(BaseModel): + engine: str = Field(default='Google', description="Search engine the llm to use") class BrowserSettings(BaseModel): headless: bool = Field(False, description="Whether to run browser in headless mode") @@ -58,6 +60,9 @@ class AppConfig(BaseModel): browser_config: Optional[BrowserSettings] = Field( None, description="Browser configuration" ) + search_config: Optional[SearchSettings] = Field( + None, description="Search configuration" + ) class Config: arbitrary_types_allowed = True @@ -149,6 +154,12 @@ class Config: if valid_browser_params: browser_settings = BrowserSettings(**valid_browser_params) + search_config = raw_config.get("search", {}) + search_settings = None + if search_config: + search_settings = SearchSettings(**search_config) + print("search setting", search_settings) + config_dict = { "llm": { "default": default_settings, @@ -158,6 +169,7 @@ class Config: }, }, "browser_config": browser_settings, + "search_config": search_settings, } self._config = AppConfig(**config_dict) @@ -169,6 +181,10 @@ class Config: @property def browser_config(self) -> Optional[BrowserSettings]: return self._config.browser_config + + @property + def search_config(self) -> Optional[SearchSettings]: + return self._config.search_config config = Config() diff --git a/app/tool/baidu_search.py b/app/tool/baidu_search.py new file mode 100644 index 0000000..93ba50f --- /dev/null +++ b/app/tool/baidu_search.py @@ -0,0 +1,48 @@ +import asyncio +from typing import List + +from baidusearch.baidusearch import search + +from app.tool.base import BaseTool + + +class BaiduSearch(BaseTool): + name: str = "baidu_search" + description: str = """Perform a Baidu search and return a list of relevant links. +Use this tool when you need to find information on the web, get up-to-date data, or research specific topics. +The tool returns a list of URLs that match the search query. +""" + parameters: dict = { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "(required) The search query to submit to Baidu.", + }, + "num_results": { + "type": "integer", + "description": "(optional) The number of search results to return. Default is 10.", + "default": 10, + }, + }, + "required": ["query"], + } + + async def execute(self, query: str, num_results: int = 10) -> List[str]: + """ + Execute a Baidu search and return a list of URLs. + + Args: + query (str): The search query to submit to Baidu. + num_results (int, optional): The number of search results to return. Default is 10. + + Returns: + List[str]: A list of URLs matching the search query. + """ + # Run the search in a thread pool to prevent blocking + loop = asyncio.get_event_loop() + links = await loop.run_in_executor( + None, lambda: list(search(query, num_results=num_results)) + ) + + return links diff --git a/config/config.example.toml b/config/config.example.toml index 13648dd..ac8af62 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -42,3 +42,8 @@ api_key = "sk-..." # server = "http://proxy-server:port" # username = "proxy-username" # password = "proxy-password" + +# Optional configuration, Search settings. +# [search] +# Search engine for agent to use. Default is "Google", can be set to "Baidu". 
+#engine = "Google" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 7ce4b52..c275e65 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ uvicorn~=0.34.0 unidiff~=0.7.5 browser-use~=0.1.40 googlesearch-python~=1.3.0 +baidusearch~=1.0.3 aiofiles~=24.1.0 pydantic_core~=2.27.2 From f9ce06adb8349af5d1c7d4126ea0e5dea1ac1876 Mon Sep 17 00:00:00 2001 From: Kingtous Date: Thu, 13 Mar 2025 00:50:30 +0800 Subject: [PATCH 06/77] opt: remove unnessary print --- app/agent/manus.py | 1 - 1 file changed, 1 deletion(-) diff --git a/app/agent/manus.py b/app/agent/manus.py index 7cd012c..daac10e 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -43,7 +43,6 @@ class Manus(ToolCallAgent): @staticmethod def get_search_tool(): """Determines the search tool to use based on the configuration.""" - print(config.search_config) if config.search_config is None: return GoogleSearch() else: From b7774b18ef9db28fe578f224c9956740017041ac Mon Sep 17 00:00:00 2001 From: Kingtous Date: Thu, 13 Mar 2025 08:31:40 +0800 Subject: [PATCH 07/77] opt: abstract web search interface, code cleanup --- app/agent/manus.py | 17 +------- app/config.py | 1 - app/tool/google_search.py | 48 --------------------- app/tool/{baidu_search.py => web_search.py} | 31 +++++++++---- 4 files changed, 25 insertions(+), 72 deletions(-) delete mode 100644 app/tool/google_search.py rename app/tool/{baidu_search.py => web_search.py} (54%) diff --git a/app/agent/manus.py b/app/agent/manus.py index daac10e..fdf0a10 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -7,8 +7,7 @@ from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.tool import Terminate, ToolCollection from app.tool.browser_use_tool import BrowserUseTool from app.tool.file_saver import FileSaver -from app.tool.google_search import GoogleSearch -from app.tool.baidu_search import BaiduSearch +from app.tool.web_search import WebSearch from app.tool.python_execute import PythonExecute from app.config import config @@ -36,21 +35,9 @@ class Manus(ToolCallAgent): # Add general-purpose tools to the tool collection available_tools: ToolCollection = Field( default_factory=lambda: ToolCollection( - PythonExecute(), Manus.get_search_tool(), BrowserUseTool(), FileSaver(), Terminate() + PythonExecute(), WebSearch(), BrowserUseTool(), FileSaver(), Terminate() ) ) - - @staticmethod - def get_search_tool(): - """Determines the search tool to use based on the configuration.""" - if config.search_config is None: - return GoogleSearch() - else: - # Check search engine - engine = config.search_config.engine.lower() - if engine == "baidu": - return BaiduSearch() - return GoogleSearch() async def _handle_special_tool(self, name: str, result: Any, **kwargs): await self.available_tools.get_tool(BrowserUseTool().name).cleanup() diff --git a/app/config.py b/app/config.py index 81e1e81..8fd8bd7 100644 --- a/app/config.py +++ b/app/config.py @@ -158,7 +158,6 @@ class Config: search_settings = None if search_config: search_settings = SearchSettings(**search_config) - print("search setting", search_settings) config_dict = { "llm": { diff --git a/app/tool/google_search.py b/app/tool/google_search.py deleted file mode 100644 index ed5d7d5..0000000 --- a/app/tool/google_search.py +++ /dev/null @@ -1,48 +0,0 @@ -import asyncio -from typing import List - -from googlesearch import search - -from app.tool.base import BaseTool - - -class GoogleSearch(BaseTool): - name: str = "google_search" - description: str = """Perform a Google search and 
return a list of relevant links. -Use this tool when you need to find information on the web, get up-to-date data, or research specific topics. -The tool returns a list of URLs that match the search query. -""" - parameters: dict = { - "type": "object", - "properties": { - "query": { - "type": "string", - "description": "(required) The search query to submit to Google.", - }, - "num_results": { - "type": "integer", - "description": "(optional) The number of search results to return. Default is 10.", - "default": 10, - }, - }, - "required": ["query"], - } - - async def execute(self, query: str, num_results: int = 10) -> List[str]: - """ - Execute a Google search and return a list of URLs. - - Args: - query (str): The search query to submit to Google. - num_results (int, optional): The number of search results to return. Default is 10. - - Returns: - List[str]: A list of URLs matching the search query. - """ - # Run the search in a thread pool to prevent blocking - loop = asyncio.get_event_loop() - links = await loop.run_in_executor( - None, lambda: list(search(query, num_results=num_results)) - ) - - return links diff --git a/app/tool/baidu_search.py b/app/tool/web_search.py similarity index 54% rename from app/tool/baidu_search.py rename to app/tool/web_search.py index 93ba50f..3beb4c4 100644 --- a/app/tool/baidu_search.py +++ b/app/tool/web_search.py @@ -1,14 +1,16 @@ import asyncio from typing import List -from baidusearch.baidusearch import search +from googlesearch import search as google_search +from baidusearch.baidusearch import search as baidu_search from app.tool.base import BaseTool +from app.config import config -class BaiduSearch(BaseTool): - name: str = "baidu_search" - description: str = """Perform a Baidu search and return a list of relevant links. +class WebSearch(BaseTool): + name: str = "web_search" + description: str = """Perform a web search and return a list of relevant links. Use this tool when you need to find information on the web, get up-to-date data, or research specific topics. The tool returns a list of URLs that match the search query. """ @@ -17,7 +19,7 @@ The tool returns a list of URLs that match the search query. "properties": { "query": { "type": "string", - "description": "(required) The search query to submit to Baidu.", + "description": "(required) The search query to submit to the search engine.", }, "num_results": { "type": "integer", @@ -27,13 +29,17 @@ The tool returns a list of URLs that match the search query. }, "required": ["query"], } + _search_engine: dict = { + "google": google_search, + "baidu": baidu_search, + } async def execute(self, query: str, num_results: int = 10) -> List[str]: """ - Execute a Baidu search and return a list of URLs. + Execute a Web search and return a list of URLs. Args: - query (str): The search query to submit to Baidu. + query (str): The search query to submit to the search engine. num_results (int, optional): The number of search results to return. Default is 10. Returns: @@ -41,8 +47,17 @@ The tool returns a list of URLs that match the search query. 
""" # Run the search in a thread pool to prevent blocking loop = asyncio.get_event_loop() + search_engine = self.get_search_engine() links = await loop.run_in_executor( - None, lambda: list(search(query, num_results=num_results)) + None, lambda: list(search_engine(query, num_results=num_results)) ) return links + + def get_search_engine(self): + """Determines the search engine to use based on the configuration.""" + if config.search_config is None: + return google_search + else: + engine = config.search_config.engine.lower() + return self._search_engine.get(engine, google_search) From 86d2a7d6bf921be089b2314e53db8c8569d3eab3 Mon Sep 17 00:00:00 2001 From: Kingtous Date: Thu, 13 Mar 2025 09:05:14 +0800 Subject: [PATCH 08/77] feat: implement duckduckgo search, abstract further --- app/prompt/manus.py | 2 +- app/tool/search/__init__.py | 12 ++++++++++++ app/tool/search/baidu_search.py | 9 +++++++++ app/tool/search/base.py | 15 +++++++++++++++ app/tool/search/duckduckgo_search.py | 9 +++++++++ app/tool/search/google_search.py | 8 ++++++++ app/tool/web_search.py | 20 ++++++++++---------- requirements.txt | 1 + 8 files changed, 65 insertions(+), 11 deletions(-) create mode 100644 app/tool/search/__init__.py create mode 100644 app/tool/search/baidu_search.py create mode 100644 app/tool/search/base.py create mode 100644 app/tool/search/duckduckgo_search.py create mode 100644 app/tool/search/google_search.py diff --git a/app/prompt/manus.py b/app/prompt/manus.py index e46c793..6dcca8a 100644 --- a/app/prompt/manus.py +++ b/app/prompt/manus.py @@ -8,7 +8,7 @@ FileSaver: Save files locally, such as txt, py, html, etc. BrowserUseTool: Open, browse, and use web browsers.If you open a local HTML file, you must provide the absolute path to the file. -GoogleSearch: Perform web information retrieval +WebSearch: Perform web information retrieval Terminate: End the current interaction when the task is complete or when you need additional information from the user. Use this tool to signal that you've finished addressing the user's request or need clarification before proceeding further. diff --git a/app/tool/search/__init__.py b/app/tool/search/__init__.py new file mode 100644 index 0000000..509d16d --- /dev/null +++ b/app/tool/search/__init__.py @@ -0,0 +1,12 @@ +from app.tool.search.base import WebSearchEngine +from app.tool.search.baidu_search import BaiduSearchEngine +from app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine +from app.tool.search.google_search import GoogleSearchEngine + + +__all__ = [ + "WebSearchEngine", + "BaiduSearchEngine", + "DuckDuckGoSearchEngine", + "GoogleSearchEngine", +] \ No newline at end of file diff --git a/app/tool/search/baidu_search.py b/app/tool/search/baidu_search.py new file mode 100644 index 0000000..a398899 --- /dev/null +++ b/app/tool/search/baidu_search.py @@ -0,0 +1,9 @@ +from baidusearch.baidusearch import search +from app.tool.search.base import WebSearchEngine + + +class BaiduSearchEngine(WebSearchEngine): + + def perform_search(self, query, num_results = 10, *args, **kwargs): + """Baidu search engine.""" + return search(query, num_results=num_results) diff --git a/app/tool/search/base.py b/app/tool/search/base.py new file mode 100644 index 0000000..095c0b1 --- /dev/null +++ b/app/tool/search/base.py @@ -0,0 +1,15 @@ +class WebSearchEngine(object): + def perform_search(self, query: str, num_results: int = 10, *args, **kwargs) -> list[dict]: + """ + Perform a web search and return a list of URLs. 
+
+        Args:
+            query (str): The search query to submit to the search engine.
+            num_results (int, optional): The number of search results to return. Default is 10.
+            args: Additional arguments.
+            kwargs: Additional keyword arguments.
+
+        Returns:
+            List: A list of dicts matching the search query.
+        """
+        raise NotImplementedError
\ No newline at end of file
diff --git a/app/tool/search/duckduckgo_search.py b/app/tool/search/duckduckgo_search.py
new file mode 100644
index 0000000..738ecf5
--- /dev/null
+++ b/app/tool/search/duckduckgo_search.py
@@ -0,0 +1,9 @@
+from duckduckgo_search import DDGS
+from app.tool.search.base import WebSearchEngine
+
+
+class DuckDuckGoSearchEngine(WebSearchEngine):
+
+    def perform_search(self, query, num_results = 10, *args, **kwargs):
+        """DuckDuckGo search engine."""
+        return DDGS().text(query, max_results=num_results)
diff --git a/app/tool/search/google_search.py b/app/tool/search/google_search.py
new file mode 100644
index 0000000..606f107
--- /dev/null
+++ b/app/tool/search/google_search.py
@@ -0,0 +1,8 @@
+from app.tool.search.base import WebSearchEngine
+from googlesearch import search
+
+class GoogleSearchEngine(WebSearchEngine):
+
+    def perform_search(self, query, num_results = 10, *args, **kwargs):
+        """Google search engine."""
+        return search(query, num_results=num_results)
diff --git a/app/tool/web_search.py b/app/tool/web_search.py
index 3beb4c4..9f55199 100644
--- a/app/tool/web_search.py
+++ b/app/tool/web_search.py
@@ -1,11 +1,9 @@
 import asyncio
 from typing import List
 
-from googlesearch import search as google_search
-from baidusearch.baidusearch import search as baidu_search
-
 from app.tool.base import BaseTool
 from app.config import config
+from app.tool.search import WebSearchEngine, BaiduSearchEngine, GoogleSearchEngine, DuckDuckGoSearchEngine
 
 
 class WebSearch(BaseTool):
@@ -29,9 +27,10 @@ The tool returns a list of URLs that match the search query.
         },
         "required": ["query"],
     }
-    _search_engine: dict = {
-        "google": google_search,
-        "baidu": baidu_search,
+    _search_engine: dict[str, WebSearchEngine] = {
+        "google": GoogleSearchEngine(),
+        "baidu": BaiduSearchEngine(),
+        "duckduckgo": DuckDuckGoSearchEngine(),
     }
 
     async def execute(self, query: str, num_results: int = 10) -> List[str]:
@@ -53,11 +52,12 @@ The tool returns a list of URLs that match the search query.
) return links - - def get_search_engine(self): + + def get_search_engine(self) -> WebSearchEngine: """Determines the search engine to use based on the configuration.""" + default_engine = self._search_engine.get("google") if config.search_config is None: - return google_search + return default_engine else: engine = config.search_config.engine.lower() - return self._search_engine.get(engine, google_search) + return self._search_engine.get(engine, default_engine) diff --git a/requirements.txt b/requirements.txt index c275e65..60ad38e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,6 +16,7 @@ unidiff~=0.7.5 browser-use~=0.1.40 googlesearch-python~=1.3.0 baidusearch~=1.0.3 +duckduckgo_search~=7.5.1 aiofiles~=24.1.0 pydantic_core~=2.27.2 From 2b9ef4ea08db083e906549cef9b314ddcd923f5e Mon Sep 17 00:00:00 2001 From: Kingtous Date: Thu, 13 Mar 2025 09:10:14 +0800 Subject: [PATCH 09/77] fix: perform search on query --- app/tool/web_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/tool/web_search.py b/app/tool/web_search.py index 9f55199..c661f3b 100644 --- a/app/tool/web_search.py +++ b/app/tool/web_search.py @@ -48,7 +48,7 @@ The tool returns a list of URLs that match the search query. loop = asyncio.get_event_loop() search_engine = self.get_search_engine() links = await loop.run_in_executor( - None, lambda: list(search_engine(query, num_results=num_results)) + None, lambda: list(search_engine.perform_search(query, num_results=num_results)) ) return links From 198f70d5246930657f7e35649c98654e2d1328ad Mon Sep 17 00:00:00 2001 From: Kingtous Date: Thu, 13 Mar 2025 09:11:20 +0800 Subject: [PATCH 10/77] opt: update config.example.json --- config/config.example.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config.example.toml b/config/config.example.toml index ac8af62..d6c193a 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -45,5 +45,5 @@ api_key = "sk-..." # Optional configuration, Search settings. # [search] -# Search engine for agent to use. Default is "Google", can be set to "Baidu". +# Search engine for agent to use. Default is "Google", can be set to "Baidu" or "DuckDuckGo". #engine = "Google" \ No newline at end of file From 837ae1b6ebcb2a8dfd857c9fab3d7654f8bc78c1 Mon Sep 17 00:00:00 2001 From: zhoupeng Date: Thu, 13 Mar 2025 17:59:54 +0800 Subject: [PATCH 11/77] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ee33f75..5ef486d 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ cd OpenManus 3. Create a new virtual environment and activate it: ```bash -uv venv +uv venv --python 3.12 source .venv/bin/activate # On Unix/macOS # Or on Windows: # .venv\Scripts\activate From b80188141e7c65fe36d803aae41734d0ef9c4a74 Mon Sep 17 00:00:00 2001 From: zhoupeng Date: Thu, 13 Mar 2025 18:00:31 +0800 Subject: [PATCH 12/77] Update README_ja.md --- README_ja.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_ja.md b/README_ja.md index 668d9e3..e72f931 100644 --- a/README_ja.md +++ b/README_ja.md @@ -66,7 +66,7 @@ cd OpenManus 3. 
新しい仮想環境を作成してアクティベートします: ```bash -uv venv +uv venv --python 3.12 source .venv/bin/activate # Unix/macOSの場合 # Windowsの場合: # .venv\Scripts\activate From 2d17a3bd6ea36f9fbff4ef58d91f1915586aeeec Mon Sep 17 00:00:00 2001 From: zhoupeng Date: Thu, 13 Mar 2025 18:00:52 +0800 Subject: [PATCH 13/77] Update README_ko.md --- README_ko.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_ko.md b/README_ko.md index 5cefd84..77fe1f0 100644 --- a/README_ko.md +++ b/README_ko.md @@ -66,7 +66,7 @@ cd OpenManus 3. 새로운 가상 환경을 생성하고 활성화합니다: ```bash -uv venv +uv venv --python 3.12 source .venv/bin/activate # Unix/macOS의 경우 # Windows의 경우: # .venv\Scripts\activate From 8e9aa733e5461cdcfae81d8aaf24237dca4ff70e Mon Sep 17 00:00:00 2001 From: zhoupeng Date: Thu, 13 Mar 2025 18:01:09 +0800 Subject: [PATCH 14/77] Update README_zh.md --- README_zh.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_zh.md b/README_zh.md index 28f6749..298e162 100644 --- a/README_zh.md +++ b/README_zh.md @@ -69,7 +69,7 @@ cd OpenManus 3. 创建并激活虚拟环境: ```bash -uv venv +uv venv --python 3.12 source .venv/bin/activate # Unix/macOS 系统 # Windows 系统使用: # .venv\Scripts\activate From cba275d40549950ed6a41bfe32c7ef150f5f32bd Mon Sep 17 00:00:00 2001 From: ca-ke Date: Thu, 13 Mar 2025 14:17:57 -0300 Subject: [PATCH 15/77] refactor: enhance web search functionality with engine fallback and retry mechanism --- app/tool/web_search.py | 69 +++++++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/app/tool/web_search.py b/app/tool/web_search.py index c661f3b..0d6f0ea 100644 --- a/app/tool/web_search.py +++ b/app/tool/web_search.py @@ -4,14 +4,13 @@ from typing import List from app.tool.base import BaseTool from app.config import config from app.tool.search import WebSearchEngine, BaiduSearchEngine, GoogleSearchEngine, DuckDuckGoSearchEngine - +from tenacity import retry, stop_after_attempt, wait_exponential class WebSearch(BaseTool): name: str = "web_search" - description: str = """Perform a web search and return a list of relevant links. -Use this tool when you need to find information on the web, get up-to-date data, or research specific topics. -The tool returns a list of URLs that match the search query. -""" + description: str = """Perform a web search and return a list of relevant links. + This function attempts to use the primary search engine API to get up-to-date results. + If an error occurs, it falls back to an alternative search engine.""" parameters: dict = { "type": "object", "properties": { @@ -44,20 +43,48 @@ The tool returns a list of URLs that match the search query. Returns: List[str]: A list of URLs matching the search query. """ - # Run the search in a thread pool to prevent blocking + engine_order = self._get_engine_order() + for engine_name in engine_order: + engine = self._search_engine[engine_name] + try: + links = await self._perform_search_with_engine(engine, query, num_results) + if links: + return links + except Exception as e: + print(f"Search engine '{engine_name}' failed with error: {e}") + return [] + + def _get_engine_order(self) -> List[str]: + """ + Determines the order in which to try search engines. + Preferred engine is first (based on configuration), followed by the remaining engines. + + Returns: + List[str]: Ordered list of search engine names. 
+ """ + preferred = "google" + if config.search_config and config.search_config.engine: + preferred = config.search_config.engine.lower() + + engine_order = [] + if preferred in self._search_engine: + engine_order.append(preferred) + for key in self._search_engine: + if key not in engine_order: + engine_order.append(key) + return engine_order + + @retry( + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=1, max=10), + ) + async def _perform_search_with_engine( + self, + engine: WebSearchEngine, + query: str, + num_results: int, + ) -> List[str]: loop = asyncio.get_event_loop() - search_engine = self.get_search_engine() - links = await loop.run_in_executor( - None, lambda: list(search_engine.perform_search(query, num_results=num_results)) - ) - - return links - - def get_search_engine(self) -> WebSearchEngine: - """Determines the search engine to use based on the configuration.""" - default_engine = self._search_engine.get("google") - if config.search_config is None: - return default_engine - else: - engine = config.search_config.engine.lower() - return self._search_engine.get(engine, default_engine) + return await loop.run_in_executor( + None, lambda: list(engine.perform_search(query, num_results=num_results)) + ) \ No newline at end of file From 89c9d904db1c2c873b3b0428d5f2e99737d9bddb Mon Sep 17 00:00:00 2001 From: xRay <3864998@qq.com> Date: Fri, 14 Mar 2025 09:46:46 +0800 Subject: [PATCH 16/77] =?UTF-8?q?=E5=B0=86=E5=B7=A5=E5=85=B7=E9=80=89?= =?UTF-8?q?=E6=8B=A9=E4=BB=8E=20ToolChoice.REQUIRED=20=E6=9B=B4=E6=96=B0?= =?UTF-8?q?=E4=B8=BA=20ToolChoice.AUTO=EF=BC=8C=E4=BB=A5=E4=BC=98=E5=8C=96?= =?UTF-8?q?=E8=A7=84=E5=88=92=E4=BB=A3=E7=90=86=E5=92=8C=E8=A7=84=E5=88=92?= =?UTF-8?q?=E6=B5=81=E7=A8=8B=E7=9A=84=E5=B7=A5=E5=85=B7=E8=B0=83=E7=94=A8?= =?UTF-8?q?=E9=80=BB=E8=BE=91=E3=80=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/agent/planning.py | 2 +- app/flow/planning.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/app/agent/planning.py b/app/agent/planning.py index cbd15a0..8cc2be8 100644 --- a/app/agent/planning.py +++ b/app/agent/planning.py @@ -212,7 +212,7 @@ class PlanningAgent(ToolCallAgent): messages=messages, system_msgs=[Message.system_message(self.system_prompt)], tools=self.available_tools.to_params(), - tool_choice=ToolChoice.REQUIRED, + tool_choice=ToolChoice.AUTO, ) assistant_msg = Message.from_tool_calls( content=response.content, tool_calls=response.tool_calls diff --git a/app/flow/planning.py b/app/flow/planning.py index a12bbe4..55ec5c9 100644 --- a/app/flow/planning.py +++ b/app/flow/planning.py @@ -124,7 +124,7 @@ class PlanningFlow(BaseFlow): messages=[user_message], system_msgs=[system_message], tools=[self.planning_tool.to_param()], - tool_choice=ToolChoice.REQUIRED, + tool_choice=ToolChoice.AUTO, ) # Process tool calls if present From 9c7834eff2c42da0c871caec05cfdace1cf29de5 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Fri, 14 Mar 2025 12:20:59 +0800 Subject: [PATCH 17/77] update readme; format code; update config.example.toml --- README.md | 2 +- README_ja.md | 2 +- README_ko.md | 2 +- README_zh.md | 4 ++-- app/agent/base.py | 4 ++-- app/agent/manus.py | 3 +-- app/agent/planning.py | 4 ++-- app/agent/toolcall.py | 7 +++---- app/config.py | 6 ++++-- app/llm.py | 14 +++++++++++--- app/schema.py | 15 ++++++++++++--- app/tool/python_execute.py | 10 +++------- app/tool/search/__init__.py | 4 ++-- app/tool/search/baidu_search.py | 4 ++-- 
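(The engine-fallback-with-retry pattern from the web search refactor above condenses to the sketch below. The `EchoEngine` stand-in and the hard-coded engine order are assumptions for illustration; the tenacity decorator and the executor offload follow the patch.)

```python
import asyncio
from typing import Dict, List

from tenacity import retry, stop_after_attempt, wait_exponential


class EchoEngine:
    """Stand-in engine; real engines expose perform_search the same way."""

    def perform_search(self, query: str, num_results: int = 10) -> List[str]:
        return [f"https://example.com/search?q={query}&rank={i}" for i in range(num_results)]


@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
async def _search_with_engine(engine, query: str, num_results: int) -> List[str]:
    # Offload the blocking search call to a thread, as WebSearch does
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(
        None, lambda: list(engine.perform_search(query, num_results=num_results))
    )


async def search(engines: Dict[str, object], order: List[str], query: str) -> List[str]:
    # Try the preferred engine first and fall back to the rest on failure
    for name in order:
        try:
            links = await _search_with_engine(engines[name], query, 5)
            if links:
                return links
        except Exception as e:
            print(f"Search engine '{name}' failed with error: {e}")
    return []


print(asyncio.run(search({"echo": EchoEngine()}, ["echo"], "openmanus")))
```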
app/tool/search/base.py | 6 ++++-- app/tool/search/duckduckgo_search.py | 4 ++-- app/tool/search/google_search.py | 7 ++++--- app/tool/terminal.py | 22 +++++++++++----------- app/tool/web_search.py | 12 +++++++++--- config/config.example.toml | 20 +++++++++++--------- 20 files changed, 88 insertions(+), 64 deletions(-) diff --git a/README.md b/README.md index ee33f75..ae93a47 100644 --- a/README.md +++ b/README.md @@ -143,7 +143,7 @@ Join our networking group on Feishu and share your experience with other develop Thanks to [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo) and [browser-use](https://github.com/browser-use/browser-use) for providing basic support for this project! -Additionally, we are grateful to [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT) and [OpenHands](https://github.com/All-Hands-AI/OpenHands). +Additionally, we are grateful to [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT), [OpenHands](https://github.com/All-Hands-AI/OpenHands) and [SWE-agent](https://github.com/SWE-agent/SWE-agent). OpenManus is built by contributors from MetaGPT. Huge thanks to this agent community! diff --git a/README_ja.md b/README_ja.md index 668d9e3..2dd84d5 100644 --- a/README_ja.md +++ b/README_ja.md @@ -144,7 +144,7 @@ Feishuのネットワーキンググループに参加して、他の開発者 このプロジェクトの基本的なサポートを提供してくれた[anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo) と[browser-use](https://github.com/browser-use/browser-use)に感謝します! -さらに、[AAAJ](https://github.com/metauto-ai/agent-as-a-judge)、[MetaGPT](https://github.com/geekan/MetaGPT)、[OpenHands](https://github.com/All-Hands-AI/OpenHands)にも感謝します。 +さらに、[AAAJ](https://github.com/metauto-ai/agent-as-a-judge)、[MetaGPT](https://github.com/geekan/MetaGPT)、[OpenHands](https://github.com/All-Hands-AI/OpenHands)、[SWE-agent](https://github.com/SWE-agent/SWE-agent)にも感謝します。 OpenManusはMetaGPTのコントリビューターによって構築されました。このエージェントコミュニティに大きな感謝を! diff --git a/README_ko.md b/README_ko.md index 5cefd84..379363e 100644 --- a/README_ko.md +++ b/README_ko.md @@ -144,7 +144,7 @@ Feishu 네트워킹 그룹에 참여하여 다른 개발자들과 경험을 공 이 프로젝트에 기본적인 지원을 제공해 주신 [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo)와 [browser-use](https://github.com/browser-use/browser-use)에게 감사드립니다! -또한, [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT), [OpenHands](https://github.com/All-Hands-AI/OpenHands)에 깊은 감사를 드립니다. +또한, [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT), [OpenHands](https://github.com/All-Hands-AI/OpenHands), [SWE-agent](https://github.com/SWE-agent/SWE-agent)에 깊은 감사를 드립니다. OpenManus는 MetaGPT 기여자들에 의해 개발되었습니다. 이 에이전트 커뮤니티에 깊은 감사를 전합니다! diff --git a/README_zh.md b/README_zh.md index 28f6749..ea7f904 100644 --- a/README_zh.md +++ b/README_zh.md @@ -119,7 +119,7 @@ python main.py 然后通过终端输入你的创意! -如需体验开发中版本,可运行: +如需体验不稳定的开发版本,可运行: ```bash python run_flow.py @@ -148,7 +148,7 @@ python run_flow.py 特别感谢 [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo) 和 [browser-use](https://github.com/browser-use/browser-use) 为本项目提供的基础支持! -此外,我们感谢 [AAAJ](https://github.com/metauto-ai/agent-as-a-judge),[MetaGPT](https://github.com/geekan/MetaGPT) 和 [OpenHands](https://github.com/All-Hands-AI/OpenHands). 
+此外,我们感谢 [AAAJ](https://github.com/metauto-ai/agent-as-a-judge),[MetaGPT](https://github.com/geekan/MetaGPT),[OpenHands](https://github.com/All-Hands-AI/OpenHands) 和 [SWE-agent](https://github.com/SWE-agent/SWE-agent). OpenManus 由 MetaGPT 社区的贡献者共同构建,感谢这个充满活力的智能体开发者社区! diff --git a/app/agent/base.py b/app/agent/base.py index 3830365..fa3db30 100644 --- a/app/agent/base.py +++ b/app/agent/base.py @@ -6,7 +6,7 @@ from pydantic import BaseModel, Field, model_validator from app.llm import LLM from app.logger import logger -from app.schema import AgentState, Memory, Message, ROLE_TYPE +from app.schema import ROLE_TYPE, AgentState, Memory, Message class BaseAgent(BaseModel, ABC): @@ -82,7 +82,7 @@ class BaseAgent(BaseModel, ABC): def update_memory( self, - role: ROLE_TYPE, # type: ignore + role: ROLE_TYPE, # type: ignore content: str, **kwargs, ) -> None: diff --git a/app/agent/manus.py b/app/agent/manus.py index fdf0a10..6c2c2e5 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -7,9 +7,8 @@ from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.tool import Terminate, ToolCollection from app.tool.browser_use_tool import BrowserUseTool from app.tool.file_saver import FileSaver -from app.tool.web_search import WebSearch from app.tool.python_execute import PythonExecute -from app.config import config +from app.tool.web_search import WebSearch class Manus(ToolCallAgent): diff --git a/app/agent/planning.py b/app/agent/planning.py index 8cc2be8..7e98912 100644 --- a/app/agent/planning.py +++ b/app/agent/planning.py @@ -6,7 +6,7 @@ from pydantic import Field, model_validator from app.agent.toolcall import ToolCallAgent from app.logger import logger from app.prompt.planning import NEXT_STEP_PROMPT, PLANNING_SYSTEM_PROMPT -from app.schema import Message, TOOL_CHOICE_TYPE, ToolCall, ToolChoice +from app.schema import TOOL_CHOICE_TYPE, Message, ToolCall, ToolChoice from app.tool import PlanningTool, Terminate, ToolCollection @@ -27,7 +27,7 @@ class PlanningAgent(ToolCallAgent): available_tools: ToolCollection = Field( default_factory=lambda: ToolCollection(PlanningTool(), Terminate()) ) - tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore + tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name]) tool_calls: List[ToolCall] = Field(default_factory=list) diff --git a/app/agent/toolcall.py b/app/agent/toolcall.py index 1f04784..ecf0bb4 100644 --- a/app/agent/toolcall.py +++ b/app/agent/toolcall.py @@ -1,13 +1,12 @@ import json - -from typing import Any, List, Literal, Optional, Union +from typing import Any, List, Optional, Union from pydantic import Field from app.agent.react import ReActAgent from app.logger import logger from app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT -from app.schema import AgentState, Message, ToolCall, TOOL_CHOICE_TYPE, ToolChoice +from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice from app.tool import CreateChatCompletion, Terminate, ToolCollection @@ -26,7 +25,7 @@ class ToolCallAgent(ReActAgent): available_tools: ToolCollection = ToolCollection( CreateChatCompletion(), Terminate() ) - tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore + tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name]) tool_calls: List[ToolCall] = Field(default_factory=list) diff --git a/app/config.py b/app/config.py index 8fd8bd7..0a267d7 
100644 --- a/app/config.py +++ b/app/config.py @@ -30,8 +30,10 @@ class ProxySettings(BaseModel): username: Optional[str] = Field(None, description="Proxy username") password: Optional[str] = Field(None, description="Proxy password") + class SearchSettings(BaseModel): - engine: str = Field(default='Google', description="Search engine the llm to use") + engine: str = Field(default="Google", description="Search engine the llm to use") + class BrowserSettings(BaseModel): headless: bool = Field(False, description="Whether to run browser in headless mode") @@ -180,7 +182,7 @@ class Config: @property def browser_config(self) -> Optional[BrowserSettings]: return self._config.browser_config - + @property def search_config(self) -> Optional[SearchSettings]: return self._config.search_config diff --git a/app/llm.py b/app/llm.py index 3314062..8c085ae 100644 --- a/app/llm.py +++ b/app/llm.py @@ -12,10 +12,18 @@ from tenacity import retry, stop_after_attempt, wait_random_exponential from app.config import LLMSettings, config from app.logger import logger # Assuming a logger is set up in your app -from app.schema import Message, TOOL_CHOICE_TYPE, ROLE_VALUES, TOOL_CHOICE_VALUES, ToolChoice +from app.schema import ( + ROLE_VALUES, + TOOL_CHOICE_TYPE, + TOOL_CHOICE_VALUES, + Message, + ToolChoice, +) + REASONING_MODELS = ["o1", "o3-mini"] + class LLM: _instances: Dict[str, "LLM"] = {} @@ -140,7 +148,7 @@ class LLM: } if self.model in REASONING_MODELS: - params["max_completion_tokens"] = self.max_tokens + params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens params["temperature"] = temperature or self.temperature @@ -191,7 +199,7 @@ class LLM: system_msgs: Optional[List[Union[dict, Message]]] = None, timeout: int = 300, tools: Optional[List[dict]] = None, - tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore + tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore temperature: Optional[float] = None, **kwargs, ): diff --git a/app/schema.py b/app/schema.py index 30ccf6c..fb89c3c 100644 --- a/app/schema.py +++ b/app/schema.py @@ -3,25 +3,32 @@ from typing import Any, List, Literal, Optional, Union from pydantic import BaseModel, Field + class Role(str, Enum): """Message role options""" + SYSTEM = "system" USER = "user" - ASSISTANT = "assistant" + ASSISTANT = "assistant" TOOL = "tool" + ROLE_VALUES = tuple(role.value for role in Role) ROLE_TYPE = Literal[ROLE_VALUES] # type: ignore + class ToolChoice(str, Enum): """Tool choice options""" + NONE = "none" AUTO = "auto" REQUIRED = "required" + TOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice) TOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore + class AgentState(str, Enum): """Agent execution states""" @@ -47,7 +54,7 @@ class ToolCall(BaseModel): class Message(BaseModel): """Represents a chat message in the conversation""" - role: ROLE_TYPE = Field(...) # type: ignore + role: ROLE_TYPE = Field(...) 
# type: ignore content: Optional[str] = Field(default=None) tool_calls: Optional[List[ToolCall]] = Field(default=None) name: Optional[str] = Field(default=None) @@ -104,7 +111,9 @@ class Message(BaseModel): @classmethod def tool_message(cls, content: str, name, tool_call_id: str) -> "Message": """Create a tool message""" - return cls(role=Role.TOOL, content=content, name=name, tool_call_id=tool_call_id) + return cls( + role=Role.TOOL, content=content, name=name, tool_call_id=tool_call_id + ) @classmethod def from_tool_calls( diff --git a/app/tool/python_execute.py b/app/tool/python_execute.py index e9c8140..08ceffa 100644 --- a/app/tool/python_execute.py +++ b/app/tool/python_execute.py @@ -1,6 +1,6 @@ +import multiprocessing import sys from io import StringIO -import multiprocessing from typing import Dict from app.tool.base import BaseTool @@ -53,17 +53,13 @@ class PythonExecute(BaseTool): """ with multiprocessing.Manager() as manager: - result = manager.dict({ - "observation": "", - "success": False - }) + result = manager.dict({"observation": "", "success": False}) if isinstance(__builtins__, dict): safe_globals = {"__builtins__": __builtins__} else: safe_globals = {"__builtins__": __builtins__.__dict__.copy()} proc = multiprocessing.Process( - target=self._run_code, - args=(code, result, safe_globals) + target=self._run_code, args=(code, result, safe_globals) ) proc.start() proc.join(timeout) diff --git a/app/tool/search/__init__.py b/app/tool/search/__init__.py index 509d16d..4f486ac 100644 --- a/app/tool/search/__init__.py +++ b/app/tool/search/__init__.py @@ -1,5 +1,5 @@ -from app.tool.search.base import WebSearchEngine from app.tool.search.baidu_search import BaiduSearchEngine +from app.tool.search.base import WebSearchEngine from app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine from app.tool.search.google_search import GoogleSearchEngine @@ -9,4 +9,4 @@ __all__ = [ "BaiduSearchEngine", "DuckDuckGoSearchEngine", "GoogleSearchEngine", -] \ No newline at end of file +] diff --git a/app/tool/search/baidu_search.py b/app/tool/search/baidu_search.py index a398899..d415ce8 100644 --- a/app/tool/search/baidu_search.py +++ b/app/tool/search/baidu_search.py @@ -1,9 +1,9 @@ from baidusearch.baidusearch import search + from app.tool.search.base import WebSearchEngine class BaiduSearchEngine(WebSearchEngine): - - def perform_search(self, query, num_results = 10, *args, **kwargs): + def perform_search(self, query, num_results=10, *args, **kwargs): """Baidu search engine.""" return search(query, num_results=num_results) diff --git a/app/tool/search/base.py b/app/tool/search/base.py index 095c0b1..3132381 100644 --- a/app/tool/search/base.py +++ b/app/tool/search/base.py @@ -1,5 +1,7 @@ class WebSearchEngine(object): - def perform_search(self, query: str, num_results: int = 10, *args, **kwargs) -> list[dict]: + def perform_search( + self, query: str, num_results: int = 10, *args, **kwargs + ) -> list[dict]: """ Perform a web search and return a list of URLs. @@ -12,4 +14,4 @@ class WebSearchEngine(object): Returns: List: A list of dict matching the search query. 
""" - raise NotImplementedError \ No newline at end of file + raise NotImplementedError diff --git a/app/tool/search/duckduckgo_search.py b/app/tool/search/duckduckgo_search.py index 738ecf5..3dd5c52 100644 --- a/app/tool/search/duckduckgo_search.py +++ b/app/tool/search/duckduckgo_search.py @@ -1,9 +1,9 @@ from duckduckgo_search import DDGS + from app.tool.search.base import WebSearchEngine class DuckDuckGoSearchEngine(WebSearchEngine): - - async def perform_search(self, query, num_results = 10, *args, **kwargs): + async def perform_search(self, query, num_results=10, *args, **kwargs): """DuckDuckGo search engine.""" return DDGS.text(query, num_results=num_results) diff --git a/app/tool/search/google_search.py b/app/tool/search/google_search.py index 606f107..425106d 100644 --- a/app/tool/search/google_search.py +++ b/app/tool/search/google_search.py @@ -1,8 +1,9 @@ -from app.tool.search.base import WebSearchEngine from googlesearch import search +from app.tool.search.base import WebSearchEngine + + class GoogleSearchEngine(WebSearchEngine): - - def perform_search(self, query, num_results = 10, *args, **kwargs): + def perform_search(self, query, num_results=10, *args, **kwargs): """Google search engine.""" return search(query, num_results=num_results) diff --git a/app/tool/terminal.py b/app/tool/terminal.py index df5996e..86b401c 100644 --- a/app/tool/terminal.py +++ b/app/tool/terminal.py @@ -40,7 +40,7 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that str: The output, and error of the command execution. """ # Split the command by & to handle multiple commands - commands = [cmd.strip() for cmd in command.split('&') if cmd.strip()] + commands = [cmd.strip() for cmd in command.split("&") if cmd.strip()] final_output = CLIResult(output="", error="") for cmd in commands: @@ -61,7 +61,7 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that stdout, stderr = await self.process.communicate() result = CLIResult( output=stdout.decode().strip(), - error=stderr.decode().strip() + error=stderr.decode().strip(), ) except Exception as e: result = CLIResult(output="", error=str(e)) @@ -70,9 +70,13 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that # Combine outputs if result.output: - final_output.output += (result.output + "\n") if final_output.output else result.output + final_output.output += ( + (result.output + "\n") if final_output.output else result.output + ) if result.error: - final_output.error += (result.error + "\n") if final_output.error else result.error + final_output.error += ( + (result.error + "\n") if final_output.error else result.error + ) # Remove trailing newlines final_output.output = final_output.output.rstrip() @@ -124,14 +128,10 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that if os.path.isdir(new_path): self.current_path = new_path return CLIResult( - output=f"Changed directory to {self.current_path}", - error="" + output=f"Changed directory to {self.current_path}", error="" ) else: - return CLIResult( - output="", - error=f"No such directory: {new_path}" - ) + return CLIResult(output="", error=f"No such directory: {new_path}") except Exception as e: return CLIResult(output="", error=str(e)) @@ -152,7 +152,7 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that parts = shlex.split(command) if any(cmd in dangerous_commands for cmd in parts): raise ValueError("Use of dangerous commands is restricted.") - except 
Exception as e: + except Exception: # If shlex.split fails, try basic string comparison if any(cmd in command for cmd in dangerous_commands): raise ValueError("Use of dangerous commands is restricted.") diff --git a/app/tool/web_search.py b/app/tool/web_search.py index c661f3b..db4ee85 100644 --- a/app/tool/web_search.py +++ b/app/tool/web_search.py @@ -1,9 +1,14 @@ import asyncio from typing import List -from app.tool.base import BaseTool from app.config import config -from app.tool.search import WebSearchEngine, BaiduSearchEngine, GoogleSearchEngine, DuckDuckGoSearchEngine +from app.tool.base import BaseTool +from app.tool.search import ( + BaiduSearchEngine, + DuckDuckGoSearchEngine, + GoogleSearchEngine, + WebSearchEngine, +) class WebSearch(BaseTool): @@ -48,7 +53,8 @@ The tool returns a list of URLs that match the search query. loop = asyncio.get_event_loop() search_engine = self.get_search_engine() links = await loop.run_in_executor( - None, lambda: list(search_engine.perform_search(query, num_results=num_results)) + None, + lambda: list(search_engine.perform_search(query, num_results=num_results)), ) return links diff --git a/config/config.example.toml b/config/config.example.toml index d6c193a..762f42c 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -1,10 +1,10 @@ # Global LLM configuration [llm] -model = "claude-3-5-sonnet" -base_url = "https://api.openai.com/v1" -api_key = "sk-..." -max_tokens = 4096 -temperature = 0.0 +model = "claude-3-7-sonnet" # The LLM model to use +base_url = "https://api.openai.com/v1" # API endpoint URL +api_key = "sk-..." # Your API key +max_tokens = 8192 # Maximum number of tokens in the response +temperature = 0.0 # Controls randomness # [llm] #AZURE OPENAI: # api_type= 'azure' @@ -17,9 +17,11 @@ temperature = 0.0 # Optional configuration for specific LLM models [llm.vision] -model = "claude-3-5-sonnet" -base_url = "https://api.openai.com/v1" -api_key = "sk-..." +model = "claude-3-7-sonnet" # The vision model to use +base_url = "https://api.openai.com/v1" # API endpoint URL for vision model +api_key = "sk-..." # Your API key for vision model +max_tokens = 8192 # Maximum number of tokens in the response +temperature = 0.0 # Controls randomness for vision model # Optional configuration for specific browser configuration # [browser] @@ -46,4 +48,4 @@ api_key = "sk-..." # Optional configuration, Search settings. # [search] # Search engine for agent to use. Default is "Google", can be set to "Baidu" or "DuckDuckGo". -#engine = "Google" \ No newline at end of file +#engine = "Google" From 7db0b2fbf0bc8a00fd98dd8b5b6e4aa669dd56b2 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Fri, 14 Mar 2025 12:27:05 +0800 Subject: [PATCH 18/77] update readme --- README.md | 2 ++ README_ja.md | 2 ++ README_ko.md | 2 ++ README_zh.md | 2 ++ 4 files changed, 8 insertions(+) diff --git a/README.md b/README.md index ae93a47..4e85c29 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,8 @@ We welcome any friendly suggestions and helpful contributions! Just create issue Or contact @mannaandpoem via 📧email: mannaandpoem@gmail.com +**Note**: Before submitting a pull request, please use the pre-commit tool to check your changes. Run `pre-commit run --all-files` to execute the checks. + ## Community Group Join our networking group on Feishu and share your experience with other developers! 
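For contributors who have not used pre-commit before, the note added above boils down to a short shell session. A minimal sketch (assuming the hook set defined by the repository's `.pre-commit-config.yaml`):

```bash
pip install pre-commit        # one-time install of the tool
pre-commit install            # register the hooks in .git/hooks
pre-commit run --all-files    # run every configured hook against the whole tree
```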
diff --git a/README_ja.md b/README_ja.md index 2dd84d5..3805a69 100644 --- a/README_ja.md +++ b/README_ja.md @@ -128,6 +128,8 @@ python run_flow.py または @mannaandpoem に📧メールでご連絡ください:mannaandpoem@gmail.com +**注意**: プルリクエストを送信する前に、pre-commitツールを使用して変更を確認してください。`pre-commit run --all-files`を実行してチェックを実行します。 + ## コミュニティグループ Feishuのネットワーキンググループに参加して、他の開発者と経験を共有しましょう! diff --git a/README_ko.md b/README_ko.md index 379363e..940e9b9 100644 --- a/README_ko.md +++ b/README_ko.md @@ -128,6 +128,8 @@ python run_flow.py 또는 📧 메일로 연락주세요. @mannaandpoem : mannaandpoem@gmail.com +**참고**: pull request를 제출하기 전에 pre-commit 도구를 사용하여 변경 사항을 확인하십시오. `pre-commit run --all-files`를 실행하여 검사를 실행합니다. + ## 커뮤니티 그룹 Feishu 네트워킹 그룹에 참여하여 다른 개발자들과 경험을 공유하세요! diff --git a/README_zh.md b/README_zh.md index ea7f904..7f18d1c 100644 --- a/README_zh.md +++ b/README_zh.md @@ -131,6 +131,8 @@ python run_flow.py 或通过 📧 邮件联系 @mannaandpoem:mannaandpoem@gmail.com +**注意**: 在提交 pull request 之前,请使用 pre-commit 工具检查您的更改。运行 `pre-commit run --all-files` 来执行检查。 + ## 交流群 加入我们的飞书交流群,与其他开发者分享经验! From 9b0b69a5e1e6bb6f9f9ceeaaf0cb53886af44dc3 Mon Sep 17 00:00:00 2001 From: zhengshuli Date: Thu, 13 Mar 2025 15:46:51 +0800 Subject: [PATCH 19/77] =?UTF-8?q?Use=20the=20max=5Finput=5Ftokens=20config?= =?UTF-8?q?uration=20to=20constrain=20the=20agent=E2=80=99s=20token=20usag?= =?UTF-8?q?e.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/agent/toolcall.py | 37 +++++++--- app/config.py | 2 + app/exceptions.py | 8 +++ app/llm.py | 135 ++++++++++++++++++++++++++++++++++++- config/config.example.toml | 1 + requirements.txt | 1 + 6 files changed, 173 insertions(+), 11 deletions(-) diff --git a/app/agent/toolcall.py b/app/agent/toolcall.py index ecf0bb4..7d6afa0 100644 --- a/app/agent/toolcall.py +++ b/app/agent/toolcall.py @@ -4,6 +4,7 @@ from typing import Any, List, Optional, Union from pydantic import Field from app.agent.react import ReActAgent +from app.exceptions import TokenLimitExceeded from app.logger import logger from app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice @@ -32,6 +33,7 @@ class ToolCallAgent(ReActAgent): max_steps: int = 30 max_observe: Optional[Union[int, bool]] = None + max_input_tokens: Optional[int] = None async def think(self) -> bool: """Process current state and decide next actions using tools""" @@ -39,15 +41,32 @@ class ToolCallAgent(ReActAgent): user_msg = Message.user_message(self.next_step_prompt) self.messages += [user_msg] - # Get response with tool options - response = await self.llm.ask_tool( - messages=self.messages, - system_msgs=[Message.system_message(self.system_prompt)] - if self.system_prompt - else None, - tools=self.available_tools.to_params(), - tool_choice=self.tool_choices, - ) + try: + # Get response with tool options + response = await self.llm.ask_tool( + messages=self.messages, + system_msgs=[Message.system_message(self.system_prompt)] + if self.system_prompt + else None, + tools=self.available_tools.to_params(), + tool_choice=self.tool_choices, + ) + except ValueError as e: + raise + except Exception as e: + # Check if this is a RetryError containing TokenLimitExceeded + if hasattr(e, "__cause__") and isinstance(e.__cause__, TokenLimitExceeded): + token_limit_error = e.__cause__ + logger.error(f"🚨 Token limit error (from RetryError): {token_limit_error}") + self.memory.add_message( + Message.assistant_message( + f"Maximum token limit reached, cannot continue 
execution: {str(token_limit_error)}" + ) + ) + self.state = AgentState.FINISHED + return False + raise + self.tool_calls = response.tool_calls # Log response info diff --git a/app/config.py b/app/config.py index 0a267d7..8f78151 100644 --- a/app/config.py +++ b/app/config.py @@ -20,6 +20,7 @@ class LLMSettings(BaseModel): base_url: str = Field(..., description="API base URL") api_key: str = Field(..., description="API key") max_tokens: int = Field(4096, description="Maximum number of tokens per request") + max_input_tokens: Optional[int] = Field(None, description="Maximum input tokens to use across all requests (None for unlimited)") temperature: float = Field(1.0, description="Sampling temperature") api_type: str = Field(..., description="AzureOpenai or Openai") api_version: str = Field(..., description="Azure Openai version if AzureOpenai") @@ -118,6 +119,7 @@ class Config: "base_url": base_llm.get("base_url"), "api_key": base_llm.get("api_key"), "max_tokens": base_llm.get("max_tokens", 4096), + "max_input_tokens": base_llm.get("max_input_tokens"), "temperature": base_llm.get("temperature", 1.0), "api_type": base_llm.get("api_type", ""), "api_version": base_llm.get("api_version", ""), diff --git a/app/exceptions.py b/app/exceptions.py index 57a0148..42195de 100644 --- a/app/exceptions.py +++ b/app/exceptions.py @@ -3,3 +3,11 @@ class ToolError(Exception): def __init__(self, message): self.message = message + +class OpenManusError(Exception): + """Base exception for all OpenManus errors""" + pass + +class TokenLimitExceeded(OpenManusError): + """Exception raised when the token limit is exceeded""" + pass diff --git a/app/llm.py b/app/llm.py index 8c085ae..817bbfe 100644 --- a/app/llm.py +++ b/app/llm.py @@ -8,9 +8,11 @@ from openai import ( OpenAIError, RateLimitError, ) -from tenacity import retry, stop_after_attempt, wait_random_exponential +import tiktoken +from tenacity import retry, stop_after_attempt, wait_random_exponential, retry_if_exception_type from app.config import LLMSettings, config +from app.exceptions import TokenLimitExceeded from app.logger import logger # Assuming a logger is set up in your app from app.schema import ( ROLE_VALUES, @@ -49,6 +51,18 @@ class LLM: self.api_key = llm_config.api_key self.api_version = llm_config.api_version self.base_url = llm_config.base_url + + # Add token counting related attributes + self.total_input_tokens = 0 + self.max_input_tokens = llm_config.max_input_tokens if hasattr(llm_config, "max_input_tokens") else None + + # Initialize tokenizer + try: + self.tokenizer = tiktoken.encoding_for_model(self.model) + except KeyError: + # If the model is not in tiktoken's presets, use cl100k_base as default + self.tokenizer = tiktoken.get_encoding("cl100k_base") + if self.api_type == "azure": self.client = AsyncAzureOpenAI( base_url=self.base_url, @@ -58,6 +72,70 @@ class LLM: else: self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url) + def count_tokens(self, text: str) -> int: + """Calculate the number of tokens in a text""" + if not text: + return 0 + return len(self.tokenizer.encode(text)) + + def count_message_tokens(self, messages: List[dict]) -> int: + """Calculate the number of tokens in a message list""" + token_count = 0 + for message in messages: + # Base token count for each message (according to OpenAI's calculation method) + token_count += 4 # Base token count for each message + + # Calculate tokens for the role + if "role" in message: + token_count += self.count_tokens(message["role"]) + + # Calculate tokens for 
the content + if "content" in message and message["content"]: + token_count += self.count_tokens(message["content"]) + + # Calculate tokens for tool calls + if "tool_calls" in message and message["tool_calls"]: + for tool_call in message["tool_calls"]: + if "function" in tool_call: + # Function name + if "name" in tool_call["function"]: + token_count += self.count_tokens(tool_call["function"]["name"]) + # Function arguments + if "arguments" in tool_call["function"]: + token_count += self.count_tokens(tool_call["function"]["arguments"]) + + # Calculate tokens for tool responses + if "name" in message and message["name"]: + token_count += self.count_tokens(message["name"]) + + if "tool_call_id" in message and message["tool_call_id"]: + token_count += self.count_tokens(message["tool_call_id"]) + + # Add extra tokens for message format + token_count += 2 # Extra tokens for message format + + return token_count + + def update_token_count(self, input_tokens: int) -> None: + """Update token counts""" + # Only track tokens if max_input_tokens is set + self.total_input_tokens += input_tokens + logger.info(f"Token usage: Input={input_tokens}, Cumulative Input={self.total_input_tokens}") + + def check_token_limit(self, input_tokens: int) -> bool: + """Check if token limits are exceeded""" + if self.max_input_tokens is not None: + return (self.total_input_tokens + input_tokens) <= self.max_input_tokens + # If max_input_tokens is not set, always return True + return True + + def get_limit_error_message(self, input_tokens: int) -> str: + """Generate error message for token limit exceeded""" + if self.max_input_tokens is not None and (self.total_input_tokens + input_tokens) > self.max_input_tokens: + return f"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})" + + return "Token limit exceeded" + @staticmethod def format_messages(messages: List[Union[dict, Message]]) -> List[dict]: """ @@ -109,6 +187,7 @@ class LLM: @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), + retry=retry_if_exception_type((OpenAIError, Exception, ValueError)), # Don't retry TokenLimitExceeded ) async def ask( self, @@ -130,6 +209,7 @@ class LLM: str: The generated response Raises: + TokenLimitExceeded: If token limits are exceeded ValueError: If messages are invalid or response is empty OpenAIError: If API call fails after retries Exception: For unexpected errors @@ -141,6 +221,15 @@ class LLM: messages = system_msgs + self.format_messages(messages) else: messages = self.format_messages(messages) + + # Calculate input token count + input_tokens = self.count_message_tokens(messages) + + # Check if token limits are exceeded + if not self.check_token_limit(input_tokens): + error_message = self.get_limit_error_message(input_tokens) + # Raise a special exception that won't be retried + raise TokenLimitExceeded(error_message) params = { "model": self.model, @@ -161,9 +250,15 @@ class LLM: if not response.choices or not response.choices[0].message.content: raise ValueError("Empty or invalid response from LLM") + + # Update token counts + self.update_token_count(response.usage.prompt_tokens) + return response.choices[0].message.content - # Streaming request + # Streaming request, For streaming, update estimated token count before making the request + self.update_token_count(input_tokens) + params["stream"] = True response = await self.client.chat.completions.create(**params) @@ -177,13 +272,23 @@ class LLM: full_response = 
"".join(collected_messages).strip() if not full_response: raise ValueError("Empty response from streaming LLM") + return full_response + except TokenLimitExceeded: + # Re-raise token limit errors without logging + raise except ValueError as ve: logger.error(f"Validation error: {ve}") raise except OpenAIError as oe: logger.error(f"OpenAI API error: {oe}") + if isinstance(oe, AuthenticationError): + logger.error("Authentication failed. Check API key.") + elif isinstance(oe, RateLimitError): + logger.error("Rate limit exceeded. Consider increasing retry attempts.") + elif isinstance(oe, APIError): + logger.error(f"API error: {oe}") raise except Exception as e: logger.error(f"Unexpected error in ask: {e}") @@ -192,6 +297,7 @@ class LLM: @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), + retry=retry_if_exception_type((OpenAIError, Exception, ValueError)), # Don't retry TokenLimitExceeded ) async def ask_tool( self, @@ -219,6 +325,7 @@ class LLM: ChatCompletionMessage: The model's response Raises: + TokenLimitExceeded: If token limits are exceeded ValueError: If tools, tool_choice, or messages are invalid OpenAIError: If API call fails after retries Exception: For unexpected errors @@ -235,6 +342,23 @@ class LLM: else: messages = self.format_messages(messages) + # Calculate input token count + input_tokens = self.count_message_tokens(messages) + + # If there are tools, calculate token count for tool descriptions + tools_tokens = 0 + if tools: + for tool in tools: + tools_tokens += self.count_tokens(str(tool)) + + input_tokens += tools_tokens + + # Check if token limits are exceeded + if not self.check_token_limit(input_tokens): + error_message = self.get_limit_error_message(input_tokens) + # Raise a special exception that won't be retried + raise TokenLimitExceeded(error_message) + # Validate tools if provided if tools: for tool in tools: @@ -263,13 +387,20 @@ class LLM: if not response.choices or not response.choices[0].message: print(response) raise ValueError("Invalid or empty response from LLM") + + # Update token counts + self.update_token_count(response.usage.prompt_tokens) return response.choices[0].message + except TokenLimitExceeded: + # Re-raise token limit errors without logging + raise except ValueError as ve: logger.error(f"Validation error in ask_tool: {ve}") raise except OpenAIError as oe: + logger.error(f"OpenAI API error: {oe}") if isinstance(oe, AuthenticationError): logger.error("Authentication failed. Check API key.") elif isinstance(oe, RateLimitError): diff --git a/config/config.example.toml b/config/config.example.toml index 762f42c..b720088 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -5,6 +5,7 @@ base_url = "https://api.openai.com/v1" # API endpoint URL api_key = "sk-..." 
# Your API key max_tokens = 8192 # Maximum number of tokens in the response temperature = 0.0 # Controls randomness +#max_input_tokens = 100000 # Maximum input tokens to use across all requests (set to null or delete this line for unlimited) # [llm] #AZURE OPENAI: # api_type= 'azure' diff --git a/requirements.txt b/requirements.txt index 60ad38e..534eca9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ loguru~=0.7.3 numpy datasets~=3.2.0 fastapi~=0.115.11 +tiktoken~=0.9.0 html2text~=2024.2.26 gymnasium~=1.0.0 From 7a5de556150a806e52a142cd5c555eb244457c92 Mon Sep 17 00:00:00 2001 From: the0807 Date: Fri, 14 Mar 2025 14:02:32 +0900 Subject: [PATCH 20/77] Add config support for Ollama --- config/config.example.toml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/config/config.example.toml b/config/config.example.toml index 762f42c..e9a9620 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -15,6 +15,14 @@ temperature = 0.0 # Controls randomness # temperature = 0.0 # api_version="AZURE API VERSION" #"2024-08-01-preview" +# [llm] #OLLAMA: +# api_type = 'ollama' +# model = "llama3.2" +# base_url = "http://localhost:11434/v1" +# api_key = "ollama" +# max_tokens = 4096 +# temperature = 0.0 + # Optional configuration for specific LLM models [llm.vision] model = "claude-3-7-sonnet" # The vision model to use @@ -23,6 +31,14 @@ api_key = "sk-..." # Your API key for vision model max_tokens = 8192 # Maximum number of tokens in the response temperature = 0.0 # Controls randomness for vision model +# [llm.vision] #OLLAMA VISION: +# api_type = 'ollama' +# model = "llama3.2-vision" +# base_url = "http://localhost:11434/v1" +# api_key = "ollama" +# max_tokens = 4096 +# temperature = 0.0 + # Optional configuration for specific browser configuration # [browser] # Whether to run browser in headless mode (default: false) From c0c03c0befe0db43920b9ec396ae5d5046de304a Mon Sep 17 00:00:00 2001 From: xiangjinyu <1376193973@qq.com> Date: Fri, 14 Mar 2025 13:25:43 +0800 Subject: [PATCH 21/77] fix _handle_special_tool bug --- app/agent/manus.py | 7 +++++-- app/tool/browser_use_tool.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/app/agent/manus.py b/app/agent/manus.py index 6c2c2e5..4638c37 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -39,5 +39,8 @@ class Manus(ToolCallAgent): ) async def _handle_special_tool(self, name: str, result: Any, **kwargs): - await self.available_tools.get_tool(BrowserUseTool().name).cleanup() - await super()._handle_special_tool(name, result, **kwargs) + if not self._is_special_tool(name): + return + else: + await self.available_tools.get_tool(BrowserUseTool().name).cleanup() + await super()._handle_special_tool(name, result, **kwargs) diff --git a/app/tool/browser_use_tool.py b/app/tool/browser_use_tool.py index 57ad03c..ad0cfa1 100644 --- a/app/tool/browser_use_tool.py +++ b/app/tool/browser_use_tool.py @@ -106,7 +106,7 @@ class BrowserUseTool(BaseTool): async def _ensure_browser_initialized(self) -> BrowserContext: """Ensure browser and context are initialized.""" if self.browser is None: - browser_config_kwargs = {"headless": False} + browser_config_kwargs = {"headless": False, "disable_security": True} if config.browser_config: from browser_use.browser.browser import ProxySettings From 350b0038ee60561f617ceef679d73cc13ff71f8d Mon Sep 17 00:00:00 2001 From: a-holm Date: Fri, 14 Mar 2025 21:01:13 +0100 Subject: [PATCH 22/77] fix(llm): improve message handling to support LLMs without 
content/tool_calls This commit improves the message handling in the LLM class to gracefully handle messages without 'content' or 'tool_calls' fields. Previously, the system would raise a ValueError when encountering such messages, causing crashes when working with models like Google's Gemini that sometimes return messages with different structures. Key changes: - Reordered message processing to check for Message objects first - Changed validation approach to silently skip malformed messages instead of crashing - Removed the strict ValueError when content/tool_calls are missing This change maintains compatibility with correctly formatted messages while improving robustness when working with various LLM providers. --- app/llm.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/app/llm.py b/app/llm.py index 8c085ae..d997a69 100644 --- a/app/llm.py +++ b/app/llm.py @@ -84,14 +84,15 @@ class LLM: formatted_messages = [] for message in messages: + if isinstance(message, Message): + message = message.to_dict() if isinstance(message, dict): - # If message is already a dict, ensure it has required fields + # If message is a dict, ensure it has required fields if "role" not in message: raise ValueError("Message dict must contain 'role' field") - formatted_messages.append(message) - elif isinstance(message, Message): - # If message is a Message object, convert it to dict - formatted_messages.append(message.to_dict()) + if "content" in message or "tool_calls" in message: + formatted_messages.append(message) + # else: do not include the message else: raise TypeError(f"Unsupported message type: {type(message)}") @@ -99,10 +100,6 @@ class LLM: for msg in formatted_messages: if msg["role"] not in ROLE_VALUES: raise ValueError(f"Invalid role: {msg['role']}") - if "content" not in msg and "tool_calls" not in msg: - raise ValueError( - "Message must contain either 'content' or 'tool_calls'" - ) return formatted_messages From b17c9d31a9e94f2a31eadf5ec47a551b3775658c Mon Sep 17 00:00:00 2001 From: Matt Eng Date: Fri, 14 Mar 2025 20:39:23 -0700 Subject: [PATCH 23/77] Fix temperature using default if 0 --- app/llm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/llm.py b/app/llm.py index 8c085ae..98343e6 100644 --- a/app/llm.py +++ b/app/llm.py @@ -151,7 +151,7 @@ class LLM: params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens - params["temperature"] = temperature or self.temperature + params["temperature"] = temperature if temperature is not None else self.temperature if not stream: # Non-streaming request @@ -255,7 +255,7 @@ class LLM: params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens - params["temperature"] = temperature or self.temperature + params["temperature"] = temperature if temperature is not None else self.temperature response = await self.client.chat.completions.create(**params) From 49ccd72815487a67ac72cadea7919ab1e9b4ec1a Mon Sep 17 00:00:00 2001 From: Matt Eng Date: Fri, 14 Mar 2025 21:41:43 -0700 Subject: [PATCH 24/77] Reformat --- app/llm.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/app/llm.py b/app/llm.py index 98343e6..39871f6 100644 --- a/app/llm.py +++ b/app/llm.py @@ -151,7 +151,9 @@ class LLM: params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens - params["temperature"] = temperature if temperature is not None else self.temperature + params["temperature"] = ( + temperature 
if temperature is not None else self.temperature + ) if not stream: # Non-streaming request @@ -255,7 +257,9 @@ class LLM: params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens - params["temperature"] = temperature if temperature is not None else self.temperature + params["temperature"] = ( + temperature if temperature is not None else self.temperature + ) response = await self.client.chat.completions.create(**params) From b6f8f825e0b7a53b5826b2b2baf5cf0fcccb9cc6 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sat, 15 Mar 2025 12:58:18 +0800 Subject: [PATCH 25/77] chore: ensure TOML configuration files are formatted well --- .vscode/extensions.json | 8 ++++++++ .vscode/settings.json | 17 +++++++++++++++++ config/.gitignore | 2 ++ config/config.example.toml | 20 ++++++++++---------- 4 files changed, 37 insertions(+), 10 deletions(-) create mode 100644 .vscode/extensions.json create mode 100644 .vscode/settings.json create mode 100644 config/.gitignore diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000..e518685 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,8 @@ +{ + "recommendations": [ + "tamasfe.even-better-toml", + "ms-python.black-formatter", + "ms-python.isort" + ], + "unwantedRecommendations": [] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..84c0e9d --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,17 @@ +{ + "[python]": { + "editor.defaultFormatter": "ms-python.black-formatter", + "editor.codeActionsOnSave": { + "source.organizeImports": "always" + } + }, + "[toml]": { + "editor.defaultFormatter": "tamasfe.even-better-toml", + }, + "pre-commit-helper.runOnSave": "none", + "pre-commit-helper.config": ".pre-commit-config.yaml", + "evenBetterToml.schema.enabled": true, + "evenBetterToml.schema.associations": { + "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json" + } +} \ No newline at end of file diff --git a/config/.gitignore b/config/.gitignore new file mode 100644 index 0000000..eaff182 --- /dev/null +++ b/config/.gitignore @@ -0,0 +1,2 @@ +# prevent the local config file from being uploaded to the remote repository +config.toml diff --git a/config/config.example.toml b/config/config.example.toml index e9a9620..aae395b 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -1,10 +1,10 @@ # Global LLM configuration [llm] -model = "claude-3-7-sonnet" # The LLM model to use -base_url = "https://api.openai.com/v1" # API endpoint URL -api_key = "sk-..." # Your API key -max_tokens = 8192 # Maximum number of tokens in the response -temperature = 0.0 # Controls randomness +model = "claude-3-7-sonnet" # The LLM model to use +base_url = "https://api.openai.com/v1" # API endpoint URL +api_key = "sk-..." # Your API key +max_tokens = 8192 # Maximum number of tokens in the response +temperature = 0.0 # Controls randomness # [llm] #AZURE OPENAI: # api_type= 'azure' @@ -25,11 +25,11 @@ temperature = 0.0 # Controls randomness # Optional configuration for specific LLM models [llm.vision] -model = "claude-3-7-sonnet" # The vision model to use -base_url = "https://api.openai.com/v1" # API endpoint URL for vision model -api_key = "sk-..." 
# Your API key for vision model -max_tokens = 8192 # Maximum number of tokens in the response -temperature = 0.0 # Controls randomness for vision model +model = "claude-3-7-sonnet" # The vision model to use +base_url = "https://api.openai.com/v1" # API endpoint URL for vision model +api_key = "sk-..." # Your API key for vision model +max_tokens = 8192 # Maximum number of tokens in the response +temperature = 0.0 # Controls randomness for vision model # [llm.vision] #OLLAMA VISION: # api_type = 'ollama' From d54026d7a08edfc8e82b00f757b8e3b207b4b7ec Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sat, 15 Mar 2025 12:58:25 +0800 Subject: [PATCH 26/77] chore: organize .gitignore --- .gitignore | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 653fd83..ff8e80d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,14 @@ +### Project-specific ### +# Logs +logs/ + +# Data +data/ + +# Workspace +workspace/ + +### Python ### # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -170,11 +181,16 @@ cython_debug/ # PyPI configuration file .pypirc -# Logs -logs/ +### Visual Studio Code ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets -# Data -data/ +# Local History for Visual Studio Code +.history/ -# Workspace -workspace/ +# Built Visual Studio Code Extensions +*.vsix From ca90880140aefd7f8f039ff3a912de2dc403fec3 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sat, 15 Mar 2025 13:04:21 +0800 Subject: [PATCH 27/77] fix: EOF for files --- .vscode/extensions.json | 2 +- .vscode/settings.json | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.vscode/extensions.json b/.vscode/extensions.json index e518685..f2c6cd0 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -5,4 +5,4 @@ "ms-python.isort" ], "unwantedRecommendations": [] -} \ No newline at end of file +} diff --git a/.vscode/settings.json b/.vscode/settings.json index 84c0e9d..f92c6a6 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -13,5 +13,6 @@ "evenBetterToml.schema.enabled": true, "evenBetterToml.schema.associations": { "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json" - } -} \ No newline at end of file + }, + "files.insertFinalNewline": true +} From 86399b97d66b61c22fdc6325b266bf7ae8d729d9 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Sat, 15 Mar 2025 14:40:01 +0800 Subject: [PATCH 28/77] add dependabot.yml --- .github/dependabot.yml | 58 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..4f94bce --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,58 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + groups: + # Group critical packages that might need careful review + core-dependencies: + patterns: + - "pydantic*" + - "openai" + - "fastapi" + - "tiktoken" + browsergym-related: + patterns: + - "browsergym*" + - "browser-use" + - "playwright" + search-tools: + patterns: + - "googlesearch-python" + - "baidusearch" + - "duckduckgo_search" + pre-commit: + patterns: + - "pre-commit" + security-all: + applies-to: "security-updates" + patterns: + - "*" + version-all: + applies-to: "version-updates" + patterns: + - "*" + exclude-patterns: + - 
"pydantic*" + - "openai" + - "fastapi" + - "tiktoken" + - "browsergym*" + - "browser-use" + - "playwright" + - "googlesearch-python" + - "baidusearch" + - "duckduckgo_search" + - "pre-commit" + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + groups: + actions: + patterns: + - "*" From 65a3898592115ff50b73188e385d9e61ad320272 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Sat, 15 Mar 2025 14:43:07 +0800 Subject: [PATCH 29/77] format code and remove max_input_tokens for ToolCallAgent --- app/agent/toolcall.py | 7 ++-- app/config.py | 5 ++- app/exceptions.py | 4 +- app/llm.py | 90 +++++++++++++++++++++++++++---------------- 4 files changed, 66 insertions(+), 40 deletions(-) diff --git a/app/agent/toolcall.py b/app/agent/toolcall.py index 7d6afa0..29e5af4 100644 --- a/app/agent/toolcall.py +++ b/app/agent/toolcall.py @@ -33,7 +33,6 @@ class ToolCallAgent(ReActAgent): max_steps: int = 30 max_observe: Optional[Union[int, bool]] = None - max_input_tokens: Optional[int] = None async def think(self) -> bool: """Process current state and decide next actions using tools""" @@ -51,13 +50,15 @@ class ToolCallAgent(ReActAgent): tools=self.available_tools.to_params(), tool_choice=self.tool_choices, ) - except ValueError as e: + except ValueError: raise except Exception as e: # Check if this is a RetryError containing TokenLimitExceeded if hasattr(e, "__cause__") and isinstance(e.__cause__, TokenLimitExceeded): token_limit_error = e.__cause__ - logger.error(f"🚨 Token limit error (from RetryError): {token_limit_error}") + logger.error( + f"🚨 Token limit error (from RetryError): {token_limit_error}" + ) self.memory.add_message( Message.assistant_message( f"Maximum token limit reached, cannot continue execution: {str(token_limit_error)}" diff --git a/app/config.py b/app/config.py index 8f78151..51356a0 100644 --- a/app/config.py +++ b/app/config.py @@ -20,7 +20,10 @@ class LLMSettings(BaseModel): base_url: str = Field(..., description="API base URL") api_key: str = Field(..., description="API key") max_tokens: int = Field(4096, description="Maximum number of tokens per request") - max_input_tokens: Optional[int] = Field(None, description="Maximum input tokens to use across all requests (None for unlimited)") + max_input_tokens: Optional[int] = Field( + None, + description="Maximum input tokens to use across all requests (None for unlimited)", + ) temperature: float = Field(1.0, description="Sampling temperature") api_type: str = Field(..., description="AzureOpenai or Openai") api_version: str = Field(..., description="Azure Openai version if AzureOpenai") diff --git a/app/exceptions.py b/app/exceptions.py index 42195de..fc90087 100644 --- a/app/exceptions.py +++ b/app/exceptions.py @@ -4,10 +4,10 @@ class ToolError(Exception): def __init__(self, message): self.message = message + class OpenManusError(Exception): """Base exception for all OpenManus errors""" - pass + class TokenLimitExceeded(OpenManusError): """Exception raised when the token limit is exceeded""" - pass diff --git a/app/llm.py b/app/llm.py index 6777e5e..1cc640b 100644 --- a/app/llm.py +++ b/app/llm.py @@ -1,5 +1,6 @@ from typing import Dict, List, Optional, Union +import tiktoken from openai import ( APIError, AsyncAzureOpenAI, @@ -8,8 +9,12 @@ from openai import ( OpenAIError, RateLimitError, ) -import tiktoken -from tenacity import retry, stop_after_attempt, wait_random_exponential, retry_if_exception_type +from tenacity import ( + retry, + 
retry_if_exception_type, + stop_after_attempt, + wait_random_exponential, +) from app.config import LLMSettings, config from app.exceptions import TokenLimitExceeded @@ -51,18 +56,22 @@ class LLM: self.api_key = llm_config.api_key self.api_version = llm_config.api_version self.base_url = llm_config.base_url - + # Add token counting related attributes self.total_input_tokens = 0 - self.max_input_tokens = llm_config.max_input_tokens if hasattr(llm_config, "max_input_tokens") else None - + self.max_input_tokens = ( + llm_config.max_input_tokens + if hasattr(llm_config, "max_input_tokens") + else None + ) + # Initialize tokenizer try: self.tokenizer = tiktoken.encoding_for_model(self.model) except KeyError: # If the model is not in tiktoken's presets, use cl100k_base as default self.tokenizer = tiktoken.get_encoding("cl100k_base") - + if self.api_type == "azure": self.client = AsyncAzureOpenAI( base_url=self.base_url, @@ -77,51 +86,57 @@ class LLM: if not text: return 0 return len(self.tokenizer.encode(text)) - + def count_message_tokens(self, messages: List[dict]) -> int: """Calculate the number of tokens in a message list""" token_count = 0 for message in messages: # Base token count for each message (according to OpenAI's calculation method) token_count += 4 # Base token count for each message - + # Calculate tokens for the role if "role" in message: token_count += self.count_tokens(message["role"]) - + # Calculate tokens for the content if "content" in message and message["content"]: token_count += self.count_tokens(message["content"]) - + # Calculate tokens for tool calls if "tool_calls" in message and message["tool_calls"]: for tool_call in message["tool_calls"]: if "function" in tool_call: # Function name if "name" in tool_call["function"]: - token_count += self.count_tokens(tool_call["function"]["name"]) + token_count += self.count_tokens( + tool_call["function"]["name"] + ) # Function arguments if "arguments" in tool_call["function"]: - token_count += self.count_tokens(tool_call["function"]["arguments"]) - + token_count += self.count_tokens( + tool_call["function"]["arguments"] + ) + # Calculate tokens for tool responses if "name" in message and message["name"]: token_count += self.count_tokens(message["name"]) - + if "tool_call_id" in message and message["tool_call_id"]: token_count += self.count_tokens(message["tool_call_id"]) - + # Add extra tokens for message format token_count += 2 # Extra tokens for message format - + return token_count - + def update_token_count(self, input_tokens: int) -> None: """Update token counts""" # Only track tokens if max_input_tokens is set self.total_input_tokens += input_tokens - logger.info(f"Token usage: Input={input_tokens}, Cumulative Input={self.total_input_tokens}") - + logger.info( + f"Token usage: Input={input_tokens}, Cumulative Input={self.total_input_tokens}" + ) + def check_token_limit(self, input_tokens: int) -> bool: """Check if token limits are exceeded""" if self.max_input_tokens is not None: @@ -131,9 +146,12 @@ class LLM: def get_limit_error_message(self, input_tokens: int) -> str: """Generate error message for token limit exceeded""" - if self.max_input_tokens is not None and (self.total_input_tokens + input_tokens) > self.max_input_tokens: + if ( + self.max_input_tokens is not None + and (self.total_input_tokens + input_tokens) > self.max_input_tokens + ): return f"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})" - + return "Token limit exceeded" 
@staticmethod @@ -187,7 +205,9 @@ class LLM: @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), - retry=retry_if_exception_type((OpenAIError, Exception, ValueError)), # Don't retry TokenLimitExceeded + retry=retry_if_exception_type( + (OpenAIError, Exception, ValueError) + ), # Don't retry TokenLimitExceeded ) async def ask( self, @@ -221,10 +241,10 @@ class LLM: messages = system_msgs + self.format_messages(messages) else: messages = self.format_messages(messages) - + # Calculate input token count input_tokens = self.count_message_tokens(messages) - + # Check if token limits are exceeded if not self.check_token_limit(input_tokens): error_message = self.get_limit_error_message(input_tokens) @@ -252,15 +272,15 @@ class LLM: if not response.choices or not response.choices[0].message.content: raise ValueError("Empty or invalid response from LLM") - + # Update token counts self.update_token_count(response.usage.prompt_tokens) - + return response.choices[0].message.content # Streaming request, For streaming, update estimated token count before making the request self.update_token_count(input_tokens) - + params["stream"] = True response = await self.client.chat.completions.create(**params) @@ -274,7 +294,7 @@ class LLM: full_response = "".join(collected_messages).strip() if not full_response: raise ValueError("Empty response from streaming LLM") - + return full_response except TokenLimitExceeded: @@ -299,7 +319,9 @@ class LLM: @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), - retry=retry_if_exception_type((OpenAIError, Exception, ValueError)), # Don't retry TokenLimitExceeded + retry=retry_if_exception_type( + (OpenAIError, Exception, ValueError) + ), # Don't retry TokenLimitExceeded ) async def ask_tool( self, @@ -346,21 +368,21 @@ class LLM: # Calculate input token count input_tokens = self.count_message_tokens(messages) - + # If there are tools, calculate token count for tool descriptions tools_tokens = 0 if tools: for tool in tools: tools_tokens += self.count_tokens(str(tool)) - + input_tokens += tools_tokens - + # Check if token limits are exceeded if not self.check_token_limit(input_tokens): error_message = self.get_limit_error_message(input_tokens) # Raise a special exception that won't be retried raise TokenLimitExceeded(error_message) - + # Validate tools if provided if tools: for tool in tools: @@ -391,7 +413,7 @@ class LLM: if not response.choices or not response.choices[0].message: print(response) raise ValueError("Invalid or empty response from LLM") - + # Update token counts self.update_token_count(response.usage.prompt_tokens) From 60268f16961eac615519f2c6387994bfc5cb8980 Mon Sep 17 00:00:00 2001 From: a-holm Date: Sat, 15 Mar 2025 09:48:52 +0100 Subject: [PATCH 30/77] reformat with precommit --- app/llm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/llm.py b/app/llm.py index d997a69..24a50e0 100644 --- a/app/llm.py +++ b/app/llm.py @@ -90,7 +90,7 @@ class LLM: # If message is a dict, ensure it has required fields if "role" not in message: raise ValueError("Message dict must contain 'role' field") - if "content" in message or "tool_calls" in message: + if "content" in message or "tool_calls" in message: formatted_messages.append(message) # else: do not include the message else: From 49c2db7a3234109f621009aa8a9820a0ad826fda Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Sat, 15 Mar 2025 17:49:52 +0800 Subject: [PATCH 31/77] update logo --- README.md | 4 ++++ README_ja.md 
| 5 ++++-
 README_ko.md | 5 ++++-
 README_zh.md | 5 +++--
 assets/logo.jpg | Bin 0 -> 65677 bytes
 5 files changed, 15 insertions(+), 4 deletions(-)
 create mode 100644 assets/logo.jpg

diff --git a/README.md b/README.md
index 4e85c29..debb41f 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+<p align="center">
+  <img src="assets/logo.jpg">
+</p>
+
 English | [中文](README_zh.md) | [한국어](README_ko.md) | [日本語](README_ja.md)

 [![GitHub stars](https://img.shields.io/github/stars/mannaandpoem/OpenManus?style=social)](https://github.com/mannaandpoem/OpenManus/stargazers)
diff --git a/README_ja.md b/README_ja.md
index 3805a69..6f2420d 100644
--- a/README_ja.md
+++ b/README_ja.md
@@ -1,5 +1,8 @@
-[English](README.md) | [中文](README_zh.md) | [한국어](README_ko.md) | 日本語
+<p align="center">
+  <img src="assets/logo.jpg">
+</p>
+[English](README.md) | [中文](README_zh.md) | [한국어](README_ko.md) | 日本語

 [![GitHub stars](https://img.shields.io/github/stars/mannaandpoem/OpenManus?style=social)](https://github.com/mannaandpoem/OpenManus/stargazers)
 &ensp;
diff --git a/README_ko.md b/README_ko.md
index 940e9b9..a07f7cb 100644
--- a/README_ko.md
+++ b/README_ko.md
@@ -1,5 +1,8 @@
-[English](README.md) | [中文](README_zh.md) | 한국어 | [日本語](README_ja.md)
+<p align="center">
+  <img src="assets/logo.jpg">
+</p>
+[English](README.md) | [中文](README_zh.md) | 한국어 | [日本語](README_ja.md)

 [![GitHub stars](https://img.shields.io/github/stars/mannaandpoem/OpenManus?style=social)](https://github.com/mannaandpoem/OpenManus/stargazers)
 &ensp;
diff --git a/README_zh.md b/README_zh.md
index 7f18d1c..ded9368 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -1,8 +1,9 @@
+<p align="center">
+  <img src="assets/logo.jpg">
+</p>
[English](README.md) | 中文 | [한국어](README_ko.md) | [日本語](README_ja.md) - - [![GitHub stars](https://img.shields.io/github/stars/mannaandpoem/OpenManus?style=social)](https://github.com/mannaandpoem/OpenManus/stargazers)   [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)   diff --git a/assets/logo.jpg b/assets/logo.jpg new file mode 100644 index 0000000000000000000000000000000000000000..634b8f6851a075182ad04a04c5f4afec33a90e83 GIT binary patch literal 65677 zcmb@uc|4SD*f&0sCZr*vY*R@pq_S0*Nh(U3sK`1MVu-mbGG!QL8B0v%jyrCq4V5if zM#xwOZL*UQv)wJWTvugWcXQ3a0N(FK?AcyLqSnP zVUdTyV=xLz|1vh_Uq2LIVTKetm-|yt){&6?& z!NW&I#U+oQJbhkWQ(MQWf6>s^&g(l2iUni%)?>~Od zOBbNOfBmyA1&rdqo9tf}_Mg^;8bM*n(xr+^vHz?~VMz@7rKqt~Y5k66s~p|2XD(=N z*m-Ta*1lW!t6G(fEIokLXD{}tXm7L<8H4{U?SHN8zqYXJ|D%=t`@;TXU3|<6MFn*8 z6g4nJ41#RN+*^{Mg88q$5Oo;BIyP`>j>!O=D@Ui>CL9KvR-1H?Gr4Q#GcUE{mmoUL z#eD6|GBWMl*x^QykwHIG(WNv#%oZ+Ul=##9wS(qA8|}&ieTU||vHNO}cXfkQ_`v-H zh7AL7n}h6Q#JL@d7(P`r*UB2>3T<2FmeDOCjwC{-<*IW>&$S2%SDBrhmz!=e`!iF~ z8~nmVHqazub50t(HSgt65jSl<)6@@0?qWt=`PSsLGQVsQbIvA{t{`7Hrm$BDqlNjG z7lqu4@T<-#HU4B?{JO!ipK;i|{qt&6bXv;&Bc%SC%&q;tv#uV^FMcn(EWK>=n_KiH z`x~n^pS_}=>=CL6|0IUvAqOu>VJ;}+UA34Ams!CphKM2570~Tz=YH4~?6*j5=;4vm z^0wvEA_N_}7ilJ4&C0#ahQ8B%&c>arqNzP0Srs!8#x)ItC9x&-{pAV7XEr??7> znA7OqEiu07D7466Yd5c)T4`{@!xf~qHtT~GH!If~Yr7m57T2GRI(g3fv$tzf(m@3z z`nPWLrYh3X=Z^NPHuiR_)?92iJ8!uA>@Ah1iwCC+43-C7qf`DO#Uz}@gvu; zXYR;$#P{B8Uc^BDix><+o$krr`WOtLEwt!j(<*1rW$vu<^C5?MX>+ze;aRqCB)9Ix zJNN!?`_C{U12Gn`Rvm6MR%-fOp5Cla%a~#4anH+Q3oJXTP8|iO2-@QpEtmOlGXbf>A7@%zOwRfHGEp zW=mLOMxA(|w~u!Dn=OZ-2JdVJq}igmBY2v= zZ;BX(Tg|_^>Hp6!*dL9H80=BHTDHUs)<~3Q4Vs;A7F5vHL6bICmXFzb$IypB7t5-0 z1+8x0ZxQnpc@M=%h-=A>D{|BMF4$im6L8!mt3!k3LBUk5I z3tYvytvHPxJI*TGuLm{BL-3Z+j5SK0W1_A!SW{!wHPf9nH~jXz65@N4|KlWf!Hyt0 zT)Y;f*-6%H-T*O4KyE_3Aw>2EOK!mKPaC_6Ax0abI}jP2dFj;4(1$sJ)RM`%T%Da8 z)w?gOW4*n;_FPnE0+A;}RPM@RXqAYHlPrpUMP3T;k5+Y1?VQ-0?F@N|GWYEb0=!y# zloRjMt$`QUcWsrwBWTnhuqlg>QSZWgY$qqg9C^%&$ao%W4sG zJ&kS#xrz&PCC4CDV1TI02pzljr6c45@U}HCnVBuT8z~CawPU3rTbp|Am-X=je5R!Q zmDOUr`s8`v_Uf9`_!)L!U~sXw_hdsvK?S>nBCV6!XlvVmKHjHBefH-_>n>leo7C4H zuIqKyw~+#!x13gOyzt;b0>=FRg&1%1oaPk_Se@Q-;H__$R|8q?5%C|Xc9!BP`b^xtPxB(x6?pnOn6~?z%P#ibrADDBXer3m7_B%I_dav^Z0Q9^1Pn;y ziF(kYgP4%(Gt~R69}cI2Mkn&ftDsU5k{^31wAm66 zWLg`Tz38<+qJZ3uH-@T!>ej0{cOKHZ#N_rxjPlg4xm#RCkhQ6AxzoF|OBq4w5q~AT*mw+k?-=$SzHX%Tfcz0Lki{<*Vg@9G z-{LLfd+=)zT}sZyD_0`IJBhe?Ic{Gb_IIu8-+R29s+y`AI;VX2pc`nO<=IfezD zQDDkow7POdB&4T#DSG0fU^CEpG=2lpCkO!-y4W)rix>-cU=hFxo1+Z21f(qfKSZPUER7gum3 z9riAMS9Jb#7zWAsuX3jUk7%m+cR31vRWK1O_V*LE($|C7E>{Dk zZ6+`3A=5|UoEY8T7l!MSK7I_nYRkhxUI80M`~!0YNBe7H%L6^sDLxVNa#)J_zLX6U|ofN)pQWH7O{4+=BI{nzH{NiU>%S9i5Xt|mZ#jon*>HR>ZH_sxi;0piLWzXVdQSe5V7|+9x zDUyc2HLt4uTva-GmEB5Bba#48i)|yA0}QF#Z0*Zj9!@tb@fav?=Ehso?gMY2CGYv| zM=y!<{eu)CO=tk<@`6l2mbj<2Qr%Vm*Fqa9t2y$Vf1otZKDy_*z1`37PztyAq*=}Wik+xGA|LkG6bnp$wI z?p;}32^l%ofZmOD;5zKDavy)bfImh)^>J+C!^Bv7Ovn|ARg4YiGG|BG@Pf^VdUd-) z6VTwgqR+0&EVAOpo0gk6a4UNHIPq@?+vVsH7Ct1_a$5vi(D4&MMRo~%9!eDVBzGOF z3U?)Gz`7tOeV(NOZvzR;oomhJe!g^Wiy7Ab{`Sm;3oF^>r>1@>jX9Qv{U#+rvM@7jK znn`N|HPr-ED=!#ud*}=OXun8qDXpNqZ^3`9{#kOYoNLfToQ9AQrfj>Xk!a-@nF zDP$~+6I-MjS`UBIH6L!!NvRo~H?2Q^I_DIz#yZpKmA{=+|N89Ny!X{HXC8gn$l=t1 zyRcA3{J5YF$776*N$a3bvQw}}ef=UPi6y~iSv$m#`ix)ZAS0~-YMe90kfsQ^Wq}IpEtpaaD#>4!Z;&nJ*h(nR3Im36l3@9MT64;aISDurRxxd=h%R?1L6Db za*Llb*`eavm(Lqge8WpALDy*$Cc*3QUoN3bn?ga+r-=kCc#d$xNsuw8-Q2fBpf<;! 
zf8d$@f#?_MW)KG=HI7<@=gj2Bl_VY5ra#KrcvZG0dhui1#SlA+M7Op4szP0CD%A`2 zbP=ZRTcc|jWgb?z)Amh-yV249GMleax58h% ZNK%&X1&gfzH(uZ1WBLE}(wJ{U{{bAq0IC20 literal 0 HcmV?d00001 From a9999cef21df5584764d2e221f68bca489736916 Mon Sep 17 00:00:00 2001 From: shenheng Date: Sat, 15 Mar 2025 18:00:53 +0800 Subject: [PATCH 32/77] update .gitignore to include config/config.toml for sensitive information --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 653fd83..c319d64 100644 --- a/.gitignore +++ b/.gitignore @@ -178,3 +178,6 @@ data/ # Workspace workspace/ + +# sensitive information +config/config.toml From b3277c4957d80b94bfb8ef076efcf79de7ddf9e7 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sat, 15 Mar 2025 18:33:38 +0800 Subject: [PATCH 33/77] style: Add setting to trim trailing whitespace --- .vscode/settings.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index f92c6a6..b7aafe1 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -14,5 +14,6 @@ "evenBetterToml.schema.associations": { "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json" }, - "files.insertFinalNewline": true + "files.insertFinalNewline": true, + "files.trimTrailingWhitespace": true } From b4b83bf668dbcfed0d6a5af61dd9b1f7f479d363 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sat, 15 Mar 2025 18:34:42 +0800 Subject: [PATCH 34/77] style: Enable format on save in VSCode settings --- .vscode/settings.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index b7aafe1..d3aa302 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -15,5 +15,6 @@ "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json" }, "files.insertFinalNewline": true, - "files.trimTrailingWhitespace": true + "files.trimTrailingWhitespace": true, + "editor.formatOnSave": true } From 765155c9c37c151b65774dff024f908074d7dd8e Mon Sep 17 00:00:00 2001 From: kuma Date: Thu, 13 Mar 2025 18:45:25 +0900 Subject: [PATCH 35/77] add docker file --- Dockerfile | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 Dockerfile diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..9f7a190 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,13 @@ +FROM python:3.12-slim + +WORKDIR /app/OpenManus + +RUN apt-get update && apt-get install -y --no-install-recommends git curl \ + && rm -rf /var/lib/apt/lists/* \ + && (command -v uv >/dev/null 2>&1 || pip install --no-cache-dir uv) + +COPY . . 
+ +RUN uv pip install --system -r requirements.txt + +CMD ["bash"] From 5e35f01ea828c6c6bb6d7cac2f5a358589644f23 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Sun, 16 Mar 2025 12:57:06 +0800 Subject: [PATCH 36/77] format code --- app/tool/web_search.py | 24 ++++++++++++++---------- config/config.example.toml | 4 ++-- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/app/tool/web_search.py b/app/tool/web_search.py index cd23f5d..7b1018b 100644 --- a/app/tool/web_search.py +++ b/app/tool/web_search.py @@ -1,8 +1,9 @@ import asyncio from typing import List -from app.config import config from tenacity import retry, stop_after_attempt, wait_exponential + +from app.config import config from app.tool.base import BaseTool from app.tool.search import ( BaiduSearchEngine, @@ -11,10 +12,11 @@ from app.tool.search import ( WebSearchEngine, ) + class WebSearch(BaseTool): name: str = "web_search" - description: str = """Perform a web search and return a list of relevant links. - This function attempts to use the primary search engine API to get up-to-date results. + description: str = """Perform a web search and return a list of relevant links. + This function attempts to use the primary search engine API to get up-to-date results. If an error occurs, it falls back to an alternative search engine.""" parameters: dict = { "type": "object", @@ -52,13 +54,15 @@ class WebSearch(BaseTool): for engine_name in engine_order: engine = self._search_engine[engine_name] try: - links = await self._perform_search_with_engine(engine, query, num_results) + links = await self._perform_search_with_engine( + engine, query, num_results + ) if links: return links except Exception as e: print(f"Search engine '{engine_name}' failed with error: {e}") return [] - + def _get_engine_order(self) -> List[str]: """ Determines the order in which to try search engines. @@ -78,18 +82,18 @@ class WebSearch(BaseTool): if key not in engine_order: engine_order.append(key) return engine_order - + @retry( stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10), ) async def _perform_search_with_engine( - self, - engine: WebSearchEngine, - query: str, + self, + engine: WebSearchEngine, + query: str, num_results: int, ) -> List[str]: loop = asyncio.get_event_loop() return await loop.run_in_executor( None, lambda: list(engine.perform_search(query, num_results=num_results)) - ) \ No newline at end of file + ) diff --git a/config/config.example.toml b/config/config.example.toml index de02853..2eecdfb 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -1,6 +1,6 @@ # Global LLM configuration [llm] -model = "claude-3-7-sonnet" # The LLM model to use +model = "gpt-4o" # The LLM model to use base_url = "https://api.openai.com/v1" # API endpoint URL api_key = "sk-..." # Your API key max_tokens = 8192 # Maximum number of tokens in the response @@ -26,7 +26,7 @@ temperature = 0.0 # Controls randomness # Optional configuration for specific LLM models [llm.vision] -model = "claude-3-7-sonnet" # The vision model to use +model = "gpt-4o" # The vision model to use base_url = "https://api.openai.com/v1" # API endpoint URL for vision model api_key = "sk-..." 
# Your API key for vision model max_tokens = 8192 # Maximum number of tokens in the response From 24bae6633387b18ad9185e5f9f082e3b8745741b Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Sun, 16 Mar 2025 13:01:25 +0800 Subject: [PATCH 37/77] update dependabot.yml --- .github/dependabot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4f94bce..1ef0e94 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,7 +4,7 @@ updates: directory: "/" schedule: interval: "weekly" - open-pull-requests-limit: 5 + open-pull-requests-limit: 4 groups: # Group critical packages that might need careful review core-dependencies: @@ -51,7 +51,7 @@ updates: directory: "/" schedule: interval: "weekly" - open-pull-requests-limit: 5 + open-pull-requests-limit: 4 groups: actions: patterns: From 4ba7bf692e3affed92abb5d589047d4cab90ea37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 16 Mar 2025 05:02:06 +0000 Subject: [PATCH 38/77] Bump actions/stale from 5 to 9 in the actions group Bumps the actions group with 1 update: [actions/stale](https://github.com/actions/stale). Updates `actions/stale` from 5 to 9 - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v5...v9) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major dependency-group: actions ... Signed-off-by: dependabot[bot] --- .github/workflows/stale.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 70d8458..ea52562 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -11,7 +11,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/stale@v5 + - uses: actions/stale@v9 with: days-before-issue-stale: 30 days-before-issue-close: 14 From 31133bccbbcf5ec639a4e6ff988bcc74bf377981 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 16 Mar 2025 05:02:36 +0000 Subject: [PATCH 39/77] Bump the core-dependencies group with 3 updates Updates the requirements on [pydantic](https://github.com/pydantic/pydantic), [openai](https://github.com/openai/openai-python) and [pydantic-core](https://github.com/pydantic/pydantic-core) to permit the latest version. 
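
(Note: `~=` is a PEP 440 compatible-release specifier, so `pydantic~=2.10.6` permits any 2.10.x release at or above 2.10.6 but excludes 2.11.0; the setup.py entries below use explicit bounds instead, e.g. `openai>=1.58.1,<1.67.0`.)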
Updates `pydantic` to 2.10.6 - [Release notes](https://github.com/pydantic/pydantic/releases) - [Changelog](https://github.com/pydantic/pydantic/blob/main/HISTORY.md) - [Commits](https://github.com/pydantic/pydantic/compare/v2.10.4...v2.10.6) Updates `openai` to 1.66.3 - [Release notes](https://github.com/openai/openai-python/releases) - [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/openai/openai-python/compare/v1.58.1...v1.66.3) Updates `pydantic-core` to 2.32.0 - [Release notes](https://github.com/pydantic/pydantic-core/releases) - [Commits](https://github.com/pydantic/pydantic-core/compare/v2.27.2...v2.32.0) --- updated-dependencies: - dependency-name: pydantic dependency-type: direct:production dependency-group: core-dependencies - dependency-name: openai dependency-type: direct:production dependency-group: core-dependencies - dependency-name: pydantic-core dependency-type: direct:production dependency-group: core-dependencies ... Signed-off-by: dependabot[bot] --- requirements.txt | 6 +++--- setup.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index 534eca9..3c72869 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ -pydantic~=2.10.4 -openai~=1.58.1 +pydantic~=2.10.6 +openai~=1.66.3 tenacity~=9.0.0 pyyaml~=6.0.2 loguru~=0.7.3 @@ -20,6 +20,6 @@ baidusearch~=1.0.3 duckduckgo_search~=7.5.1 aiofiles~=24.1.0 -pydantic_core~=2.27.2 +pydantic_core~=2.32.0 colorama~=0.4.6 playwright~=1.49.1 diff --git a/setup.py b/setup.py index dd46f9c..2ca6eb0 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ setup( packages=find_packages(), install_requires=[ "pydantic~=2.10.4", - "openai~=1.58.1", + "openai>=1.58.1,<1.67.0", "tenacity~=9.0.0", "pyyaml~=6.0.2", "loguru~=0.7.3", @@ -31,7 +31,7 @@ setup( "browser-use~=0.1.40", "googlesearch-python~=1.3.0", "aiofiles~=24.1.0", - "pydantic_core~=2.27.2", + "pydantic_core>=2.27.2,<2.33.0", "colorama~=0.4.6", ], classifiers=[ From 9781eadb9e1e7fc9c30b35a0fc4263db4fb277a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 16 Mar 2025 05:12:56 +0000 Subject: [PATCH 40/77] Update playwright requirement in the browsergym-related group Updates the requirements on [playwright](https://github.com/microsoft/playwright-python) to permit the latest version. Updates `playwright` to 1.50.0 - [Release notes](https://github.com/microsoft/playwright-python/releases) - [Commits](https://github.com/microsoft/playwright-python/compare/v1.49.1...v1.50.0) --- updated-dependencies: - dependency-name: playwright dependency-type: direct:production dependency-group: browsergym-related ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3c72869..4d10ff8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,4 +22,4 @@ duckduckgo_search~=7.5.1 aiofiles~=24.1.0 pydantic_core~=2.32.0 colorama~=0.4.6 -playwright~=1.49.1 +playwright~=1.50.0 From 16290a120b8ca3549db929f453e365a0d97dceee Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sun, 16 Mar 2025 17:47:29 +0800 Subject: [PATCH 41/77] ci(chore): top-issues panel --- .github/workflows/top-issues.yaml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .github/workflows/top-issues.yaml diff --git a/.github/workflows/top-issues.yaml b/.github/workflows/top-issues.yaml new file mode 100644 index 0000000..b3d23f0 --- /dev/null +++ b/.github/workflows/top-issues.yaml @@ -0,0 +1,23 @@ +name: Top issues +on: + schedule: + - cron: '0 0/2 * * *' + workflow_dispatch: + +jobs: + ShowAndLabelTopIssues: + name: Display and label top issues + runs-on: ubuntu-latest + if: github.repository == 'mannaandpoem/OpenManus' + steps: + - name: Run top issues action + uses: rickstaa/top-issues-action@7e8dda5d5ae3087670f9094b9724a9a091fc3ba1 # v1.3.101 + env: + github_token: ${{ secrets.GITHUB_TOKEN }} + with: + label: true + dashboard: true + dashboard_show_total_reactions: true + top_issues: true + top_pull_requests: true + top_list_size: 32 From 491f27358c501e61ac51e3cb22582968209a875c Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sun, 16 Mar 2025 19:56:37 +0800 Subject: [PATCH 42/77] refactor: Add permissions for top-issues workflow --- .github/workflows/top-issues.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/top-issues.yaml b/.github/workflows/top-issues.yaml index b3d23f0..47b6bf0 100644 --- a/.github/workflows/top-issues.yaml +++ b/.github/workflows/top-issues.yaml @@ -3,15 +3,19 @@ on: schedule: - cron: '0 0/2 * * *' workflow_dispatch: - jobs: ShowAndLabelTopIssues: + permissions: + issues: write + pull-requests: write + actions: read + contents: read name: Display and label top issues runs-on: ubuntu-latest if: github.repository == 'mannaandpoem/OpenManus' steps: - name: Run top issues action - uses: rickstaa/top-issues-action@7e8dda5d5ae3087670f9094b9724a9a091fc3ba1 # v1.3.101 + uses: rickstaa/top-issues-action@7e8dda5d5ae3087670f9094b9724a9a091fc3ba1 # v1.3.101 env: github_token: ${{ secrets.GITHUB_TOKEN }} with: From 10ecc91e5e5286025a236a73395b1851c07209dc Mon Sep 17 00:00:00 2001 From: zhiyuanRen <1131876818@qq.com> Date: Sun, 16 Mar 2025 21:47:46 +0800 Subject: [PATCH 43/77] print the token usage of each step's prompt and completion, as well as the cumulative total consumption up to now, which is useful for analyzing resource usage. 
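
For illustration, with assumed token counts (not output from a real run), each request would now log a single line of the form:

    Token usage: Input=1432, Completion=256, Cumulative Input=5120, Cumulative Completion=890, Total=1688, Cumulative Total=6010

Streaming responses carry no usage object, so the patch estimates their completion tokens with count_tokens() on the collected text instead.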
--- app/llm.py | 25 +++++++++++++++++++++---- app/tool/file_saver.py | 2 +- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/app/llm.py b/app/llm.py index 18a13af..47e18ab 100644 --- a/app/llm.py +++ b/app/llm.py @@ -59,6 +59,7 @@ class LLM: # Add token counting related attributes self.total_input_tokens = 0 + self.total_completion_tokens = 0 self.max_input_tokens = ( llm_config.max_input_tokens if hasattr(llm_config, "max_input_tokens") @@ -129,12 +130,15 @@ class LLM: return token_count - def update_token_count(self, input_tokens: int) -> None: + def update_token_count(self, input_tokens: int, completion_tokens: int = 0) -> None: """Update token counts""" # Only track tokens if max_input_tokens is set self.total_input_tokens += input_tokens + self.total_completion_tokens += completion_tokens logger.info( - f"Token usage: Input={input_tokens}, Cumulative Input={self.total_input_tokens}" + f"Token usage: Input={input_tokens}, Completion={completion_tokens}, " + f"Cumulative Input={self.total_input_tokens}, Cumulative Completion={self.total_completion_tokens}, " + f"Total={input_tokens + completion_tokens}, Cumulative Total={self.total_input_tokens + self.total_completion_tokens}" ) def check_token_limit(self, input_tokens: int) -> bool: @@ -271,7 +275,9 @@ class LLM: raise ValueError("Empty or invalid response from LLM") # Update token counts - self.update_token_count(response.usage.prompt_tokens) + self.update_token_count( + response.usage.prompt_tokens, response.usage.completion_tokens + ) return response.choices[0].message.content @@ -282,9 +288,11 @@ class LLM: response = await self.client.chat.completions.create(**params) collected_messages = [] + completion_text = "" async for chunk in response: chunk_message = chunk.choices[0].delta.content or "" collected_messages.append(chunk_message) + completion_text += chunk_message print(chunk_message, end="", flush=True) print() # Newline after streaming @@ -292,6 +300,13 @@ class LLM: if not full_response: raise ValueError("Empty response from streaming LLM") + # 对于流式响应,估算completion tokens + completion_tokens = self.count_tokens(completion_text) + logger.info( + f"Estimated completion tokens for streaming response: {completion_tokens}" + ) + self.total_completion_tokens += completion_tokens + return full_response except TokenLimitExceeded: @@ -412,7 +427,9 @@ class LLM: raise ValueError("Invalid or empty response from LLM") # Update token counts - self.update_token_count(response.usage.prompt_tokens) + self.update_token_count( + response.usage.prompt_tokens, response.usage.completion_tokens + ) return response.choices[0].message diff --git a/app/tool/file_saver.py b/app/tool/file_saver.py index 96d64b3..7d92a02 100644 --- a/app/tool/file_saver.py +++ b/app/tool/file_saver.py @@ -2,8 +2,8 @@ import os import aiofiles -from app.tool.base import BaseTool from app.config import WORKSPACE_ROOT +from app.tool.base import BaseTool class FileSaver(BaseTool): From ea72591c6552ccc3a61dad2cd3f8b9a2e1e60572 Mon Sep 17 00:00:00 2001 From: fbosso Date: Sun, 16 Mar 2025 14:55:21 +0100 Subject: [PATCH 44/77] Fix pydantic_core version to 2.27.2 to resolve dependency conflict --- requirements.txt | 4 ++-- setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4d10ff8..f063b6c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,6 +20,6 @@ baidusearch~=1.0.3 duckduckgo_search~=7.5.1 aiofiles~=24.1.0 -pydantic_core~=2.32.0 +pydantic_core~=2.27.2 colorama~=0.4.6 
-playwright~=1.50.0 +playwright~=1.50.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 2ca6eb0..eb36dac 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ setup( "browser-use~=0.1.40", "googlesearch-python~=1.3.0", "aiofiles~=24.1.0", - "pydantic_core>=2.27.2,<2.33.0", + "pydantic_core>=2.27.2,<2.28.0", "colorama~=0.4.6", ], classifiers=[ From 3d2c74f7918b9b3c3d4acadd93c6f374d37c43ae Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Sun, 16 Mar 2025 23:14:08 +0800 Subject: [PATCH 45/77] feat(workflow): add PR diff summarization workflow --- .github/workflows/pr-autodiff.yaml | 131 +++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 .github/workflows/pr-autodiff.yaml diff --git a/.github/workflows/pr-autodiff.yaml b/.github/workflows/pr-autodiff.yaml new file mode 100644 index 0000000..5e1d785 --- /dev/null +++ b/.github/workflows/pr-autodiff.yaml @@ -0,0 +1,131 @@ +name: PR Diff Summarization +on: + pull_request: + branches: [main] + types: [opened, ready_for_review, reopened] + issue_comment: + types: [created] +permissions: + contents: read + pull-requests: write +jobs: + pr-diff-summarization: + runs-on: ubuntu-latest + if: | + (github.event_name == 'pull_request') || + (github.event_name == 'issue_comment' && + contains(github.event.comment.body, '!pr-diff') && + (github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && + github.event.issue.pull_request) + steps: + - name: Get PR head SHA + id: get-pr-sha + run: | + if [ "${{ github.event_name }}" == "pull_request" ]; then + echo "pr_sha=${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT + echo "Retrieved PR head SHA: ${{ github.event.pull_request.head.sha }}" + else + PR_URL="${{ github.event.issue.pull_request.url }}" + SHA=$(curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" $PR_URL | jq -r '.head.sha') + echo "pr_sha=$SHA" >> $GITHUB_OUTPUT + echo "Retrieved PR head SHA from API: $SHA" + fi + - name: Check out code + uses: actions/checkout@v4 + with: + ref: ${{ steps.get-pr-sha.outputs.pr_sha }} + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install openai requests + - name: Create and run Python script + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }} + GH_TOKEN: ${{ github.token }} + PR_NUMBER: ${{ github.event.pull_request.number || github.event.issue.number }} + run: |- + cat << 'EOF' > /tmp/_workflow_core.py + import os + import subprocess + import json + import requests + from openai import OpenAI + + def get_diff(): + result = subprocess.run( + ['git', 'diff', 'origin/main...HEAD'], + capture_output=True, text=True, check=True) + return '\n'.join( + line for line in result.stdout.split('\n') + if any(line.startswith(c) for c in ('+', '-')) + and not line.startswith(('---', '+++')) + )[:round(200000 * 0.4)] # Truncate to prevent overflow + + def generate_comment(diff_content): + client = OpenAI( + base_url=os.getenv("OPENAI_BASE_URL"), + api_key=os.getenv("OPENAI_API_KEY") + ) + + guidelines = ''' + 1. English version first, Chinese Simplified version after + 2. 
Example format: + # Diff Report + ## English + - Added `ABC` class + - Fixed `f()` behavior in `foo` module + + ### Comments Highlight + - `config.toml` needs to be configured properly to make sure new features work as expected. + + ### Spelling/Offensive Content Check + - No spelling mistakes or offensive content found in the code or comments. + 3. Highlight non-English comments + 4. Check for spelling/offensive content''' + + response = client.chat.completions.create( + model="o3-mini", + messages=[{ + "role": "system", + "content": "Generate bilingual code review feedback." + }, { + "role": "user", + "content": f"Review these changes per guidelines:\n{guidelines}\n\nDIFF:\n{diff_content}" + }] + ) + return response.choices[0].message.content + + def post_comment(comment): + repo = os.getenv("GITHUB_REPOSITORY") + pr_number = os.getenv("PR_NUMBER") + + headers = { + "Authorization": f"Bearer {os.getenv('GH_TOKEN')}", + "Accept": "application/vnd.github.v3+json" + } + url = f"https://api.github.com/repos/{repo}/issues/{pr_number}/comments" + + requests.post(url, json={"body": comment}, headers=headers) + + if __name__ == "__main__": + diff_content = get_diff() + if not diff_content.strip(): + print("No meaningful diff detected.") + exit(0) + + try: + comment = generate_comment(diff_content) + post_comment(comment) + print("Comment posted successfully.") + except Exception as e: + print(f"Failed to process: {str(e)}") + exit(1) + EOF + + python /tmp/_workflow_core.py From 114bd467206fb34c53696903e9d3001863bc342d Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Mon, 17 Mar 2025 00:04:17 +0800 Subject: [PATCH 46/77] update config.example.toml and format file_saver.py --- app/tool/file_saver.py | 2 +- config/config.example.toml | 21 ++++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/app/tool/file_saver.py b/app/tool/file_saver.py index 96d64b3..7d92a02 100644 --- a/app/tool/file_saver.py +++ b/app/tool/file_saver.py @@ -2,8 +2,8 @@ import os import aiofiles -from app.tool.base import BaseTool from app.config import WORKSPACE_ROOT +from app.tool.base import BaseTool class FileSaver(BaseTool): diff --git a/config/config.example.toml b/config/config.example.toml index 2eecdfb..51b8ead 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -1,11 +1,10 @@ # Global LLM configuration [llm] -model = "gpt-4o" # The LLM model to use -base_url = "https://api.openai.com/v1" # API endpoint URL -api_key = "sk-..." # Your API key -max_tokens = 8192 # Maximum number of tokens in the response -temperature = 0.0 # Controls randomness -# max_input_tokens = 100000 # Maximum input tokens to use across all requests (set to null or delete this line for unlimited) +model = "claude-3-7-sonnet-20250219" # The LLM model to use +base_url = "https://api.anthropic.com/v1/" # API endpoint URL +api_key = "YOUR_API_KEY" # Your API key +max_tokens = 8192 # Maximum number of tokens in the response +temperature = 0.0 # Controls randomness # [llm] #AZURE OPENAI: # api_type= 'azure' @@ -26,11 +25,11 @@ temperature = 0.0 # Controls randomness # Optional configuration for specific LLM models [llm.vision] -model = "gpt-4o" # The vision model to use -base_url = "https://api.openai.com/v1" # API endpoint URL for vision model -api_key = "sk-..." 
# Your API key for vision model -max_tokens = 8192 # Maximum number of tokens in the response -temperature = 0.0 # Controls randomness for vision model +model = "claude-3-7-sonnet-20250219" # The vision model to use +base_url = "https://api.anthropic.com/v1/" # API endpoint URL for vision model +api_key = "YOUR_API_KEY" # Your API key for vision model +max_tokens = 8192 # Maximum number of tokens in the response +temperature = 0.0 # Controls randomness for vision model # [llm.vision] #OLLAMA VISION: # api_type = 'ollama' From 24b3d2d62c40c15472c1105e4deab92b5629052c Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Mon, 17 Mar 2025 00:23:38 +0800 Subject: [PATCH 47/77] fix: end of file line --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f063b6c..2dcaabd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,4 +22,4 @@ duckduckgo_search~=7.5.1 aiofiles~=24.1.0 pydantic_core~=2.27.2 colorama~=0.4.6 -playwright~=1.50.0 \ No newline at end of file +playwright~=1.50.0 From 5777334fb413cdd0155c748961183ca427074f63 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Mon, 17 Mar 2025 00:41:04 +0800 Subject: [PATCH 48/77] ci(requirements): environment corruption check --- .../workflows/environment-corrupt-check.yaml | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 .github/workflows/environment-corrupt-check.yaml diff --git a/.github/workflows/environment-corrupt-check.yaml b/.github/workflows/environment-corrupt-check.yaml new file mode 100644 index 0000000..19af0dc --- /dev/null +++ b/.github/workflows/environment-corrupt-check.yaml @@ -0,0 +1,33 @@ +name: Environment Corruption Check +on: + push: + branches: ["main"] + paths: + - requirements.txt + pull_request: + branches: ["main"] + paths: + - requirements.txt +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }} + cancel-in-progress: true +jobs: + test-python-versions: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.11.4", "3.12.8", "3.13.1"] + fail-fast: false + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Upgrade pip + run: | + python -m pip install --upgrade pip + - name: Install dependencies + run: | + pip install -r requirements.txt From 4414f05cd5229c2a9946d72c1c2d1e500e1af679 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Mon, 17 Mar 2025 00:50:17 +0800 Subject: [PATCH 49/77] fix(pr-autodiff): remove unnecessary try-except block --- .github/workflows/pr-autodiff.yaml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/pr-autodiff.yaml b/.github/workflows/pr-autodiff.yaml index 5e1d785..7f5b5cc 100644 --- a/.github/workflows/pr-autodiff.yaml +++ b/.github/workflows/pr-autodiff.yaml @@ -119,13 +119,9 @@ jobs: print("No meaningful diff detected.") exit(0) - try: - comment = generate_comment(diff_content) - post_comment(comment) - print("Comment posted successfully.") - except Exception as e: - print(f"Failed to process: {str(e)}") - exit(1) + comment = generate_comment(diff_content) + post_comment(comment) + print("Comment posted successfully.") EOF python /tmp/_workflow_core.py From 9d693409dcf2d405f38d5ee619e324101e27ba0c Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Mon, 17 Mar 2025 00:52:18 +0800 Subject: [PATCH 50/77] chore: update Python version to latest ones --- 
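The matrix below remains the authoritative list of interpreters exercised in CI.
For a quick local sanity check against the same 3.11-3.13 range, a sketch like
this works (illustrative only, not part of this diff):

    import sys

    # Compare only (major, minor) so any 3.11.x-3.13.x patch release passes.
    if not ((3, 11) <= sys.version_info[:2] <= (3, 13)):
        print(f"Warning: Python {sys.version.split()[0]} is outside the tested 3.11-3.13 range")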
.github/workflows/environment-corrupt-check.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/environment-corrupt-check.yaml b/.github/workflows/environment-corrupt-check.yaml index 19af0dc..ba53fcc 100644 --- a/.github/workflows/environment-corrupt-check.yaml +++ b/.github/workflows/environment-corrupt-check.yaml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.11.4", "3.12.8", "3.13.1"] + python-version: ["3.11.11", "3.12.8", "3.13.2"] fail-fast: false steps: - name: Checkout repository From daafb2c9788f828f1348ee77c4d01509f5ecc528 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Mon, 17 Mar 2025 01:31:43 +0800 Subject: [PATCH 51/77] refactor(workflow): disable pull request triggers in favor of issue comments --- .github/workflows/pr-autodiff.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr-autodiff.yaml b/.github/workflows/pr-autodiff.yaml index 7f5b5cc..33ce307 100644 --- a/.github/workflows/pr-autodiff.yaml +++ b/.github/workflows/pr-autodiff.yaml @@ -1,8 +1,8 @@ name: PR Diff Summarization on: - pull_request: - branches: [main] - types: [opened, ready_for_review, reopened] + # pull_request: + # branches: [main] + # types: [opened, ready_for_review, reopened] issue_comment: types: [created] permissions: From c076ec0f0ce9aaafa06ed4d9996798b1d1025ae6 Mon Sep 17 00:00:00 2001 From: tboy1337 <30571311+tboy1337@users.noreply.github.com> Date: Sun, 16 Mar 2025 17:41:50 +0000 Subject: [PATCH 52/77] Update environment-corrupt-check.yaml --- .github/workflows/environment-corrupt-check.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/environment-corrupt-check.yaml b/.github/workflows/environment-corrupt-check.yaml index ba53fcc..dc66fe0 100644 --- a/.github/workflows/environment-corrupt-check.yaml +++ b/.github/workflows/environment-corrupt-check.yaml @@ -20,9 +20,9 @@ jobs: fail-fast: false steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Upgrade pip From a107cb2f6ccedaa5f13b908b37b8122571081aca Mon Sep 17 00:00:00 2001 From: tboy1337 <30571311+tboy1337@users.noreply.github.com> Date: Sun, 16 Mar 2025 17:47:19 +0000 Subject: [PATCH 53/77] Update pr-autodiff.yaml --- .github/workflows/pr-autodiff.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-autodiff.yaml b/.github/workflows/pr-autodiff.yaml index 33ce307..ed218dc 100644 --- a/.github/workflows/pr-autodiff.yaml +++ b/.github/workflows/pr-autodiff.yaml @@ -36,7 +36,7 @@ jobs: ref: ${{ steps.get-pr-sha.outputs.pr_sha }} fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' - name: Install dependencies From 395d5a3add551ebd6ad6c54517909e842b288693 Mon Sep 17 00:00:00 2001 From: gantnocap Date: Mon, 17 Mar 2025 09:59:19 +0800 Subject: [PATCH 54/77] reformat --- openmanus_server/openmanus_server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/openmanus_server/openmanus_server.py b/openmanus_server/openmanus_server.py index 50b4351..f0f6aee 100644 --- a/openmanus_server/openmanus_server.py +++ b/openmanus_server/openmanus_server.py @@ -4,7 +4,6 @@ import json import logging import os import sys -from typing import Optional from mcp.server.fastmcp 
import FastMCP From cc1abe630c9147fd7eccaff706c00e9b70bce42b Mon Sep 17 00:00:00 2001 From: Yizhou Chi Date: Mon, 17 Mar 2025 15:22:50 +0800 Subject: [PATCH 55/77] fix click --- app/tool/browser_use_tool.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/app/tool/browser_use_tool.py b/app/tool/browser_use_tool.py index ad0cfa1..b7b9899 100644 --- a/app/tool/browser_use_tool.py +++ b/app/tool/browser_use_tool.py @@ -45,6 +45,7 @@ class BrowserUseTool(BaseTool): "enum": [ "navigate", "click", + "get_current_state", "input_text", "screenshot", "get_html", @@ -64,7 +65,7 @@ class BrowserUseTool(BaseTool): }, "index": { "type": "integer", - "description": "Element index for 'click' or 'input_text' actions", + "description": "Element index (retrieved using get_current_state) for 'click' or 'input_text' actions", }, "text": {"type": "string", "description": "Text for 'input_text' action"}, "script": { @@ -200,6 +201,9 @@ class BrowserUseTool(BaseTool): if download_path: output += f" - Downloaded file to {download_path}" return ToolResult(output=output) + + elif action == "get_current_state": + return await self.get_current_state(context) elif action == "input_text": if index is None or not text: @@ -287,21 +291,20 @@ class BrowserUseTool(BaseTool): except Exception as e: return ToolResult(error=f"Browser action '{action}' failed: {str(e)}") - async def get_current_state(self) -> ToolResult: + async def get_current_state(self, context: BrowserContext) -> ToolResult: """Get the current browser state as a ToolResult.""" - async with self.lock: - try: - context = await self._ensure_browser_initialized() - state = await context.get_state() - state_info = { - "url": state.url, - "title": state.title, - "tabs": [tab.model_dump() for tab in state.tabs], - "interactive_elements": state.element_tree.clickable_elements_to_string(), - } - return ToolResult(output=json.dumps(state_info)) - except Exception as e: - return ToolResult(error=f"Failed to get browser state: {str(e)}") + try: + state = await context.get_state() + state_info = { + "url": state.url, + "title": state.title, + "tabs": [tab.model_dump() for tab in state.tabs], + "help": "The numbers in brackets ([0], [1], [2], etc.) represent clickable indices corresponding to the elements listed.", + "interactive_elements": state.element_tree.clickable_elements_to_string(), + } + return ToolResult(output=json.dumps(state_info, indent=4, ensure_ascii=False)) + except Exception as e: + return ToolResult(error=f"Failed to get browser state: {str(e)}") async def cleanup(self): """Clean up browser resources.""" From 9bc267cef3870c1d5214b5a4cade9f5db3c2a974 Mon Sep 17 00:00:00 2001 From: Cyzus Date: Mon, 17 Mar 2025 15:44:50 +0800 Subject: [PATCH 56/77] refine help text --- app/tool/browser_use_tool.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/app/tool/browser_use_tool.py b/app/tool/browser_use_tool.py index b7b9899..385a95b 100644 --- a/app/tool/browser_use_tool.py +++ b/app/tool/browser_use_tool.py @@ -201,7 +201,7 @@ class BrowserUseTool(BaseTool): if download_path: output += f" - Downloaded file to {download_path}" return ToolResult(output=output) - + elif action == "get_current_state": return await self.get_current_state(context) @@ -299,10 +299,12 @@ class BrowserUseTool(BaseTool): "url": state.url, "title": state.title, "tabs": [tab.model_dump() for tab in state.tabs], - "help": "The numbers in brackets ([0], [1], [2], etc.) 
represent clickable indices corresponding to the elements listed.", + "help": "[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. Clicking on these indices will navigate to or interact with the respective content behind them.", "interactive_elements": state.element_tree.clickable_elements_to_string(), } - return ToolResult(output=json.dumps(state_info, indent=4, ensure_ascii=False)) + return ToolResult( + output=json.dumps(state_info, indent=4, ensure_ascii=False) + ) except Exception as e: return ToolResult(error=f"Failed to get browser state: {str(e)}") From 4af5ed34ab884f4b29dc612ab8fb0f72ea3683fc Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Mon, 17 Mar 2025 19:31:48 +0800 Subject: [PATCH 57/77] ci(top-issues): reduce number of top issues --- .github/workflows/top-issues.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/top-issues.yaml b/.github/workflows/top-issues.yaml index 47b6bf0..85ad0d8 100644 --- a/.github/workflows/top-issues.yaml +++ b/.github/workflows/top-issues.yaml @@ -24,4 +24,4 @@ jobs: dashboard_show_total_reactions: true top_issues: true top_pull_requests: true - top_list_size: 32 + top_list_size: 14 From 3fa14d00669ac47f45d6b11b318e0aebb1c4fa20 Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Mon, 17 Mar 2025 20:10:50 +0800 Subject: [PATCH 58/77] chore(app.__init__): add Python version check for 3.11-3.13 --- app/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/app/__init__.py b/app/__init__.py index e69de29..0749c6d 100644 --- a/app/__init__.py +++ b/app/__init__.py @@ -0,0 +1,10 @@ +# Python version check: 3.11-3.13 +import sys + + +if sys.version_info < (3, 11) or sys.version_info > (3, 13): + print( + "Warning: Unsupported Python version {ver}, please use 3.11-3.13".format( + ver=".".join(map(str, sys.version_info)) + ) + ) From 50ab26880ee1fbb581a79c530964572ca8c6a298 Mon Sep 17 00:00:00 2001 From: xiangjinyu <1376193973@qq.com> Date: Mon, 17 Mar 2025 20:18:10 +0800 Subject: [PATCH 59/77] add get_current_state description --- app/tool/browser_use_tool.py | 1 + 1 file changed, 1 insertion(+) diff --git a/app/tool/browser_use_tool.py b/app/tool/browser_use_tool.py index 385a95b..fb47ed8 100644 --- a/app/tool/browser_use_tool.py +++ b/app/tool/browser_use_tool.py @@ -31,6 +31,7 @@ content extraction, and tab management. 
Supported actions include: - 'new_tab': Open a new tab - 'close_tab': Close the current tab - 'refresh': Refresh the current page +- 'get_current_state': Get the current browser state including URL, title, tabs, and interactive elements """ From fb0d1c02a687e894d6077a4dd3d5c06db2a3fc70 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Mon, 17 Mar 2025 21:30:04 +0800 Subject: [PATCH 60/77] add TokenCounter and ask_with_images --- app/llm.py | 326 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 286 insertions(+), 40 deletions(-) diff --git a/app/llm.py b/app/llm.py index 18a13af..e354125 100644 --- a/app/llm.py +++ b/app/llm.py @@ -1,3 +1,4 @@ +import math from typing import Dict, List, Optional, Union import tiktoken @@ -31,6 +32,142 @@ from app.schema import ( REASONING_MODELS = ["o1", "o3-mini"] +class TokenCounter: + # Token constants + BASE_MESSAGE_TOKENS = 4 + FORMAT_TOKENS = 2 + LOW_DETAIL_IMAGE_TOKENS = 85 + HIGH_DETAIL_TILE_TOKENS = 170 + + # Image processing constants + MAX_SIZE = 2048 + HIGH_DETAIL_TARGET_SHORT_SIDE = 768 + TILE_SIZE = 512 + + def __init__(self, tokenizer): + self.tokenizer = tokenizer + + def count_text(self, text: str) -> int: + """Calculate tokens for a text string""" + return 0 if not text else len(self.tokenizer.encode(text)) + + def count_image(self, image_item: dict) -> int: + """ + Calculate tokens for an image based on detail level and dimensions + + For "low" detail: fixed 85 tokens + For "high" detail: + 1. Scale to fit in 2048x2048 square + 2. Scale shortest side to 768px + 3. Count 512px tiles (170 tokens each) + 4. Add 85 tokens + """ + detail = image_item.get("detail", "medium") + + # For low detail, always return fixed token count + if detail == "low": + return self.LOW_DETAIL_IMAGE_TOKENS + + # For medium detail (default in OpenAI), use high detail calculation + # OpenAI doesn't specify a separate calculation for medium + + # For high detail, calculate based on dimensions if available + if detail == "high" or detail == "medium": + # If dimensions are provided in the image_item + if "dimensions" in image_item: + width, height = image_item["dimensions"] + return self._calculate_high_detail_tokens(width, height) + + # Default values when dimensions aren't available or detail level is unknown + if detail == "high": + # Default to a 1024x1024 image calculation for high detail + return self._calculate_high_detail_tokens(1024, 1024) # 765 tokens + elif detail == "medium": + # Default to a medium-sized image for medium detail + return 1024 # This matches the original default + else: + # For unknown detail levels, use medium as default + return 1024 + + def _calculate_high_detail_tokens(self, width: int, height: int) -> int: + """Calculate tokens for high detail images based on dimensions""" + # Step 1: Scale to fit in MAX_SIZE x MAX_SIZE square + if width > self.MAX_SIZE or height > self.MAX_SIZE: + scale = self.MAX_SIZE / max(width, height) + width = int(width * scale) + height = int(height * scale) + + # Step 2: Scale so shortest side is HIGH_DETAIL_TARGET_SHORT_SIDE + scale = self.HIGH_DETAIL_TARGET_SHORT_SIDE / min(width, height) + scaled_width = int(width * scale) + scaled_height = int(height * scale) + + # Step 3: Count number of 512px tiles + tiles_x = math.ceil(scaled_width / self.TILE_SIZE) + tiles_y = math.ceil(scaled_height / self.TILE_SIZE) + total_tiles = tiles_x * tiles_y + + # Step 4: Calculate final token count + return ( + total_tiles * self.HIGH_DETAIL_TILE_TOKENS + ) + 
self.LOW_DETAIL_IMAGE_TOKENS + + def count_content(self, content: Union[str, List[Union[str, dict]]]) -> int: + """Calculate tokens for message content""" + if not content: + return 0 + + if isinstance(content, str): + return self.count_text(content) + + token_count = 0 + for item in content: + if isinstance(item, str): + token_count += self.count_text(item) + elif isinstance(item, dict): + if "text" in item: + token_count += self.count_text(item["text"]) + elif "image_url" in item: + token_count += self.count_image(item) + return token_count + + def count_tool_calls(self, tool_calls: List[dict]) -> int: + """Calculate tokens for tool calls""" + token_count = 0 + for tool_call in tool_calls: + if "function" in tool_call: + function = tool_call["function"] + token_count += self.count_text(function.get("name", "")) + token_count += self.count_text(function.get("arguments", "")) + return token_count + + def count_message_tokens(self, messages: List[dict]) -> int: + """Calculate the total number of tokens in a message list""" + total_tokens = self.FORMAT_TOKENS # Base format tokens + + for message in messages: + tokens = self.BASE_MESSAGE_TOKENS # Base tokens per message + + # Add role tokens + tokens += self.count_text(message.get("role", "")) + + # Add content tokens + if "content" in message: + tokens += self.count_content(message["content"]) + + # Add tool calls tokens + if "tool_calls" in message: + tokens += self.count_tool_calls(message["tool_calls"]) + + # Add name and tool_call_id tokens + tokens += self.count_text(message.get("name", "")) + tokens += self.count_text(message.get("tool_call_id", "")) + + total_tokens += tokens + + return total_tokens + + class LLM: _instances: Dict[str, "LLM"] = {} @@ -81,6 +218,8 @@ class LLM: else: self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url) + self.token_counter = TokenCounter(self.tokenizer) + def count_tokens(self, text: str) -> int: """Calculate the number of tokens in a text""" if not text: @@ -88,46 +227,7 @@ class LLM: return len(self.tokenizer.encode(text)) def count_message_tokens(self, messages: List[dict]) -> int: - """Calculate the number of tokens in a message list""" - token_count = 0 - for message in messages: - # Base token count for each message (according to OpenAI's calculation method) - token_count += 4 # Base token count for each message - - # Calculate tokens for the role - if "role" in message: - token_count += self.count_tokens(message["role"]) - - # Calculate tokens for the content - if "content" in message and message["content"]: - token_count += self.count_tokens(message["content"]) - - # Calculate tokens for tool calls - if "tool_calls" in message and message["tool_calls"]: - for tool_call in message["tool_calls"]: - if "function" in tool_call: - # Function name - if "name" in tool_call["function"]: - token_count += self.count_tokens( - tool_call["function"]["name"] - ) - # Function arguments - if "arguments" in tool_call["function"]: - token_count += self.count_tokens( - tool_call["function"]["arguments"] - ) - - # Calculate tokens for tool responses - if "name" in message and message["name"]: - token_count += self.count_tokens(message["name"]) - - if "tool_call_id" in message and message["tool_call_id"]: - token_count += self.count_tokens(message["tool_call_id"]) - - # Add extra tokens for message format - token_count += 2 # Extra tokens for message format - - return token_count + return self.token_counter.count_message_tokens(messages) def update_token_count(self, input_tokens: int) -> None: 
"""Update token counts""" @@ -313,6 +413,152 @@ class LLM: logger.error(f"Unexpected error in ask: {e}") raise + @retry( + wait=wait_random_exponential(min=1, max=60), + stop=stop_after_attempt(6), + retry=retry_if_exception_type( + (OpenAIError, Exception, ValueError) + ), # Don't retry TokenLimitExceeded + ) + async def ask_with_images( + self, + messages: List[Union[dict, Message]], + images: List[Union[str, dict]], + system_msgs: Optional[List[Union[dict, Message]]] = None, + stream: bool = False, + temperature: Optional[float] = None, + ) -> str: + """ + Send a prompt with images to the LLM and get the response. + + Args: + messages: List of conversation messages + images: List of image URLs or image data dictionaries + system_msgs: Optional system messages to prepend + stream (bool): Whether to stream the response + temperature (float): Sampling temperature for the response + + Returns: + str: The generated response + + Raises: + TokenLimitExceeded: If token limits are exceeded + ValueError: If messages are invalid or response is empty + OpenAIError: If API call fails after retries + Exception: For unexpected errors + """ + try: + # Format messages + formatted_messages = self.format_messages(messages) + + # Ensure the last message is from the user to attach images + if not formatted_messages or formatted_messages[-1]["role"] != "user": + raise ValueError( + "The last message must be from the user to attach images" + ) + + # Process the last user message to include images + last_message = formatted_messages[-1] + + # Convert content to multimodal format if needed + content = last_message["content"] + multimodal_content = ( + [{"type": "text", "text": content}] + if isinstance(content, str) + else content + if isinstance(content, list) + else [] + ) + + # Add images to content + for image in images: + if isinstance(image, str): + multimodal_content.append( + {"type": "image_url", "image_url": {"url": image}} + ) + elif isinstance(image, dict) and "url" in image: + multimodal_content.append({"type": "image_url", "image_url": image}) + elif isinstance(image, dict) and "image_url" in image: + multimodal_content.append(image) + else: + raise ValueError(f"Unsupported image format: {image}") + + # Update the message with multimodal content + last_message["content"] = multimodal_content + + # Add system messages if provided + if system_msgs: + all_messages = self.format_messages(system_msgs) + formatted_messages + else: + all_messages = formatted_messages + + # Calculate tokens and check limits + input_tokens = self.count_message_tokens(all_messages) + if not self.check_token_limit(input_tokens): + raise TokenLimitExceeded(self.get_limit_error_message(input_tokens)) + + # Set up API parameters + params = { + "model": self.model, + "messages": all_messages, + "stream": stream, + } + + # Add model-specific parameters + if self.model in REASONING_MODELS: + params["max_completion_tokens"] = self.max_tokens + else: + params["max_tokens"] = self.max_tokens + params["temperature"] = ( + temperature if temperature is not None else self.temperature + ) + + # Handle non-streaming request + if not stream: + response = await self.client.chat.completions.create(**params) + + if not response.choices or not response.choices[0].message.content: + raise ValueError("Empty or invalid response from LLM") + + self.update_token_count(response.usage.prompt_tokens) + return response.choices[0].message.content + + # Handle streaming request + self.update_token_count(input_tokens) + response = await 
self.client.chat.completions.create(**params) + + collected_messages = [] + async for chunk in response: + chunk_message = chunk.choices[0].delta.content or "" + collected_messages.append(chunk_message) + print(chunk_message, end="", flush=True) + + print() # Newline after streaming + full_response = "".join(collected_messages).strip() + + if not full_response: + raise ValueError("Empty response from streaming LLM") + + return full_response + + except TokenLimitExceeded: + raise + except ValueError as ve: + logger.error(f"Validation error in ask_with_images: {ve}") + raise + except OpenAIError as oe: + logger.error(f"OpenAI API error: {oe}") + if isinstance(oe, AuthenticationError): + logger.error("Authentication failed. Check API key.") + elif isinstance(oe, RateLimitError): + logger.error("Rate limit exceeded. Consider increasing retry attempts.") + elif isinstance(oe, APIError): + logger.error(f"API error: {oe}") + raise + except Exception as e: + logger.error(f"Unexpected error in ask_with_images: {e}") + raise + @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), From 6dcd2ca0648cfe20b703ea5c143a0ccec0ec9b48 Mon Sep 17 00:00:00 2001 From: zhiyuanRen <1131876818@qq.com> Date: Mon, 17 Mar 2025 21:36:04 +0800 Subject: [PATCH 61/77] fix: replace chinese comment with english version --- app/llm.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/app/llm.py b/app/llm.py index 47e18ab..966ec52 100644 --- a/app/llm.py +++ b/app/llm.py @@ -27,7 +27,6 @@ from app.schema import ( ToolChoice, ) - REASONING_MODELS = ["o1", "o3-mini"] @@ -300,7 +299,7 @@ class LLM: if not full_response: raise ValueError("Empty response from streaming LLM") - # 对于流式响应,估算completion tokens + # estimate completion tokens for streaming response completion_tokens = self.count_tokens(completion_text) logger.info( f"Estimated completion tokens for streaming response: {completion_tokens}" From 11d1bd77294dfa5ba31be527d7b14f7bb6322ff0 Mon Sep 17 00:00:00 2001 From: zhiyuanRen <1131876818@qq.com> Date: Mon, 17 Mar 2025 21:39:36 +0800 Subject: [PATCH 62/77] format change for precommit purpose --- app/llm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/app/llm.py b/app/llm.py index 966ec52..334103a 100644 --- a/app/llm.py +++ b/app/llm.py @@ -27,6 +27,7 @@ from app.schema import ( ToolChoice, ) + REASONING_MODELS = ["o1", "o3-mini"] From 9bdd8201052bd80b0914b1ff4e61d9bd3c497e77 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Mon, 17 Mar 2025 23:07:04 +0800 Subject: [PATCH 63/77] update BrowserUseTool --- app/config.py | 3 + app/tool/browser_use_tool.py | 396 +++++++++++++++++++++++++++-------- 2 files changed, 317 insertions(+), 82 deletions(-) diff --git a/app/config.py b/app/config.py index 51356a0..9ab1448 100644 --- a/app/config.py +++ b/app/config.py @@ -59,6 +59,9 @@ class BrowserSettings(BaseModel): proxy: Optional[ProxySettings] = Field( None, description="Proxy settings for the browser" ) + max_content_length: int = Field( + 2000, description="Maximum length for content retrieval operations" + ) class AppConfig(BaseModel): diff --git a/app/tool/browser_use_tool.py b/app/tool/browser_use_tool.py index fb47ed8..468d054 100644 --- a/app/tool/browser_use_tool.py +++ b/app/tool/browser_use_tool.py @@ -1,6 +1,6 @@ import asyncio import json -from typing import Optional +from typing import Generic, Optional, TypeVar from browser_use import Browser as BrowserUseBrowser from browser_use import BrowserConfig @@ -11,31 +11,54 @@ from 
pydantic_core.core_schema import ValidationInfo from app.config import config from app.tool.base import BaseTool, ToolResult +from app.tool.web_search import WebSearch -MAX_LENGTH = 2000 - _BROWSER_DESCRIPTION = """ -Interact with a web browser to perform various actions such as navigation, element interaction, -content extraction, and tab management. Supported actions include: +Interact with a web browser to perform various actions such as navigation, element interaction, content extraction, and tab management. This tool provides a comprehensive set of browser automation capabilities: + +Navigation: - 'navigate': Go to a specific URL -- 'click': Click an element by index -- 'input_text': Input text into an element -- 'screenshot': Capture a screenshot -- 'get_html': Get page HTML content -- 'get_text': Get text content of the page -- 'read_links': Get all links on the page -- 'execute_js': Execute JavaScript code -- 'scroll': Scroll the page -- 'switch_tab': Switch to a specific tab -- 'new_tab': Open a new tab -- 'close_tab': Close the current tab +- 'go_back': Navigate back in browser history - 'refresh': Refresh the current page -- 'get_current_state': Get the current browser state including URL, title, tabs, and interactive elements +- 'web_search': Search the web with a specific query + +Element Interaction: +- 'click_element': Click an element by index +- 'input_text': Input text into a form element +- 'scroll_down'/'scroll_up': Scroll the page (with optional pixel amount) +- 'scroll_to_text': Scroll to specific text on the page +- 'send_keys': Send keyboard shortcuts or special keys +- 'get_dropdown_options': Get all options from a dropdown +- 'select_dropdown_option': Select an option from a dropdown by text + +Content Extraction: +- 'get_current_state': Get detailed browser state including URL, title, tabs, and interactive elements +- 'get_html': Get page HTML content +- 'get_text': Get text content of the page (supports start_index and end_index parameters) +- 'read_links': Get all links on the page +- 'extract_content': Extract specific information from the page using AI +- 'screenshot': Capture a screenshot + +Tab Management: +- 'switch_tab': Switch to a specific tab +- 'open_tab': Open a new tab with a URL +- 'close_tab': Close the current tab + +Utility: +- 'wait': Wait for a specified number of seconds +- 'execute_js': Execute JavaScript code on the page + +Task Completion: +- 'done': Complete the task and return results + +Each action requires specific parameters. Use get_current_state first to understand the current browser context. 
""" +Context = TypeVar("Context") -class BrowserUseTool(BaseTool): + +class BrowserUseTool(BaseTool, Generic[Context]): name: str = "browser_use" description: str = _BROWSER_DESCRIPTION parameters: dict = { @@ -45,18 +68,24 @@ class BrowserUseTool(BaseTool): "type": "string", "enum": [ "navigate", - "click", + "click_element", "get_current_state", "input_text", "screenshot", "get_html", "get_text", + "read_links", "execute_js", - "scroll", - "switch_tab", - "new_tab", - "close_tab", - "refresh", + "scroll_down", + "scroll_up", + "scroll_to_text", + "send_keys", + "get_dropdown_options", + "select_dropdown_option", + "go_back", + "web_search", + "wait", + "done", ], "description": "The browser action to perform", }, @@ -66,7 +95,7 @@ class BrowserUseTool(BaseTool): }, "index": { "type": "integer", - "description": "Element index (retrieved using get_current_state) for 'click' or 'input_text' actions", + "description": "Element index (retrieved using get_current_state) for 'click_element' or 'input_text' actions", }, "text": {"type": "string", "description": "Text for 'input_text' action"}, "script": { @@ -75,22 +104,59 @@ class BrowserUseTool(BaseTool): }, "scroll_amount": { "type": "integer", - "description": "Pixels to scroll (positive for down, negative for up) for 'scroll' action", + "description": "Pixels to scroll (positive for down, negative for up) for 'scroll_down' or 'scroll_up' actions", }, "tab_id": { "type": "integer", "description": "Tab ID for 'switch_tab' action", }, + "start_index": { + "type": "integer", + "description": "Starting character index for text observation (for 'scroll_to_text' and 'get_text' actions)", + }, + "end_index": { + "type": "integer", + "description": "Ending character index for text observation (for 'scroll_to_text' and 'get_text' actions)", + }, + "query": { + "type": "string", + "description": "Search query for 'web_search' action", + }, + "goal": { + "type": "string", + "description": "Extraction goal for 'extract_content' action", + }, + "success": { + "type": "boolean", + "description": "Success status for 'done' action", + }, + "keys": { + "type": "string", + "description": "Keys to send for 'send_keys' action", + }, + "seconds": { + "type": "integer", + "description": "Seconds to wait for 'wait' action", + }, }, "required": ["action"], "dependencies": { "navigate": ["url"], - "click": ["index"], + "click_element": ["index"], "input_text": ["index", "text"], "execute_js": ["script"], "switch_tab": ["tab_id"], "new_tab": ["url"], - "scroll": ["scroll_amount"], + "scroll_down": ["scroll_amount"], + "scroll_up": ["scroll_amount"], + "scroll_to_text": ["text"], + "send_keys": ["keys"], + "get_dropdown_options": ["index"], + "select_dropdown_option": ["index", "text"], + "go_back": [], + "web_search": ["query"], + "wait": ["seconds"], + "done": ["text"], }, } @@ -98,6 +164,10 @@ class BrowserUseTool(BaseTool): browser: Optional[BrowserUseBrowser] = Field(default=None, exclude=True) context: Optional[BrowserContext] = Field(default=None, exclude=True) dom_service: Optional[DomService] = Field(default=None, exclude=True) + web_search_tool: WebSearch = Field(default_factory=WebSearch, exclude=True) + + # Context for generic functionality + tool_context: Optional[Context] = Field(default=None, exclude=True) @field_validator("parameters", mode="before") def validate_parameters(cls, v: dict, info: ValidationInfo) -> dict: @@ -163,6 +233,13 @@ class BrowserUseTool(BaseTool): script: Optional[str] = None, scroll_amount: Optional[int] = None, tab_id: 
Optional[int] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + query: Optional[str] = None, + goal: Optional[str] = None, + success: Optional[bool] = None, + keys: Optional[str] = None, + seconds: Optional[int] = None, **kwargs, ) -> ToolResult: """ @@ -172,10 +249,17 @@ class BrowserUseTool(BaseTool): action: The browser action to perform url: URL for navigation or new tab index: Element index for click or input actions - text: Text for input action + text: Text for input action or search query script: JavaScript code for execution scroll_amount: Pixels to scroll for scroll action tab_id: Tab ID for switch_tab action + start_index: Starting character index for text observation + end_index: Ending character index for text observation + query: Search query for Google search + goal: Extraction goal for content extraction + success: Success status for done action + keys: Keys to send for keyboard actions + seconds: Seconds to wait **kwargs: Additional arguments Returns: @@ -185,15 +269,52 @@ class BrowserUseTool(BaseTool): try: context = await self._ensure_browser_initialized() + # Get max content length from config + max_content_length = getattr( + config.browser_config, "max_content_length", 2000 + ) + + # Navigation actions if action == "navigate": if not url: return ToolResult(error="URL is required for 'navigate' action") await context.navigate_to(url) return ToolResult(output=f"Navigated to {url}") - elif action == "click": + elif action == "go_back": + await context.go_back() + return ToolResult(output="Navigated back") + + elif action == "refresh": + await context.refresh_page() + return ToolResult(output="Refreshed current page") + + elif action == "web_search": + if not query: + return ToolResult( + error="Query is required for 'web_search' action" + ) + search_results = await self.web_search_tool.execute(query) + + if search_results: + # Navigate to the first search result + first_result = search_results[0] + await context.navigate_to(first_result) + return ToolResult( + output=f"Searched for '{query}' and navigated to first result: {first_result}\nAll results:" + + "\n".join(search_results) + ) + else: + return ToolResult( + error=f"No search results found for '{query}'" + ) + + # Element interaction actions + elif action == "click_element": if index is None: - return ToolResult(error="Index is required for 'click' action") + return ToolResult( + error="Index is required for 'click_element' action" + ) element = await context.get_dom_element_by_index(index) if not element: return ToolResult(error=f"Element with index {index} not found") @@ -203,9 +324,6 @@ class BrowserUseTool(BaseTool): output += f" - Downloaded file to {download_path}" return ToolResult(output=output) - elif action == "get_current_state": - return await self.get_current_state(context) - elif action == "input_text": if index is None or not text: return ToolResult( @@ -219,6 +337,126 @@ class BrowserUseTool(BaseTool): output=f"Input '{text}' into element at index {index}" ) + elif action == "scroll_down" or action == "scroll_up": + direction = 1 if action == "scroll_down" else -1 + amount = ( + scroll_amount + if scroll_amount is not None + else context.config.browser_window_size["height"] + ) + await context.execute_javascript( + f"window.scrollBy(0, {direction * amount});" + ) + return ToolResult( + output=f"Scrolled {'down' if direction > 0 else 'up'} by {amount} pixels" + ) + + elif action == "scroll_to_text": + if not text: + return ToolResult( + error="Text is required for 
'scroll_to_text' action" + ) + page = await context.get_current_page() + try: + locator = page.get_by_text(text, exact=False) + await locator.scroll_into_view_if_needed() + return ToolResult(output=f"Scrolled to text: '{text}'") + except Exception as e: + return ToolResult(error=f"Failed to scroll to text: {str(e)}") + + elif action == "send_keys": + if not keys: + return ToolResult( + error="Keys are required for 'send_keys' action" + ) + page = await context.get_current_page() + await page.keyboard.press(keys) + return ToolResult(output=f"Sent keys: {keys}") + + elif action == "get_dropdown_options": + if index is None: + return ToolResult( + error="Index is required for 'get_dropdown_options' action" + ) + element = await context.get_dom_element_by_index(index) + if not element: + return ToolResult(error=f"Element with index {index} not found") + page = await context.get_current_page() + options = await page.evaluate( + """ + (xpath) => { + const select = document.evaluate(xpath, document, null, + XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; + if (!select) return null; + return Array.from(select.options).map(opt => ({ + text: opt.text, + value: opt.value, + index: opt.index + })); + } + """, + element.xpath, + ) + return ToolResult(output=f"Dropdown options: {options}") + + elif action == "select_dropdown_option": + if index is None or not text: + return ToolResult( + error="Index and text are required for 'select_dropdown_option' action" + ) + element = await context.get_dom_element_by_index(index) + if not element: + return ToolResult(error=f"Element with index {index} not found") + page = await context.get_current_page() + await page.select_option(element.xpath, label=text) + return ToolResult( + output=f"Selected option '{text}' from dropdown at index {index}" + ) + + # Content extraction actions + elif action == "get_current_state": + return await self.get_current_state(context) + + elif action == "get_html": + html = await context.get_page_html() + truncated = ( + html[:max_content_length] + "..." + if len(html) > max_content_length + else html + ) + return ToolResult(output=truncated) + + elif action == "get_text": + start = start_index if start_index is not None else 0 + end = end_index if end_index is not None else max_content_length + text = await context.execute_javascript( + f"document.body.innerText.substring({start}, {end})" + ) + full_length = await context.execute_javascript( + "document.body.innerText.length" + ) + result = f"Text from index {start} to {end}:\n{text}" + if end < full_length: + result += f"\n\n[Text continues... {full_length - end} more characters available]" + if start > 0: + result += f"\n[{start} characters before this point]" + return ToolResult(output=result) + + elif action == "read_links": + links = await context.execute_javascript( + "Array.from(document.querySelectorAll('a[href]')).map(elem => elem.innerText && elem.href ? 
`${elem.innerText.trim()} - ${elem.href}` : null).filter(Boolean).join('\\n')" + ) + return ToolResult(output=links) + + elif action == "extract_content": + if not goal: + return ToolResult( + error="Goal is required for 'extract_content' action" + ) + await context.get_page_html() + # Note: In a real implementation, this would use an LLM to extract content + return ToolResult(output=f"Extracted content for goal: {goal}") + elif action == "screenshot": screenshot = await context.take_screenshot(full_page=True) return ToolResult( @@ -226,22 +464,30 @@ class BrowserUseTool(BaseTool): system=screenshot, ) - elif action == "get_html": - html = await context.get_page_html() - truncated = ( - html[:MAX_LENGTH] + "..." if len(html) > MAX_LENGTH else html - ) - return ToolResult(output=truncated) + # Tab management actions + elif action == "switch_tab": + if tab_id is None: + return ToolResult( + error="Tab ID is required for 'switch_tab' action" + ) + await context.switch_to_tab(tab_id) + return ToolResult(output=f"Switched to tab {tab_id}") - elif action == "get_text": - text = await context.execute_javascript("document.body.innerText") - return ToolResult(output=text) + elif action == "open_tab": + if not url: + return ToolResult(error="URL is required for 'open_tab' action") + await context.create_new_tab(url) + return ToolResult(output=f"Opened new tab with URL {url}") - elif action == "read_links": - links = await context.execute_javascript( - "document.querySelectorAll('a[href]').forEach((elem) => {if (elem.innerText) {console.log(elem.innerText, elem.href)}})" - ) - return ToolResult(output=links) + elif action == "close_tab": + await context.close_current_tab() + return ToolResult(output="Closed current tab") + + # Utility actions + elif action == "wait": + seconds_to_wait = seconds if seconds is not None else 3 + await asyncio.sleep(seconds_to_wait) + return ToolResult(output=f"Waited for {seconds_to_wait} seconds") elif action == "execute_js": if not script: @@ -251,40 +497,12 @@ class BrowserUseTool(BaseTool): result = await context.execute_javascript(script) return ToolResult(output=str(result)) - elif action == "scroll": - if scroll_amount is None: - return ToolResult( - error="Scroll amount is required for 'scroll' action" - ) - await context.execute_javascript( - f"window.scrollBy(0, {scroll_amount});" - ) - direction = "down" if scroll_amount > 0 else "up" - return ToolResult( - output=f"Scrolled {direction} by {abs(scroll_amount)} pixels" - ) - - elif action == "switch_tab": - if tab_id is None: - return ToolResult( - error="Tab ID is required for 'switch_tab' action" - ) - await context.switch_to_tab(tab_id) - return ToolResult(output=f"Switched to tab {tab_id}") - - elif action == "new_tab": - if not url: - return ToolResult(error="URL is required for 'new_tab' action") - await context.create_new_tab(url) - return ToolResult(output=f"Opened new tab with URL {url}") - - elif action == "close_tab": - await context.close_current_tab() - return ToolResult(output="Closed current tab") - - elif action == "refresh": - await context.refresh_page() - return ToolResult(output="Refreshed current page") + # Task completion + elif action == "done": + if not text: + return ToolResult(error="Text is required for 'done' action") + success_value = success if success is not None else True + return ToolResult(output=text, is_done=True, success=success_value) else: return ToolResult(error=f"Unknown action: {action}") @@ -302,6 +520,13 @@ class BrowserUseTool(BaseTool): "tabs": [tab.model_dump() 
for tab in state.tabs], "help": "[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. Clicking on these indices will navigate to or interact with the respective content behind them.", "interactive_elements": state.element_tree.clickable_elements_to_string(), + "scroll_info": { + "pixels_above": state.pixels_above, + "pixels_below": state.pixels_below, + "total_height": state.pixels_above + + state.pixels_below + + (state.viewport_info.height if state.viewport_info else 0), + }, } return ToolResult( output=json.dumps(state_info, indent=4, ensure_ascii=False) @@ -329,3 +554,10 @@ class BrowserUseTool(BaseTool): loop = asyncio.new_event_loop() loop.run_until_complete(self.cleanup()) loop.close() + + @classmethod + def create_with_context(cls, context: Context) -> "BrowserUseTool[Context]": + """Factory method to create a BrowserUseTool with a specific context.""" + tool = cls() + tool.tool_context = context + return tool From 5cf34f82df63ef6c079a385b298d0662eb1acbc4 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Mon, 17 Mar 2025 23:51:16 +0800 Subject: [PATCH 64/77] remove WebSearch tool for Manus --- app/agent/manus.py | 3 +-- app/prompt/manus.py | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/app/agent/manus.py b/app/agent/manus.py index 4638c37..df784ed 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -8,7 +8,6 @@ from app.tool import Terminate, ToolCollection from app.tool.browser_use_tool import BrowserUseTool from app.tool.file_saver import FileSaver from app.tool.python_execute import PythonExecute -from app.tool.web_search import WebSearch class Manus(ToolCallAgent): @@ -34,7 +33,7 @@ class Manus(ToolCallAgent): # Add general-purpose tools to the tool collection available_tools: ToolCollection = Field( default_factory=lambda: ToolCollection( - PythonExecute(), WebSearch(), BrowserUseTool(), FileSaver(), Terminate() + PythonExecute(), BrowserUseTool(), FileSaver(), Terminate() ) ) diff --git a/app/prompt/manus.py b/app/prompt/manus.py index 6dcca8a..0cb944d 100644 --- a/app/prompt/manus.py +++ b/app/prompt/manus.py @@ -6,9 +6,7 @@ PythonExecute: Execute Python code to interact with the computer system, data pr FileSaver: Save files locally, such as txt, py, html, etc. -BrowserUseTool: Open, browse, and use web browsers.If you open a local HTML file, you must provide the absolute path to the file. - -WebSearch: Perform web information retrieval +BrowserUseTool: Open, browse, and use web browsers. If you open a local HTML file, you must provide the absolute path to the file. Terminate: End the current interaction when the task is complete or when you need additional information from the user. Use this tool to signal that you've finished addressing the user's request or need clarification before proceeding further. 
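With WebSearch removed from the Manus tool collection, search requests route
through the browser tool's `web_search` action added earlier in this series. A
minimal usage sketch (illustrative; the query is a placeholder, and this assumes
the project's browser dependencies are installed):

    import asyncio

    from app.tool.browser_use_tool import BrowserUseTool

    async def main() -> None:
        tool = BrowserUseTool()
        # 'web_search' runs the query, then navigates to the first result.
        result = await tool.execute(action="web_search", query="OpenManus agent")
        print(result.output or result.error)
        await tool.cleanup()  # release browser resources

    asyncio.run(main())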
From fc5e25343c8b469ef8518341c553fb9727d186e6 Mon Sep 17 00:00:00 2001
From: gantnocap
Date: Tue, 18 Mar 2025 00:40:29 +0800
Subject: [PATCH 65/77] refactor mcp folder

---
 {openmanus_server => mcp}/README.md | 21 ++++----
 {openmanus_server => mcp}/assets/1.jpg | Bin
 {openmanus_server => mcp}/assets/2.png | Bin
 .../assets/claude-desktop-mcp-hammer-icon.svg | 0
 {openmanus_server => mcp}/assets/demo.mp4 | Bin
 .../client/client.py | 51 +++++++-----------
 .../mcp_requirements.txt | 0
 .../server/server.py | 20 ++-----
 8 files changed, 35 insertions(+), 57 deletions(-)
 rename {openmanus_server => mcp}/README.md (81%)
 rename {openmanus_server => mcp}/assets/1.jpg (100%)
 rename {openmanus_server => mcp}/assets/2.png (100%)
 rename {openmanus_server => mcp}/assets/claude-desktop-mcp-hammer-icon.svg (100%)
 rename {openmanus_server => mcp}/assets/demo.mp4 (100%)
 rename openmanus_server/openmanus_client.py => mcp/client/client.py (85%)
 rename {openmanus_server => mcp}/mcp_requirements.txt (100%)
 rename openmanus_server/openmanus_server.py => mcp/server/server.py (89%)

diff --git a/openmanus_server/README.md b/mcp/README.md
similarity index 81%
rename from openmanus_server/README.md
rename to mcp/README.md
index bfd3339..c7892a3 100644
--- a/openmanus_server/README.md
+++ b/mcp/README.md
@@ -1,6 +1,6 @@
-# OpenManus-server 🤖
+# OpenManus-mcp 🤖

-This project provides a server based on [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) that exposes **OpenManus** tool functionalities as standardized APIs.
+Implement a server based on [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) that exposes **OpenManus** tool functionalities as standardized APIs and create a simple client to interact with the server.

 ## ✨ Features

@@ -42,7 +42,8 @@ uv pip install -r requirements.txt
 3. Install MCP dependencies:

 ```bash
-uv pip install -r openmanus_server/mcp_requirements.txt
+uv pip install -r mcp/mcp_requirements.txt
+playwright install
 ```

 ## Demo display
 https://github.com/user-attachments/assets/177b1f50-422f-4c2e-ab7d-1f3d7ff27679

 ## 📖 Usage

-### 1. Testing your server with Claude for Desktop 🖥️
+### 1. Testing the server with Claude for Desktop 🖥️

 > ⚠️ **Note**: Claude for Desktop is not yet available on Linux. Linux users can build an MCP client that connects to the server we just built.

@@ -75,9 +76,9 @@ In this case, we'll add our single Openmanus server like so:
         "command": "/ABSOLUTE/PATH/TO/PARENT/FOLDER/uv",
         "args": [
           "--directory",
-          "/ABSOLUTE/PATH/TO/OpenManus/openmanus_server",
+          "/ABSOLUTE/PATH/TO/OpenManus/mcp/server",
           "run",
-          "openmanus_server.py"
+          "server.py"
         ]
       }
     }
@@ -91,13 +92,13 @@ In this case, we'll add our single Openmanus server like so:
 #### Step 4: Understanding the Configuration 📝
 This tells Claude for Desktop:
 1. There's an MCP server named "openmanus" 🔌
-2. To launch it by running `uv --directory /ABSOLUTE/PATH/TO/OpenManus/openmanus_server run openmanus_server.py` 🚀
+2. To launch it by running `uv --directory /ABSOLUTE/PATH/TO/OpenManus/mcp/server run server.py` 🚀

 #### Step 5: Activation 🔄
 Save the file, and restart Claude for Desktop.

 #### Step 6: Verification ✨
-Let's make sure Claude for Desktop is picking up the six tools we've exposed in our `openmanus` server. You can do this by looking for the hammer icon ![hammer icon](./assets/claude-desktop-mcp-hammer-icon.svg)
+Let's make sure Claude for Desktop is picking up the five tools we've exposed in our `openmanus` server.
You can do this by looking for the hammer icon ![hammer icon](./assets/claude-desktop-mcp-hammer-icon.svg) ![tools_in_claude](./assets/1.jpg) After clicking on the hammer icon, you should see tools listed: @@ -111,12 +112,12 @@ After clicking on the hammer icon, you should see tools listed: ### 💻 2. Testing with simple Client Example -Check out `openmanus_client.py` to test the openmanus server using the MCP client. +Check out `client.py` to test the openmanus server using the MCP client. #### Demo display https://github.com/user-attachments/assets/aeacd93d-9bec-46d1-831b-20e898c7507b ``` -python openmanus_server/openmanus_client.py +python mcp/client/client.py ``` diff --git a/openmanus_server/assets/1.jpg b/mcp/assets/1.jpg similarity index 100% rename from openmanus_server/assets/1.jpg rename to mcp/assets/1.jpg diff --git a/openmanus_server/assets/2.png b/mcp/assets/2.png similarity index 100% rename from openmanus_server/assets/2.png rename to mcp/assets/2.png diff --git a/openmanus_server/assets/claude-desktop-mcp-hammer-icon.svg b/mcp/assets/claude-desktop-mcp-hammer-icon.svg similarity index 100% rename from openmanus_server/assets/claude-desktop-mcp-hammer-icon.svg rename to mcp/assets/claude-desktop-mcp-hammer-icon.svg diff --git a/openmanus_server/assets/demo.mp4 b/mcp/assets/demo.mp4 similarity index 100% rename from openmanus_server/assets/demo.mp4 rename to mcp/assets/demo.mp4 diff --git a/openmanus_server/openmanus_client.py b/mcp/client/client.py similarity index 85% rename from openmanus_server/openmanus_client.py rename to mcp/client/client.py index 0be1f47..76d19b5 100644 --- a/openmanus_server/openmanus_client.py +++ b/mcp/client/client.py @@ -1,66 +1,55 @@ -import ast import asyncio +import json import os import sys from contextlib import AsyncExitStack -from pathlib import Path from typing import Optional -import tomli from colorama import Fore, init -from dotenv import load_dotenv from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client from openai import AsyncOpenAI +# Add current directory to Python path +current_dir = os.path.dirname(os.path.abspath(__file__)) +parent_dir = os.path.dirname(current_dir) +sys.path.insert(0, parent_dir) +sys.path.insert(0, current_dir) + +# Add root directory to Python path +root_dir = os.path.dirname(parent_dir) +sys.path.insert(0, root_dir) +from app.config import config # Initialize colorama def init_colorama(): init(autoreset=True) - -# Load config -def load_config(): - config_path = Path(__file__).parent.parent / "config" / "config.toml" - try: - with open(config_path, "rb") as f: - return tomli.load(f) - except FileNotFoundError: - print(f"Error: config.toml not found at {config_path}") - sys.exit(1) - except tomli.TOMLDecodeError as e: - print(f"Error: Invalid TOML in config.toml: {e}") - sys.exit(1) - - -# Load environment variables (as fallback) -load_dotenv() - - class OpenManusClient: def __init__(self): # Load configuration - self.config = load_config() + # self.config = load_config() # Initialize session and client objects self.session: Optional[ClientSession] = None self.exit_stack = AsyncExitStack() # Initialize AsyncOpenAI client with config - api_key = self.config["llm"]["api_key"] or os.getenv("OPENAI_API_KEY") + self.llm_config = config.llm["default"] + api_key = self.llm_config.api_key or os.getenv("OPENAI_API_KEY") if not api_key: raise ValueError( "OpenAI API key not found in config.toml or environment variables" ) self.openai_client = AsyncOpenAI( - api_key=api_key, 
base_url=self.config["llm"]["base_url"] + api_key=api_key, base_url=self.llm_config.base_url ) async def connect_to_server(self, server_script_path: str = None): """Connect to the openmanus MCP server""" # Use provided path or default from config - script_path = server_script_path or self.config["server"]["default_script"] + script_path = server_script_path server_params = StdioServerParameters( command="python", args=[script_path], env=None @@ -134,7 +123,7 @@ class OpenManusClient: ] # Initial LLM API call response = await self.openai_client.chat.completions.create( - model=self.config["llm"]["model"], + model=self.llm_config.model, messages=messages, tools=available_tools, tool_choice="auto", @@ -171,7 +160,7 @@ class OpenManusClient: # Convert tool_args from string to dictionary if necessary if isinstance(tool_args, str): try: - tool_args = ast.literal_eval(tool_args) + tool_args = json.loads(tool_args) except (ValueError, SyntaxError) as e: print(f"Error converting tool_args to dict: {e}") tool_args = {} @@ -197,7 +186,7 @@ class OpenManusClient: # Get next response from LLM response = await self.openai_client.chat.completions.create( - model=self.config["llm"]["model"], + model=self.llm_config.model, messages=messages, tools=available_tools, tool_choice="auto", @@ -210,7 +199,7 @@ async def main(): if len(sys.argv) > 1: server_script = sys.argv[1] else: - server_script = "./openmanus_server/openmanus_server.py" + server_script = "mcp/server/server.py" client = OpenManusClient() try: diff --git a/openmanus_server/mcp_requirements.txt b/mcp/mcp_requirements.txt similarity index 100% rename from openmanus_server/mcp_requirements.txt rename to mcp/mcp_requirements.txt diff --git a/openmanus_server/openmanus_server.py b/mcp/server/server.py similarity index 89% rename from openmanus_server/openmanus_server.py rename to mcp/server/server.py index f0f6aee..1107ef9 100644 --- a/openmanus_server/openmanus_server.py +++ b/mcp/server/server.py @@ -14,6 +14,10 @@ parent_dir = os.path.dirname(current_dir) sys.path.insert(0, parent_dir) sys.path.insert(0, current_dir) +# Add root directory to Python path +root_dir = os.path.dirname(parent_dir) +sys.path.insert(0, root_dir) + # Configure logging logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" @@ -23,7 +27,6 @@ logger = logging.getLogger("mcp-server") # Import OpenManus tools from app.tool.browser_use_tool import BrowserUseTool from app.tool.file_saver import FileSaver -from app.tool.google_search import GoogleSearch from app.tool.python_execute import PythonExecute from app.tool.terminate import Terminate @@ -33,7 +36,6 @@ openmanus = FastMCP("openmanus") # Initialize tool instances browser_tool = BrowserUseTool() -google_search_tool = GoogleSearch() python_execute_tool = PythonExecute() file_saver_tool = FileSaver() terminate_tool = Terminate() @@ -94,20 +96,6 @@ async def get_browser_state() -> str: return json.dumps(result.model_dump()) -# Google search tool -@openmanus.tool() -async def google_search(query: str, num_results: int = 10) -> str: - """Execute Google search and return list of relevant links. 
- - Args: - query: Search query - num_results: Number of results to return (default is 10) - """ - logger.info(f"Executing Google search: {query}") - results = await google_search_tool.execute(query=query, num_results=num_results) - return json.dumps(results) - - # Python execution tool @openmanus.tool() async def python_execute(code: str, timeout: int = 5) -> str: From f6b2250e95bcbfe2d6120220604e700a47eefb1a Mon Sep 17 00:00:00 2001 From: gantnocap Date: Tue, 18 Mar 2025 00:42:04 +0800 Subject: [PATCH 66/77] reformat --- mcp/client/client.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mcp/client/client.py b/mcp/client/client.py index 76d19b5..5a1ec8d 100644 --- a/mcp/client/client.py +++ b/mcp/client/client.py @@ -6,9 +6,11 @@ from contextlib import AsyncExitStack from typing import Optional from colorama import Fore, init +from openai import AsyncOpenAI + from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client -from openai import AsyncOpenAI + # Add current directory to Python path current_dir = os.path.dirname(os.path.abspath(__file__)) @@ -21,10 +23,12 @@ root_dir = os.path.dirname(parent_dir) sys.path.insert(0, root_dir) from app.config import config + # Initialize colorama def init_colorama(): init(autoreset=True) + class OpenManusClient: def __init__(self): # Load configuration From 91d14a3a473f9e419caef879d3cf267d2501fbd5 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Tue, 18 Mar 2025 02:30:30 +0800 Subject: [PATCH 67/77] update llm, schema, BaseTool and BaseAgent --- app/agent/base.py | 8 ++++--- app/llm.py | 60 +++++++++++++++++++++++++++++++++++++---------- app/schema.py | 38 +++++++++++++++++++++++------- app/tool/base.py | 6 ++--- 4 files changed, 85 insertions(+), 27 deletions(-) diff --git a/app/agent/base.py b/app/agent/base.py index fa3db30..9ece688 100644 --- a/app/agent/base.py +++ b/app/agent/base.py @@ -84,6 +84,7 @@ class BaseAgent(BaseModel, ABC): self, role: ROLE_TYPE, # type: ignore content: str, + base64_image: Optional[str] = None, **kwargs, ) -> None: """Add a message to the agent's memory. @@ -91,6 +92,7 @@ class BaseAgent(BaseModel, ABC): Args: role: The role of the message sender (user, system, assistant, tool). content: The message content. + base64_image: Optional base64 encoded image. **kwargs: Additional arguments (e.g., tool_call_id for tool messages). Raises: @@ -106,9 +108,9 @@ class BaseAgent(BaseModel, ABC): if role not in message_map: raise ValueError(f"Unsupported message role: {role}") - msg_factory = message_map[role] - msg = msg_factory(content, **kwargs) if role == "tool" else msg_factory(content) - self.memory.add_message(msg) + # Create message with appropriate parameters based on role + kwargs = {"base64_image": base64_image, **(kwargs if role == "tool" else {})} + self.memory.add_message(message_map[role](content, **kwargs)) async def run(self, request: Optional[str] = None) -> str: """Execute the agent's main loop asynchronously. 
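
The `update_memory` rewrite above is compact, so here is a self-contained sketch of the dispatch pattern it uses, with plain dicts standing in for the real `Message` factories. The factory names mirror the patch; the bodies are simplified stand-ins, not the actual `app.schema` implementations.

```python
from typing import Optional


def user_message(content: str, base64_image: Optional[str] = None) -> dict:
    return {"role": "user", "content": content, "base64_image": base64_image}


def tool_message(content: str, base64_image: Optional[str] = None, **extra) -> dict:
    # Only the tool factory accepts extras such as tool_call_id.
    return {"role": "tool", "content": content, "base64_image": base64_image, **extra}


message_map = {"user": user_message, "tool": tool_message}


def update_memory(
    role: str, content: str, base64_image: Optional[str] = None, **kwargs
) -> dict:
    if role not in message_map:
        raise ValueError(f"Unsupported message role: {role}")
    # Every factory takes base64_image; extra kwargs are forwarded only for
    # tool messages, exactly as in the patched BaseAgent.update_memory.
    kwargs = {"base64_image": base64_image, **(kwargs if role == "tool" else {})}
    return message_map[role](content, **kwargs)


print(update_memory("user", "hello"))
print(update_memory("tool", "ok", tool_call_id="call_0"))
```
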
diff --git a/app/llm.py b/app/llm.py
index e354125..eccedf1 100644
--- a/app/llm.py
+++ b/app/llm.py
@@ -280,22 +280,58 @@ class LLM:
         formatted_messages = []

         for message in messages:
+            # Convert Message objects to dictionaries
             if isinstance(message, Message):
                 message = message.to_dict()
-            if isinstance(message, dict):
-                # If message is a dict, ensure it has required fields
-                if "role" not in message:
-                    raise ValueError("Message dict must contain 'role' field")
-                if "content" in message or "tool_calls" in message:
-                    formatted_messages.append(message)
-                # else: do not include the message
-            else:
+
+            if not isinstance(message, dict):
                 raise TypeError(f"Unsupported message type: {type(message)}")

-        # Validate all messages have required fields
-        for msg in formatted_messages:
-            if msg["role"] not in ROLE_VALUES:
-                raise ValueError(f"Invalid role: {msg['role']}")
+            # Validate required fields
+            if "role" not in message:
+                raise ValueError("Message dict must contain 'role' field")
+
+            # Process base64 images if present
+            if message.get("base64_image"):
+                # Initialize or convert content to appropriate format
+                if not message.get("content"):
+                    message["content"] = []
+                elif isinstance(message["content"], str):
+                    message["content"] = [{"type": "text", "text": message["content"]}]
+                elif isinstance(message["content"], list):
+                    # Convert string items to proper text objects
+                    message["content"] = [
+                        (
+                            {"type": "text", "text": item}
+                            if isinstance(item, str)
+                            else item
+                        )
+                        for item in message["content"]
+                    ]
+
+                # Add the image to content
+                message["content"].append(
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/jpeg;base64,{message['base64_image']}"
+                        },
+                    }
+                )
+
+                # Remove the base64_image field
+                del message["base64_image"]
+
+            # Only include messages with content or tool_calls
+            if "content" in message or "tool_calls" in message:
+                formatted_messages.append(message)
+
+        # Validate all roles
+        invalid_roles = [
+            msg for msg in formatted_messages if msg["role"] not in ROLE_VALUES
+        ]
+        if invalid_roles:
+            raise ValueError(f"Invalid role: {invalid_roles[0]['role']}")

         return formatted_messages

diff --git a/app/schema.py b/app/schema.py
index fb89c3c..de18c4f 100644
--- a/app/schema.py
+++ b/app/schema.py
@@ -59,6 +59,7 @@ class Message(BaseModel):
     tool_calls: Optional[List[ToolCall]] = Field(default=None)
     name: Optional[str] = Field(default=None)
     tool_call_id: Optional[str] = Field(default=None)
+    base64_image: Optional[str] = Field(default=None)

     def __add__(self, other) -> List["Message"]:
         """Support Message + list or Message + Message operations"""
@@ -91,12 +92,16 @@
             message["name"] = self.name
         if self.tool_call_id is not None:
             message["tool_call_id"] = self.tool_call_id
+        if self.base64_image is not None:
+            message["base64_image"] = self.base64_image
         return message

     @classmethod
-    def user_message(cls, content: str) -> "Message":
+    def user_message(
+        cls, content: str, base64_image: Optional[str] = None
+    ) -> "Message":
         """Create a user message"""
-        return cls(role=Role.USER, content=content)
+        return cls(role=Role.USER, content=content, base64_image=base64_image)

     @classmethod
     def system_message(cls, content: str) -> "Message":
         """Create a system message"""
         return cls(role=Role.SYSTEM, content=content)

     @classmethod
@@ -104,33 +109,50 @@
-    def assistant_message(cls, content: Optional[str] = None) -> "Message":
+    def assistant_message(
+        cls, content: Optional[str] = None, base64_image: Optional[str] = None
+    ) -> "Message":
         """Create an assistant message"""
-
return cls(role=Role.ASSISTANT, content=content) + return cls(role=Role.ASSISTANT, content=content, base64_image=base64_image) @classmethod - def tool_message(cls, content: str, name, tool_call_id: str) -> "Message": + def tool_message( + cls, content: str, name, tool_call_id: str, base64_image: Optional[str] = None + ) -> "Message": """Create a tool message""" return cls( - role=Role.TOOL, content=content, name=name, tool_call_id=tool_call_id + role=Role.TOOL, + content=content, + name=name, + tool_call_id=tool_call_id, + base64_image=base64_image, ) @classmethod def from_tool_calls( - cls, tool_calls: List[Any], content: Union[str, List[str]] = "", **kwargs + cls, + tool_calls: List[Any], + content: Union[str, List[str]] = "", + base64_image: Optional[str] = None, + **kwargs, ): """Create ToolCallsMessage from raw tool calls. Args: tool_calls: Raw tool calls from LLM content: Optional message content + base64_image: Optional base64 encoded image """ formatted_calls = [ {"id": call.id, "function": call.function.model_dump(), "type": "function"} for call in tool_calls ] return cls( - role=Role.ASSISTANT, content=content, tool_calls=formatted_calls, **kwargs + role=Role.ASSISTANT, + content=content, + tool_calls=formatted_calls, + base64_image=base64_image, + **kwargs, ) diff --git a/app/tool/base.py b/app/tool/base.py index ae3c9f5..ba4084d 100644 --- a/app/tool/base.py +++ b/app/tool/base.py @@ -37,6 +37,7 @@ class ToolResult(BaseModel): output: Any = Field(default=None) error: Optional[str] = Field(default=None) + base64_image: Optional[str] = Field(default=None) system: Optional[str] = Field(default=None) class Config: @@ -58,6 +59,7 @@ class ToolResult(BaseModel): return ToolResult( output=combine_fields(self.output, other.output), error=combine_fields(self.error, other.error), + base64_image=combine_fields(self.base64_image, other.base64_image, False), system=combine_fields(self.system, other.system), ) @@ -76,7 +78,3 @@ class CLIResult(ToolResult): class ToolFailure(ToolResult): """A ToolResult that represents a failure.""" - - -class AgentAwareTool: - agent: Optional = None From c3203e7fa3c49345c92ec8bd36897a710c1b1b40 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Tue, 18 Mar 2025 02:38:56 +0800 Subject: [PATCH 68/77] update BrowserUseTool --- app/tool/browser_use_tool.py | 274 ++++++++++++++++++----------------- 1 file changed, 144 insertions(+), 130 deletions(-) diff --git a/app/tool/browser_use_tool.py b/app/tool/browser_use_tool.py index 468d054..7817aef 100644 --- a/app/tool/browser_use_tool.py +++ b/app/tool/browser_use_tool.py @@ -10,6 +10,7 @@ from pydantic import Field, field_validator from pydantic_core.core_schema import ValidationInfo from app.config import config +from app.llm import LLM from app.tool.base import BaseTool, ToolResult from app.tool.web_search import WebSearch @@ -18,27 +19,22 @@ _BROWSER_DESCRIPTION = """ Interact with a web browser to perform various actions such as navigation, element interaction, content extraction, and tab management. This tool provides a comprehensive set of browser automation capabilities: Navigation: -- 'navigate': Go to a specific URL -- 'go_back': Navigate back in browser history +- 'go_to_url': Go to a specific URL in the current tab +- 'go_back': Go back - 'refresh': Refresh the current page -- 'web_search': Search the web with a specific query +- 'web_search': Search the query in the current tab, the query should be a search query like humans search in web, concrete and not vague or super long. 
Prefer the single most important items.

Element Interaction:
 - 'click_element': Click an element by index
 - 'input_text': Input text into a form element
 - 'scroll_down'/'scroll_up': Scroll the page (with optional pixel amount)
-- 'scroll_to_text': Scroll to specific text on the page
-- 'send_keys': Send keyboard shortcuts or special keys
+- 'scroll_to_text': If you don't find something you want to interact with, scroll to it
+- 'send_keys': Send strings of special keys like Escape, Backspace, Insert, PageDown, Delete, Enter. Shortcuts such as `Control+o` and `Control+Shift+T` are supported as well. This gets used in keyboard.press.
 - 'get_dropdown_options': Get all options from a dropdown
-- 'select_dropdown_option': Select an option from a dropdown by text
+- 'select_dropdown_option': Select a dropdown option for an interactive element index by the text of the option you want to select

 Content Extraction:
-- 'get_current_state': Get detailed browser state including URL, title, tabs, and interactive elements
-- 'get_html': Get page HTML content
-- 'get_text': Get text content of the page (supports start_index and end_index parameters)
-- 'read_links': Get all links on the page
-- 'extract_content': Extract specific information from the page using AI
-- 'screenshot': Capture a screenshot
+- 'extract_content': Extract page content to retrieve specific information from the page, e.g. all company names, a specific description, all information about a topic, links with companies in structured format, or simply links

 Tab Management:
 - 'switch_tab': Switch to a specific tab
 - 'open_tab': Open a new tab
 - 'close_tab': Close the current tab

 Utility:
 - 'wait': Wait for a specified number of seconds
-- 'execute_js': Execute JavaScript code on the page
-
-Task Completion:
-- 'done': Complete the task and return results
-
-Each action requires specific parameters. Use get_current_state first to understand the current browser context. 
""" Context = TypeVar("Context") @@ -67,15 +57,9 @@ class BrowserUseTool(BaseTool, Generic[Context]): "action": { "type": "string", "enum": [ - "navigate", + "go_to_url", "click_element", - "get_current_state", "input_text", - "screenshot", - "get_html", - "get_text", - "read_links", - "execute_js", "scroll_down", "scroll_up", "scroll_to_text", @@ -85,22 +69,24 @@ class BrowserUseTool(BaseTool, Generic[Context]): "go_back", "web_search", "wait", - "done", + "extract_content", + "switch_tab", + "open_tab", + "close_tab", ], "description": "The browser action to perform", }, "url": { "type": "string", - "description": "URL for 'navigate' or 'new_tab' actions", + "description": "URL for 'go_to_url' or 'open_tab' actions", }, "index": { "type": "integer", - "description": "Element index (retrieved using get_current_state) for 'click_element' or 'input_text' actions", + "description": "Element index for 'click_element', 'input_text', 'get_dropdown_options', or 'select_dropdown_option' actions", }, - "text": {"type": "string", "description": "Text for 'input_text' action"}, - "script": { + "text": { "type": "string", - "description": "JavaScript code for 'execute_js' action", + "description": "Text for 'input_text', 'scroll_to_text', or 'select_dropdown_option' actions", }, "scroll_amount": { "type": "integer", @@ -110,14 +96,6 @@ class BrowserUseTool(BaseTool, Generic[Context]): "type": "integer", "description": "Tab ID for 'switch_tab' action", }, - "start_index": { - "type": "integer", - "description": "Starting character index for text observation (for 'scroll_to_text' and 'get_text' actions)", - }, - "end_index": { - "type": "integer", - "description": "Ending character index for text observation (for 'scroll_to_text' and 'get_text' actions)", - }, "query": { "type": "string", "description": "Search query for 'web_search' action", @@ -126,10 +104,6 @@ class BrowserUseTool(BaseTool, Generic[Context]): "type": "string", "description": "Extraction goal for 'extract_content' action", }, - "success": { - "type": "boolean", - "description": "Success status for 'done' action", - }, "keys": { "type": "string", "description": "Keys to send for 'send_keys' action", @@ -141,12 +115,11 @@ class BrowserUseTool(BaseTool, Generic[Context]): }, "required": ["action"], "dependencies": { - "navigate": ["url"], + "go_to_url": ["url"], "click_element": ["index"], "input_text": ["index", "text"], - "execute_js": ["script"], "switch_tab": ["tab_id"], - "new_tab": ["url"], + "open_tab": ["url"], "scroll_down": ["scroll_amount"], "scroll_up": ["scroll_amount"], "scroll_to_text": ["text"], @@ -156,7 +129,7 @@ class BrowserUseTool(BaseTool, Generic[Context]): "go_back": [], "web_search": ["query"], "wait": ["seconds"], - "done": ["text"], + "extract_content": ["goal"], }, } @@ -169,6 +142,8 @@ class BrowserUseTool(BaseTool, Generic[Context]): # Context for generic functionality tool_context: Optional[Context] = Field(default=None, exclude=True) + llm: Optional[LLM] = Field(default_factory=LLM) + @field_validator("parameters", mode="before") def validate_parameters(cls, v: dict, info: ValidationInfo) -> dict: if not v: @@ -230,14 +205,10 @@ class BrowserUseTool(BaseTool, Generic[Context]): url: Optional[str] = None, index: Optional[int] = None, text: Optional[str] = None, - script: Optional[str] = None, scroll_amount: Optional[int] = None, tab_id: Optional[int] = None, - start_index: Optional[int] = None, - end_index: Optional[int] = None, query: Optional[str] = None, goal: Optional[str] = None, - success: 
Optional[bool] = None, keys: Optional[str] = None, seconds: Optional[int] = None, **kwargs, @@ -250,14 +221,10 @@ class BrowserUseTool(BaseTool, Generic[Context]): url: URL for navigation or new tab index: Element index for click or input actions text: Text for input action or search query - script: JavaScript code for execution scroll_amount: Pixels to scroll for scroll action tab_id: Tab ID for switch_tab action - start_index: Starting character index for text observation - end_index: Ending character index for text observation query: Search query for Google search goal: Extraction goal for content extraction - success: Success status for done action keys: Keys to send for keyboard actions seconds: Seconds to wait **kwargs: Additional arguments @@ -275,10 +242,14 @@ class BrowserUseTool(BaseTool, Generic[Context]): ) # Navigation actions - if action == "navigate": + if action == "go_to_url": if not url: - return ToolResult(error="URL is required for 'navigate' action") - await context.navigate_to(url) + return ToolResult( + error="URL is required for 'go_to_url' action" + ) + page = await context.get_current_page() + await page.goto(url) + await page.wait_for_load_state() return ToolResult(output=f"Navigated to {url}") elif action == "go_back": @@ -299,10 +270,22 @@ class BrowserUseTool(BaseTool, Generic[Context]): if search_results: # Navigate to the first search result first_result = search_results[0] - await context.navigate_to(first_result) + if isinstance(first_result, dict) and "url" in first_result: + url_to_navigate = first_result["url"] + elif isinstance(first_result, str): + url_to_navigate = first_result + else: + return ToolResult( + error=f"Invalid search result format: {first_result}" + ) + + page = await context.get_current_page() + await page.goto(url_to_navigate) + await page.wait_for_load_state() + return ToolResult( - output=f"Searched for '{query}' and navigated to first result: {first_result}\nAll results:" - + "\n".join(search_results) + output=f"Searched for '{query}' and navigated to first result: {url_to_navigate}\nAll results:" + + "\n".join([str(r) for r in search_results]) ) else: return ToolResult( @@ -414,55 +397,70 @@ class BrowserUseTool(BaseTool, Generic[Context]): ) # Content extraction actions - elif action == "get_current_state": - return await self.get_current_state(context) - - elif action == "get_html": - html = await context.get_page_html() - truncated = ( - html[:max_content_length] + "..." - if len(html) > max_content_length - else html - ) - return ToolResult(output=truncated) - - elif action == "get_text": - start = start_index if start_index is not None else 0 - end = end_index if end_index is not None else max_content_length - text = await context.execute_javascript( - f"document.body.innerText.substring({start}, {end})" - ) - full_length = await context.execute_javascript( - "document.body.innerText.length" - ) - result = f"Text from index {start} to {end}:\n{text}" - if end < full_length: - result += f"\n\n[Text continues... {full_length - end} more characters available]" - if start > 0: - result += f"\n[{start} characters before this point]" - return ToolResult(output=result) - - elif action == "read_links": - links = await context.execute_javascript( - "Array.from(document.querySelectorAll('a[href]')).map(elem => elem.innerText && elem.href ? 
`${elem.innerText.trim()} - ${elem.href}` : null).filter(Boolean).join('\\n')" - ) - return ToolResult(output=links) - elif action == "extract_content": if not goal: return ToolResult( error="Goal is required for 'extract_content' action" ) - await context.get_page_html() - # Note: In a real implementation, this would use an LLM to extract content - return ToolResult(output=f"Extracted content for goal: {goal}") + page = await context.get_current_page() + try: + # Get page content and convert to markdown for better processing + html_content = await page.content() - elif action == "screenshot": - screenshot = await context.take_screenshot(full_page=True) - return ToolResult( - output=f"Screenshot captured (base64 length: {len(screenshot)})", - system=screenshot, - ) + # Import markdownify here to avoid global import + try: + import markdownify + + content = markdownify.markdownify(html_content) + except ImportError: + # Fallback if markdownify is not available + content = html_content + + # Create prompt for LLM + prompt_text = """ +Your task is to extract the content of the page. You will be given a page and a goal, and you should extract all relevant information around this goal from the page. + +Examples of extraction goals: +- Extract all company names +- Extract specific descriptions +- Extract all information about a topic +- Extract links with companies in structured format +- Extract all links + +If the goal is vague, summarize the page. Respond in JSON format. + +Extraction goal: {goal} + +Page content: +{page} +""" + # Format the prompt with the goal and content + max_content_length = min(50000, len(content)) + formatted_prompt = prompt_text.format( + goal=goal, page=content[:max_content_length] + ) + + # Create a proper message list for the LLM + from app.schema import Message + + messages = [Message.user_message(formatted_prompt)] + + # Use LLM to extract content based on the goal + response = await self.llm.ask(messages) + + msg = f"Extracted from page:\n{response}\n" + return ToolResult(output=msg) + except Exception as e: + # Provide a more helpful error message + error_msg = f"Failed to extract content: {str(e)}" + try: + # Try to return a portion of the page content as fallback + return ToolResult( + output=f"{error_msg}\nHere's a portion of the page content:\n{content[:2000]}..." 
+ ) + except: + # If all else fails, just return the error + return ToolResult(error=error_msg) # Tab management actions elif action == "switch_tab": @@ -471,13 +469,15 @@ class BrowserUseTool(BaseTool, Generic[Context]): error="Tab ID is required for 'switch_tab' action" ) await context.switch_to_tab(tab_id) + page = await context.get_current_page() + await page.wait_for_load_state() return ToolResult(output=f"Switched to tab {tab_id}") elif action == "open_tab": if not url: return ToolResult(error="URL is required for 'open_tab' action") await context.create_new_tab(url) - return ToolResult(output=f"Opened new tab with URL {url}") + return ToolResult(output=f"Opened new tab with {url}") elif action == "close_tab": await context.close_current_tab() @@ -489,47 +489,61 @@ class BrowserUseTool(BaseTool, Generic[Context]): await asyncio.sleep(seconds_to_wait) return ToolResult(output=f"Waited for {seconds_to_wait} seconds") - elif action == "execute_js": - if not script: - return ToolResult( - error="Script is required for 'execute_js' action" - ) - result = await context.execute_javascript(script) - return ToolResult(output=str(result)) - - # Task completion - elif action == "done": - if not text: - return ToolResult(error="Text is required for 'done' action") - success_value = success if success is not None else True - return ToolResult(output=text, is_done=True, success=success_value) - else: return ToolResult(error=f"Unknown action: {action}") except Exception as e: return ToolResult(error=f"Browser action '{action}' failed: {str(e)}") - async def get_current_state(self, context: BrowserContext) -> ToolResult: - """Get the current browser state as a ToolResult.""" + async def get_current_state( + self, context: Optional[BrowserContext] = None + ) -> ToolResult: + """ + Get the current browser state as a ToolResult. + If context is not provided, uses self.context. + """ try: - state = await context.get_state() + # Use provided context or fall back to self.context + ctx = context or self.context + if not ctx: + return ToolResult(error="Browser context not initialized") + + state = await ctx.get_state() + + # Create a viewport_info dictionary if it doesn't exist + viewport_height = 0 + if hasattr(state, "viewport_info") and state.viewport_info: + viewport_height = state.viewport_info.height + elif hasattr(ctx, "config") and hasattr(ctx.config, "browser_window_size"): + viewport_height = ctx.config.browser_window_size.get("height", 0) + + # Take a screenshot for the state + screenshot = await ctx.take_screenshot(full_page=True) + + # Build the state info with all required fields state_info = { "url": state.url, "title": state.title, "tabs": [tab.model_dump() for tab in state.tabs], "help": "[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. 
Clicking on these indices will navigate to or interact with the respective content behind them.", - "interactive_elements": state.element_tree.clickable_elements_to_string(), + "interactive_elements": ( + state.element_tree.clickable_elements_to_string() + if state.element_tree + else "" + ), "scroll_info": { - "pixels_above": state.pixels_above, - "pixels_below": state.pixels_below, - "total_height": state.pixels_above - + state.pixels_below - + (state.viewport_info.height if state.viewport_info else 0), + "pixels_above": getattr(state, "pixels_above", 0), + "pixels_below": getattr(state, "pixels_below", 0), + "total_height": getattr(state, "pixels_above", 0) + + getattr(state, "pixels_below", 0) + + viewport_height, }, + "viewport_height": viewport_height, } + return ToolResult( - output=json.dumps(state_info, indent=4, ensure_ascii=False) + output=json.dumps(state_info, indent=4, ensure_ascii=False), + base64_image=screenshot, ) except Exception as e: return ToolResult(error=f"Failed to get browser state: {str(e)}") From 2509bc30c49e08b9de616022932656fd7ded6dd2 Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Tue, 18 Mar 2025 02:39:11 +0800 Subject: [PATCH 69/77] update ToolCallAgent and Manus --- app/agent/manus.py | 46 ++++++++++++++++++++++++++++++++++++++++++- app/agent/toolcall.py | 41 ++++++++++++++++++++++++++++++-------- 2 files changed, 78 insertions(+), 9 deletions(-) diff --git a/app/agent/manus.py b/app/agent/manus.py index df784ed..06101aa 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -1,8 +1,10 @@ -from typing import Any +import json +from typing import Any, Optional from pydantic import Field from app.agent.toolcall import ToolCallAgent +from app.logger import logger from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.tool import Terminate, ToolCollection from app.tool.browser_use_tool import BrowserUseTool @@ -43,3 +45,45 @@ class Manus(ToolCallAgent): else: await self.available_tools.get_tool(BrowserUseTool().name).cleanup() await super()._handle_special_tool(name, result, **kwargs) + + async def get_browser_state(self) -> Optional[dict]: + """Get the current browser state for context in next steps.""" + browser_tool = self.available_tools.get_tool(BrowserUseTool().name) + if not browser_tool: + return None + + try: + # Get browser state directly from the tool with no context parameter + result = await browser_tool.get_current_state() + + if result.error: + logger.debug(f"Browser state error: {result.error}") + return None + + # Store screenshot if available + if hasattr(result, "base64_image") and result.base64_image: + self._current_base64_image = result.base64_image + + # Parse the state info + return json.loads(result.output) + + except Exception as e: + logger.debug(f"Failed to get browser state: {str(e)}") + return None + + async def think(self) -> bool: + # Add your custom pre-processing here + browser_state = await self.get_browser_state() + + # Modify the next_step_prompt temporarily + original_prompt = self.next_step_prompt + if browser_state and not browser_state.get("error"): + self.next_step_prompt += f"\nCurrent browser state:\nURL: {browser_state.get('url', 'N/A')}\nTitle: {browser_state.get('title', 'N/A')}\n" + + # Call parent implementation + result = await super().think() + + # Restore original prompt + self.next_step_prompt = original_prompt + + return result diff --git a/app/agent/toolcall.py b/app/agent/toolcall.py index 29e5af4..131fd91 100644 --- a/app/agent/toolcall.py +++ 
b/app/agent/toolcall.py @@ -30,6 +30,7 @@ class ToolCallAgent(ReActAgent): special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name]) tool_calls: List[ToolCall] = Field(default_factory=list) + _current_base64_image: Optional[str] = None max_steps: int = 30 max_observe: Optional[Union[int, bool]] = None @@ -44,9 +45,11 @@ class ToolCallAgent(ReActAgent): # Get response with tool options response = await self.llm.ask_tool( messages=self.messages, - system_msgs=[Message.system_message(self.system_prompt)] - if self.system_prompt - else None, + system_msgs=( + [Message.system_message(self.system_prompt)] + if self.system_prompt + else None + ), tools=self.available_tools.to_params(), tool_choice=self.tool_choices, ) @@ -79,6 +82,9 @@ class ToolCallAgent(ReActAgent): logger.info( f"🧰 Tools being prepared: {[call.function.name for call in response.tool_calls]}" ) + logger.info( + f"🔧 Tool arguments: {response.tool_calls[0].function.arguments}" + ) try: # Handle different tool_choices modes @@ -130,6 +136,9 @@ class ToolCallAgent(ReActAgent): results = [] for command in self.tool_calls: + # Reset base64_image for each tool call + self._current_base64_image = None + result = await self.execute_tool(command) if self.max_observe: @@ -141,7 +150,10 @@ class ToolCallAgent(ReActAgent): # Add tool response to memory tool_msg = Message.tool_message( - content=result, tool_call_id=command.id, name=command.function.name + content=result, + tool_call_id=command.id, + name=command.function.name, + base64_image=self._current_base64_image, ) self.memory.add_message(tool_msg) results.append(result) @@ -165,16 +177,29 @@ class ToolCallAgent(ReActAgent): logger.info(f"🔧 Activating tool: '{name}'...") result = await self.available_tools.execute(name=name, tool_input=args) - # Format result for display + # Handle special tools + await self._handle_special_tool(name=name, result=result) + + # Check if result is a ToolResult with base64_image + if hasattr(result, "base64_image") and result.base64_image: + # Store the base64_image for later use in tool_message + self._current_base64_image = result.base64_image + + # Format result for display + observation = ( + f"Observed output of cmd `{name}` executed:\n{str(result)}" + if result + else f"Cmd `{name}` completed with no output" + ) + return observation + + # Format result for display (standard case) observation = ( f"Observed output of cmd `{name}` executed:\n{str(result)}" if result else f"Cmd `{name}` completed with no output" ) - # Handle special tools like `finish` - await self._handle_special_tool(name=name, result=result) - return observation except json.JSONDecodeError: error_msg = f"Error parsing arguments for {name}: Invalid JSON format" From cc550af04b21a05ef1739d15c0f13f31f7b6a9ae Mon Sep 17 00:00:00 2001 From: xiangjinyu <1376193973@qq.com> Date: Tue, 18 Mar 2025 14:25:23 +0800 Subject: [PATCH 70/77] edit str_replace_editor.py --- app/tool/str_replace_editor.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/app/tool/str_replace_editor.py b/app/tool/str_replace_editor.py index 4094565..07f4c46 100644 --- a/app/tool/str_replace_editor.py +++ b/app/tool/str_replace_editor.py @@ -138,10 +138,7 @@ class StrReplaceEditor(BaseTool): """ # Check if its an absolute path if not path.is_absolute(): - suggested_path = Path("") / path - raise ToolError( - f"The path {path} is not an absolute path, it should start with `/`. Maybe you meant {suggested_path}?" 
- ) + raise ToolError(f"The path {path} is not an absolute path") # Check if path exists if not path.exists() and command != "create": raise ToolError( From 2e661d486dfe438dda086a67648b9beb1e33b67d Mon Sep 17 00:00:00 2001 From: xiangjinyu <1376193973@qq.com> Date: Tue, 18 Mar 2025 14:56:05 +0800 Subject: [PATCH 71/77] modify manus prompt and directory --- app/agent/manus.py | 13 +++++++++---- app/prompt/manus.py | 18 +++++------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/app/agent/manus.py b/app/agent/manus.py index 06101aa..fd3e843 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py @@ -1,4 +1,6 @@ import json +import os +from pathlib import Path from typing import Any, Optional from pydantic import Field @@ -8,8 +10,11 @@ from app.logger import logger from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.tool import Terminate, ToolCollection from app.tool.browser_use_tool import BrowserUseTool -from app.tool.file_saver import FileSaver from app.tool.python_execute import PythonExecute +from app.tool.str_replace_editor import StrReplaceEditor + + +initial_working_directory = Path(os.getcwd()) / "workspace" class Manus(ToolCallAgent): @@ -26,16 +31,16 @@ class Manus(ToolCallAgent): "A versatile agent that can solve various tasks using multiple tools" ) - system_prompt: str = SYSTEM_PROMPT + system_prompt: str = SYSTEM_PROMPT.format(directory=initial_working_directory) next_step_prompt: str = NEXT_STEP_PROMPT - max_observe: int = 2000 + max_observe: int = 10000 max_steps: int = 20 # Add general-purpose tools to the tool collection available_tools: ToolCollection = Field( default_factory=lambda: ToolCollection( - PythonExecute(), BrowserUseTool(), FileSaver(), Terminate() + PythonExecute(), BrowserUseTool(), StrReplaceEditor(), Terminate() ) ) diff --git a/app/prompt/manus.py b/app/prompt/manus.py index 0cb944d..f080ba4 100644 --- a/app/prompt/manus.py +++ b/app/prompt/manus.py @@ -1,16 +1,8 @@ -SYSTEM_PROMPT = "You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, or web browsing, you can handle it all." - -NEXT_STEP_PROMPT = """You can interact with the computer using PythonExecute, save important content and information files through FileSaver, open browsers with BrowserUseTool, and retrieve information using GoogleSearch. - -PythonExecute: Execute Python code to interact with the computer system, data processing, automation tasks, etc. - -FileSaver: Save files locally, such as txt, py, html, etc. - -BrowserUseTool: Open, browse, and use web browsers. If you open a local HTML file, you must provide the absolute path to the file. - -Terminate: End the current interaction when the task is complete or when you need additional information from the user. Use this tool to signal that you've finished addressing the user's request or need clarification before proceeding further. +SYSTEM_PROMPT = ( + "You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, or web browsing, you can handle it all." 
+ "The initial directory is: {directory}" +) +NEXT_STEP_PROMPT = """ Based on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. After using each tool, clearly explain the execution results and suggest the next steps. - -Always maintain a helpful, informative tone throughout the interaction. If you encounter any limitations or need more details, clearly communicate this to the user before terminating. """ From 3d7d55347656b290ceefea3a242d7f9cff1f0c5d Mon Sep 17 00:00:00 2001 From: xiangjinyu <1376193973@qq.com> Date: Tue, 18 Mar 2025 14:59:51 +0800 Subject: [PATCH 72/77] add example.txt in workspace --- workspace/example.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 workspace/example.txt diff --git a/workspace/example.txt b/workspace/example.txt new file mode 100644 index 0000000..08a2808 --- /dev/null +++ b/workspace/example.txt @@ -0,0 +1 @@ +This is a sample file. Files generated by OpenManus are stored in the current folder by default. From 91b1d06f9ceb8b6e41480f9e64ec783d304e1fbe Mon Sep 17 00:00:00 2001 From: Sheng Fan Date: Tue, 18 Mar 2025 16:27:54 +0800 Subject: [PATCH 73/77] ci(top-issues): Enable top bugs & feature requests --- .github/workflows/top-issues.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/top-issues.yaml b/.github/workflows/top-issues.yaml index 85ad0d8..9ad9f59 100644 --- a/.github/workflows/top-issues.yaml +++ b/.github/workflows/top-issues.yaml @@ -23,5 +23,7 @@ jobs: dashboard: true dashboard_show_total_reactions: true top_issues: true + top_features: true + top_bugs: true top_pull_requests: true top_list_size: 14 From 1204d841ae30f6db37119c87f2e356b7f9175151 Mon Sep 17 00:00:00 2001 From: seeker Date: Tue, 18 Mar 2025 17:56:29 +0800 Subject: [PATCH 74/77] fix: fix bug --- app/agent/base.py | 3 ++- app/config.py | 19 +++++++++---------- app/sandbox/client.py | 2 +- app/tool/str_replace_editor.py | 26 +------------------------- config/config.example.toml | 2 +- tests/sandbox/test_client.py | 2 +- 6 files changed, 15 insertions(+), 39 deletions(-) diff --git a/app/agent/base.py b/app/agent/base.py index 9ece688..65f6600 100644 --- a/app/agent/base.py +++ b/app/agent/base.py @@ -6,6 +6,7 @@ from pydantic import BaseModel, Field, model_validator from app.llm import LLM from app.logger import logger +from app.sandbox.client import SANDBOX_CLIENT from app.schema import ROLE_TYPE, AgentState, Memory, Message @@ -149,7 +150,7 @@ class BaseAgent(BaseModel, ABC): self.current_step = 0 self.state = AgentState.IDLE results.append(f"Terminated: Reached max steps ({self.max_steps})") - + await SANDBOX_CLIENT.cleanup() return "\n".join(results) if results else "No steps executed" @abstractmethod diff --git a/app/config.py b/app/config.py index f62354a..3c0ebc3 100644 --- a/app/config.py +++ b/app/config.py @@ -210,18 +210,17 @@ class Config: def llm(self) -> Dict[str, LLMSettings]: return self._config.llm + @property + def sandbox(self) -> SandboxSettings: + return self._config.sandbox -def sandbox(self) -> SandboxSettings: - return self._config.sandbox + @property + def browser_config(self) -> Optional[BrowserSettings]: + return self._config.browser_config - -def browser_config(self) -> Optional[BrowserSettings]: - return self._config.browser_config - - -@property -def search_config(self) -> Optional[SearchSettings]: - return self._config.search_config + @property + def search_config(self) -> 
Optional[SearchSettings]: + return self._config.search_config config = Config() diff --git a/app/sandbox/client.py b/app/sandbox/client.py index 06a534e..09a8f2e 100644 --- a/app/sandbox/client.py +++ b/app/sandbox/client.py @@ -189,7 +189,7 @@ class LocalSandboxClient(BaseSandboxClient): self.sandbox = None -async def create_sandbox_client() -> LocalSandboxClient: +def create_sandbox_client() -> LocalSandboxClient: """Creates a sandbox client. Returns: diff --git a/app/tool/str_replace_editor.py b/app/tool/str_replace_editor.py index 84580de..a907f41 100644 --- a/app/tool/str_replace_editor.py +++ b/app/tool/str_replace_editor.py @@ -100,7 +100,6 @@ class StrReplaceEditor(BaseTool): } _file_history: DefaultDict[PathLike, List[str]] = defaultdict(list) _local_operator: LocalFileOperator = LocalFileOperator() - # todo: Sandbox resources need to be destroyed at the appropriate time. _sandbox_operator: SandboxFileOperator = SandboxFileOperator() # def _get_operator(self, use_sandbox: bool) -> FileOperator: @@ -129,7 +128,7 @@ class StrReplaceEditor(BaseTool): operator = self._get_operator() # Validate path and command combination - await self.validate_path(command, path, operator) + await self.validate_path(command, Path(path), operator) # Execute the appropriate command if command == "view": @@ -164,14 +163,12 @@ class StrReplaceEditor(BaseTool): return str(result) - # <<<<<<< HEAD async def validate_path( self, command: str, path: Path, operator: FileOperator ) -> None: """Validate path and command combination based on execution environment.""" # Check if path is absolute if not path.is_absolute(): - # suggested_path = f"/{path}" raise ToolError(f"The path {path} is not an absolute path") # Only check if path exists for non-create commands @@ -184,27 +181,6 @@ class StrReplaceEditor(BaseTool): # Check if path is a directory is_dir = await operator.is_directory(path) if is_dir and command != "view": - # ======= - # def validate_path(self, command: str, path: Path): - # """ - # Check that the path/command combination is valid. - # """ - # # Check if its an absolute path - # if not path.is_absolute(): - # raise ToolError(f"The path {path} is not an absolute path") - # # Check if path exists - # if not path.exists() and command != "create": - # raise ToolError( - # f"The path {path} does not exist. Please provide a valid path." - # ) - # if path.exists() and command == "create": - # raise ToolError( - # f"File already exists at: {path}. Cannot overwrite files using command `create`." 
- # ) - # # Check if the path points to a directory - # if path.is_dir(): - # if command != "view": - # >>>>>>> upstream/main raise ToolError( f"The path {path} is a directory and only the `view` command can be used on directories" ) diff --git a/config/config.example.toml b/config/config.example.toml index dcddeb8..d5750a2 100644 --- a/config/config.example.toml +++ b/config/config.example.toml @@ -69,7 +69,7 @@ temperature = 0.0 # Controls randomness for vision mod ## Sandbox configuration #[sandbox] #use_sandbox = false -#image = "python:3.10-slim" +#image = "python:3.12-slim" #work_dir = "/workspace" #memory_limit = "1g" # 512m #cpu_limit = 2.0 diff --git a/tests/sandbox/test_client.py b/tests/sandbox/test_client.py index 1e4557d..a69b894 100644 --- a/tests/sandbox/test_client.py +++ b/tests/sandbox/test_client.py @@ -12,7 +12,7 @@ from app.sandbox.client import LocalSandboxClient, create_sandbox_client @pytest_asyncio.fixture(scope="function") async def local_client() -> AsyncGenerator[LocalSandboxClient, None]: """Creates a local sandbox client for testing.""" - client = await create_sandbox_client() + client = create_sandbox_client() try: yield client finally: From 99f1f054e414c3450264d1f8e9de78187cdbb9fa Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Tue, 18 Mar 2025 20:52:42 +0800 Subject: [PATCH 75/77] change python:3.10 to python:3.12 for docker image --- app/config.py | 2 +- tests/sandbox/test_client.py | 2 +- tests/sandbox/test_docker_terminal.py | 2 +- tests/sandbox/test_sandbox.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/app/config.py b/app/config.py index 3c0ebc3..6d724ec 100644 --- a/app/config.py +++ b/app/config.py @@ -68,7 +68,7 @@ class SandboxSettings(BaseModel): """Configuration for the execution sandbox""" use_sandbox: bool = Field(False, description="Whether to use the sandbox") - image: str = Field("python:3.10-slim", description="Base image") + image: str = Field("python:3.12-slim", description="Base image") work_dir: str = Field("/workspace", description="Container working directory") memory_limit: str = Field("512m", description="Memory limit") cpu_limit: float = Field(1.0, description="CPU limit") diff --git a/tests/sandbox/test_client.py b/tests/sandbox/test_client.py index a69b894..6b2c61f 100644 --- a/tests/sandbox/test_client.py +++ b/tests/sandbox/test_client.py @@ -30,7 +30,7 @@ def temp_dir() -> Path: async def test_sandbox_creation(local_client: LocalSandboxClient): """Tests sandbox creation with specific configuration.""" config = SandboxSettings( - image="python:3.10-slim", + image="python:3.12-slim", work_dir="/workspace", memory_limit="512m", cpu_limit=0.5, diff --git a/tests/sandbox/test_docker_terminal.py b/tests/sandbox/test_docker_terminal.py index 7903d95..bf0821a 100644 --- a/tests/sandbox/test_docker_terminal.py +++ b/tests/sandbox/test_docker_terminal.py @@ -17,7 +17,7 @@ def docker_client(): async def docker_container(docker_client): """Fixture providing a test Docker container.""" container = docker_client.containers.run( - "python:3.10-slim", + "python:3.12-slim", "tail -f /dev/null", name="test_container", detach=True, diff --git a/tests/sandbox/test_sandbox.py b/tests/sandbox/test_sandbox.py index 3eea366..b21dd6f 100644 --- a/tests/sandbox/test_sandbox.py +++ b/tests/sandbox/test_sandbox.py @@ -8,7 +8,7 @@ from app.sandbox.core.sandbox import DockerSandbox, SandboxSettings def sandbox_config(): """Creates sandbox configuration for testing.""" return SandboxSettings( - 
image="python:3.10-slim", + image="python:3.12-slim", work_dir="/workspace", memory_limit="1g", cpu_limit=0.5, From dc42bd525a4004f6166bf881baff52497688616a Mon Sep 17 00:00:00 2001 From: liangxinbing <1580466765@qq.com> Date: Tue, 18 Mar 2025 22:51:27 +0800 Subject: [PATCH 76/77] add BrowserAgent and update Manus --- app/agent/__init__.py | 2 + app/agent/browser.py | 129 ++++++++++++++++++++++++++++++++++++++++++ app/agent/manus.py | 63 ++++++--------------- app/prompt/browser.py | 92 ++++++++++++++++++++++++++++++ app/tool/__init__.py | 2 + 5 files changed, 243 insertions(+), 45 deletions(-) create mode 100644 app/agent/browser.py create mode 100644 app/prompt/browser.py diff --git a/app/agent/__init__.py b/app/agent/__init__.py index a7b69c7..4b8fb9d 100644 --- a/app/agent/__init__.py +++ b/app/agent/__init__.py @@ -1,4 +1,5 @@ from app.agent.base import BaseAgent +from app.agent.browser import BrowserAgent from app.agent.planning import PlanningAgent from app.agent.react import ReActAgent from app.agent.swe import SWEAgent @@ -7,6 +8,7 @@ from app.agent.toolcall import ToolCallAgent __all__ = [ "BaseAgent", + "BrowserAgent", "PlanningAgent", "ReActAgent", "SWEAgent", diff --git a/app/agent/browser.py b/app/agent/browser.py new file mode 100644 index 0000000..ae0ce2f --- /dev/null +++ b/app/agent/browser.py @@ -0,0 +1,129 @@ +import json +from typing import Any, Optional + +from pydantic import Field + +from app.agent.toolcall import ToolCallAgent +from app.logger import logger +from app.prompt.browser import NEXT_STEP_PROMPT, SYSTEM_PROMPT +from app.schema import Message, ToolChoice +from app.tool import BrowserUseTool, Terminate, ToolCollection + + +class BrowserAgent(ToolCallAgent): + """ + A browser agent that uses the browser_use library to control a browser. + + This agent can navigate web pages, interact with elements, fill forms, + extract content, and perform other browser-based actions to accomplish tasks. 
+ """ + + name: str = "browser" + description: str = "A browser agent that can control a browser to accomplish tasks" + + system_prompt: str = SYSTEM_PROMPT + next_step_prompt: str = NEXT_STEP_PROMPT + + max_observe: int = 10000 + max_steps: int = 20 + + # Configure the available tools + available_tools: ToolCollection = Field( + default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate()) + ) + + # Use Auto for tool choice to allow both tool usage and free-form responses + tool_choices: ToolChoice = ToolChoice.AUTO + special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name]) + + _current_base64_image: Optional[str] = None + + async def _handle_special_tool(self, name: str, result: Any, **kwargs): + if not self._is_special_tool(name): + return + else: + await self.available_tools.get_tool(BrowserUseTool().name).cleanup() + await super()._handle_special_tool(name, result, **kwargs) + + async def get_browser_state(self) -> Optional[dict]: + """Get the current browser state for context in next steps.""" + browser_tool = self.available_tools.get_tool(BrowserUseTool().name) + if not browser_tool: + return None + + try: + # Get browser state directly from the tool + result = await browser_tool.get_current_state() + + if result.error: + logger.debug(f"Browser state error: {result.error}") + return None + + # Store screenshot if available + if hasattr(result, "base64_image") and result.base64_image: + self._current_base64_image = result.base64_image + + # Parse the state info + return json.loads(result.output) + + except Exception as e: + logger.debug(f"Failed to get browser state: {str(e)}") + return None + + async def think(self) -> bool: + """Process current state and decide next actions using tools, with browser state info added""" + # Add browser state to the context + browser_state = await self.get_browser_state() + + # Initialize placeholder values + url_info = "" + tabs_info = "" + content_above_info = "" + content_below_info = "" + results_info = "" + + if browser_state and not browser_state.get("error"): + # URL and title info + url_info = f"\n URL: {browser_state.get('url', 'N/A')}\n Title: {browser_state.get('title', 'N/A')}" + + # Tab information + if "tabs" in browser_state: + tabs = browser_state.get("tabs", []) + if tabs: + tabs_info = f"\n {len(tabs)} tab(s) available" + + # Content above/below viewport + pixels_above = browser_state.get("pixels_above", 0) + pixels_below = browser_state.get("pixels_below", 0) + + if pixels_above > 0: + content_above_info = f" ({pixels_above} pixels)" + + if pixels_below > 0: + content_below_info = f" ({pixels_below} pixels)" + + # Add screenshot as base64 if available + if self._current_base64_image: + # Create a message with image attachment + image_message = Message.user_message( + content="Current browser screenshot:", + base64_image=self._current_base64_image, + ) + self.memory.add_message(image_message) + + # Replace placeholders with actual browser state info + self.next_step_prompt = NEXT_STEP_PROMPT.format( + url_placeholder=url_info, + tabs_placeholder=tabs_info, + content_above_placeholder=content_above_info, + content_below_placeholder=content_below_info, + results_placeholder=results_info, + ) + + # Call parent implementation + result = await super().think() + + # Reset the next_step_prompt to its original state + self.next_step_prompt = NEXT_STEP_PROMPT + + return result diff --git a/app/agent/manus.py b/app/agent/manus.py index fd3e843..8cba71a 100644 --- a/app/agent/manus.py +++ b/app/agent/manus.py 
@@ -1,12 +1,9 @@ -import json import os from pathlib import Path -from typing import Any, Optional from pydantic import Field -from app.agent.toolcall import ToolCallAgent -from app.logger import logger +from app.agent.browser import BrowserAgent from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.tool import Terminate, ToolCollection from app.tool.browser_use_tool import BrowserUseTool @@ -17,11 +14,11 @@ from app.tool.str_replace_editor import StrReplaceEditor initial_working_directory = Path(os.getcwd()) / "workspace" -class Manus(ToolCallAgent): +class Manus(BrowserAgent): """ A versatile general-purpose agent that uses planning to solve various tasks. - This agent extends PlanningAgent with a comprehensive set of tools and capabilities, + This agent extends BrowserAgent with a comprehensive set of tools and capabilities, including Python execution, web browsing, file operations, and information retrieval to handle a wide range of user requests. """ @@ -44,48 +41,24 @@ class Manus(ToolCallAgent): ) ) - async def _handle_special_tool(self, name: str, result: Any, **kwargs): - if not self._is_special_tool(name): - return - else: - await self.available_tools.get_tool(BrowserUseTool().name).cleanup() - await super()._handle_special_tool(name, result, **kwargs) - - async def get_browser_state(self) -> Optional[dict]: - """Get the current browser state for context in next steps.""" - browser_tool = self.available_tools.get_tool(BrowserUseTool().name) - if not browser_tool: - return None - - try: - # Get browser state directly from the tool with no context parameter - result = await browser_tool.get_current_state() - - if result.error: - logger.debug(f"Browser state error: {result.error}") - return None - - # Store screenshot if available - if hasattr(result, "base64_image") and result.base64_image: - self._current_base64_image = result.base64_image - - # Parse the state info - return json.loads(result.output) - - except Exception as e: - logger.debug(f"Failed to get browser state: {str(e)}") - return None - async def think(self) -> bool: - # Add your custom pre-processing here - browser_state = await self.get_browser_state() - - # Modify the next_step_prompt temporarily + """Process current state and decide next actions with appropriate context.""" + # Store original prompt original_prompt = self.next_step_prompt - if browser_state and not browser_state.get("error"): - self.next_step_prompt += f"\nCurrent browser state:\nURL: {browser_state.get('url', 'N/A')}\nTitle: {browser_state.get('title', 'N/A')}\n" - # Call parent implementation + # Only check recent messages (last 3) for browser activity + recent_messages = self.memory.messages[-3:] if self.memory.messages else [] + browser_in_use = any( + "browser_use" in msg.content.lower() + for msg in recent_messages + if hasattr(msg, "content") and isinstance(msg.content, str) + ) + + if browser_in_use: + # Override with parent class's prompt temporarily to get browser context + self.next_step_prompt = BrowserAgent.next_step_prompt + + # Call parent's think method result = await super().think() # Restore original prompt diff --git a/app/prompt/browser.py b/app/prompt/browser.py new file mode 100644 index 0000000..70fed30 --- /dev/null +++ b/app/prompt/browser.py @@ -0,0 +1,92 @@ +SYSTEM_PROMPT = """\ +You are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules. 
+
+# Input Format
+Task
+Previous steps
+Current URL
+Open Tabs
+Interactive Elements
+[index]<type>text</type>
+- index: Numeric identifier for interaction
+- type: HTML element type (button, input, etc.)
+- text: Element description
+Example:
+[33]<button>Submit Form</button>
+
+- Only elements with numeric indexes in [] are interactive
+- Elements without [] provide only context
+
+# Response Rules
+1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:
+{{"current_state": {{"evaluation_previous_goal": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions were successful as intended by the task. Mention if something unexpected happened. Briefly state why/why not",
+"memory": "Description of what has been done and what you need to remember. Be very specific. ALWAYS count here how many times you have done something and how many remain. E.g. 0 out of 10 websites analyzed. Continue with abc and xyz",
+"next_goal": "What needs to be done with the next immediate action"}},
+"action":[{{"one_action_name": {{// action-specific parameter}}}}, // ... more actions in sequence]}}
+
+2. ACTIONS: You can specify multiple actions in the list to be executed in sequence, but always specify only one action name per item. Use a maximum of {{max_actions}} actions per sequence.
+Common action sequences:
+- Form filling: [{{"input_text": {{"index": 1, "text": "username"}}}}, {{"input_text": {{"index": 2, "text": "password"}}}}, {{"click_element": {{"index": 3}}}}]
+- Navigation and extraction: [{{"go_to_url": {{"url": "https://example.com"}}}}, {{"extract_content": {{"goal": "extract the names"}}}}]
+- Actions are executed in the given order
+- If the page changes after an action, the sequence is interrupted and you get the new state.
+- Only provide the action sequence up to an action which changes the page state significantly.
+- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page
+- Only use multiple actions if it makes sense.
+
+3. ELEMENT INTERACTION:
+- Only use indexes of the interactive elements
+- Elements marked with "[]Non-interactive text" are non-interactive
+
+4. NAVIGATION & ERROR HANDLING:
+- If no suitable elements exist, use other functions to complete the task
+- If stuck, try alternative approaches - like going back to a previous page, a new search, a new tab, etc.
+- Handle popups/cookies by accepting or closing them
+- Use scroll to find elements you are looking for
+- If you want to research something, open a new tab instead of using the current tab
+- If a captcha pops up, try to solve it - else try a different approach
+- If the page is not fully loaded, use the wait action
+
+5. TASK COMPLETION:
+- Use the done action as the last action as soon as the ultimate task is complete
+- Don't use "done" before you have finished everything the user asked for, unless you have reached the last step of max_steps.
+- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completely finished, set success to true. If not everything the user asked for is completed, set success in done to false!
+- If you have to do something repeatedly, for example when the task says "for each", "for all", or "x times", always count inside "memory" how many times you have done it and how many remain. Don't stop until you have completed everything the task asked of you. Only call done after the last step.
+- Don't hallucinate actions
+- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the information the task requested.
+
+6. VISUAL CONTEXT:
+- When an image is provided, use it to understand the page layout
+- Bounding boxes with labels on their top-right corner correspond to element indexes
+
+7. Form filling:
+- If you fill an input field and your action sequence is interrupted, most often something changed, e.g. suggestions popped up under the field.
+
+8. Long tasks:
+- Keep track of the status and sub-results in the memory.
+
+9. Extraction:
+- If your task is to find information, call extract_content on the specific pages to get and store the information.
+Your responses must always be JSON in the specified format.
+"""
+
+NEXT_STEP_PROMPT = """
+What should I do next to achieve my goal?
+
+When you see [Current state starts here], focus on the following:
+- Current URL and page title{url_placeholder}
+- Available tabs{tabs_placeholder}
+- Interactive elements and their indices
+- Content above{content_above_placeholder} or below{content_below_placeholder} the viewport (if indicated)
+- Any action results or errors{results_placeholder}
+
+For browser interactions:
+- To navigate: browser_use with action="go_to_url", url="..."
+- To click: browser_use with action="click_element", index=N
+- To type: browser_use with action="input_text", index=N, text="..."
+- To extract: browser_use with action="extract_content", goal="..."
+- To scroll: browser_use with action="scroll_down" or "scroll_up"
+
+Consider both what's visible and what might be beyond the current viewport.
+Be methodical - remember your progress and what you've learned so far.
+"""
diff --git a/app/tool/__init__.py b/app/tool/__init__.py
index 9ab569a..6fbd1bc 100644
--- a/app/tool/__init__.py
+++ b/app/tool/__init__.py
@@ -1,5 +1,6 @@
 from app.tool.base import BaseTool
 from app.tool.bash import Bash
+from app.tool.browser_use_tool import BrowserUseTool
 from app.tool.create_chat_completion import CreateChatCompletion
 from app.tool.planning import PlanningTool
 from app.tool.str_replace_editor import StrReplaceEditor
@@ -10,6 +11,7 @@ from app.tool.tool_collection import ToolCollection
 __all__ = [
     "BaseTool",
     "Bash",
+    "BrowserUseTool",
     "Terminate",
     "StrReplaceEditor",
     "ToolCollection",

From 421e962258ea7ac117aca7fc749666bd4d9da700 Mon Sep 17 00:00:00 2001
From: liangxinbing <1580466765@qq.com>
Date: Tue, 18 Mar 2025 22:57:47 +0800
Subject: [PATCH 77/77] add workspace_root for Config and update Manus

---
 app/agent/manus.py | 14 +++++---------
 app/config.py      |  5 +++++
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/app/agent/manus.py b/app/agent/manus.py
index 8cba71a..d7ec2f9 100644
--- a/app/agent/manus.py
+++ b/app/agent/manus.py
@@ -1,9 +1,8 @@
-import os
-from pathlib import Path
-
 from pydantic import Field
 
 from app.agent.browser import BrowserAgent
+from app.config import config
+from app.prompt.browser import NEXT_STEP_PROMPT as BROWSER_NEXT_STEP_PROMPT
 from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT
 from app.tool import Terminate, ToolCollection
 from app.tool.browser_use_tool import BrowserUseTool
@@ -11,9 +10,6 @@ from app.tool.python_execute import PythonExecute
 from app.tool.str_replace_editor import StrReplaceEditor
 
 
-initial_working_directory = Path(os.getcwd()) / "workspace"
-
-
 class Manus(BrowserAgent):
     """
     A versatile general-purpose agent that uses planning to solve various tasks.
@@ -28,7 +24,7 @@ class Manus(BrowserAgent): "A versatile agent that can solve various tasks using multiple tools" ) - system_prompt: str = SYSTEM_PROMPT.format(directory=initial_working_directory) + system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root) next_step_prompt: str = NEXT_STEP_PROMPT max_observe: int = 10000 @@ -55,8 +51,8 @@ class Manus(BrowserAgent): ) if browser_in_use: - # Override with parent class's prompt temporarily to get browser context - self.next_step_prompt = BrowserAgent.next_step_prompt + # Override with browser-specific prompt temporarily to get browser context + self.next_step_prompt = BROWSER_NEXT_STEP_PROMPT # Call parent's think method result = await super().think() diff --git a/app/config.py b/app/config.py index 6d724ec..0be771b 100644 --- a/app/config.py +++ b/app/config.py @@ -222,5 +222,10 @@ class Config: def search_config(self) -> Optional[SearchSettings]: return self._config.search_config + @property + def workspace_root(self) -> Path: + """Get the workspace root directory""" + return WORKSPACE_ROOT + config = Config()
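
The browser-context handoff that these patches settle on deserves spelling out: Manus.think() no longer queries the browser itself; it swaps its next-step prompt whenever recent tool traffic mentions browser_use. Below is a minimal sketch of that heuristic stripped of the agent framework - the Message class and both prompt strings are illustrative placeholders, not the real ones from app/prompt/:

from dataclasses import dataclass
from typing import List

# Illustrative placeholders; the real prompts live in app/prompt/manus.py
# and app/prompt/browser.py.
MANUS_NEXT_STEP_PROMPT = "Decide the next tool call."
BROWSER_NEXT_STEP_PROMPT = "Decide the next browser action (URL, tabs, elements)."


@dataclass
class Message:
    content: str


def pick_next_step_prompt(messages: List[Message]) -> str:
    # Mirrors the heuristic in Manus.think(): if any of the last three
    # messages mention browser_use, borrow the browser-specific prompt so
    # the LLM is fed page context; otherwise keep the general-purpose one.
    recent = messages[-3:] if messages else []
    browser_in_use = any(
        "browser_use" in msg.content.lower()
        for msg in recent
        if isinstance(msg.content, str)
    )
    return BROWSER_NEXT_STEP_PROMPT if browser_in_use else MANUS_NEXT_STEP_PROMPT


# A browser_use mention in recent memory flips the prompt.
history = [Message("user: find the docs"), Message("Executed browser_use: clicked [33]")]
assert pick_next_step_prompt(history) == BROWSER_NEXT_STEP_PROMPT

Scanning only the last three messages keeps the check cheap and lets the agent fall back to its general prompt once browsing activity goes quiet.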
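
The workspace_root property in the final patch is thin by design. Here is a sketch of the pattern it enables, assuming WORKSPACE_ROOT is derived from the project root as elsewhere in app/config.py (the real Config also carries LLM, sandbox, and search settings omitted here):

from pathlib import Path

# Assumption: mirrors how app/config.py derives the workspace path from the
# project root.
PROJECT_ROOT = Path(__file__).resolve().parent
WORKSPACE_ROOT = PROJECT_ROOT / "workspace"


class Config:
    # Minimal stand-in for the singleton in app/config.py.
    @property
    def workspace_root(self) -> Path:
        """Get the workspace root directory."""
        return WORKSPACE_ROOT


config = Config()

# After this patch, Manus bakes the path into its system prompt at class
# definition time rather than computing Path(os.getcwd()) / "workspace" at
# import time, so the directory no longer depends on the launch location.
SYSTEM_PROMPT_TEMPLATE = "You are working in the directory: {directory}"  # illustrative
print(SYSTEM_PROMPT_TEMPLATE.format(directory=config.workspace_root))

Routing call sites through config.workspace_root instead of a module-level initial_working_directory means the value is computed once, is independent of os.getcwd() at launch, and can later be made configurable without touching Manus.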