From bd742382358d18fd23af3479b356f77877d6b67d Mon Sep 17 00:00:00 2001
From: liangxinbing <1580466765@qq.com>
Date: Thu, 6 Mar 2025 23:29:53 +0800
Subject: [PATCH] update README.md and config.example.toml

---
 README.md                  | 53 ++++++++++++++++++++++++++++++++++++--
 config/config.example.toml |  8 +++---
 2 files changed, 55 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index ef86390..633828b 100644
--- a/README.md
+++ b/README.md
@@ -7,11 +7,60 @@ It's a simple implementation, so we welcome any suggestions, contributions, and
 
 Enjoy your own agent with OpenManus!
 
-## Quick Start
-Two line for run OpenManus:
+## Installation
+
+1. Create a new conda environment:
+
+```bash
+conda create -n open_manus python=3.12
+conda activate open_manus
+```
+
+2. Clone the repository:
+
+```bash
+git clone https://github.com/mannaandpoem/OpenManus.git
+cd OpenManus
+```
+
+3. Install dependencies:
 
 ```bash
 pip install -r requirements.txt
+```
+
+## Configuration
+
+OpenManus requires configuration for the LLM APIs it uses. Follow these steps to set up your configuration:
+
+1. Create a `config.toml` file in the `config` directory (you can copy from the example):
+
+```bash
+cp config/config.example.toml config/config.toml
+```
+
+2. Edit `config/config.toml` to add your API keys and customize settings:
+
+```toml
+# Global LLM configuration
+[llm]
+model = "gpt-4o"
+base_url = "https://api.openai.com/v1"
+api_key = "sk-..."  # Replace with your actual API key
+max_tokens = 4096
+temperature = 0.0
+
+# Optional configuration for specific LLM models
+[llm.vision]
+model = "gpt-4o"
+base_url = "https://api.openai.com/v1"
+api_key = "sk-..."  # Replace with your actual API key
+```
+
+## Quick Start
+One line to run OpenManus:
+
+```bash
 python main.py
 ```
 
diff --git a/config/config.example.toml b/config/config.example.toml
index 65180b8..5821609 100644
--- a/config/config.example.toml
+++ b/config/config.example.toml
@@ -1,13 +1,13 @@
 # Global LLM configuration
 [llm]
-model = "deepseek-chat"
-base_url = "https://api.deepseek.com/v1"
+model = "gpt-4o"
+base_url = "https://api.openai.com/v1"
 api_key = "sk-..."
 max_tokens = 4096
 temperature = 0.0
 
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "..."
-base_url = "..."
+model = "gpt-4o"
+base_url = "https://api.openai.com/v1"
 api_key = "sk-..."