Merge branch 'main' of https://github.com/mannaandpoem/OpenManus into mcp
commit f380372a07
.github/dependabot.yml (new file, vendored, 58 lines)
@@ -0,0 +1,58 @@
version: 2
updates:
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 4
    groups:
      # Group critical packages that might need careful review
      core-dependencies:
        patterns:
          - "pydantic*"
          - "openai"
          - "fastapi"
          - "tiktoken"
      browsergym-related:
        patterns:
          - "browsergym*"
          - "browser-use"
          - "playwright"
      search-tools:
        patterns:
          - "googlesearch-python"
          - "baidusearch"
          - "duckduckgo_search"
      pre-commit:
        patterns:
          - "pre-commit"
      security-all:
        applies-to: "security-updates"
        patterns:
          - "*"
      version-all:
        applies-to: "version-updates"
        patterns:
          - "*"
        exclude-patterns:
          - "pydantic*"
          - "openai"
          - "fastapi"
          - "tiktoken"
          - "browsergym*"
          - "browser-use"
          - "playwright"
          - "googlesearch-python"
          - "baidusearch"
          - "duckduckgo_search"
          - "pre-commit"

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 4
    groups:
      actions:
        patterns:
          - "*"
.github/workflows/environment-corrupt-check.yaml (new file, vendored, 33 lines)
@@ -0,0 +1,33 @@
name: Environment Corruption Check
on:
  push:
    branches: ["main"]
    paths:
      - requirements.txt
  pull_request:
    branches: ["main"]
    paths:
      - requirements.txt
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  test-python-versions:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.11.11", "3.12.8", "3.13.2"]
      fail-fast: false
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Upgrade pip
        run: |
          python -m pip install --upgrade pip
      - name: Install dependencies
        run: |
          pip install -r requirements.txt
.github/workflows/pr-autodiff.yaml (new file, vendored, 127 lines)
@@ -0,0 +1,127 @@
name: PR Diff Summarization
on:
  # pull_request:
  #   branches: [main]
  #   types: [opened, ready_for_review, reopened]
  issue_comment:
    types: [created]
permissions:
  contents: read
  pull-requests: write
jobs:
  pr-diff-summarization:
    runs-on: ubuntu-latest
    if: |
      (github.event_name == 'pull_request') ||
      (github.event_name == 'issue_comment' &&
      contains(github.event.comment.body, '!pr-diff') &&
      (github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') &&
      github.event.issue.pull_request)
    steps:
      - name: Get PR head SHA
        id: get-pr-sha
        run: |
          if [ "${{ github.event_name }}" == "pull_request" ]; then
            echo "pr_sha=${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT
            echo "Retrieved PR head SHA: ${{ github.event.pull_request.head.sha }}"
          else
            PR_URL="${{ github.event.issue.pull_request.url }}"
            SHA=$(curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" $PR_URL | jq -r '.head.sha')
            echo "pr_sha=$SHA" >> $GITHUB_OUTPUT
            echo "Retrieved PR head SHA from API: $SHA"
          fi
      - name: Check out code
        uses: actions/checkout@v4
        with:
          ref: ${{ steps.get-pr-sha.outputs.pr_sha }}
          fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install openai requests
      - name: Create and run Python script
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
          GH_TOKEN: ${{ github.token }}
          PR_NUMBER: ${{ github.event.pull_request.number || github.event.issue.number }}
        run: |-
          cat << 'EOF' > /tmp/_workflow_core.py
          import os
          import subprocess
          import json
          import requests
          from openai import OpenAI

          def get_diff():
              result = subprocess.run(
                  ['git', 'diff', 'origin/main...HEAD'],
                  capture_output=True, text=True, check=True)
              return '\n'.join(
                  line for line in result.stdout.split('\n')
                  if any(line.startswith(c) for c in ('+', '-'))
                  and not line.startswith(('---', '+++'))
              )[:round(200000 * 0.4)]  # Truncate to prevent overflow

          def generate_comment(diff_content):
              client = OpenAI(
                  base_url=os.getenv("OPENAI_BASE_URL"),
                  api_key=os.getenv("OPENAI_API_KEY")
              )

              guidelines = '''
              1. English version first, Chinese Simplified version after
              2. Example format:
                 # Diff Report
                 ## English
                 - Added `ABC` class
                 - Fixed `f()` behavior in `foo` module

                 ### Comments Highlight
                 - `config.toml` needs to be configured properly to make sure new features work as expected.

                 ### Spelling/Offensive Content Check
                 - No spelling mistakes or offensive content found in the code or comments.
              3. Highlight non-English comments
              4. Check for spelling/offensive content'''

              response = client.chat.completions.create(
                  model="o3-mini",
                  messages=[{
                      "role": "system",
                      "content": "Generate bilingual code review feedback."
                  }, {
                      "role": "user",
                      "content": f"Review these changes per guidelines:\n{guidelines}\n\nDIFF:\n{diff_content}"
                  }]
              )
              return response.choices[0].message.content

          def post_comment(comment):
              repo = os.getenv("GITHUB_REPOSITORY")
              pr_number = os.getenv("PR_NUMBER")

              headers = {
                  "Authorization": f"Bearer {os.getenv('GH_TOKEN')}",
                  "Accept": "application/vnd.github.v3+json"
              }
              url = f"https://api.github.com/repos/{repo}/issues/{pr_number}/comments"

              requests.post(url, json={"body": comment}, headers=headers)

          if __name__ == "__main__":
              diff_content = get_diff()
              if not diff_content.strip():
                  print("No meaningful diff detected.")
                  exit(0)

              comment = generate_comment(diff_content)
              post_comment(comment)
              print("Comment posted successfully.")
          EOF

          python /tmp/_workflow_core.py
.github/workflows/stale.yaml (vendored, 2 lines changed)
@@ -11,7 +11,7 @@ jobs:
       issues: write
       pull-requests: write
     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v9
        with:
          days-before-issue-stale: 30
          days-before-issue-close: 14
.github/workflows/top-issues.yaml (new file, vendored, 27 lines)
@@ -0,0 +1,27 @@
name: Top issues
on:
  schedule:
    - cron: '0 0/2 * * *'
  workflow_dispatch:
jobs:
  ShowAndLabelTopIssues:
    permissions:
      issues: write
      pull-requests: write
      actions: read
      contents: read
    name: Display and label top issues
    runs-on: ubuntu-latest
    if: github.repository == 'mannaandpoem/OpenManus'
    steps:
      - name: Run top issues action
        uses: rickstaa/top-issues-action@7e8dda5d5ae3087670f9094b9724a9a091fc3ba1 # v1.3.101
        env:
          github_token: ${{ secrets.GITHUB_TOKEN }}
        with:
          label: true
          dashboard: true
          dashboard_show_total_reactions: true
          top_issues: true
          top_pull_requests: true
          top_list_size: 32
.gitignore (vendored, 28 lines changed)
@@ -1,3 +1,14 @@
+### Project-specific ###
+# Logs
+logs/
+
+# Data
+data/
+
+# Workspace
+workspace/
+
+### Python ###
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -170,11 +181,16 @@ cython_debug/
 # PyPI configuration file
 .pypirc

-# Logs
-logs/
+### Visual Studio Code ###
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets

-# Data
-data/
+# Local History for Visual Studio Code
+.history/

-# Workspace
-workspace/
+# Built Visual Studio Code Extensions
+*.vsix
.vscode/extensions.json (new file, vendored, 8 lines)
@@ -0,0 +1,8 @@
{
    "recommendations": [
        "tamasfe.even-better-toml",
        "ms-python.black-formatter",
        "ms-python.isort"
    ],
    "unwantedRecommendations": []
}
.vscode/settings.json (new file, vendored, 20 lines)
@@ -0,0 +1,20 @@
{
    "[python]": {
        "editor.defaultFormatter": "ms-python.black-formatter",
        "editor.codeActionsOnSave": {
            "source.organizeImports": "always"
        }
    },
    "[toml]": {
        "editor.defaultFormatter": "tamasfe.even-better-toml",
    },
    "pre-commit-helper.runOnSave": "none",
    "pre-commit-helper.config": ".pre-commit-config.yaml",
    "evenBetterToml.schema.enabled": true,
    "evenBetterToml.schema.associations": {
        "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json"
    },
    "files.insertFinalNewline": true,
    "files.trimTrailingWhitespace": true,
    "editor.formatOnSave": true
}
Dockerfile (new file, 13 lines)
@@ -0,0 +1,13 @@
FROM python:3.12-slim

WORKDIR /app/OpenManus

RUN apt-get update && apt-get install -y --no-install-recommends git curl \
    && rm -rf /var/lib/apt/lists/* \
    && (command -v uv >/dev/null 2>&1 || pip install --no-cache-dir uv)

COPY . .

RUN uv pip install --system -r requirements.txt

CMD ["bash"]
README.md (10 lines changed)
@@ -1,3 +1,7 @@
+<p align="center">
+  <img src="assets/logo.jpg" width="200"/>
+</p>
+
 English | [中文](README_zh.md) | [한국어](README_ko.md) | [日本語](README_ja.md)

 [](https://github.com/mannaandpoem/OpenManus/stargazers)
@@ -65,7 +69,7 @@ cd OpenManus
 3. Create a new virtual environment and activate it:

 ```bash
-uv venv
+uv venv --python 3.12
 source .venv/bin/activate  # On Unix/macOS
 # Or on Windows:
 # .venv\Scripts\activate
@@ -127,6 +131,8 @@ We welcome any friendly suggestions and helpful contributions! Just create issues

 Or contact @mannaandpoem via 📧email: mannaandpoem@gmail.com

+**Note**: Before submitting a pull request, please use the pre-commit tool to check your changes. Run `pre-commit run --all-files` to execute the checks.
+
 ## Community Group
 Join our networking group on Feishu and share your experience with other developers!

@@ -143,7 +149,7 @@ Join our networking group on Feishu and share your experience with other developers
 Thanks to [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo)
 and [browser-use](https://github.com/browser-use/browser-use) for providing basic support for this project!

-Additionally, we are grateful to [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT) and [OpenHands](https://github.com/All-Hands-AI/OpenHands).
+Additionally, we are grateful to [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT), [OpenHands](https://github.com/All-Hands-AI/OpenHands) and [SWE-agent](https://github.com/SWE-agent/SWE-agent).

 OpenManus is built by contributors from MetaGPT. Huge thanks to this agent community!
README_ja.md (11 lines changed)
@@ -1,5 +1,8 @@
-[English](README.md) | [中文](README_zh.md) | [한국어](README_ko.md) | 日本語
+<p align="center">
+  <img src="assets/logo.jpg" width="200"/>
+</p>
+
+[English](README.md) | [中文](README_zh.md) | [한국어](README_ko.md) | 日本語

 [](https://github.com/mannaandpoem/OpenManus/stargazers)

@@ -66,7 +69,7 @@ cd OpenManus
 3. 新しい仮想環境を作成してアクティベートします:

 ```bash
-uv venv
+uv venv --python 3.12
 source .venv/bin/activate  # Unix/macOSの場合
 # Windowsの場合:
 # .venv\Scripts\activate
@@ -128,6 +131,8 @@ python run_flow.py

 または @mannaandpoem に📧メールでご連絡ください:mannaandpoem@gmail.com

+**注意**: プルリクエストを送信する前に、pre-commitツールを使用して変更を確認してください。`pre-commit run --all-files`を実行してチェックを実行します。
+
 ## コミュニティグループ
 Feishuのネットワーキンググループに参加して、他の開発者と経験を共有しましょう!

@@ -144,7 +149,7 @@ Feishuのネットワーキンググループに参加して、他の開発者
 このプロジェクトの基本的なサポートを提供してくれた[anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo)
 と[browser-use](https://github.com/browser-use/browser-use)に感謝します!

-さらに、[AAAJ](https://github.com/metauto-ai/agent-as-a-judge)、[MetaGPT](https://github.com/geekan/MetaGPT)、[OpenHands](https://github.com/All-Hands-AI/OpenHands)にも感謝します。
+さらに、[AAAJ](https://github.com/metauto-ai/agent-as-a-judge)、[MetaGPT](https://github.com/geekan/MetaGPT)、[OpenHands](https://github.com/All-Hands-AI/OpenHands)、[SWE-agent](https://github.com/SWE-agent/SWE-agent)にも感謝します。

 OpenManusはMetaGPTのコントリビューターによって構築されました。このエージェントコミュニティに大きな感謝を!
README_ko.md (11 lines changed)
@@ -1,5 +1,8 @@
-[English](README.md) | [中文](README_zh.md) | 한국어 | [日本語](README_ja.md)
+<p align="center">
+  <img src="assets/logo.jpg" width="200"/>
+</p>
+
+[English](README.md) | [中文](README_zh.md) | 한국어 | [日本語](README_ja.md)

 [](https://github.com/mannaandpoem/OpenManus/stargazers)

@@ -66,7 +69,7 @@ cd OpenManus
 3. 새로운 가상 환경을 생성하고 활성화합니다:

 ```bash
-uv venv
+uv venv --python 3.12
 source .venv/bin/activate  # Unix/macOS의 경우
 # Windows의 경우:
 # .venv\Scripts\activate
@@ -128,6 +131,8 @@ python run_flow.py

 또는 📧 메일로 연락주세요. @mannaandpoem : mannaandpoem@gmail.com

+**참고**: pull request를 제출하기 전에 pre-commit 도구를 사용하여 변경 사항을 확인하십시오. `pre-commit run --all-files`를 실행하여 검사를 실행합니다.
+
 ## 커뮤니티 그룹
 Feishu 네트워킹 그룹에 참여하여 다른 개발자들과 경험을 공유하세요!

@@ -144,7 +149,7 @@ Feishu 네트워킹 그룹에 참여하여 다른 개발자들과 경험을 공
 이 프로젝트에 기본적인 지원을 제공해 주신 [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo)와
 [browser-use](https://github.com/browser-use/browser-use)에게 감사드립니다!

-또한, [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT), [OpenHands](https://github.com/All-Hands-AI/OpenHands)에 깊은 감사를 드립니다.
+또한, [AAAJ](https://github.com/metauto-ai/agent-as-a-judge), [MetaGPT](https://github.com/geekan/MetaGPT), [OpenHands](https://github.com/All-Hands-AI/OpenHands), [SWE-agent](https://github.com/SWE-agent/SWE-agent)에 깊은 감사를 드립니다.

 OpenManus는 MetaGPT 기여자들에 의해 개발되었습니다. 이 에이전트 커뮤니티에 깊은 감사를 전합니다!
README_zh.md (13 lines changed)
@@ -1,8 +1,9 @@
+<p align="center">
+  <img src="assets/logo.jpg" width="200"/>
+</p>
+
 [English](README.md) | 中文 | [한국어](README_ko.md) | [日本語](README_ja.md)
-
-
-
 [](https://github.com/mannaandpoem/OpenManus/stargazers)

 [](https://opensource.org/licenses/MIT)
@@ -69,7 +70,7 @@ cd OpenManus
 3. 创建并激活虚拟环境:

 ```bash
-uv venv
+uv venv --python 3.12
 source .venv/bin/activate  # Unix/macOS 系统
 # Windows 系统使用:
 # .venv\Scripts\activate
@@ -119,7 +120,7 @@ python main.py

 然后通过终端输入你的创意!

-如需体验开发中版本,可运行:
+如需体验不稳定的开发版本,可运行:

 ```bash
 python run_flow.py
@@ -131,6 +132,8 @@ python run_flow.py

 或通过 📧 邮件联系 @mannaandpoem:mannaandpoem@gmail.com

+**注意**: 在提交 pull request 之前,请使用 pre-commit 工具检查您的更改。运行 `pre-commit run --all-files` 来执行检查。
+
 ## 交流群

 加入我们的飞书交流群,与其他开发者分享经验!
@@ -148,7 +151,7 @@ python run_flow.py
 特别感谢 [anthropic-computer-use](https://github.com/anthropics/anthropic-quickstarts/tree/main/computer-use-demo)
 和 [browser-use](https://github.com/browser-use/browser-use) 为本项目提供的基础支持!

-此外,我们感谢 [AAAJ](https://github.com/metauto-ai/agent-as-a-judge),[MetaGPT](https://github.com/geekan/MetaGPT) 和 [OpenHands](https://github.com/All-Hands-AI/OpenHands).
+此外,我们感谢 [AAAJ](https://github.com/metauto-ai/agent-as-a-judge),[MetaGPT](https://github.com/geekan/MetaGPT),[OpenHands](https://github.com/All-Hands-AI/OpenHands) 和 [SWE-agent](https://github.com/SWE-agent/SWE-agent).

 OpenManus 由 MetaGPT 社区的贡献者共同构建,感谢这个充满活力的智能体开发者社区!
app/agent/base.py
@@ -6,7 +6,7 @@ from pydantic import BaseModel, Field, model_validator

 from app.llm import LLM
 from app.logger import logger
-from app.schema import AgentState, Memory, Message, ROLE_TYPE
+from app.schema import ROLE_TYPE, AgentState, Memory, Message


 class BaseAgent(BaseModel, ABC):
@@ -82,7 +82,7 @@ class BaseAgent(BaseModel, ABC):

     def update_memory(
         self,
         role: ROLE_TYPE,  # type: ignore
         content: str,
         **kwargs,
     ) -> None:
app/agent/manus.py
@@ -7,8 +7,8 @@ from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT
 from app.tool import Terminate, ToolCollection
 from app.tool.browser_use_tool import BrowserUseTool
 from app.tool.file_saver import FileSaver
-from app.tool.google_search import GoogleSearch
 from app.tool.python_execute import PythonExecute
+from app.tool.web_search import WebSearch


 class Manus(ToolCallAgent):
@@ -34,10 +34,13 @@ class Manus(ToolCallAgent):
     # Add general-purpose tools to the tool collection
     available_tools: ToolCollection = Field(
         default_factory=lambda: ToolCollection(
-            PythonExecute(), GoogleSearch(), BrowserUseTool(), FileSaver(), Terminate()
+            PythonExecute(), WebSearch(), BrowserUseTool(), FileSaver(), Terminate()
         )
     )

     async def _handle_special_tool(self, name: str, result: Any, **kwargs):
-        await self.available_tools.get_tool(BrowserUseTool().name).cleanup()
-        await super()._handle_special_tool(name, result, **kwargs)
+        if not self._is_special_tool(name):
+            return
+        else:
+            await self.available_tools.get_tool(BrowserUseTool().name).cleanup()
+            await super()._handle_special_tool(name, result, **kwargs)
app/agent/planning.py
@@ -6,7 +6,7 @@ from pydantic import Field, model_validator
 from app.agent.toolcall import ToolCallAgent
 from app.logger import logger
 from app.prompt.planning import NEXT_STEP_PROMPT, PLANNING_SYSTEM_PROMPT
-from app.schema import Message, TOOL_CHOICE_TYPE, ToolCall, ToolChoice
+from app.schema import TOOL_CHOICE_TYPE, Message, ToolCall, ToolChoice
 from app.tool import PlanningTool, Terminate, ToolCollection


@@ -27,7 +27,7 @@ class PlanningAgent(ToolCallAgent):
     available_tools: ToolCollection = Field(
         default_factory=lambda: ToolCollection(PlanningTool(), Terminate())
     )
     tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO  # type: ignore
     special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])

     tool_calls: List[ToolCall] = Field(default_factory=list)
@@ -212,7 +212,7 @@ class PlanningAgent(ToolCallAgent):
             messages=messages,
             system_msgs=[Message.system_message(self.system_prompt)],
             tools=self.available_tools.to_params(),
-            tool_choice=ToolChoice.REQUIRED,
+            tool_choice=ToolChoice.AUTO,
         )
         assistant_msg = Message.from_tool_calls(
             content=response.content, tool_calls=response.tool_calls
app/agent/toolcall.py
@@ -1,13 +1,13 @@
 import json
-from typing import Any, List, Literal, Optional, Union
+from typing import Any, List, Optional, Union

 from pydantic import Field

 from app.agent.react import ReActAgent
+from app.exceptions import TokenLimitExceeded
 from app.logger import logger
 from app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT
-from app.schema import AgentState, Message, ToolCall, TOOL_CHOICE_TYPE, ToolChoice
+from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice
 from app.tool import CreateChatCompletion, Terminate, ToolCollection


@@ -26,7 +26,7 @@ class ToolCallAgent(ReActAgent):
     available_tools: ToolCollection = ToolCollection(
         CreateChatCompletion(), Terminate()
     )
     tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO  # type: ignore
     special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])

     tool_calls: List[ToolCall] = Field(default_factory=list)
@@ -40,15 +40,34 @@ class ToolCallAgent(ReActAgent):
         user_msg = Message.user_message(self.next_step_prompt)
         self.messages += [user_msg]

-        # Get response with tool options
-        response = await self.llm.ask_tool(
-            messages=self.messages,
-            system_msgs=[Message.system_message(self.system_prompt)]
-            if self.system_prompt
-            else None,
-            tools=self.available_tools.to_params(),
-            tool_choice=self.tool_choices,
-        )
+        try:
+            # Get response with tool options
+            response = await self.llm.ask_tool(
+                messages=self.messages,
+                system_msgs=[Message.system_message(self.system_prompt)]
+                if self.system_prompt
+                else None,
+                tools=self.available_tools.to_params(),
+                tool_choice=self.tool_choices,
+            )
+        except ValueError:
+            raise
+        except Exception as e:
+            # Check if this is a RetryError containing TokenLimitExceeded
+            if hasattr(e, "__cause__") and isinstance(e.__cause__, TokenLimitExceeded):
+                token_limit_error = e.__cause__
+                logger.error(
+                    f"🚨 Token limit error (from RetryError): {token_limit_error}"
+                )
+                self.memory.add_message(
+                    Message.assistant_message(
+                        f"Maximum token limit reached, cannot continue execution: {str(token_limit_error)}"
+                    )
+                )
+                self.state = AgentState.FINISHED
+                return False
+            raise

         self.tool_calls = response.tool_calls

         # Log response info
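Context for the except branch above: when tenacity exhausts its attempts, it raises a RetryError chained to the original exception, so the underlying TokenLimitExceeded is only reachable through `__cause__`. Below is a minimal, self-contained sketch of that pattern; the names are stand-ins, not the project's code.

```python
import asyncio

from tenacity import retry, stop_after_attempt


class TokenLimitExceeded(Exception):
    """Stand-in for app.exceptions.TokenLimitExceeded."""


@retry(stop=stop_after_attempt(3))
async def flaky_call() -> str:
    # Every attempt fails, so tenacity eventually gives up.
    raise TokenLimitExceeded("limit reached")


async def main() -> None:
    try:
        await flaky_call()
    except Exception as e:
        # tenacity raises RetryError; the original error hangs off __cause__.
        if isinstance(e.__cause__, TokenLimitExceeded):
            print(f"token limit hit: {e.__cause__}")
        else:
            raise


asyncio.run(main())
```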
app/config.py
@@ -20,6 +20,10 @@ class LLMSettings(BaseModel):
     base_url: str = Field(..., description="API base URL")
     api_key: str = Field(..., description="API key")
     max_tokens: int = Field(4096, description="Maximum number of tokens per request")
+    max_input_tokens: Optional[int] = Field(
+        None,
+        description="Maximum input tokens to use across all requests (None for unlimited)",
+    )
     temperature: float = Field(1.0, description="Sampling temperature")
     api_type: str = Field(..., description="AzureOpenai or Openai")
     api_version: str = Field(..., description="Azure Openai version if AzureOpenai")
@@ -31,6 +35,10 @@ class ProxySettings(BaseModel):
     password: Optional[str] = Field(None, description="Proxy password")


+class SearchSettings(BaseModel):
+    engine: str = Field(default="Google", description="Search engine the llm to use")
+
+
 class BrowserSettings(BaseModel):
     headless: bool = Field(False, description="Whether to run browser in headless mode")
     disable_security: bool = Field(
@@ -58,6 +66,9 @@ class AppConfig(BaseModel):
     browser_config: Optional[BrowserSettings] = Field(
         None, description="Browser configuration"
     )
+    search_config: Optional[SearchSettings] = Field(
+        None, description="Search configuration"
+    )

     class Config:
         arbitrary_types_allowed = True
@@ -111,6 +122,7 @@ class Config:
             "base_url": base_llm.get("base_url"),
             "api_key": base_llm.get("api_key"),
             "max_tokens": base_llm.get("max_tokens", 4096),
+            "max_input_tokens": base_llm.get("max_input_tokens"),
             "temperature": base_llm.get("temperature", 1.0),
             "api_type": base_llm.get("api_type", ""),
             "api_version": base_llm.get("api_version", ""),
@@ -149,6 +161,11 @@ class Config:
         if valid_browser_params:
             browser_settings = BrowserSettings(**valid_browser_params)

+        search_config = raw_config.get("search", {})
+        search_settings = None
+        if search_config:
+            search_settings = SearchSettings(**search_config)
+
         config_dict = {
             "llm": {
                 "default": default_settings,
@@ -158,6 +175,7 @@ class Config:
                 },
             },
             "browser_config": browser_settings,
+            "search_config": search_settings,
         }

         self._config = AppConfig(**config_dict)
@@ -170,5 +188,9 @@ class Config:
     def browser_config(self) -> Optional[BrowserSettings]:
         return self._config.browser_config

+    @property
+    def search_config(self) -> Optional[SearchSettings]:
+        return self._config.search_config
+

 config = Config()
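For reference, the loader above reads an optional `[search]` table from the project's TOML config. A minimal sketch of how such a section would be parsed follows; the TOML snippet is hypothetical, and SearchSettings is redeclared locally so the example runs on its own.

```python
import tomllib

from pydantic import BaseModel, Field


class SearchSettings(BaseModel):  # mirrors the model added above
    engine: str = Field(default="Google", description="Search engine the llm to use")


# Hypothetical config.toml excerpt exercising the new [search] table.
raw_config = tomllib.loads('[search]\nengine = "DuckDuckGo"\n')

search_config = raw_config.get("search", {})
search_settings = SearchSettings(**search_config) if search_config else None
print(search_settings)  # engine='DuckDuckGo'; falls back to "Google" if the table is absent
```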
app/exceptions.py
@@ -3,3 +3,11 @@ class ToolError(Exception):

     def __init__(self, message):
         self.message = message
+
+
+class OpenManusError(Exception):
+    """Base exception for all OpenManus errors"""
+
+
+class TokenLimitExceeded(OpenManusError):
+    """Exception raised when the token limit is exceeded"""
app/flow/planning.py
@@ -124,7 +124,7 @@ class PlanningFlow(BaseFlow):
             messages=[user_message],
             system_msgs=[system_message],
             tools=[self.planning_tool.to_param()],
-            tool_choice=ToolChoice.REQUIRED,
+            tool_choice=ToolChoice.AUTO,
         )

         # Process tool calls if present
app/llm.py (244 lines changed)
@@ -1,5 +1,6 @@
 from typing import Dict, List, Optional, Union

+import tiktoken
 from openai import (
     APIError,
     AsyncAzureOpenAI,
@@ -8,11 +9,26 @@ from openai import (
     OpenAIError,
     RateLimitError,
 )
-from tenacity import retry, stop_after_attempt, wait_random_exponential
+from tenacity import (
+    retry,
+    retry_if_exception_type,
+    stop_after_attempt,
+    wait_random_exponential,
+)

 from app.config import LLMSettings, config
+from app.exceptions import TokenLimitExceeded
 from app.logger import logger  # Assuming a logger is set up in your app
-from app.schema import Message, TOOL_CHOICE_TYPE, ROLE_VALUES, TOOL_CHOICE_VALUES, ToolChoice
+from app.schema import (
+    ROLE_VALUES,
+    TOOL_CHOICE_TYPE,
+    TOOL_CHOICE_VALUES,
+    Message,
+    ToolChoice,
+)
+
+
+REASONING_MODELS = ["o1", "o3-mini"]


 class LLM:
@@ -40,6 +56,22 @@ class LLM:
             self.api_key = llm_config.api_key
             self.api_version = llm_config.api_version
             self.base_url = llm_config.base_url
+
+            # Add token counting related attributes
+            self.total_input_tokens = 0
+            self.max_input_tokens = (
+                llm_config.max_input_tokens
+                if hasattr(llm_config, "max_input_tokens")
+                else None
+            )
+
+            # Initialize tokenizer
+            try:
+                self.tokenizer = tiktoken.encoding_for_model(self.model)
+            except KeyError:
+                # If the model is not in tiktoken's presets, use cl100k_base as default
+                self.tokenizer = tiktoken.get_encoding("cl100k_base")
+
             if self.api_type == "azure":
                 self.client = AsyncAzureOpenAI(
                     base_url=self.base_url,
@@ -49,6 +81,79 @@ class LLM:
             else:
                 self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)

+    def count_tokens(self, text: str) -> int:
+        """Calculate the number of tokens in a text"""
+        if not text:
+            return 0
+        return len(self.tokenizer.encode(text))
+
+    def count_message_tokens(self, messages: List[dict]) -> int:
+        """Calculate the number of tokens in a message list"""
+        token_count = 0
+        for message in messages:
+            # Base token count for each message (according to OpenAI's calculation method)
+            token_count += 4  # Base token count for each message
+
+            # Calculate tokens for the role
+            if "role" in message:
+                token_count += self.count_tokens(message["role"])
+
+            # Calculate tokens for the content
+            if "content" in message and message["content"]:
+                token_count += self.count_tokens(message["content"])
+
+            # Calculate tokens for tool calls
+            if "tool_calls" in message and message["tool_calls"]:
+                for tool_call in message["tool_calls"]:
+                    if "function" in tool_call:
+                        # Function name
+                        if "name" in tool_call["function"]:
+                            token_count += self.count_tokens(
+                                tool_call["function"]["name"]
+                            )
+                        # Function arguments
+                        if "arguments" in tool_call["function"]:
+                            token_count += self.count_tokens(
+                                tool_call["function"]["arguments"]
+                            )
+
+            # Calculate tokens for tool responses
+            if "name" in message and message["name"]:
+                token_count += self.count_tokens(message["name"])
+
+            if "tool_call_id" in message and message["tool_call_id"]:
+                token_count += self.count_tokens(message["tool_call_id"])
+
+            # Add extra tokens for message format
+            token_count += 2  # Extra tokens for message format
+
+        return token_count
+
+    def update_token_count(self, input_tokens: int) -> None:
+        """Update token counts"""
+        # Only track tokens if max_input_tokens is set
+        self.total_input_tokens += input_tokens
+        logger.info(
+            f"Token usage: Input={input_tokens}, Cumulative Input={self.total_input_tokens}"
+        )
+
+    def check_token_limit(self, input_tokens: int) -> bool:
+        """Check if token limits are exceeded"""
+        if self.max_input_tokens is not None:
+            return (self.total_input_tokens + input_tokens) <= self.max_input_tokens
+        # If max_input_tokens is not set, always return True
+        return True
+
+    def get_limit_error_message(self, input_tokens: int) -> str:
+        """Generate error message for token limit exceeded"""
+        if (
+            self.max_input_tokens is not None
+            and (self.total_input_tokens + input_tokens) > self.max_input_tokens
+        ):
+            return f"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})"
+
+        return "Token limit exceeded"
+
     @staticmethod
     def format_messages(messages: List[Union[dict, Message]]) -> List[dict]:
         """
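The 4-tokens-per-message base and 2-token format overhead used in count_message_tokens follow OpenAI's published approximation for chat prompts. A standalone sketch of the same counting approach is below; the model name is illustrative, tool-call fields are folded into the key loop for brevity, and the cl100k_base fallback mirrors the constructor above.

```python
import tiktoken


def count_message_tokens(messages: list[dict], model: str = "gpt-4o") -> int:
    """Approximate the prompt tokens consumed by a chat message list."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown models fall back to cl100k_base, as in LLM.__init__ above.
        encoding = tiktoken.get_encoding("cl100k_base")

    total = 0
    for message in messages:
        total += 4  # approximate per-message overhead
        for key in ("role", "content", "name", "tool_call_id"):
            value = message.get(key)
            if isinstance(value, str):
                total += len(encoding.encode(value))
        total += 2  # approximate message-format overhead
    return total


print(count_message_tokens([{"role": "user", "content": "Hello, world"}]))
```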
@@ -75,14 +180,15 @@ class LLM:
         formatted_messages = []

         for message in messages:
+            if isinstance(message, Message):
+                message = message.to_dict()
             if isinstance(message, dict):
-                # If message is already a dict, ensure it has required fields
+                # If message is a dict, ensure it has required fields
                 if "role" not in message:
                     raise ValueError("Message dict must contain 'role' field")
-                formatted_messages.append(message)
-            elif isinstance(message, Message):
-                # If message is a Message object, convert it to dict
-                formatted_messages.append(message.to_dict())
+                if "content" in message or "tool_calls" in message:
+                    formatted_messages.append(message)
+                # else: do not include the message
             else:
                 raise TypeError(f"Unsupported message type: {type(message)}")

@@ -90,16 +196,15 @@ class LLM:
         for msg in formatted_messages:
             if msg["role"] not in ROLE_VALUES:
                 raise ValueError(f"Invalid role: {msg['role']}")
-            if "content" not in msg and "tool_calls" not in msg:
-                raise ValueError(
-                    "Message must contain either 'content' or 'tool_calls'"
-                )

         return formatted_messages

     @retry(
         wait=wait_random_exponential(min=1, max=60),
         stop=stop_after_attempt(6),
+        retry=retry_if_exception_type(
+            (OpenAIError, Exception, ValueError)
+        ),  # Don't retry TokenLimitExceeded
     )
     async def ask(
         self,
@@ -121,6 +226,7 @@ class LLM:
             str: The generated response

         Raises:
+            TokenLimitExceeded: If token limits are exceeded
             ValueError: If messages are invalid or response is empty
             OpenAIError: If API call fails after retries
             Exception: For unexpected errors
@@ -133,27 +239,47 @@ class LLM:
             else:
                 messages = self.format_messages(messages)

+            # Calculate input token count
+            input_tokens = self.count_message_tokens(messages)
+
+            # Check if token limits are exceeded
+            if not self.check_token_limit(input_tokens):
+                error_message = self.get_limit_error_message(input_tokens)
+                # Raise a special exception that won't be retried
+                raise TokenLimitExceeded(error_message)
+
+            params = {
+                "model": self.model,
+                "messages": messages,
+            }
+
+            if self.model in REASONING_MODELS:
+                params["max_completion_tokens"] = self.max_tokens
+            else:
+                params["max_tokens"] = self.max_tokens
+                params["temperature"] = (
+                    temperature if temperature is not None else self.temperature
+                )
+
             if not stream:
                 # Non-streaming request
-                response = await self.client.chat.completions.create(
-                    model=self.model,
-                    messages=messages,
-                    max_tokens=self.max_tokens,
-                    temperature=temperature or self.temperature,
-                    stream=False,
-                )
+                params["stream"] = False
+
+                response = await self.client.chat.completions.create(**params)
+
                 if not response.choices or not response.choices[0].message.content:
                     raise ValueError("Empty or invalid response from LLM")
+
+                # Update token counts
+                self.update_token_count(response.usage.prompt_tokens)
+
                 return response.choices[0].message.content

-            # Streaming request
-            response = await self.client.chat.completions.create(
-                model=self.model,
-                messages=messages,
-                max_tokens=self.max_tokens,
-                temperature=temperature or self.temperature,
-                stream=True,
-            )
+            # Streaming request: update the estimated token count before making the request
+            self.update_token_count(input_tokens)
+
+            params["stream"] = True
+            response = await self.client.chat.completions.create(**params)

             collected_messages = []
             async for chunk in response:
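Two details in the rewritten request path are easy to miss: reasoning models (o1, o3-mini) take max_completion_tokens instead of max_tokens, and the temperature fallback changed from `temperature or self.temperature` to an explicit None test so a caller can legitimately pass temperature=0. A hedged sketch of the parameter assembly, assuming (as the ordering above suggests) that temperature is only sent for non-reasoning models:

```python
REASONING_MODELS = ["o1", "o3-mini"]


def build_params(model: str, messages: list[dict], max_tokens: int,
                 default_temperature: float, temperature: float | None = None) -> dict:
    params: dict = {"model": model, "messages": messages}
    if model in REASONING_MODELS:
        # Reasoning models reject max_tokens; use max_completion_tokens instead.
        params["max_completion_tokens"] = max_tokens
    else:
        params["max_tokens"] = max_tokens
        # Explicit None check keeps temperature=0 from falling back to the default.
        params["temperature"] = (
            temperature if temperature is not None else default_temperature
        )
    return params


print(build_params("o3-mini", [{"role": "user", "content": "hi"}], 4096, 1.0))
print(build_params("gpt-4o", [{"role": "user", "content": "hi"}], 4096, 1.0, temperature=0.0))
```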
@@ -165,13 +291,23 @@ class LLM:
             full_response = "".join(collected_messages).strip()
             if not full_response:
                 raise ValueError("Empty response from streaming LLM")
+
             return full_response

+        except TokenLimitExceeded:
+            # Re-raise token limit errors without logging
+            raise
         except ValueError as ve:
             logger.error(f"Validation error: {ve}")
             raise
         except OpenAIError as oe:
             logger.error(f"OpenAI API error: {oe}")
+            if isinstance(oe, AuthenticationError):
+                logger.error("Authentication failed. Check API key.")
+            elif isinstance(oe, RateLimitError):
+                logger.error("Rate limit exceeded. Consider increasing retry attempts.")
+            elif isinstance(oe, APIError):
+                logger.error(f"API error: {oe}")
             raise
         except Exception as e:
             logger.error(f"Unexpected error in ask: {e}")
@@ -180,6 +316,9 @@ class LLM:
     @retry(
         wait=wait_random_exponential(min=1, max=60),
         stop=stop_after_attempt(6),
+        retry=retry_if_exception_type(
+            (OpenAIError, Exception, ValueError)
+        ),  # Don't retry TokenLimitExceeded
     )
     async def ask_tool(
         self,
@@ -187,7 +326,7 @@ class LLM:
         system_msgs: Optional[List[Union[dict, Message]]] = None,
         timeout: int = 300,
         tools: Optional[List[dict]] = None,
         tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO,  # type: ignore
         temperature: Optional[float] = None,
         **kwargs,
     ):
@@ -207,6 +346,7 @@ class LLM:
             ChatCompletionMessage: The model's response

         Raises:
+            TokenLimitExceeded: If token limits are exceeded
             ValueError: If tools, tool_choice, or messages are invalid
             OpenAIError: If API call fails after retries
             Exception: For unexpected errors
@@ -223,6 +363,23 @@ class LLM:
             else:
                 messages = self.format_messages(messages)

+            # Calculate input token count
+            input_tokens = self.count_message_tokens(messages)
+
+            # If there are tools, calculate token count for tool descriptions
+            tools_tokens = 0
+            if tools:
+                for tool in tools:
+                    tools_tokens += self.count_tokens(str(tool))
+
+            input_tokens += tools_tokens
+
+            # Check if token limits are exceeded
+            if not self.check_token_limit(input_tokens):
+                error_message = self.get_limit_error_message(input_tokens)
+                # Raise a special exception that won't be retried
+                raise TokenLimitExceeded(error_message)
+
             # Validate tools if provided
             if tools:
                 for tool in tools:
@@ -230,28 +387,43 @@ class LLM:
                     raise ValueError("Each tool must be a dict with 'type' field")

             # Set up the completion request
-            response = await self.client.chat.completions.create(
-                model=self.model,
-                messages=messages,
-                temperature=temperature or self.temperature,
-                max_tokens=self.max_tokens,
-                tools=tools,
-                tool_choice=tool_choice,
-                timeout=timeout,
-                **kwargs,
-            )
+            params = {
+                "model": self.model,
+                "messages": messages,
+                "tools": tools,
+                "tool_choice": tool_choice,
+                "timeout": timeout,
+                **kwargs,
+            }
+
+            if self.model in REASONING_MODELS:
+                params["max_completion_tokens"] = self.max_tokens
+            else:
+                params["max_tokens"] = self.max_tokens
+                params["temperature"] = (
+                    temperature if temperature is not None else self.temperature
+                )
+
+            response = await self.client.chat.completions.create(**params)

             # Check if response is valid
             if not response.choices or not response.choices[0].message:
                 print(response)
                 raise ValueError("Invalid or empty response from LLM")

+            # Update token counts
+            self.update_token_count(response.usage.prompt_tokens)
+
             return response.choices[0].message

+        except TokenLimitExceeded:
+            # Re-raise token limit errors without logging
+            raise
         except ValueError as ve:
             logger.error(f"Validation error in ask_tool: {ve}")
             raise
         except OpenAIError as oe:
+            logger.error(f"OpenAI API error: {oe}")
             if isinstance(oe, AuthenticationError):
                 logger.error("Authentication failed. Check API key.")
             elif isinstance(oe, RateLimitError):
app/prompt/manus.py
@@ -8,7 +8,7 @@ FileSaver: Save files locally, such as txt, py, html, etc.

 BrowserUseTool: Open, browse, and use web browsers. If you open a local HTML file, you must provide the absolute path to the file.

-GoogleSearch: Perform web information retrieval
+WebSearch: Perform web information retrieval

 Terminate: End the current interaction when the task is complete or when you need additional information from the user. Use this tool to signal that you've finished addressing the user's request or need clarification before proceeding further.

@ -3,25 +3,32 @@ from typing import Any, List, Literal, Optional, Union
|
|||||||
|
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
|
||||||
class Role(str, Enum):
|
class Role(str, Enum):
|
||||||
"""Message role options"""
|
"""Message role options"""
|
||||||
|
|
||||||
SYSTEM = "system"
|
SYSTEM = "system"
|
||||||
USER = "user"
|
USER = "user"
|
||||||
ASSISTANT = "assistant"
|
ASSISTANT = "assistant"
|
||||||
TOOL = "tool"
|
TOOL = "tool"
|
||||||
|
|
||||||
|
|
||||||
ROLE_VALUES = tuple(role.value for role in Role)
|
ROLE_VALUES = tuple(role.value for role in Role)
|
||||||
ROLE_TYPE = Literal[ROLE_VALUES] # type: ignore
|
ROLE_TYPE = Literal[ROLE_VALUES] # type: ignore
|
||||||
|
|
||||||
|
|
||||||
class ToolChoice(str, Enum):
|
class ToolChoice(str, Enum):
|
||||||
"""Tool choice options"""
|
"""Tool choice options"""
|
||||||
|
|
||||||
NONE = "none"
|
NONE = "none"
|
||||||
AUTO = "auto"
|
AUTO = "auto"
|
||||||
REQUIRED = "required"
|
REQUIRED = "required"
|
||||||
|
|
||||||
|
|
||||||
TOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice)
|
TOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice)
|
||||||
TOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore
|
TOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore
|
||||||
|
|
||||||
|
|
||||||
class AgentState(str, Enum):
|
class AgentState(str, Enum):
|
||||||
"""Agent execution states"""
|
"""Agent execution states"""
|
||||||
|
|
||||||
@ -47,7 +54,7 @@ class ToolCall(BaseModel):
|
|||||||
class Message(BaseModel):
|
class Message(BaseModel):
|
||||||
"""Represents a chat message in the conversation"""
|
"""Represents a chat message in the conversation"""
|
||||||
|
|
||||||
role: ROLE_TYPE = Field(...) # type: ignore
|
role: ROLE_TYPE = Field(...) # type: ignore
|
||||||
content: Optional[str] = Field(default=None)
|
content: Optional[str] = Field(default=None)
|
||||||
tool_calls: Optional[List[ToolCall]] = Field(default=None)
|
tool_calls: Optional[List[ToolCall]] = Field(default=None)
|
||||||
name: Optional[str] = Field(default=None)
|
name: Optional[str] = Field(default=None)
|
||||||
@@ -104,7 +111,9 @@ class Message(BaseModel):
     @classmethod
     def tool_message(cls, content: str, name, tool_call_id: str) -> "Message":
         """Create a tool message"""
-        return cls(role=Role.TOOL, content=content, name=name, tool_call_id=tool_call_id)
+        return cls(
+            role=Role.TOOL, content=content, name=name, tool_call_id=tool_call_id
+        )

     @classmethod
     def from_tool_calls(
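A minimal sketch of how the reformatted factory above is called; the argument values here are illustrative, only the call shape comes from the diff:

    msg = Message.tool_message(
        content="42", name="python_execute", tool_call_id="call_001"
    )
    assert msg.role == Role.TOOL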
@@ -106,7 +106,7 @@ class BrowserUseTool(BaseTool):
     async def _ensure_browser_initialized(self) -> BrowserContext:
         """Ensure browser and context are initialized."""
         if self.browser is None:
-            browser_config_kwargs = {"headless": False}
+            browser_config_kwargs = {"headless": False, "disable_security": True}

             if config.browser_config:
                 from browser_use.browser.browser import ProxySettings
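A hedged sketch of where kwargs like the dict above typically end up, assuming browser-use's 0.1.x API (field names taken from the diff, not verified here):

    from browser_use.browser.browser import Browser, BrowserConfig

    # The kwargs dict from the diff is unpacked into BrowserConfig.
    browser = Browser(config=BrowserConfig(headless=False, disable_security=True))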
@@ -2,6 +2,7 @@ import os

 import aiofiles

+from app.config import WORKSPACE_ROOT
 from app.tool.base import BaseTool


@@ -45,15 +46,22 @@ The tool accepts content and a file path, and saves the content to that location
         str: A message indicating the result of the operation.
         """
         try:
+            # Place the generated file in the workspace directory
+            if os.path.isabs(file_path):
+                file_name = os.path.basename(file_path)
+                full_path = os.path.join(WORKSPACE_ROOT, file_name)
+            else:
+                full_path = os.path.join(WORKSPACE_ROOT, file_path)
+
             # Ensure the directory exists
-            directory = os.path.dirname(file_path)
+            directory = os.path.dirname(full_path)
             if directory and not os.path.exists(directory):
                 os.makedirs(directory)

             # Write directly to the file
-            async with aiofiles.open(file_path, mode, encoding="utf-8") as file:
+            async with aiofiles.open(full_path, mode, encoding="utf-8") as file:
                 await file.write(content)

-            return f"Content successfully saved to {file_path}"
+            return f"Content successfully saved to {full_path}"
         except Exception as e:
             return f"Error saving file: {str(e)}"
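The effect of the new workspace redirection above, as a standalone sketch (WORKSPACE_ROOT is assumed to be /workspace purely for illustration):

    import os

    WORKSPACE_ROOT = "/workspace"  # assumption for this sketch

    def resolve(file_path: str) -> str:
        # Absolute paths keep only their basename, so writes stay inside the workspace.
        if os.path.isabs(file_path):
            return os.path.join(WORKSPACE_ROOT, os.path.basename(file_path))
        return os.path.join(WORKSPACE_ROOT, file_path)

    print(resolve("/tmp/report.html"))  # -> /workspace/report.html
    print(resolve("notes/todo.txt"))    # -> /workspace/notes/todo.txt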
@@ -1,48 +0,0 @@
-import asyncio
-from typing import List
-
-from googlesearch import search
-
-from app.tool.base import BaseTool
-
-
-class GoogleSearch(BaseTool):
-    name: str = "google_search"
-    description: str = """Perform a Google search and return a list of relevant links.
-Use this tool when you need to find information on the web, get up-to-date data, or research specific topics.
-The tool returns a list of URLs that match the search query.
-"""
-    parameters: dict = {
-        "type": "object",
-        "properties": {
-            "query": {
-                "type": "string",
-                "description": "(required) The search query to submit to Google.",
-            },
-            "num_results": {
-                "type": "integer",
-                "description": "(optional) The number of search results to return. Default is 10.",
-                "default": 10,
-            },
-        },
-        "required": ["query"],
-    }
-
-    async def execute(self, query: str, num_results: int = 10) -> List[str]:
-        """
-        Execute a Google search and return a list of URLs.
-
-        Args:
-            query (str): The search query to submit to Google.
-            num_results (int, optional): The number of search results to return. Default is 10.
-
-        Returns:
-            List[str]: A list of URLs matching the search query.
-        """
-        # Run the search in a thread pool to prevent blocking
-        loop = asyncio.get_event_loop()
-        links = await loop.run_in_executor(
-            None, lambda: list(search(query, num_results=num_results))
-        )
-
-        return links
@@ -1,4 +1,6 @@
-import threading
+import multiprocessing
+import sys
+from io import StringIO
 from typing import Dict

 from app.tool.base import BaseTool
@@ -20,6 +22,20 @@ class PythonExecute(BaseTool):
         "required": ["code"],
     }

+    def _run_code(self, code: str, result_dict: dict, safe_globals: dict) -> None:
+        original_stdout = sys.stdout
+        try:
+            output_buffer = StringIO()
+            sys.stdout = output_buffer
+            exec(code, safe_globals, safe_globals)
+            result_dict["observation"] = output_buffer.getvalue()
+            result_dict["success"] = True
+        except Exception as e:
+            result_dict["observation"] = str(e)
+            result_dict["success"] = False
+        finally:
+            sys.stdout = original_stdout
+
     async def execute(
         self,
         code: str,
@@ -35,36 +51,25 @@ class PythonExecute(BaseTool):
         Returns:
             Dict: Contains 'output' with execution output or error message and 'success' status.
         """
-        result = {"observation": ""}
-
-        def run_code():
-            try:
-                safe_globals = {"__builtins__": dict(__builtins__)}
-
-                import sys
-                from io import StringIO
-
-                output_buffer = StringIO()
-                sys.stdout = output_buffer
-
-                exec(code, safe_globals, {})
-
-                sys.stdout = sys.__stdout__
-
-                result["observation"] = output_buffer.getvalue()
-
-            except Exception as e:
-                result["observation"] = str(e)
-                result["success"] = False
-
-        thread = threading.Thread(target=run_code)
-        thread.start()
-        thread.join(timeout)
-
-        if thread.is_alive():
-            return {
-                "observation": f"Execution timeout after {timeout} seconds",
-                "success": False,
-            }
-
-        return result
+        with multiprocessing.Manager() as manager:
+            result = manager.dict({"observation": "", "success": False})
+            if isinstance(__builtins__, dict):
+                safe_globals = {"__builtins__": __builtins__}
+            else:
+                safe_globals = {"__builtins__": __builtins__.__dict__.copy()}
+            proc = multiprocessing.Process(
+                target=self._run_code, args=(code, result, safe_globals)
+            )
+            proc.start()
+            proc.join(timeout)
+
+            # timeout process
+            if proc.is_alive():
+                proc.terminate()
+                proc.join(1)
+                return {
+                    "observation": f"Execution timeout after {timeout} seconds",
+                    "success": False,
+                }
+            return dict(result)
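A thread stuck inside exec() cannot be killed from outside, which is why the rewrite above moves user code into a child process. A minimal, self-contained sketch of the terminate-on-timeout pattern (function names invented for illustration):

    import multiprocessing
    import time

    def _worker() -> None:
        time.sleep(60)  # stands in for runaway user code

    if __name__ == "__main__":
        proc = multiprocessing.Process(target=_worker)
        proc.start()
        proc.join(timeout=1)   # wait at most 1 second
        if proc.is_alive():    # unlike a thread, a process can be terminated
            proc.terminate()
            proc.join()
            print("Execution timeout")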
12
app/tool/search/__init__.py
Normal file
@@ -0,0 +1,12 @@
+from app.tool.search.baidu_search import BaiduSearchEngine
+from app.tool.search.base import WebSearchEngine
+from app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine
+from app.tool.search.google_search import GoogleSearchEngine
+
+
+__all__ = [
+    "WebSearchEngine",
+    "BaiduSearchEngine",
+    "DuckDuckGoSearchEngine",
+    "GoogleSearchEngine",
+]
9
app/tool/search/baidu_search.py
Normal file
@@ -0,0 +1,9 @@
+from baidusearch.baidusearch import search
+
+from app.tool.search.base import WebSearchEngine
+
+
+class BaiduSearchEngine(WebSearchEngine):
+    def perform_search(self, query, num_results=10, *args, **kwargs):
+        """Baidu search engine."""
+        return search(query, num_results=num_results)
17
app/tool/search/base.py
Normal file
@@ -0,0 +1,17 @@
+class WebSearchEngine(object):
+    def perform_search(
+        self, query: str, num_results: int = 10, *args, **kwargs
+    ) -> list[dict]:
+        """
+        Perform a web search and return a list of URLs.
+
+        Args:
+            query (str): The search query to submit to the search engine.
+            num_results (int, optional): The number of search results to return. Default is 10.
+            args: Additional arguments.
+            kwargs: Additional keyword arguments.
+
+        Returns:
+            List: A list of dict matching the search query.
+        """
+        raise NotImplementedError
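WebSearchEngine is a plain strategy interface (note the annotation says list[dict], although the concrete engines in this commit return URL strings). A hedged sketch of plugging in a custom engine, with all names invented for illustration:

    from app.tool.search.base import WebSearchEngine

    class CannedSearchEngine(WebSearchEngine):
        """Toy engine returning fixed results; handy for offline tests."""

        def perform_search(self, query, num_results=10, *args, **kwargs):
            return [f"https://example.com/search?q={query}&r={i}" for i in range(num_results)]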
9
app/tool/search/duckduckgo_search.py
Normal file
@@ -0,0 +1,9 @@
+from duckduckgo_search import DDGS
+
+from app.tool.search.base import WebSearchEngine
+
+
+class DuckDuckGoSearchEngine(WebSearchEngine):
+    async def perform_search(self, query, num_results=10, *args, **kwargs):
+        """DuckDuckGo search engine."""
+        return DDGS.text(query, num_results=num_results)
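As committed, perform_search here is async (the other engines are sync) and text is invoked on the DDGS class rather than an instance. For reference, the duckduckgo_search package is normally used roughly like this (hedged, based on its documented 7.x API):

    from duckduckgo_search import DDGS

    results = DDGS().text("OpenManus", max_results=10)  # list of result dicts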
9
app/tool/search/google_search.py
Normal file
@@ -0,0 +1,9 @@
+from googlesearch import search
+
+from app.tool.search.base import WebSearchEngine
+
+
+class GoogleSearchEngine(WebSearchEngine):
+    def perform_search(self, query, num_results=10, *args, **kwargs):
+        """Google search engine."""
+        return search(query, num_results=num_results)
@@ -40,7 +40,7 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that
         str: The output, and error of the command execution.
         """
         # Split the command by & to handle multiple commands
-        commands = [cmd.strip() for cmd in command.split('&') if cmd.strip()]
+        commands = [cmd.strip() for cmd in command.split("&") if cmd.strip()]
         final_output = CLIResult(output="", error="")

         for cmd in commands:
@@ -61,7 +61,7 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that
             stdout, stderr = await self.process.communicate()
             result = CLIResult(
                 output=stdout.decode().strip(),
-                error=stderr.decode().strip()
+                error=stderr.decode().strip(),
             )
         except Exception as e:
             result = CLIResult(output="", error=str(e))
@@ -70,9 +70,13 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that

         # Combine outputs
         if result.output:
-            final_output.output += (result.output + "\n") if final_output.output else result.output
+            final_output.output += (
+                (result.output + "\n") if final_output.output else result.output
+            )
         if result.error:
-            final_output.error += (result.error + "\n") if final_output.error else result.error
+            final_output.error += (
+                (result.error + "\n") if final_output.error else result.error
+            )

         # Remove trailing newlines
         final_output.output = final_output.output.rstrip()
@@ -124,14 +128,10 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that
             if os.path.isdir(new_path):
                 self.current_path = new_path
                 return CLIResult(
-                    output=f"Changed directory to {self.current_path}",
-                    error=""
+                    output=f"Changed directory to {self.current_path}", error=""
                 )
             else:
-                return CLIResult(
-                    output="",
-                    error=f"No such directory: {new_path}"
-                )
+                return CLIResult(output="", error=f"No such directory: {new_path}")
         except Exception as e:
             return CLIResult(output="", error=str(e))

@@ -152,7 +152,7 @@ Note: You MUST append a `sleep 0.05` to the end of the command for commands that
             parts = shlex.split(command)
             if any(cmd in dangerous_commands for cmd in parts):
                 raise ValueError("Use of dangerous commands is restricted.")
-        except Exception as e:
+        except Exception:
             # If shlex.split fails, try basic string comparison
             if any(cmd in command for cmd in dangerous_commands):
                 raise ValueError("Use of dangerous commands is restricted.")
99
app/tool/web_search.py
Normal file
@@ -0,0 +1,99 @@
+import asyncio
+from typing import List
+
+from tenacity import retry, stop_after_attempt, wait_exponential
+
+from app.config import config
+from app.tool.base import BaseTool
+from app.tool.search import (
+    BaiduSearchEngine,
+    DuckDuckGoSearchEngine,
+    GoogleSearchEngine,
+    WebSearchEngine,
+)
+
+
+class WebSearch(BaseTool):
+    name: str = "web_search"
+    description: str = """Perform a web search and return a list of relevant links.
+This function attempts to use the primary search engine API to get up-to-date results.
+If an error occurs, it falls back to an alternative search engine."""
+    parameters: dict = {
+        "type": "object",
+        "properties": {
+            "query": {
+                "type": "string",
+                "description": "(required) The search query to submit to the search engine.",
+            },
+            "num_results": {
+                "type": "integer",
+                "description": "(optional) The number of search results to return. Default is 10.",
+                "default": 10,
+            },
+        },
+        "required": ["query"],
+    }
+    _search_engine: dict[str, WebSearchEngine] = {
+        "google": GoogleSearchEngine(),
+        "baidu": BaiduSearchEngine(),
+        "duckduckgo": DuckDuckGoSearchEngine(),
+    }
+
+    async def execute(self, query: str, num_results: int = 10) -> List[str]:
+        """
+        Execute a Web search and return a list of URLs.
+
+        Args:
+            query (str): The search query to submit to the search engine.
+            num_results (int, optional): The number of search results to return. Default is 10.
+
+        Returns:
+            List[str]: A list of URLs matching the search query.
+        """
+        engine_order = self._get_engine_order()
+        for engine_name in engine_order:
+            engine = self._search_engine[engine_name]
+            try:
+                links = await self._perform_search_with_engine(
+                    engine, query, num_results
+                )
+                if links:
+                    return links
+            except Exception as e:
+                print(f"Search engine '{engine_name}' failed with error: {e}")
+        return []
+
+    def _get_engine_order(self) -> List[str]:
+        """
+        Determines the order in which to try search engines.
+        Preferred engine is first (based on configuration), followed by the remaining engines.
+
+        Returns:
+            List[str]: Ordered list of search engine names.
+        """
+        preferred = "google"
+        if config.search_config and config.search_config.engine:
+            preferred = config.search_config.engine.lower()
+
+        engine_order = []
+        if preferred in self._search_engine:
+            engine_order.append(preferred)
+        for key in self._search_engine:
+            if key not in engine_order:
+                engine_order.append(key)
+        return engine_order
+
+    @retry(
+        stop=stop_after_attempt(3),
+        wait=wait_exponential(multiplier=1, min=1, max=10),
+    )
+    async def _perform_search_with_engine(
+        self,
+        engine: WebSearchEngine,
+        query: str,
+        num_results: int,
+    ) -> List[str]:
+        loop = asyncio.get_event_loop()
+        return await loop.run_in_executor(
+            None, lambda: list(engine.perform_search(query, num_results=num_results))
+        )
BIN
assets/logo.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 64 KiB
2
config/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
+# prevent the local config file from being uploaded to the remote repository
+config.toml
@@ -1,10 +1,10 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-5-sonnet"
-base_url = "https://api.openai.com/v1"
-api_key = "sk-..."
-max_tokens = 4096
-temperature = 0.0
+model = "claude-3-7-sonnet-20250219" # The LLM model to use
+base_url = "https://api.anthropic.com/v1/" # API endpoint URL
+api_key = "YOUR_API_KEY" # Your API key
+max_tokens = 8192 # Maximum number of tokens in the response
+temperature = 0.0 # Controls randomness

 # [llm] #AZURE OPENAI:
 # api_type= 'azure'
@@ -15,11 +15,29 @@ temperature = 0.0
 # temperature = 0.0
 # api_version="AZURE API VERSION" #"2024-08-01-preview"

+# [llm] #OLLAMA:
+# api_type = 'ollama'
+# model = "llama3.2"
+# base_url = "http://localhost:11434/v1"
+# api_key = "ollama"
+# max_tokens = 4096
+# temperature = 0.0
+
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-5-sonnet"
-base_url = "https://api.openai.com/v1"
-api_key = "sk-..."
+model = "claude-3-7-sonnet-20250219" # The vision model to use
+base_url = "https://api.anthropic.com/v1/" # API endpoint URL for vision model
+api_key = "YOUR_API_KEY" # Your API key for vision model
+max_tokens = 8192 # Maximum number of tokens in the response
+temperature = 0.0 # Controls randomness for vision model
+
+# [llm.vision] #OLLAMA VISION:
+# api_type = 'ollama'
+# model = "llama3.2-vision"
+# base_url = "http://localhost:11434/v1"
+# api_key = "ollama"
+# max_tokens = 4096
+# temperature = 0.0
+
 # Optional configuration for specific browser configuration
 # [browser]
@@ -42,3 +60,8 @@ api_key = "sk-..."
 # server = "http://proxy-server:port"
 # username = "proxy-username"
 # password = "proxy-password"
+
+# Optional configuration, Search settings.
+# [search]
+# Search engine for agent to use. Default is "Google", can be set to "Baidu" or "DuckDuckGo".
+#engine = "Google"
@@ -1,11 +1,12 @@
-pydantic~=2.10.4
-openai~=1.58.1
+pydantic~=2.10.6
+openai~=1.66.3
 tenacity~=9.0.0
 pyyaml~=6.0.2
 loguru~=0.7.3
 numpy
 datasets~=3.2.0
 fastapi~=0.115.11
+tiktoken~=0.9.0

 html2text~=2024.2.26
 gymnasium~=1.0.0
@@ -15,8 +16,10 @@ uvicorn~=0.34.0
 unidiff~=0.7.5
 browser-use~=0.1.40
 googlesearch-python~=1.3.0
+baidusearch~=1.0.3
+duckduckgo_search~=7.5.1

 aiofiles~=24.1.0
 pydantic_core~=2.27.2
 colorama~=0.4.6
-playwright~=1.49.1
+playwright~=1.50.0
4
setup.py
@@ -16,7 +16,7 @@ setup(
     packages=find_packages(),
     install_requires=[
         "pydantic~=2.10.4",
-        "openai~=1.58.1",
+        "openai>=1.58.1,<1.67.0",
         "tenacity~=9.0.0",
         "pyyaml~=6.0.2",
         "loguru~=0.7.3",
@@ -31,7 +31,7 @@ setup(
         "browser-use~=0.1.40",
         "googlesearch-python~=1.3.0",
         "aiofiles~=24.1.0",
-        "pydantic_core~=2.27.2",
+        "pydantic_core>=2.27.2,<2.28.0",
         "colorama~=0.4.6",
     ],
     classifiers=[