Merge remote-tracking branch 'origin/main'

liangxinbing 2025-03-20 01:39:58 +08:00
commit 8fca2ff1b7
9 changed files with 137 additions and 83 deletions

View File

@ -1,14 +0,0 @@
---
name: "🤔 Request new features"
about: Suggest ideas or features you'd like to see implemented in OpenManus.
title: ''
labels: kind/features
assignees: ''
---
**Feature description**
<!-- Provide a clear and concise description of the proposed feature -->
**Your Feature**
<!-- Explain your idea or implementation process. Optionally, include a Pull Request URL. -->
<!-- Ensure accompanying docs/tests/examples are provided for review. -->

View File

@ -0,0 +1,21 @@
name: "🤔 Request new features"
description: Suggest ideas or features youd like to see implemented in OpenManus.
labels: enhancement
body:
- type: textarea
id: feature-description
attributes:
label: Feature description
description: |
Provide a clear and concise description of the proposed feature
validations:
required: true
- type: textarea
id: your-feature
attributes:
label: Your Feature
description: |
Explain your idea or implementation process, if any. Optionally, include a Pull Request URL.
Ensure accompanying docs/tests/examples are provided for review.
validations:
required: false

View File

@ -1,25 +0,0 @@
---
name: "🪲 Show me the Bug"
about: Report a bug encountered while using OpenManus and seek assistance.
title: ''
labels: kind/bug
assignees: ''
---
**Bug description**
<!-- Clearly describe the bug you encountered -->
**Bug solved method**
<!-- If resolved, explain the solution. Optionally, include a Pull Request URL. -->
<!-- If unresolved, provide additional details to aid investigation -->
**Environment information**
<!-- System: e.g., Ubuntu 22.04, Python: e.g., 3.12, OpenManus version: e.g., 0.1.0 -->
- System version:
- Python version:
- OpenManus version or branch:
- Installation method (e.g., `pip install -r requirements.txt` or `pip install -e .`):
**Screenshots or logs**
<!-- Attach screenshots or logs to help diagnose the issue -->

View File

@ -0,0 +1,44 @@
name: "🪲 Show me the Bug"
description: Report a bug encountered while using OpenManus and seek assistance.
labels: bug
body:
- type: textarea
id: bug-description
attributes:
label: Bug Description
description: |
Clearly describe the bug you encountered
validations:
required: true
- type: textarea
id: solve-method
attributes:
label: Bug solved method
description: |
If resolved, explain the solution. Optionally, include a Pull Request URL.
If unresolved, provide additional details to aid investigation
validations:
required: true
- type: textarea
id: environment-information
attributes:
label: Environment information
description: |
System: e.g., Ubuntu 22.04
Python: e.g., 3.12
OpenManus version: e.g., 0.1.0
value: |
- System version:
- Python version:
- OpenManus version or branch:
- Installation method (e.g., `pip install -r requirements.txt` or `pip install -e .`):
validations:
required: true
- type: textarea
id: extra-information
attributes:
label: Extra information
description: |
For example, attach screenshots or logs to help diagnose the issue
validations:
required: false

View File

@ -15,21 +15,20 @@ jobs:
      (github.event_name == 'pull_request') ||
      (github.event_name == 'issue_comment' &&
       contains(github.event.comment.body, '!pr-diff') &&
-      (github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') &&
+      (github.event.comment.author_association == 'CONTRIBUTOR' || github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') &&
       github.event.issue.pull_request)
    steps:
      - name: Get PR head SHA
        id: get-pr-sha
        run: |
-         if [ "${{ github.event_name }}" == "pull_request" ]; then
-           echo "pr_sha=${{ github.event.pull_request.head.sha }}" >> $GITHUB_OUTPUT
-           echo "Retrieved PR head SHA: ${{ github.event.pull_request.head.sha }}"
-         else
-           PR_URL="${{ github.event.issue.pull_request.url }}"
-           SHA=$(curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" $PR_URL | jq -r '.head.sha')
-           echo "pr_sha=$SHA" >> $GITHUB_OUTPUT
-           echo "Retrieved PR head SHA from API: $SHA"
-         fi
+         PR_URL="${{ github.event.issue.pull_request.url || github.event.pull_request.url }}"
+         # https://api.github.com/repos/OpenManus/pulls/1
+         RESPONSE=$(curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" $PR_URL)
+         SHA=$(echo $RESPONSE | jq -r '.head.sha')
+         TARGET_BRANCH=$(echo $RESPONSE | jq -r '.base.ref')
+         echo "pr_sha=$SHA" >> $GITHUB_OUTPUT
+         echo "target_branch=$TARGET_BRANCH" >> $GITHUB_OUTPUT
+         echo "Retrieved PR head SHA from API: $SHA, target branch: $TARGET_BRANCH"
      - name: Check out code
        uses: actions/checkout@v4
        with:
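The two `jq` filters above read standard fields of the GitHub pulls API payload. A hedged sketch of the relevant response shape, with illustrative values rather than a real PR:

```python
# Sketch: the fields the get-pr-sha step extracts from the GitHub
# pulls API response (values here are illustrative only).
import json

payload = json.loads('{"head": {"sha": "abc123"}, "base": {"ref": "main"}}')
pr_sha = payload["head"]["sha"]         # exported as the pr_sha output
target_branch = payload["base"]["ref"]  # exported as the target_branch output
print(pr_sha, target_branch)
```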
@ -49,6 +48,7 @@ jobs:
          OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
          GH_TOKEN: ${{ github.token }}
          PR_NUMBER: ${{ github.event.pull_request.number || github.event.issue.number }}
+         TARGET_BRANCH: ${{ steps.get-pr-sha.outputs.target_branch }}
        run: |-
          cat << 'EOF' > /tmp/_workflow_core.py
          import os
@ -59,7 +59,7 @@ jobs:
          def get_diff():
              result = subprocess.run(
-                 ['git', 'diff', 'origin/main...HEAD'],
+                 ['git', 'diff', 'origin/' + os.getenv('TARGET_BRANCH') + '...HEAD'],
                  capture_output=True, text=True, check=True)
              return '\n'.join(
                  line for line in result.stdout.split('\n')
@ -86,6 +86,17 @@ jobs:
          ### Spelling/Offensive Content Check
          - No spelling mistakes or offensive content found in the code or comments.

+         ## Chinese (Simplified)
+         - Added the `ABC` class
+         - Fixed the behavior of `f()` in the `foo` module
+         ### Comment Highlights
+         - `config.toml` must be configured correctly for the new feature to work properly.
+         ### Content Check
+         - No spelling mistakes or inappropriate wording found in the code or comments.

          3. Highlight non-English comments
          4. Check for spelling/offensive content'''
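A minimal sketch of the diff-collection change above: the comparison base now comes from the `TARGET_BRANCH` environment variable exported by the `get-pr-sha` step instead of being hardcoded to `origin/main`. The `'main'` fallback below is a hypothetical default for running the sketch outside the workflow:

```python
import os
import subprocess

def get_diff() -> str:
    # Base the diff on the PR's actual target branch; 'main' is only a
    # hypothetical fallback for illustration outside the workflow.
    target = os.getenv("TARGET_BRANCH", "main")
    result = subprocess.run(
        ["git", "diff", f"origin/{target}...HEAD"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout
```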

View File

@ -71,40 +71,42 @@ class ToolCallAgent(ReActAgent):
                return False
            raise

-       self.tool_calls = response.tool_calls
+       self.tool_calls = tool_calls = (
+           response.tool_calls if response and response.tool_calls else []
+       )
+       content = response.content if response and response.content else ""

        # Log response info
-       logger.info(f"{self.name}'s thoughts: {response.content}")
+       logger.info(f"{self.name}'s thoughts: {content}")
        logger.info(
-           f"🛠️ {self.name} selected {len(response.tool_calls) if response.tool_calls else 0} tools to use"
+           f"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use"
        )
-       if response.tool_calls:
+       if tool_calls:
            logger.info(
-               f"🧰 Tools being prepared: {[call.function.name for call in response.tool_calls]}"
+               f"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}"
            )
-           logger.info(
-               f"🔧 Tool arguments: {response.tool_calls[0].function.arguments}"
-           )
+           logger.info(f"🔧 Tool arguments: {tool_calls[0].function.arguments}")

        try:
+           if response is None:
+               raise RuntimeError("No response received from the LLM")
+
            # Handle different tool_choices modes
            if self.tool_choices == ToolChoice.NONE:
-               if response.tool_calls:
+               if tool_calls:
                    logger.warning(
                        f"🤔 Hmm, {self.name} tried to use tools when they weren't available!"
                    )
-               if response.content:
-                   self.memory.add_message(Message.assistant_message(response.content))
+               if content:
+                   self.memory.add_message(Message.assistant_message(content))
                    return True
                return False

            # Create and add assistant message
            assistant_msg = (
-               Message.from_tool_calls(
-                   content=response.content, tool_calls=self.tool_calls
-               )
+               Message.from_tool_calls(content=content, tool_calls=self.tool_calls)
                if self.tool_calls
-               else Message.assistant_message(response.content)
+               else Message.assistant_message(content)
            )
            self.memory.add_message(assistant_msg)
@ -113,7 +115,7 @@ class ToolCallAgent(ReActAgent):
            # For 'auto' mode, continue with content if no commands but content exists
            if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:
-               return bool(response.content)
+               return bool(content)

            return bool(self.tool_calls)
        except Exception as e:
@ -209,7 +211,7 @@ class ToolCallAgent(ReActAgent):
return f"Error: {error_msg}" return f"Error: {error_msg}"
except Exception as e: except Exception as e:
error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}" error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}"
logger.error(error_msg) logger.exception(error_msg)
return f"Error: {error_msg}" return f"Error: {error_msg}"
async def _handle_special_tool(self, name: str, result: Any, **kwargs): async def _handle_special_tool(self, name: str, result: Any, **kwargs):
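The normalization at the top of this file's diff collapses a possibly-None LLM response into safe locals before any attribute access. A minimal, self-contained sketch of the pattern; the `Response` dataclass is a hypothetical stand-in for the SDK's message type:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Response:
    # Hypothetical stand-in for the SDK's ChatCompletionMessage
    content: Optional[str] = None
    tool_calls: Optional[list] = None

def normalize(response: Optional[Response]) -> tuple[list, str]:
    # Guard every attribute access behind a truthiness check, so a None
    # response (or None fields) degrades to empty values instead of raising.
    tool_calls = response.tool_calls if response and response.tool_calls else []
    content = response.content if response and response.content else ""
    return tool_calls, content

print(normalize(None))            # ([], '')
print(normalize(Response("hi")))  # ([], 'hi')
```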

View File

@ -10,6 +10,7 @@ from openai import (
    OpenAIError,
    RateLimitError,
)
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
from tenacity import (
    retry,
    retry_if_exception_type,
@ -421,9 +422,9 @@ class LLM:
            if not stream:
                # Non-streaming request
-               params["stream"] = False
-               response = await self.client.chat.completions.create(**params)
+               response = await self.client.chat.completions.create(
+                   **params, stream=False
+               )

                if not response.choices or not response.choices[0].message.content:
                    raise ValueError("Empty or invalid response from LLM")
@ -438,8 +439,7 @@ class LLM:
            # Streaming request, For streaming, update estimated token count before making the request
            self.update_token_count(input_tokens)

-           params["stream"] = True
-           response = await self.client.chat.completions.create(**params)
+           response = await self.client.chat.completions.create(**params, stream=True)

            collected_messages = []
            completion_text = ""
@ -466,11 +466,11 @@ class LLM:
        except TokenLimitExceeded:
            # Re-raise token limit errors without logging
            raise
-       except ValueError as ve:
-           logger.error(f"Validation error: {ve}")
+       except ValueError:
+           logger.exception(f"Validation error")
            raise
        except OpenAIError as oe:
-           logger.error(f"OpenAI API error: {oe}")
+           logger.exception(f"OpenAI API error")
            if isinstance(oe, AuthenticationError):
                logger.error("Authentication failed. Check API key.")
            elif isinstance(oe, RateLimitError):
@ -478,8 +478,8 @@ class LLM:
            elif isinstance(oe, APIError):
                logger.error(f"API error: {oe}")
            raise
-       except Exception as e:
-           logger.error(f"Unexpected error in ask: {e}")
+       except Exception:
+           logger.exception(f"Unexpected error in ask")
            raise

    @retry(
@ -654,7 +654,7 @@ class LLM:
        tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO,  # type: ignore
        temperature: Optional[float] = None,
        **kwargs,
-   ):
+   ) -> ChatCompletionMessage | None:
        """
        Ask LLM using functions/tools and return the response.
@ -732,12 +732,15 @@ class LLM:
                temperature if temperature is not None else self.temperature
            )

-           response = await self.client.chat.completions.create(**params)
+           response: ChatCompletion = await self.client.chat.completions.create(
+               **params, stream=False
+           )

            # Check if response is valid
            if not response.choices or not response.choices[0].message:
                print(response)
-               raise ValueError("Invalid or empty response from LLM")
+               # raise ValueError("Invalid or empty response from LLM")
+               return None

            # Update token counts
            self.update_token_count(
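Why `stream` is now passed as a literal argument instead of being stuffed into `params`: the OpenAI SDK overloads `chat.completions.create()` on the `stream` flag, so a literal `stream=False` resolves the return type to `ChatCompletion` rather than a chunk stream, which is what lets the annotation above type-check. A hedged sketch, assuming an `AsyncOpenAI` client configured via environment variables; the model name is illustrative:

```python
import asyncio
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion

async def main() -> None:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment
    # A literal stream=False selects the non-streaming overload, so the
    # annotation resolves to ChatCompletion, not AsyncStream[ChatCompletionChunk].
    response: ChatCompletion = await client.chat.completions.create(
        model="gpt-4o-mini",  # hypothetical model choice for illustration
        messages=[{"role": "user", "content": "ping"}],
        stream=False,
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```

Note also that with `ask_tool()` now returning `ChatCompletionMessage | None`, callers such as `ToolCallAgent.think()` above branch on `None` instead of catching a `ValueError`.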

View File

@ -1,4 +1,5 @@
import asyncio
+import base64
import json
from typing import Generic, Optional, TypeVar
@ -552,7 +553,16 @@ Page content:
        viewport_height = ctx.config.browser_window_size.get("height", 0)

        # Take a screenshot for the state
-       screenshot = await ctx.take_screenshot(full_page=True)
+       page = await ctx.get_current_page()
+       await page.bring_to_front()
+       await page.wait_for_load_state()
+
+       screenshot = await page.screenshot(
+           full_page=True, animations="disabled", type="jpeg", quality=100
+       )
+       screenshot = base64.b64encode(screenshot).decode("utf-8")

        # Build the state info with all required fields
        state_info = {
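A hedged sketch of the new screenshot path using `playwright.async_api` directly; the real code goes through the browser-use context (`ctx`), so the standalone setup here is illustrative only:

```python
import asyncio
import base64
from playwright.async_api import async_playwright

async def capture(url: str) -> str:
    async with async_playwright() as p:
        browser = await p.chromium.launch()
        page = await browser.new_page()
        await page.goto(url)
        await page.bring_to_front()        # ensure the tab is focused
        await page.wait_for_load_state()   # wait until the page settles
        raw = await page.screenshot(
            full_page=True, animations="disabled", type="jpeg", quality=100
        )
        await browser.close()
        # Base64-encode so the image embeds as a JSON-safe string,
        # matching how the state info carries it.
        return base64.b64encode(raw).decode("utf-8")

print(asyncio.run(capture("https://example.com"))[:60])
```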

View File

@ -42,17 +42,19 @@ class FileOperator(Protocol):
class LocalFileOperator(FileOperator):
    """File operations implementation for local filesystem."""

+   encoding: str = "utf-8"
+
    async def read_file(self, path: PathLike) -> str:
        """Read content from a local file."""
        try:
-           return Path(path).read_text()
+           return Path(path).read_text(encoding=self.encoding)
        except Exception as e:
            raise ToolError(f"Failed to read {path}: {str(e)}") from None

    async def write_file(self, path: PathLike, content: str) -> None:
        """Write content to a local file."""
        try:
-           Path(path).write_text(content)
+           Path(path).write_text(content, encoding=self.encoding)
        except Exception as e:
            raise ToolError(f"Failed to write to {path}: {str(e)}") from None