fix(llm): accept empty choices as a valid response and handle that case gracefully

Author: Sheng Fan
Date:   2025-03-19 14:09:46 +08:00
Parent: 4ea7f8e988
Commit: 94e2ab7c86
2 changed files with 26 additions and 21 deletions
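Before this change, LLM.ask_tool raised ValueError whenever the API returned an empty choices list, which aborted the agent's think() step. Now ask_tool returns None in that case, and ToolCallAgent.think() normalizes the missing message into empty tool calls and empty content before logging or branching on it. A minimal standalone sketch of that normalization (the helper name and the SimpleNamespace stand-in are illustrative, not part of the diff):

    from types import SimpleNamespace
    from typing import Any, List, Optional, Tuple

    def normalize(response: Optional[Any]) -> Tuple[List[Any], str]:
        # Mirrors the guard added in ToolCallAgent.think(): a None response
        # or empty fields degrade to "no tool calls, no content".
        tool_calls = response.tool_calls if response and response.tool_calls else []
        content = response.content if response and response.content else ""
        return tool_calls, content

    print(normalize(None))  # ([], '')
    print(normalize(SimpleNamespace(tool_calls=None, content="hi")))  # ([], 'hi')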

app/agent/toolcall.py

@@ -10,7 +10,6 @@ from app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT
 from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice
 from app.tool import CreateChatCompletion, Terminate, ToolCollection
 
 TOOL_CALL_REQUIRED = "Tool calls required but none provided"
@@ -71,40 +70,42 @@ class ToolCallAgent(ReActAgent):
                 return False
             raise
 
-        self.tool_calls = response.tool_calls
+        self.tool_calls = tool_calls = (
+            response.tool_calls if response and response.tool_calls else []
+        )
+        content = response.content if response and response.content else ""
 
         # Log response info
-        logger.info(f"{self.name}'s thoughts: {response.content}")
+        logger.info(f"{self.name}'s thoughts: {content}")
         logger.info(
-            f"🛠️ {self.name} selected {len(response.tool_calls) if response.tool_calls else 0} tools to use"
+            f"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use"
         )
-        if response.tool_calls:
+        if tool_calls:
             logger.info(
-                f"🧰 Tools being prepared: {[call.function.name for call in response.tool_calls]}"
-            )
-            logger.info(
-                f"🔧 Tool arguments: {response.tool_calls[0].function.arguments}"
+                f"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}"
             )
+            logger.info(f"🔧 Tool arguments: {tool_calls[0].function.arguments}")
 
         try:
+            if response is None:
+                raise RuntimeError("No response received from the LLM")
+
             # Handle different tool_choices modes
             if self.tool_choices == ToolChoice.NONE:
-                if response.tool_calls:
+                if tool_calls:
                     logger.warning(
                         f"🤔 Hmm, {self.name} tried to use tools when they weren't available!"
                     )
-                if response.content:
-                    self.memory.add_message(Message.assistant_message(response.content))
+                if content:
+                    self.memory.add_message(Message.assistant_message(content))
                     return True
                 return False
 
             # Create and add assistant message
             assistant_msg = (
-                Message.from_tool_calls(
-                    content=response.content, tool_calls=self.tool_calls
-                )
+                Message.from_tool_calls(content=content, tool_calls=self.tool_calls)
                 if self.tool_calls
-                else Message.assistant_message(response.content)
+                else Message.assistant_message(content)
             )
             self.memory.add_message(assistant_msg)
@@ -113,7 +114,7 @@ class ToolCallAgent(ReActAgent):
             # For 'auto' mode, continue with content if no commands but content exists
             if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:
-                return bool(response.content)
+                return bool(content)
 
             return bool(self.tool_calls)
 
         except Exception as e:
@@ -209,7 +210,7 @@ class ToolCallAgent(ReActAgent):
             return f"Error: {error_msg}"
         except Exception as e:
             error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}"
-            logger.error(error_msg)
+            logger.exception(error_msg)
             return f"Error: {error_msg}"
 
     async def _handle_special_tool(self, name: str, result: Any, **kwargs):
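The last hunk above is a separate hardening fix: the tool-execution error handler now logs with logger.exception instead of logger.error. Called from inside an except block, exception() records the active traceback along with the message, so the failing tool's stack trace lands in the logs. A stdlib-logging sketch of the difference (OpenManus routes logs through its own app.logger, assumed here to expose the same exception() semantics):

    import logging

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger("toolcall-demo")

    try:
        raise RuntimeError("tool blew up")
    except Exception as e:
        error_msg = f"⚠️ Tool encountered a problem: {str(e)}"
        # logger.error(error_msg) would emit only the message;
        # logger.exception(error_msg) also appends the current traceback.
        logger.exception(error_msg)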

app/llm.py

@@ -10,6 +10,7 @@ from openai import (
     OpenAIError,
     RateLimitError,
 )
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from tenacity import (
     retry,
     retry_if_exception_type,
@@ -653,7 +654,7 @@ class LLM:
         tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO,  # type: ignore
         temperature: Optional[float] = None,
         **kwargs,
-    ):
+    ) -> ChatCompletionMessage | None:
         """
         Ask LLM using functions/tools and return the response.
@@ -731,12 +732,15 @@ class LLM:
                 temperature if temperature is not None else self.temperature
             )
 
-            response = await self.client.chat.completions.create(**params)
+            response: ChatCompletion = await self.client.chat.completions.create(
+                **params, stream=False
+            )
 
             # Check if response is valid
             if not response.choices or not response.choices[0].message:
                 print(response)
-                raise ValueError("Invalid or empty response from LLM")
+                # raise ValueError("Invalid or empty response from LLM")
+                return None
 
             # Update token counts
             self.update_token_count(
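Two details in these hunks are easy to miss. The explicit stream=False literal lets the OpenAI SDK resolve its overloaded create() call to a plain ChatCompletion (rather than an AsyncStream of chunks), which is what makes the response: ChatCompletion annotation accurate. And since ask_tool is now annotated ChatCompletionMessage | None, callers must branch on None instead of catching ValueError. A hypothetical caller under that contract (llm, messages, and tools are placeholders, not names from the diff):

    from typing import Optional

    from openai.types.chat.chat_completion_message import ChatCompletionMessage

    async def step(llm, messages, tools) -> bool:
        # ask_tool may now return None when the API answers with an empty
        # `choices` list; treat that as "nothing to do", not as an error.
        message: Optional[ChatCompletionMessage] = await llm.ask_tool(
            messages=messages, tools=tools
        )
        if message is None:
            return False
        return bool(message.tool_calls or message.content)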