fix(llm): accept empty choices as a valid response and handle that case gracefully

Sheng Fan 2025-03-19 14:09:46 +08:00
parent 4ea7f8e988
commit 94e2ab7c86
2 changed files with 26 additions and 21 deletions

app/agent/toolcall.py

@@ -10,7 +10,6 @@ from app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT
 from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice
 from app.tool import CreateChatCompletion, Terminate, ToolCollection
-
 
 TOOL_CALL_REQUIRED = "Tool calls required but none provided"
@@ -71,40 +70,42 @@ class ToolCallAgent(ReActAgent):
                 return False
             raise
 
-        self.tool_calls = response.tool_calls
+        self.tool_calls = tool_calls = (
+            response.tool_calls if response and response.tool_calls else []
+        )
+        content = response.content if response and response.content else ""
 
         # Log response info
-        logger.info(f"{self.name}'s thoughts: {response.content}")
+        logger.info(f"{self.name}'s thoughts: {content}")
         logger.info(
-            f"🛠️ {self.name} selected {len(response.tool_calls) if response.tool_calls else 0} tools to use"
+            f"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use"
         )
-        if response.tool_calls:
+        if tool_calls:
             logger.info(
-                f"🧰 Tools being prepared: {[call.function.name for call in response.tool_calls]}"
+                f"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}"
             )
-            logger.info(
-                f"🔧 Tool arguments: {response.tool_calls[0].function.arguments}"
-            )
+            logger.info(f"🔧 Tool arguments: {tool_calls[0].function.arguments}")
 
         try:
+            if response is None:
+                raise RuntimeError("No response received from the LLM")
+
             # Handle different tool_choices modes
             if self.tool_choices == ToolChoice.NONE:
-                if response.tool_calls:
+                if tool_calls:
                     logger.warning(
                         f"🤔 Hmm, {self.name} tried to use tools when they weren't available!"
                     )
-                if response.content:
-                    self.memory.add_message(Message.assistant_message(response.content))
+                if content:
+                    self.memory.add_message(Message.assistant_message(content))
                     return True
                 return False
 
             # Create and add assistant message
             assistant_msg = (
-                Message.from_tool_calls(
-                    content=response.content, tool_calls=self.tool_calls
-                )
+                Message.from_tool_calls(content=content, tool_calls=self.tool_calls)
                 if self.tool_calls
-                else Message.assistant_message(response.content)
+                else Message.assistant_message(content)
             )
             self.memory.add_message(assistant_msg)
@@ -113,7 +114,7 @@ class ToolCallAgent(ReActAgent):
 
             # For 'auto' mode, continue with content if no commands but content exists
             if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:
-                return bool(response.content)
+                return bool(content)
 
             return bool(self.tool_calls)
         except Exception as e:
@@ -209,7 +210,7 @@ class ToolCallAgent(ReActAgent):
             return f"Error: {error_msg}"
         except Exception as e:
             error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}"
-            logger.error(error_msg)
+            logger.exception(error_msg)
             return f"Error: {error_msg}"
 
     async def _handle_special_tool(self, name: str, result: Any, **kwargs):
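Note: the key pattern in this hunk is normalizing a possibly-None response into safe defaults before any logging or branching touches it. A minimal standalone sketch of that pattern follows; FakeResponse and normalize are illustrative names, not part of the codebase:

    from typing import Optional

    class FakeResponse:
        # Stand-in for ChatCompletionMessage; either field may be absent.
        def __init__(self, content: Optional[str] = None, tool_calls: Optional[list] = None):
            self.content = content
            self.tool_calls = tool_calls

    def normalize(response: Optional[FakeResponse]) -> tuple[list, str]:
        # Mirrors the diff: fall back to [] / "" so later code never dereferences None.
        tool_calls = response.tool_calls if response and response.tool_calls else []
        content = response.content if response and response.content else ""
        return tool_calls, content

    print(normalize(None))                        # ([], '')
    print(normalize(FakeResponse(content="hi")))  # ([], 'hi')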

app/llm.py

@@ -10,6 +10,7 @@ from openai import (
     OpenAIError,
     RateLimitError,
 )
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from tenacity import (
     retry,
     retry_if_exception_type,
@@ -653,7 +654,7 @@ class LLM:
         tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO,  # type: ignore
         temperature: Optional[float] = None,
         **kwargs,
-    ):
+    ) -> ChatCompletionMessage | None:
         """
         Ask LLM using functions/tools and return the response.
@@ -731,12 +732,15 @@ class LLM:
                 temperature if temperature is not None else self.temperature
             )
 
-            response = await self.client.chat.completions.create(**params)
+            response: ChatCompletion = await self.client.chat.completions.create(
+                **params, stream=False
+            )
 
             # Check if response is valid
             if not response.choices or not response.choices[0].message:
                 print(response)
-                raise ValueError("Invalid or empty response from LLM")
+                # raise ValueError("Invalid or empty response from LLM")
+                return None
 
             # Update token counts
             self.update_token_count(
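Note: because ask_tool() now returns None instead of raising when `choices` comes back empty, every caller must guard before dereferencing the result. A hedged sketch of the caller side; step, llm, messages, and tools are hypothetical names, not code from this commit:

    async def step(llm, messages, tools):
        response = await llm.ask_tool(messages=messages, tools=tools)
        if response is None:
            # Empty choices: treat as an empty assistant turn, not a failure.
            return "", []
        return response.content or "", response.tool_calls or []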