From 94e2ab7c86607505807def45ddd3c4709ac85969 Mon Sep 17 00:00:00 2001
From: Sheng Fan
Date: Wed, 19 Mar 2025 14:09:46 +0800
Subject: [PATCH] fix(llm): accept empty choices as valid response and handle
 that case gracefully

---
 app/agent/toolcall.py | 37 +++++++++++++++++++------------------
 app/llm.py            | 10 +++++++---
 2 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/app/agent/toolcall.py b/app/agent/toolcall.py
index 131fd91..b9fab70 100644
--- a/app/agent/toolcall.py
+++ b/app/agent/toolcall.py
@@ -10,7 +10,6 @@ from app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT
 from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice
 from app.tool import CreateChatCompletion, Terminate, ToolCollection
 
-
 TOOL_CALL_REQUIRED = "Tool calls required but none provided"
 
 
@@ -71,40 +70,42 @@ class ToolCallAgent(ReActAgent):
                 return False
             raise
 
-        self.tool_calls = response.tool_calls
+        self.tool_calls = tool_calls = (
+            response.tool_calls if response and response.tool_calls else []
+        )
+        content = response.content if response and response.content else ""
 
         # Log response info
-        logger.info(f"✨ {self.name}'s thoughts: {response.content}")
+        logger.info(f"✨ {self.name}'s thoughts: {content}")
         logger.info(
-            f"🛠️ {self.name} selected {len(response.tool_calls) if response.tool_calls else 0} tools to use"
+            f"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use"
         )
-        if response.tool_calls:
+        if tool_calls:
             logger.info(
-                f"🧰 Tools being prepared: {[call.function.name for call in response.tool_calls]}"
-            )
-            logger.info(
-                f"🔧 Tool arguments: {response.tool_calls[0].function.arguments}"
+                f"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}"
             )
+            logger.info(f"🔧 Tool arguments: {tool_calls[0].function.arguments}")
 
         try:
+            if response is None:
+                raise RuntimeError("No response received from the LLM")
+
             # Handle different tool_choices modes
             if self.tool_choices == ToolChoice.NONE:
-                if response.tool_calls:
+                if tool_calls:
                     logger.warning(
                         f"🤔 Hmm, {self.name} tried to use tools when they weren't available!"
                     )
-                if response.content:
-                    self.memory.add_message(Message.assistant_message(response.content))
+                if content:
+                    self.memory.add_message(Message.assistant_message(content))
                     return True
                 return False
 
             # Create and add assistant message
             assistant_msg = (
-                Message.from_tool_calls(
-                    content=response.content, tool_calls=self.tool_calls
-                )
+                Message.from_tool_calls(content=content, tool_calls=self.tool_calls)
                 if self.tool_calls
-                else Message.assistant_message(response.content)
+                else Message.assistant_message(content)
             )
             self.memory.add_message(assistant_msg)
 
@@ -113,7 +114,7 @@ class ToolCallAgent(ReActAgent):
 
             # For 'auto' mode, continue with content if no commands but content exists
             if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:
-                return bool(response.content)
+                return bool(content)
 
             return bool(self.tool_calls)
         except Exception as e:
@@ -209,7 +210,7 @@ class ToolCallAgent(ReActAgent):
                 return f"Error: {error_msg}"
         except Exception as e:
             error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}"
-            logger.error(error_msg)
+            logger.exception(error_msg)
             return f"Error: {error_msg}"
 
     async def _handle_special_tool(self, name: str, result: Any, **kwargs):
diff --git a/app/llm.py b/app/llm.py
index f99b6b1..1a4e05b 100644
--- a/app/llm.py
+++ b/app/llm.py
@@ -10,6 +10,7 @@ from openai import (
     OpenAIError,
     RateLimitError,
 )
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from tenacity import (
     retry,
     retry_if_exception_type,
@@ -653,7 +654,7 @@ class LLM:
         tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO,  # type: ignore
         temperature: Optional[float] = None,
         **kwargs,
-    ):
+    ) -> ChatCompletionMessage | None:
         """
         Ask LLM using functions/tools and return the response.
 
@@ -731,12 +732,15 @@ class LLM:
                 temperature if temperature is not None else self.temperature
             )
 
-            response = await self.client.chat.completions.create(**params)
+            response: ChatCompletion = await self.client.chat.completions.create(
+                **params, stream=False
+            )
 
             # Check if response is valid
             if not response.choices or not response.choices[0].message:
                 print(response)
-                raise ValueError("Invalid or empty response from LLM")
+                # raise ValueError("Invalid or empty response from LLM")
+                return None
 
             # Update token counts
            self.update_token_count(
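
Caller-side note: with this patch, ask_tool() returns None instead of raising
ValueError when the provider answers with an empty `choices` array, so callers
must guard before dereferencing the message. Below is a minimal sketch of the
guarding pattern that ToolCallAgent.think adopts above; the `agent` object and
its `messages`, `available_tools`, and `tool_choices` attributes are
hypothetical stand-ins, not part of this patch.

    from typing import Optional

    from openai.types.chat.chat_completion_message import ChatCompletionMessage

    async def think_once(agent) -> bool:
        # ask_tool() may now return None rather than raising when the
        # completion comes back with no choices. (`agent` and its
        # attributes are hypothetical stand-ins for illustration.)
        response: Optional[ChatCompletionMessage] = await agent.llm.ask_tool(
            messages=agent.messages,
            tools=agent.available_tools.to_params(),
            tool_choice=agent.tool_choices,
        )

        # Normalize possibly-missing fields before any logging or memory
        # updates, mirroring ToolCallAgent.think after this patch.
        tool_calls = response.tool_calls if response and response.tool_calls else []
        content = response.content if response and response.content else ""

        if response is None:
            # Fail loudly only at the point where a response is required.
            raise RuntimeError("No response received from the LLM")

        return bool(tool_calls) or bool(content)

The design choice is to let the low-level client report "no answer" as data
(None) and leave the decision of whether that is fatal to the agent, which is
why ToolCallAgent.think raises RuntimeError only inside its own try block.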