fix(chat): fix location clarification loop and test isolation bugs

- Add _COMMAND_VERBS guard to _is_likely_location_reply() so messages
  starting with imperative verbs (find, search, show, get, ...) are not
  mistakenly treated as user location replies. This prevented 'Find
  good places' from being used as a retry location, which was causing
  the clarification path to never fire and the tool loop to exhaust
  MAX_ALL_FAILURE_ROUNDS instead.
- Extract city from comma-delimited fallback address strings when
  city/country FKs are absent, e.g. 'Little Turnstile 6, London'
  → 'London', so context-based location retry works for manually-
  entered itinerary stops without geocoded FK data.
- Add attempted_location_retry flag: if retry was attempted but all
  retry attempts failed, convert result to an execution failure rather
  than emitting a clarification prompt (user already provided context
  via their itinerary).
- Fix test assertion ordering in test_collection_context_retry_extracts_
  city_from_fallback_address: streaming_content must be consumed before
  checking mock call counts since StreamingHttpResponse is lazy.
This commit is contained in:
2026-03-10 18:37:30 +00:00
parent dbabbdf9f0
commit 89b42126ec
3 changed files with 211 additions and 7 deletions

View File

@@ -74,6 +74,8 @@ Run in this order:
- Chat providers: dynamic catalog from `GET /api/chat/providers/`; configured in `CHAT_PROVIDER_CONFIG`
- Chat model override: dropdown selector fed by `GET /api/chat/providers/{provider}/models/`; persisted in `localStorage` key `voyage_chat_model_prefs`; backend accepts optional `model` param in `send_message`
- Chat context: collection chats inject collection UUID + multi-stop itinerary context; system prompt guides `get_trip_details`-first reasoning and confirms only before first `add_to_itinerary`; `search_places` has a deterministic context-retry fallback — when the LLM omits `location`, the backend retries using the trip destination or first itinerary stop before asking the user for clarification; a dining-intent heuristic infers `category="food"` from user messages when the LLM omits category for restaurant/dining requests
- `itinerary_stops` extraction: when `city`/`country` FK is null, city is extracted from the last non-numeric segment of a comma-separated address (e.g. `"Little Turnstile 6, London"` → `"London"`); ensures `trip_context_location` is always a geocodable city name, not a street address.
- After itinerary context retries: if all retries fail, result is converted to an execution failure rather than triggering a clarification prompt — the user is never asked for a location they already implied via collection context.
- Chat tool output: `role=tool` messages hidden from display; tool outputs render as concise summaries; persisted tool rows reconstructed on reload via `rebuildConversationMessages()`; tool results are deduplicated by `tool_call_id` at three layers — rebuild from persisted rows (discards server-side pre-populated `tool_results`), SSE ingestion (`appendToolResultDedup`), and render-time (`uniqueToolResultsByCallId`)
- Chat error surfacing: `_safe_error_payload()` maps LiteLLM exceptions to sanitized user-safe categories (never forwards raw `exc.message`); `execute_tool()` catch-all returns a generic sanitized message (never raw `str(exc)`)
- Invalid tool calls (missing required args) are detected and short-circuited with a user-visible error — not replayed into history

View File

@@ -563,6 +563,70 @@ class ChatViewSetSearchPlacesClarificationTests(APITransactionTestCase):
self.assertEqual(second_call_kwargs.get("category"), "food")
self.assertEqual(second_call_kwargs.get("location"), "Rome, Italy")
@patch("chat.views.execute_tool")
@patch("chat.views.stream_chat_completion")
@patch("integrations.utils.auto_profile.update_auto_preference_profile")
def test_collection_context_retry_extracts_city_from_fallback_address(
    self,
    _mock_auto_profile,
    mock_stream_chat_completion,
    mock_execute_tool,
):
    """The search_places context retry must use the city parsed from a
    comma-delimited fallback address ("Little Turnstile 6, London" ->
    "London") when the itinerary stop carries no city/country FK data.
    """
    user = User.objects.create_user(
        username="chat-city-extract-user",
        email="chat-city-extract-user@example.com",
        password="password123",
    )
    self.client.force_authenticate(user=user)

    # A manually-entered stop: free-text address, no geocoded FKs.
    collection = Collection.objects.create(user=user, name="London Food Trip")
    trip_stop = Location.objects.create(
        user=user,
        name="Turnstile",
        location="Little Turnstile 6, London",
    )
    collection.locations.add(trip_stop)

    conversation_response = self.client.post(
        "/api/chat/conversations/",
        {"title": "Fallback City Extraction Test"},
        format="json",
    )
    self.assertEqual(conversation_response.status_code, 201)
    conversation_id = conversation_response.json()["id"]

    async def first_stream(*args, **kwargs):
        # The LLM emits a search_places call with empty arguments.
        yield 'data: {"tool_calls": [{"index": 0, "id": "call_1", "type": "function", "function": {"name": "search_places", "arguments": "{}"}}]}\n\n'
        yield "data: [DONE]\n\n"

    async def second_stream(*args, **kwargs):
        yield 'data: {"content": "Here are options in London."}\n\n'
        yield "data: [DONE]\n\n"

    mock_stream_chat_completion.side_effect = [first_stream(), second_stream()]
    # First tool call fails on the missing location; the context retry
    # (with the extracted city) succeeds.
    mock_execute_tool.side_effect = [
        {"error": "location is required"},
        {"results": [{"name": "Rules Restaurant"}]},
    ]

    request_payload = {
        "message": "Find restaurants",
        "collection_id": str(collection.id),
        "collection_name": collection.name,
    }
    response = self.client.post(
        f"/api/chat/conversations/{conversation_id}/send_message/",
        request_payload,
        format="json",
    )
    self.assertEqual(response.status_code, 200)

    # StreamingHttpResponse is lazy: the event_stream generator (and the
    # tool calls it makes) only runs once streaming_content is iterated,
    # so drain it before inspecting the mocks.
    list(response.streaming_content)

    self.assertEqual(mock_execute_tool.call_count, 2)
    retry_kwargs = mock_execute_tool.call_args_list[1].kwargs
    self.assertEqual(retry_kwargs.get("location"), "London")
@patch("chat.views.execute_tool")
@patch("chat.views.stream_chat_completion")
@patch("integrations.utils.auto_profile.update_auto_preference_profile")
@@ -857,3 +921,79 @@ class ChatViewSetToolExecutionFailureLoopTests(APITransactionTestCase):
)
self.assertEqual(mock_stream_chat_completion.call_count, 1)
self.assertEqual(mock_execute_tool.call_count, 1)
@patch("chat.views.execute_tool")
@patch("chat.views.stream_chat_completion")
@patch("integrations.utils.auto_profile.update_auto_preference_profile")
def test_context_retry_failure_does_not_emit_location_clarification(
    self,
    _mock_auto_profile,
    mock_stream_chat_completion,
    mock_execute_tool,
):
    """If context-based location retries were attempted and every one
    failed, the stream must surface a tool_execution_error — never a
    clarification prompt asking for a location the user already implied.
    """
    user = User.objects.create_user(
        username="chat-retry-no-clarify-user",
        email="chat-retry-no-clarify-user@example.com",
        password="password123",
    )
    self.client.force_authenticate(user=user)

    conversation_response = self.client.post(
        "/api/chat/conversations/",
        {"title": "Retry Failure No Clarify Test"},
        format="json",
    )
    self.assertEqual(conversation_response.status_code, 201)
    conversation_id = conversation_response.json()["id"]

    async def failing_stream(*args, **kwargs):
        yield 'data: {"tool_calls": [{"index": 0, "id": "call_s", "type": "function", "function": {"name": "search_places", "arguments": "{}"}}]}\n\n'
        yield "data: [DONE]\n\n"

    mock_stream_chat_completion.side_effect = failing_stream
    # Every attempt — the original call plus all context retries — fails
    # with the same missing-location error.
    mock_execute_tool.side_effect = [
        {"error": "location is required"} for _ in range(6)
    ]

    response = self.client.post(
        f"/api/chat/conversations/{conversation_id}/send_message/",
        {
            "message": "Find restaurants",
            "destination": "Lisbon, Portugal",
        },
        format="json",
    )
    self.assertEqual(response.status_code, 200)

    # Drain the lazy stream, keeping only decoded SSE JSON payloads.
    json_payloads = []
    for chunk in response.streaming_content:
        if isinstance(chunk, (bytes, bytearray)):
            text = chunk.decode("utf-8")
        else:
            text = str(chunk)
        stripped = text.strip()
        if not stripped.startswith("data: "):
            continue
        body = stripped[len("data: ") :]
        if body != "[DONE]":
            json_payloads.append(json.loads(body))

    self.assertTrue(
        any(
            payload.get("error_category") == "tool_execution_error"
            for payload in json_payloads
        )
    )
    self.assertFalse(
        any(
            "specific location" in payload.get("content", "").lower()
            for payload in json_payloads
        )
    )

View File

@@ -326,8 +326,35 @@ class ChatViewSet(viewsets.ModelViewSet):
return None
@staticmethod
def _is_likely_location_reply(user_content):
# Verbs (and interrogatives / politeness markers) that open a command or
# request rather than a location reply — used to reject messages like
# "Find good places" as answers to a "where?" clarification prompt.
_COMMAND_VERBS = frozenset(
    "find search show get look give tell help recommend suggest "
    "list fetch what where which who how can could would please".split()
)
@classmethod
def _is_likely_location_reply(cls, user_content):
if not isinstance(user_content, str):
return False
@@ -345,6 +372,12 @@ class ChatViewSet(viewsets.ModelViewSet):
if len(parts) > 6:
return False
# Exclude messages that start with a command/request verb — those are
# original requests, not replies to a "where?" clarification prompt.
first_word = parts[0].lower().rstrip(".,!;:")
if first_word in cls._COMMAND_VERBS:
return False
return bool(re.search(r"[a-z0-9]", normalized, re.IGNORECASE))
@action(detail=True, methods=["post"])
@@ -445,8 +478,21 @@ class ChatViewSet(viewsets.ModelViewSet):
fallback_name = (location.location or location.name or "").strip()
if not fallback_name:
continue
stop_label = fallback_name
stop_key = f"name:{fallback_name.lower()}"
# When city/country FKs are not set, try to extract a geocodable
# city name from a comma-separated address string.
# e.g. "Little Turnstile 6, London" → "London"
# e.g. "Kingsway 58, London" → "London"
if "," in fallback_name:
parts = [p.strip() for p in fallback_name.split(",")]
# Last non-empty, non-purely-numeric segment is typically the city
city_hint = next(
(p for p in reversed(parts) if p and not p.isdigit()),
None,
)
stop_label = city_hint if city_hint else fallback_name
else:
stop_label = fallback_name
stop_key = f"name:{stop_label.lower()}"
if stop_key in seen_stops:
continue
@@ -595,6 +641,7 @@ class ChatViewSet(viewsets.ModelViewSet):
**prepared_arguments,
)
attempted_location_retry = False
if self._is_search_places_location_retry_candidate_error(
function_name,
result,
@@ -616,6 +663,7 @@ class ChatViewSet(viewsets.ModelViewSet):
):
continue
seen_retry_locations.add(normalized_retry_location)
attempted_location_retry = True
retry_arguments = dict(prepared_arguments)
retry_arguments["location"] = retry_location
@@ -644,6 +692,17 @@ class ChatViewSet(viewsets.ModelViewSet):
}
break
# If we attempted context retries but all failed, convert
# to an execution failure so we never ask the user for a
# location they already implied via itinerary context.
if (
attempted_location_retry
and self._is_required_param_tool_error(result)
):
result = {
"error": "Could not search places at the provided itinerary locations"
}
if self._is_required_param_tool_error(result):
assistant_message_kwargs = {
"conversation": conversation,
@@ -665,9 +724,12 @@ class ChatViewSet(viewsets.ModelViewSet):
thread_sensitive=True,
)(**tool_message)
if self._is_search_places_missing_location_required_error(
function_name,
result,
if (
not attempted_location_retry
and self._is_search_places_missing_location_required_error(
function_name,
result,
)
):
clarification_content = self._build_search_places_location_clarification_message()
await sync_to_async(