fix: resolve three travel agent UI issues

- Connection error: wrap sync get_llm_api_key() in sync_to_async in
  stream_chat_completion() to fix SynchronousOnlyOperation raised when
  the async SSE generator calls a synchronous Django ORM function
- Models not loading: add an opencode_zen handler to the models endpoint
  that returns its default model; fix the frontend to fall back to
  'Default' instead of showing 'Loading...' indefinitely when no model
  list is returned
- Location in header: remove destination subtitle from Travel Assistant
  header — collection-wide chat has no single meaningful location
This commit is contained in:
2026-03-09 13:34:35 +00:00
parent 91d907204a
commit 30fdcb078f
3 changed files with 12 additions and 5 deletions

View File

@@ -2,6 +2,7 @@ import json
import logging
import litellm
from asgiref.sync import sync_to_async
from django.conf import settings
from integrations.models import UserAPIKey
@@ -371,7 +372,7 @@ async def stream_chat_completion(user, messages, provider, tools=None, model=Non
yield f"data: {json.dumps(payload)}\n\n"
return
api_key = get_llm_api_key(user, normalized_provider)
api_key = await sync_to_async(get_llm_api_key)(user, normalized_provider)
if provider_config["needs_api_key"] and not api_key:
payload = {

View File

@@ -414,6 +414,9 @@ class ChatProviderCatalogViewSet(viewsets.ViewSet):
pass
return Response({"models": []})
if provider in ["opencode_zen"]:
return Response({"models": ["openai/gpt-5-nano"]})
return Response({"models": []})
except Exception as exc:
logger.error("Failed to fetch models for %s: %s", provider, exc)