From 30fdcb078f5f3d7fa2812ae6cd39a013f559a612 Mon Sep 17 00:00:00 2001
From: alex
Date: Mon, 9 Mar 2026 13:34:35 +0000
Subject: [PATCH] fix: resolve three travel agent UI issues
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Connection error: wrap sync get_llm_api_key() in sync_to_async in
  stream_chat_completion() to fix SynchronousOnlyOperation raised when the
  async SSE generator calls a synchronous Django ORM function
- Models not loading: add opencode_zen handler to models endpoint returning
  its default model; fix frontend to show 'Default' instead of 'Loading...'
  indefinitely when no model list is returned
- Location in header: remove destination subtitle from Travel Assistant
  header — collection-wide chat has no single meaningful location
---
 backend/server/chat/llm_client.py               |  3 ++-
 backend/server/chat/views/__init__.py           |  3 +++
 frontend/src/lib/components/AITravelChat.svelte | 11 +++++++----
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/backend/server/chat/llm_client.py b/backend/server/chat/llm_client.py
index a4939e5e..041966f2 100644
--- a/backend/server/chat/llm_client.py
+++ b/backend/server/chat/llm_client.py
@@ -2,6 +2,7 @@
 import json
 import logging
 import litellm
+from asgiref.sync import sync_to_async
 from django.conf import settings
 from integrations.models import UserAPIKey
@@ -371,7 +372,7 @@ async def stream_chat_completion(user, messages, provider, tools=None, model=Non
         yield f"data: {json.dumps(payload)}\n\n"
         return

-    api_key = get_llm_api_key(user, normalized_provider)
+    api_key = await sync_to_async(get_llm_api_key)(user, normalized_provider)

     if provider_config["needs_api_key"] and not api_key:
         payload = {
diff --git a/backend/server/chat/views/__init__.py b/backend/server/chat/views/__init__.py
index 9729c260..10ea2b58 100644
--- a/backend/server/chat/views/__init__.py
+++ b/backend/server/chat/views/__init__.py
@@ -414,6 +414,9 @@ class ChatProviderCatalogViewSet(viewsets.ViewSet):
                     pass
                 return Response({"models": []})

+            if provider in ["opencode_zen"]:
+                return Response({"models": ["openai/gpt-5-nano"]})
+
             return Response({"models": []})
         except Exception as exc:
             logger.error("Failed to fetch models for %s: %s", provider, exc)
diff --git a/frontend/src/lib/components/AITravelChat.svelte b/frontend/src/lib/components/AITravelChat.svelte
index 41c13c0b..01300b9b 100644
--- a/frontend/src/lib/components/AITravelChat.svelte
+++ b/frontend/src/lib/components/AITravelChat.svelte
@@ -54,6 +54,7 @@
     let selectedProvider = '';
     let selectedModel = '';
     let availableModels: string[] = [];
+    let modelsLoading = false;
     let chatProviders: ChatProviderCatalogConfiguredEntry[] = [];
     let providerError = '';
     let selectedProviderDefaultModel = '';
@@ -117,6 +118,7 @@
             return;
         }
+        modelsLoading = true;
         try {
             const res = await fetch(`/api/chat/providers/${selectedProvider}/models/`, {
                 credentials: 'include'
             });
@@ -134,6 +136,8 @@
         } catch (e) {
             console.error('Failed to load models:', e);
             availableModels = [];
+        } finally {
+            modelsLoading = false;
         }
     }
@@ -576,9 +580,6 @@
                 {$t('travel_assistant')}
             {/if}
-            {#if destination}
-                {destination}
-            {/if}
@@ -600,8 +601,10 @@
                 bind:value={selectedModel}
                 disabled={chatProviders.length === 0}
             >
-                {#if availableModels.length === 0}
+                {#if modelsLoading}
+                {:else if availableModels.length === 0}
+                {:else}
                     {#each availableModels as model}
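
Background on the SynchronousOnlyOperation fix: Django refuses to run ORM
queries directly from async code, and asgiref's sync_to_async is the standard
bridge for calling a synchronous helper from an async generator. Below is a
minimal, self-contained sketch of that pattern, assuming asgiref is installed;
lookup_api_key and FAKE_KEYS are hypothetical stand-ins for the real
get_llm_api_key() / UserAPIKey lookup, not code from this repository.

# Sketch of the async/sync bridge pattern used in the llm_client fix.
# lookup_api_key and FAKE_KEYS are illustrative stand-ins only.
import asyncio
import json

from asgiref.sync import sync_to_async

FAKE_KEYS = {("alex", "openai"): "sk-test"}


def lookup_api_key(user, provider):
    # In the real code this hits the Django ORM (UserAPIKey), which is
    # synchronous and must not be called directly from async code.
    return FAKE_KEYS.get((user, provider))


async def stream_events(user, provider):
    # Wrapping the sync callable and awaiting the result avoids Django's
    # SynchronousOnlyOperation inside the async SSE generator.
    api_key = await sync_to_async(lookup_api_key)(user, provider)
    if not api_key:
        yield f"data: {json.dumps({'error': 'missing_api_key'})}\n\n"
        return
    yield f"data: {json.dumps({'status': 'ok'})}\n\n"


async def main():
    async for frame in stream_events("alex", "openai"):
        print(frame, end="")


if __name__ == "__main__":
    asyncio.run(main())

Note that sync_to_async defaults to thread_sensitive=True, so the wrapped call
runs in the shared synchronous context that Django's ORM expects.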
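
The models-endpoint fix follows a simple fallback pattern: providers that
expose no model-listing API return a hardcoded default list, and an empty list
tells the frontend to show 'Default' rather than waiting on models that will
never arrive. The sketch below illustrates that shape independently of
Django/DRF; PROVIDER_DEFAULT_MODELS and list_models are illustrative names,
not the project's actual view code.

# Sketch of the models-endpoint fallback pattern from the second fix.
PROVIDER_DEFAULT_MODELS = {
    # Providers with no model-listing API fall back to a static default,
    # mirroring the opencode_zen -> openai/gpt-5-nano case in the patch.
    "opencode_zen": ["openai/gpt-5-nano"],
}


def list_models(provider: str) -> dict:
    """Return {"models": [...]}; an empty list means 'use the default model'."""
    defaults = PROVIDER_DEFAULT_MODELS.get(provider)
    if defaults is not None:
        return {"models": defaults}
    # Providers that do expose a listing API would be queried here; on any
    # failure the endpoint still returns an empty list rather than erroring.
    return {"models": []}


if __name__ == "__main__":
    print(list_models("opencode_zen"))  # {'models': ['openai/gpt-5-nano']}
    print(list_models("unknown"))       # {'models': []}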