fix: resolve three travel agent UI issues
- Connection error: wrap the synchronous `get_llm_api_key()` call in `sync_to_async` inside `stream_chat_completion()`, fixing the `SynchronousOnlyOperation` error raised when the async SSE generator invoked a synchronous Django ORM function.
- Models not loading: add an `opencode_zen` handler to the models endpoint that returns its default model, and update the frontend to show "Default" instead of hanging on "Loading..." indefinitely when no model list is returned.
- Location in header: remove the destination subtitle from the Travel Assistant header — a collection-wide chat has no single meaningful location.
This commit is contained in:
@@ -2,6 +2,7 @@ import json
|
||||
import logging
|
||||
|
||||
import litellm
|
||||
from asgiref.sync import sync_to_async
|
||||
from django.conf import settings
|
||||
|
||||
from integrations.models import UserAPIKey
|
||||
@@ -371,7 +372,7 @@ async def stream_chat_completion(user, messages, provider, tools=None, model=Non
|
||||
yield f"data: {json.dumps(payload)}\n\n"
|
||||
return
|
||||
|
||||
api_key = get_llm_api_key(user, normalized_provider)
|
||||
api_key = await sync_to_async(get_llm_api_key)(user, normalized_provider)
|
||||
|
||||
if provider_config["needs_api_key"] and not api_key:
|
||||
payload = {
|
||||
|
||||
@@ -414,6 +414,9 @@ class ChatProviderCatalogViewSet(viewsets.ViewSet):
|
||||
pass
|
||||
return Response({"models": []})
|
||||
|
||||
if provider in ["opencode_zen"]:
|
||||
return Response({"models": ["openai/gpt-5-nano"]})
|
||||
|
||||
return Response({"models": []})
|
||||
except Exception as exc:
|
||||
logger.error("Failed to fetch models for %s: %s", provider, exc)
|
||||
|
||||
@@ -54,6 +54,7 @@
|
||||
let selectedProvider = '';
|
||||
let selectedModel = '';
|
||||
let availableModels: string[] = [];
|
||||
let modelsLoading = false;
|
||||
let chatProviders: ChatProviderCatalogConfiguredEntry[] = [];
|
||||
let providerError = '';
|
||||
let selectedProviderDefaultModel = '';
|
||||
@@ -117,6 +118,7 @@
|
||||
return;
|
||||
}
|
||||
|
||||
modelsLoading = true;
|
||||
try {
|
||||
const res = await fetch(`/api/chat/providers/${selectedProvider}/models/`, {
|
||||
credentials: 'include'
|
||||
@@ -134,6 +136,8 @@
|
||||
} catch (e) {
|
||||
console.error('Failed to load models:', e);
|
||||
availableModels = [];
|
||||
} finally {
|
||||
modelsLoading = false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -576,9 +580,6 @@
|
||||
{$t('travel_assistant')}
|
||||
{/if}
|
||||
</h3>
|
||||
{#if destination}
|
||||
<p class="text-sm text-base-content/70">{destination}</p>
|
||||
{/if}
|
||||
</div>
|
||||
</div>
|
||||
<div class="ml-auto flex items-center gap-2">
|
||||
@@ -600,8 +601,10 @@
|
||||
bind:value={selectedModel}
|
||||
disabled={chatProviders.length === 0}
|
||||
>
|
||||
{#if availableModels.length === 0}
|
||||
{#if modelsLoading}
|
||||
<option value="">Loading...</option>
|
||||
{:else if availableModels.length === 0}
|
||||
<option value="">Default</option>
|
||||
{:else}
|
||||
{#each availableModels as model}
|
||||
<option value={model}>{model}</option>
|
||||
|
||||
Reference in New Issue
Block a user