fix(chat): improve OpenCode Zen integration and error handling

- Fetch models dynamically from OpenCode Zen API (36 models vs 5 hardcoded)
- Add function calling support check before using tools
- Add retry logic (num_retries=2) for transient failures
- Improve logging for debugging API calls and errors
- Update system prompt for multi-stop itinerary context
- Clean up unused imports in frontend components
- Remove deleted views.py (moved to views/__init__.py)
This commit is contained in:
2026-03-09 16:11:14 +00:00
parent 21ef73f49d
commit 21954df3ee
24 changed files with 1523 additions and 1669 deletions

View File

@@ -114,7 +114,9 @@ Voyage includes an AI-powered travel chat assistant embedded in the Collections
- **Provider catalog**: The backend dynamically lists all supported LLM providers via `GET /api/chat/providers/`, sourced from LiteLLM's runtime provider list plus custom entries. - **Provider catalog**: The backend dynamically lists all supported LLM providers via `GET /api/chat/providers/`, sourced from LiteLLM's runtime provider list plus custom entries.
- **Supported providers include**: OpenAI, Anthropic, Google Gemini, Ollama, Groq, Mistral, GitHub Models, OpenRouter, and OpenCode Zen. - **Supported providers include**: OpenAI, Anthropic, Google Gemini, Ollama, Groq, Mistral, GitHub Models, OpenRouter, and OpenCode Zen.
- **OpenCode Zen**: An OpenAI-compatible provider (`opencode_zen`) routed through `https://opencode.ai/zen/v1`. - **OpenCode Zen**: An OpenAI-compatible provider (`opencode_zen`) routed through `https://opencode.ai/zen/v1`. Default model: `openai/gpt-5-nano`.
- **Model selection**: The chat composer includes a model override input next to the provider selector. Type any model string supported by the chosen provider (e.g. `openai/gpt-5-nano`, `anthropic/claude-sonnet-4-20250514`). Your model preference is saved per-provider in the browser.
- **Error handling**: Provider errors (auth failures, model not found, rate limits, timeouts) are surfaced as actionable messages in the chat — no raw error details are exposed.
- **Configuration**: Users add API keys for their chosen provider in Settings → API Keys. No server-side environment variables required for chat providers — all keys are per-user. - **Configuration**: Users add API keys for their chosen provider in Settings → API Keys. No server-side environment variables required for chat providers — all keys are per-user.
### Travel Agent (MCP) ### Travel Agent (MCP)

View File

@@ -329,6 +329,11 @@ When modifying itineraries:
- Suggest logical ordering based on geography - Suggest logical ordering based on geography
- Consider travel time between locations - Consider travel time between locations
When chat context includes a trip collection:
- Treat context as itinerary-wide (potentially multiple stops), not a single destination
- Use get_trip_details first when you need complete collection context before searching for places
- Ground place searches in trip stops and dates from the provided trip context
Be conversational, helpful, and enthusiastic about travel. Keep responses concise but informative.""" Be conversational, helpful, and enthusiastic about travel. Keep responses concise but informative."""
if collection and collection.shared_with.count() > 0: if collection and collection.shared_with.count() > 0:
@@ -389,8 +394,8 @@ async def stream_chat_completion(user, messages, provider, tools=None, model=Non
yield f"data: {json.dumps(payload)}\n\n" yield f"data: {json.dumps(payload)}\n\n"
return return
completion_kwargs = { resolved_model = (
"model": model model
or ( or (
settings.VOYAGE_AI_MODEL settings.VOYAGE_AI_MODEL
if normalized_provider if normalized_provider
@@ -398,10 +403,34 @@ async def stream_chat_completion(user, messages, provider, tools=None, model=Non
and settings.VOYAGE_AI_MODEL and settings.VOYAGE_AI_MODEL
else None else None
) )
or provider_config["default_model"], or provider_config["default_model"]
)
if tools and not litellm.supports_function_calling(model=resolved_model):
logger.warning(
"Model %s does not support function calling, disabling tools",
resolved_model,
)
tools = None
logger.info(
"Chat request: provider=%s, model=%s, has_tools=%s",
normalized_provider,
resolved_model,
bool(tools),
)
logger.debug(
"API base: %s, messages count: %s",
provider_config.get("api_base"),
len(messages),
)
completion_kwargs = {
"model": resolved_model,
"messages": messages, "messages": messages,
"stream": True, "stream": True,
"api_key": api_key, "api_key": api_key,
"num_retries": 2,
} }
if tools: if tools:
completion_kwargs["tools"] = tools completion_kwargs["tools"] = tools
@@ -448,6 +477,7 @@ async def stream_chat_completion(user, messages, provider, tools=None, model=Non
yield "data: [DONE]\n\n" yield "data: [DONE]\n\n"
except Exception as exc: except Exception as exc:
logger.error("LiteLLM error: %s: %s", type(exc).__name__, str(exc)[:200])
logger.exception("LLM streaming error") logger.exception("LLM streaming error")
payload = _safe_error_payload(exc) payload = _safe_error_payload(exc)
yield f"data: {json.dumps(payload)}\n\n" yield f"data: {json.dumps(payload)}\n\n"

View File

@@ -1,281 +0,0 @@
import asyncio
import json
from asgiref.sync import sync_to_async
from django.http import StreamingHttpResponse
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .agent_tools import AGENT_TOOLS, execute_tool, serialize_tool_result
from .llm_client import (
get_provider_catalog,
get_system_prompt,
is_chat_provider_available,
stream_chat_completion,
)
from .models import ChatConversation, ChatMessage
from .serializers import ChatConversationSerializer
class ChatViewSet(viewsets.ModelViewSet):
    """CRUD plus streaming-chat endpoints for the authenticated user's conversations.

    Conversations are strictly scoped to ``request.user``.  ``send_message``
    streams the assistant reply as Server-Sent Events (SSE) and transparently
    executes agent tool calls requested by the LLM, feeding results back into
    the conversation until the model produces a final answer.
    """

    serializer_class = ChatConversationSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        """Return only the requesting user's conversations, messages prefetched."""
        return ChatConversation.objects.filter(user=self.request.user).prefetch_related(
            "messages"
        )

    def list(self, request, *args, **kwargs):
        """Return a lightweight listing: id, title and updated_at per conversation."""
        conversations = self.get_queryset().only("id", "title", "updated_at")
        data = [
            {
                "id": str(conversation.id),
                "title": conversation.title,
                "updated_at": conversation.updated_at,
            }
            for conversation in conversations
        ]
        return Response(data)

    def create(self, request, *args, **kwargs):
        """Create an (optionally titled) empty conversation for the current user."""
        conversation = ChatConversation.objects.create(
            user=request.user,
            title=(request.data.get("title") or "").strip(),
        )
        serializer = self.get_serializer(conversation)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def _build_llm_messages(self, conversation, user):
        """Rebuild the LLM message list (system prompt + full history) for a conversation.

        Assistant messages carry their recorded ``tool_calls``; tool messages
        carry ``tool_call_id``/``name`` so the provider can correlate results.
        """
        messages = [{"role": "system", "content": get_system_prompt(user)}]
        for message in conversation.messages.all().order_by("created_at"):
            payload = {
                "role": message.role,
                "content": message.content,
            }
            if message.role == "assistant" and message.tool_calls:
                payload["tool_calls"] = message.tool_calls
            if message.role == "tool":
                payload["tool_call_id"] = message.tool_call_id
                payload["name"] = message.name
            messages.append(payload)
        return messages

    def _async_to_sync_generator(self, async_gen):
        """Drain an async generator from synchronous code (WSGI streaming bridge).

        Runs a dedicated event loop, yielding each item as it is produced,
        and shuts down pending async generators before closing the loop.
        """
        loop = asyncio.new_event_loop()
        try:
            while True:
                try:
                    yield loop.run_until_complete(async_gen.__anext__())
                except StopAsyncIteration:
                    break
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

    @staticmethod
    def _merge_tool_call_delta(accumulator, tool_calls_delta):
        """Merge a streamed tool-call delta chunk into the accumulated tool calls.

        Providers stream tool calls incrementally: ``id``/``type``/``name``
        arrive once, while ``arguments`` arrives as string fragments that must
        be concatenated.  ``index`` addresses the slot; the accumulator grows
        with empty templates as needed.
        """
        for idx, tool_call in enumerate(tool_calls_delta or []):
            idx = tool_call.get("index", idx)
            while len(accumulator) <= idx:
                accumulator.append(
                    {
                        "id": None,
                        "type": "function",
                        "function": {"name": "", "arguments": ""},
                    }
                )
            current = accumulator[idx]
            if tool_call.get("id"):
                current["id"] = tool_call.get("id")
            if tool_call.get("type"):
                current["type"] = tool_call.get("type")
            function_data = tool_call.get("function") or {}
            if function_data.get("name"):
                current["function"]["name"] = function_data.get("name")
            if function_data.get("arguments"):
                current["function"]["arguments"] += function_data.get("arguments")

    @action(detail=True, methods=["post"])
    def send_message(self, request, pk=None):
        """Append a user message and stream the assistant reply as SSE.

        Streams provider chunks straight through to the client, executes any
        tool calls the model requests (persisting assistant/tool messages as
        it goes), then loops back to the model with the tool results — capped
        at ``MAX_TOOL_ITERATIONS`` rounds to prevent runaway tool loops.
        """
        conversation = self.get_object()
        user_content = (request.data.get("message") or "").strip()
        if not user_content:
            return Response(
                {"error": "message is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )
        provider = (request.data.get("provider") or "openai").strip().lower()
        if not is_chat_provider_available(provider):
            return Response(
                {"error": f"Provider is not available for chat: {provider}."},
                status=status.HTTP_400_BAD_REQUEST,
            )
        ChatMessage.objects.create(
            conversation=conversation,
            role="user",
            content=user_content,
        )
        conversation.save(update_fields=["updated_at"])
        if not conversation.title:
            # First message doubles as the conversation title (truncated).
            conversation.title = user_content[:120]
            conversation.save(update_fields=["title", "updated_at"])
        llm_messages = self._build_llm_messages(conversation, request.user)
        MAX_TOOL_ITERATIONS = 10

        async def event_stream():
            current_messages = list(llm_messages)
            encountered_error = False
            tool_iterations = 0
            while tool_iterations < MAX_TOOL_ITERATIONS:
                content_chunks = []
                tool_calls_accumulator = []
                async for chunk in stream_chat_completion(
                    request.user,
                    current_messages,
                    provider,
                    tools=AGENT_TOOLS,
                ):
                    if not chunk.startswith("data: "):
                        # Non-SSE-data lines (e.g. comments/keepalives): pass through.
                        yield chunk
                        continue
                    payload = chunk[len("data: ") :].strip()
                    if payload == "[DONE]":
                        # Suppress the provider's DONE; we emit our own when finished.
                        continue
                    yield chunk
                    try:
                        data = json.loads(payload)
                    except json.JSONDecodeError:
                        continue
                    if data.get("error"):
                        encountered_error = True
                        break
                    if data.get("content"):
                        content_chunks.append(data["content"])
                    if data.get("tool_calls"):
                        self._merge_tool_call_delta(
                            tool_calls_accumulator,
                            data["tool_calls"],
                        )
                if encountered_error:
                    # Error payload was already forwarded to the client above.
                    break
                assistant_content = "".join(content_chunks)
                if tool_calls_accumulator:
                    # Model requested tools: persist the assistant turn with its
                    # tool calls, execute each tool, and loop back to the model.
                    assistant_with_tools = {
                        "role": "assistant",
                        "content": assistant_content,
                        "tool_calls": tool_calls_accumulator,
                    }
                    current_messages.append(assistant_with_tools)
                    await sync_to_async(
                        ChatMessage.objects.create, thread_sensitive=True
                    )(
                        conversation=conversation,
                        role="assistant",
                        content=assistant_content,
                        tool_calls=tool_calls_accumulator,
                    )
                    await sync_to_async(conversation.save, thread_sensitive=True)(
                        update_fields=["updated_at"]
                    )
                    for tool_call in tool_calls_accumulator:
                        function_payload = tool_call.get("function") or {}
                        function_name = function_payload.get("name") or ""
                        raw_arguments = function_payload.get("arguments") or "{}"
                        try:
                            arguments = json.loads(raw_arguments)
                        except json.JSONDecodeError:
                            arguments = {}
                        if not isinstance(arguments, dict):
                            # Tools take keyword arguments only; ignore malformed args.
                            arguments = {}
                        result = await sync_to_async(
                            execute_tool, thread_sensitive=True
                        )(
                            function_name,
                            request.user,
                            **arguments,
                        )
                        result_content = serialize_tool_result(result)
                        current_messages.append(
                            {
                                "role": "tool",
                                "tool_call_id": tool_call.get("id"),
                                "name": function_name,
                                "content": result_content,
                            }
                        )
                        await sync_to_async(
                            ChatMessage.objects.create, thread_sensitive=True
                        )(
                            conversation=conversation,
                            role="tool",
                            content=result_content,
                            tool_call_id=tool_call.get("id"),
                            name=function_name,
                        )
                        await sync_to_async(conversation.save, thread_sensitive=True)(
                            update_fields=["updated_at"]
                        )
                        tool_event = {
                            "tool_result": {
                                "tool_call_id": tool_call.get("id"),
                                "name": function_name,
                                "result": result,
                            }
                        }
                        yield f"data: {json.dumps(tool_event)}\n\n"
                    # BUGFIX: count this tool round; previously tool_iterations was
                    # never incremented, so MAX_TOOL_ITERATIONS never bounded the
                    # loop and a tool-happy model could stream forever.
                    tool_iterations += 1
                    continue
                # Final answer (no tool calls): persist it and end the stream.
                await sync_to_async(ChatMessage.objects.create, thread_sensitive=True)(
                    conversation=conversation,
                    role="assistant",
                    content=assistant_content,
                )
                await sync_to_async(conversation.save, thread_sensitive=True)(
                    update_fields=["updated_at"]
                )
                yield "data: [DONE]\n\n"
                break

        response = StreamingHttpResponse(
            streaming_content=self._async_to_sync_generator(event_stream()),
            content_type="text/event-stream",
        )
        response["Cache-Control"] = "no-cache"
        # Disable nginx proxy buffering so SSE chunks reach the client immediately.
        response["X-Accel-Buffering"] = "no"
        return response
class ChatProviderCatalogViewSet(viewsets.ViewSet):
    """Read-only endpoint exposing the catalog of supported chat providers."""

    permission_classes = [IsAuthenticated]

    def list(self, request):
        """Return the provider catalog for the authenticated user."""
        catalog = get_provider_catalog()
        return Response(catalog)

View File

@@ -163,6 +163,41 @@ class ChatViewSet(viewsets.ModelViewSet):
except Collection.DoesNotExist: except Collection.DoesNotExist:
pass pass
if collection:
itinerary_stops = []
seen_stops = set()
for location in collection.locations.select_related(
"city", "country"
).all():
city_name = (getattr(location.city, "name", "") or "").strip()
country_name = (getattr(location.country, "name", "") or "").strip()
if city_name or country_name:
stop_label = (
f"{city_name}, {country_name}"
if city_name and country_name
else city_name or country_name
)
stop_key = f"geo:{city_name.lower()}|{country_name.lower()}"
else:
fallback_name = (location.location or location.name or "").strip()
if not fallback_name:
continue
stop_label = fallback_name
stop_key = f"name:{fallback_name.lower()}"
if stop_key in seen_stops:
continue
seen_stops.add(stop_key)
itinerary_stops.append(stop_label)
if len(itinerary_stops) >= 8:
break
if itinerary_stops:
context_parts.append(f"Itinerary stops: {'; '.join(itinerary_stops)}")
system_prompt = get_system_prompt(request.user, collection) system_prompt = get_system_prompt(request.user, collection)
if context_parts: if context_parts:
system_prompt += "\n\n## Trip Context\n" + "\n".join(context_parts) system_prompt += "\n\n## Trip Context\n" + "\n".join(context_parts)
@@ -338,7 +373,7 @@ class ChatProviderCatalogViewSet(viewsets.ViewSet):
@action(detail=True, methods=["get"]) @action(detail=True, methods=["get"])
def models(self, request, pk=None): def models(self, request, pk=None):
"""Fetch available models from a provider's API.""" """Fetch available models from a provider's API."""
from chat.llm_client import get_llm_api_key from chat.llm_client import CHAT_PROVIDER_CONFIG, get_llm_api_key
provider = (pk or "").lower() provider = (pk or "").lower()
@@ -414,8 +449,38 @@ class ChatProviderCatalogViewSet(viewsets.ViewSet):
pass pass
return Response({"models": []}) return Response({"models": []})
if provider in ["opencode_zen"]: if provider == "opencode_zen":
return Response({"models": ["openai/gpt-5-nano"]}) import requests
config = CHAT_PROVIDER_CONFIG.get("opencode_zen", {})
api_base = config.get("api_base", "https://opencode.ai/zen/v1")
response = requests.get(
f"{api_base}/models",
headers={"Authorization": f"Bearer {api_key}"},
timeout=10,
)
if response.ok:
data = response.json()
raw_models = (
data.get("data", data) if isinstance(data, dict) else data
)
model_ids = []
for model_entry in raw_models:
if not isinstance(model_entry, dict):
continue
model_id = model_entry.get("id") or model_entry.get("model_id")
if model_id:
model_ids.append(model_id)
return Response({"models": sorted(set(model_ids))})
logger.warning(
"OpenCode Zen models fetch failed with status %s",
response.status_code,
)
return Response({"models": []})
return Response({"models": []}) return Response({"models": []})
except Exception as exc: except Exception as exc:

View File

@@ -26,6 +26,8 @@ The term "Location" is now used instead of "Adventure" - the usage remains the s
The AI travel chat is embedded in the **Collections → Recommendations** view. Select a collection, switch to the Recommendations tab, and use the chat to brainstorm destinations, ask for travel advice, or get location suggestions. The chat supports multiple LLM providers — configure your API key in **Settings → API Keys** and pick a provider from the dropdown in the chat interface. The provider list is loaded dynamically from the backend, so any provider supported by LiteLLM (plus OpenCode Zen) is available. The AI travel chat is embedded in the **Collections → Recommendations** view. Select a collection, switch to the Recommendations tab, and use the chat to brainstorm destinations, ask for travel advice, or get location suggestions. The chat supports multiple LLM providers — configure your API key in **Settings → API Keys** and pick a provider from the dropdown in the chat interface. The provider list is loaded dynamically from the backend, so any provider supported by LiteLLM (plus OpenCode Zen) is available.
You can also override the default model for any provider by typing a model name in the **Model** input next to the provider selector (e.g. `openai/gpt-5-nano`). Your model choice is saved per-provider in the browser. If the model field is left empty, the provider's default model is used. Provider errors (authentication, model not found, rate limits) are displayed as clear, actionable messages in the chat.
#### Collections #### Collections
- **Collection**: a collection is a way to group locations together. Collections are flexible and can be used in many ways. When no start or end date is added to a collection, it acts like a folder to group locations together. When a start and end date is added to a collection, it acts like a trip to group locations together that were visited during that time period. With start and end dates, the collection is transformed into a full itinerary with a timeline-style day view — each day displays numbered stops as compact cards (without image banners), connector rows between consecutive locations showing distance and travel time via OSRM routing (walking if ≤ 20 min, driving otherwise) with automatic haversine fallback when OSRM is unavailable, and a single `+ Add` control for inserting new places. Lodging placement follows directional rules: on check-in day it appears after the last stop, on check-out day it appears before the first stop, and on days with no locations a single lodging card is shown (or two cards when a checkout and checkin are different lodgings). Connector rows link lodging to adjacent locations. Day-level quick actions include Auto-fill (populates an empty itinerary from dated records) and Optimize (nearest-neighbor route ordering for coordinate-backed stops). The day date pill displays a weather temperature summary when available, with graceful fallback if weather data is unavailable. The itinerary also includes a map showing the route taken between locations. Your most recently updated collections also appear on the dashboard. For example, you could have a collection for a trip to Europe with dates so you can plan where you want to visit, a collection of local hiking trails, or a collection for a list of restaurants you want to try. - **Collection**: a collection is a way to group locations together. Collections are flexible and can be used in many ways. 
When no start or end date is added to a collection, it acts like a folder to group locations together. When a start and end date is added to a collection, it acts like a trip to group locations together that were visited during that time period. With start and end dates, the collection is transformed into a full itinerary with a timeline-style day view — each day displays numbered stops as compact cards (without image banners), connector rows between consecutive locations showing distance and travel time via OSRM routing (walking if ≤ 20 min, driving otherwise) with automatic haversine fallback when OSRM is unavailable, and a single `+ Add` control for inserting new places. Lodging placement follows directional rules: on check-in day it appears after the last stop, on check-out day it appears before the first stop, and on days with no locations a single lodging card is shown (or two cards when a checkout and checkin are different lodgings). Connector rows link lodging to adjacent locations. Day-level quick actions include Auto-fill (populates an empty itinerary from dated records) and Optimize (nearest-neighbor route ordering for coordinate-backed stops). The day date pill displays a weather temperature summary when available, with graceful fallback if weather data is unavailable. The itinerary also includes a map showing the route taken between locations. Your most recently updated collections also appear on the dashboard. For example, you could have a collection for a trip to Europe with dates so you can plan where you want to visit, a collection of local hiking trails, or a collection for a list of restaurants you want to try.

View File

@@ -69,6 +69,7 @@
const MODEL_PREFS_STORAGE_KEY = 'voyage_chat_model_prefs'; const MODEL_PREFS_STORAGE_KEY = 'voyage_chat_model_prefs';
let initializedModelProvider = ''; let initializedModelProvider = '';
$: promptTripContext = collectionName || destination || '';
onMount(async () => { onMount(async () => {
await Promise.all([loadConversations(), loadProviderCatalog()]); await Promise.all([loadConversations(), loadProviderCatalog()]);
@@ -374,7 +375,7 @@
result.name === 'search_places' && result.name === 'search_places' &&
typeof result.result === 'object' && typeof result.result === 'object' &&
result.result !== null && result.result !== null &&
Array.isArray((result.result as { places?: unknown[] }).places) Array.isArray((result.result as { results?: unknown[] }).results)
); );
} }
@@ -383,7 +384,7 @@
return []; return [];
} }
return (result.result as { places: any[] }).places; return (result.result as { results: any[] }).results;
} }
function hasWebSearchResults(result: ToolResultEntry): boolean { function hasWebSearchResults(result: ToolResultEntry): boolean {
@@ -764,11 +765,13 @@
<div class="p-4 border-t border-base-300"> <div class="p-4 border-t border-base-300">
<div class="max-w-4xl mx-auto"> <div class="max-w-4xl mx-auto">
<div class="flex flex-wrap gap-2 mb-3"> <div class="flex flex-wrap gap-2 mb-3">
{#if destination} {#if promptTripContext}
<button <button
class="btn btn-sm btn-ghost" class="btn btn-sm btn-ghost"
on:click={() => on:click={() =>
sendPresetMessage(`What are the best restaurants in ${destination}?`)} sendPresetMessage(
`What are the best restaurants to include across my ${promptTripContext} itinerary?`
)}
disabled={isStreaming || chatProviders.length === 0} disabled={isStreaming || chatProviders.length === 0}
> >
🍽️ Restaurants 🍽️ Restaurants
@@ -776,7 +779,9 @@
<button <button
class="btn btn-sm btn-ghost" class="btn btn-sm btn-ghost"
on:click={() => on:click={() =>
sendPresetMessage(`What activities can I do in ${destination}?`)} sendPresetMessage(
`What activities should I plan across my ${promptTripContext} itinerary?`
)}
disabled={isStreaming || chatProviders.length === 0} disabled={isStreaming || chatProviders.length === 0}
> >
🎯 Activities 🎯 Activities

View File

@@ -344,11 +344,7 @@
{$t('adventures.start_date')} {$t('adventures.start_date')}
</span> </span>
</label> </label>
<DateInput <DateInput id="start_date" name="start_date" bind:value={collection.start_date} />
id="start_date"
name="start_date"
bind:value={collection.start_date}
/>
</div> </div>
<!-- End Date --> <!-- End Date -->
@@ -359,11 +355,7 @@
{$t('adventures.end_date')} {$t('adventures.end_date')}
</span> </span>
</label> </label>
<DateInput <DateInput id="end_date" name="end_date" bind:value={collection.end_date} />
id="end_date"
name="end_date"
bind:value={collection.end_date}
/>
</div> </div>
<!-- Public Toggle --> <!-- Public Toggle -->

View File

@@ -45,8 +45,15 @@
disabled={disabled || readonly} disabled={disabled || readonly}
> >
<span class={displayDate ? '' : 'opacity-40'}>{displayDate || 'DD/MM/YYYY'}</span> <span class={displayDate ? '' : 'opacity-40'}>{displayDate || 'DD/MM/YYYY'}</span>
<svg xmlns="http://www.w3.org/2000/svg" class="h-4 w-4 opacity-60" viewBox="0 0 24 24" fill="currentColor"> <svg
<path d="M19 3h-1V1h-2v2H8V1H6v2H5c-1.1 0-2 .9-2 2v16c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 18H5V8h14v13zM7 10h5v5H7z"/> xmlns="http://www.w3.org/2000/svg"
class="h-4 w-4 opacity-60"
viewBox="0 0 24 24"
fill="currentColor"
>
<path
d="M19 3h-1V1h-2v2H8V1H6v2H5c-1.1 0-2 .9-2 2v16c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 18H5V8h14v13zM7 10h5v5H7z"
/>
</svg> </svg>
</button> </button>
<input <input

View File

@@ -49,8 +49,15 @@
disabled={disabled || readonly} disabled={disabled || readonly}
> >
<span class={displayDateTime ? '' : 'opacity-40'}>{displayDateTime || 'DD/MM/YYYY HH:MM'}</span> <span class={displayDateTime ? '' : 'opacity-40'}>{displayDateTime || 'DD/MM/YYYY HH:MM'}</span>
<svg xmlns="http://www.w3.org/2000/svg" class="h-4 w-4 opacity-60" viewBox="0 0 24 24" fill="currentColor"> <svg
<path d="M19 3h-1V1h-2v2H8V1H6v2H5c-1.1 0-2 .9-2 2v16c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 18H5V8h14v13zM7 10h5v5H7z"/> xmlns="http://www.w3.org/2000/svg"
class="h-4 w-4 opacity-60"
viewBox="0 0 24 24"
fill="currentColor"
>
<path
d="M19 3h-1V1h-2v2H8V1H6v2H5c-1.1 0-2 .9-2 2v16c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 18H5V8h14v13zM7 10h5v5H7z"
/>
</svg> </svg>
</button> </button>
<input <input

View File

@@ -353,8 +353,12 @@
> >
<div class="space-y-1"> <div class="space-y-1">
{#each compactStayMeta as stayMeta} {#each compactStayMeta as stayMeta}
<div class="grid grid-cols-[2.25rem_minmax(0,1fr)] items-baseline gap-1 leading-tight"> <div
<div class="text-[9px] font-medium uppercase tracking-[0.14em] text-base-content/50"> class="grid grid-cols-[2.25rem_minmax(0,1fr)] items-baseline gap-1 leading-tight"
>
<div
class="text-[9px] font-medium uppercase tracking-[0.14em] text-base-content/50"
>
{stayMeta.label} {stayMeta.label}
</div> </div>
<div <div

View File

@@ -120,8 +120,7 @@
{#if note.date && note.date !== ''} {#if note.date && note.date !== ''}
<div class="flex items-center gap-2"> <div class="flex items-center gap-2">
<Calendar class="w-4 h-4 text-primary" /> <Calendar class="w-4 h-4 text-primary" />
<span>{new Date(note.date).toLocaleDateString('en-GB', { timeZone: 'UTC' })}</span <span>{new Date(note.date).toLocaleDateString('en-GB', { timeZone: 'UTC' })}</span>
>
</div> </div>
{/if} {/if}
{#if note.links && note.links?.length > 0} {#if note.links && note.links?.length > 0}

View File

@@ -359,7 +359,9 @@
<div <div
class="badge badge-sm p-1 rounded-full text-base-content shadow-sm" class="badge badge-sm p-1 rounded-full text-base-content shadow-sm"
role="img" role="img"
aria-label={transportation.is_public ? $t('adventures.public') : $t('adventures.private')} aria-label={transportation.is_public
? $t('adventures.public')
: $t('adventures.private')}
> >
{#if transportation.is_public} {#if transportation.is_public}
<Eye class="w-4 h-4" /> <Eye class="w-4 h-4" />
@@ -516,7 +518,6 @@
{#if travelDurationLabel} {#if travelDurationLabel}
<span class="badge badge-ghost badge-sm">⏱️ {travelDurationLabel}</span> <span class="badge badge-ghost badge-sm">⏱️ {travelDurationLabel}</span>
{/if} {/if}
</div> </div>
</div> </div>
</div> </div>

View File

@@ -354,7 +354,6 @@
} }
}} }}
/> />
</div> </div>
<!-- Right Column --> <!-- Right Column -->

View File

@@ -710,7 +710,6 @@
</div> </div>
</div> </div>
{/if} {/if}
</div> </div>
<!-- Right Column --> <!-- Right Column -->

View File

@@ -52,8 +52,7 @@
class="link link-primary" class="link link-primary"
target="_blank" target="_blank"
rel="noopener noreferrer" rel="noopener noreferrer"
href="https://github.com/Alex-Wiesner/voyage" href="https://github.com/Alex-Wiesner/voyage">documentation</a
>documentation</a
>. >.
</p> </p>
</div> </div>

View File

@@ -28,7 +28,7 @@
import FolderMultiple from '~icons/mdi/folder-multiple'; import FolderMultiple from '~icons/mdi/folder-multiple';
import FormatListBulleted from '~icons/mdi/format-list-bulleted'; import FormatListBulleted from '~icons/mdi/format-list-bulleted';
import Timeline from '~icons/mdi/timeline'; import Timeline from '~icons/mdi/timeline';
import Map from '~icons/mdi/map'; import MapIcon from '~icons/mdi/map';
import Lightbulb from '~icons/mdi/lightbulb'; import Lightbulb from '~icons/mdi/lightbulb';
import ChartBar from '~icons/mdi/chart-bar'; import ChartBar from '~icons/mdi/chart-bar';
import Plus from '~icons/mdi/plus'; import Plus from '~icons/mdi/plus';
@@ -261,20 +261,43 @@
return undefined; return undefined;
} }
const firstLocation = current.locations.find((loc) => const maxStops = 4;
Boolean(loc.city?.name || loc.country?.name || loc.location || loc.name) const stops: string[] = [];
); const seen = new Set<string>();
if (!firstLocation) {
for (const loc of current.locations) {
const cityName = loc.city?.name?.trim();
const countryName = loc.country?.name?.trim();
if (cityName || countryName) {
const label =
cityName && countryName ? `${cityName}, ${countryName}` : cityName || countryName;
if (!label) continue;
const key = `geo:${(cityName || '').toLowerCase()}|${(countryName || '').toLowerCase()}`;
if (seen.has(key)) continue;
seen.add(key);
stops.push(label);
continue;
}
const fallbackName = (loc.location || loc.name || '').trim();
if (!fallbackName) continue;
const key = `name:${fallbackName.toLowerCase()}`;
if (seen.has(key)) continue;
seen.add(key);
stops.push(fallbackName);
}
if (stops.length === 0) {
return undefined; return undefined;
} }
const cityName = firstLocation.city?.name; const summarizedStops = stops.slice(0, maxStops).join('; ');
const countryName = firstLocation.country?.name; if (stops.length > maxStops) {
if (cityName && countryName) { return `${summarizedStops}; +${stops.length - maxStops} more`;
return `${cityName}, ${countryName}`;
} }
return cityName || countryName || firstLocation.location || firstLocation.name || undefined; return summarizedStops;
} }
$: collectionDestination = deriveCollectionDestination(collection); $: collectionDestination = deriveCollectionDestination(collection);
@@ -1138,7 +1161,7 @@
class:btn-active={currentView === 'map'} class:btn-active={currentView === 'map'}
on:click={() => switchView('map')} on:click={() => switchView('map')}
> >
<Map class="w-5 h-5 sm:mr-2" aria-hidden="true" /> <MapIcon class="w-5 h-5 sm:mr-2" aria-hidden="true" />
<span class="hidden sm:inline">{$t('navbar.map')}</span> <span class="hidden sm:inline">{$t('navbar.map')}</span>
</button> </button>
{/if} {/if}

View File

@@ -513,7 +513,8 @@
<strong>{$t('adventures.start')}:</strong> <strong>{$t('adventures.start')}:</strong>
{DateTime.fromISO(visit.start_date, { zone: 'utc' }) {DateTime.fromISO(visit.start_date, { zone: 'utc' })
.setZone(visit.timezone) .setZone(visit.timezone)
.toLocaleString(DateTime.DATETIME_MED, { locale: 'en-GB' })}<br /> .toLocaleString(DateTime.DATETIME_MED, { locale: 'en-GB' })}<br
/>
<strong>{$t('adventures.end')}:</strong> <strong>{$t('adventures.end')}:</strong>
{DateTime.fromISO(visit.end_date, { zone: 'utc' }) {DateTime.fromISO(visit.end_date, { zone: 'utc' })
.setZone(visit.timezone) .setZone(visit.timezone)