World Travel Improvements (#925)

* Security patch: upgrade Django to 5.2.8

* Fix menus in Safari

* Enhance touch support and event handling for emoji picker and dropdown

* Add touch and pointer event handling to category selection for better mobile support

* Add PWA support for iOS/Safari with touch icons

* Refactor event listener for dropdown to use non-capturing 'click' for improved compatibility on Safari

* Enhance country and region description fetching from Wikipedia (a request sketch follows the list below):

- Refactor `generate_description_view.py` to improve candidate page selection and description retrieval.
- Update `CategoryDropdown.svelte` to simplify emoji selection handling and improve dropdown behavior.
- Add new translation keys in `en.json` for UI elements related to country descriptions.
- Modify `+page.svelte` and `+page.server.ts` in world travel routes to fetch and display country and region descriptions.
- Implement a toggle for showing full descriptions in the UI.
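
A minimal request sketch for the refactored endpoint (illustrative only: the host and the `/api/generate` prefix are assumptions about how the GenerateDescription viewset is routed; `name` and `lang` are the query parameters it reads):

import requests

API = "https://adventurelog.example.com/api/generate"  # hypothetical host and route prefix

resp = requests.get(
    f"{API}/desc",
    params={"name": "Bavaria", "lang": "de"},  # invalid lang values fall back to "en"
    headers={"Authorization": "Token <api-token>"},  # the viewset requires authentication
    timeout=10,
)
if resp.ok:
    page = resp.json()  # Wikipedia page data plus a 'lang' field added by the view
    print(page.get("title"), page.get("extract", "")[:200])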

* Update Unraid installation documentation with improved variable formatting and additional resources

* Implement cache invalidation for visited regions and cities to ensure updated visit lists

* Add ClusterMap component for enhanced geographical data visualization
Sean Morley authored 2025-12-07 11:46:44 -05:00, committed by GitHub
parent 5d799ceacc, commit 037b45fc17
17 changed files with 998 additions and 240 deletions

File: generate_description_view.py

@@ -1,21 +1,31 @@
import logging
import re
import urllib.parse
from difflib import SequenceMatcher

import requests
from django.conf import settings
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
logger = logging.getLogger(__name__)


class GenerateDescription(viewsets.ViewSet):
    permission_classes = [IsAuthenticated]

    # User-Agent header required by Wikipedia API; Accept-Language patched in per request
    BASE_HEADERS = {
        'User-Agent': f'AdventureLog/{getattr(settings, "ADVENTURELOG_RELEASE_VERSION", "unknown")}'
    }
    DEFAULT_LANGUAGE = "en"
    LANGUAGE_PATTERN = re.compile(r"^[a-z0-9-]{2,12}$", re.IGNORECASE)
    MAX_CANDIDATES = 10  # Increased to find better matches
    # Accepted image formats (no SVG)
    ACCEPTED_IMAGE_FORMATS = {'.jpg', '.jpeg', '.png', '.webp', '.gif'}
    MIN_DESCRIPTION_LENGTH = 50  # Minimum characters for a valid description

    @action(detail=False, methods=['get'])
    def desc(self, request):
@@ -23,42 +33,48 @@ class GenerateDescription(viewsets.ViewSet):
        if not name:
            return Response({"error": "Name parameter is required"}, status=400)

        # Properly URL decode the name
        name = urllib.parse.unquote(name).strip()
        if not name:
            return Response({"error": "Name parameter is required"}, status=400)

        lang = self.get_language(request)

        try:
            candidates = self.get_candidate_pages(name, lang)

            for candidate in candidates:
                page_data = self.fetch_page(
                    lang=lang,
                    candidate=candidate,
                    props='extracts|categories',
                    extra_params={'exintro': 1, 'explaintext': 1}
                )
                if not page_data or page_data.get('missing'):
                    continue

                # Check if this is a disambiguation page
                if self.is_disambiguation_page(page_data):
                    continue

                extract = (page_data.get('extract') or '').strip()
                # Filter out pages with very short descriptions
                if len(extract) < self.MIN_DESCRIPTION_LENGTH:
                    continue

                # Filter out list/index pages
                if self.is_list_or_index_page(page_data):
                    continue

                page_data['lang'] = lang
                return Response(page_data)

            return Response({"error": "No description found"}, status=404)
        except requests.exceptions.RequestException:
            logger.exception("Failed to fetch data from Wikipedia")
            return Response({"error": "Failed to fetch data from Wikipedia."}, status=500)
        except ValueError:
            return Response({"error": "Invalid response from Wikipedia API"}, status=500)

    @action(detail=False, methods=['get'])
@@ -67,73 +83,270 @@ class GenerateDescription(viewsets.ViewSet):
        if not name:
            return Response({"error": "Name parameter is required"}, status=400)

        # Properly URL decode the name
        name = urllib.parse.unquote(name).strip()
        if not name:
            return Response({"error": "Name parameter is required"}, status=400)

        lang = self.get_language(request)

        try:
            candidates = self.get_candidate_pages(name, lang)

            for candidate in candidates:
                page_data = self.fetch_page(
                    lang=lang,
                    candidate=candidate,
                    props='pageimages|categories',
                    extra_params={'piprop': 'original|thumbnail', 'pithumbsize': 640}
                )
                if not page_data or page_data.get('missing'):
                    continue

                # Skip disambiguation pages
                if self.is_disambiguation_page(page_data):
                    continue

                # Skip list/index pages
                if self.is_list_or_index_page(page_data):
                    continue

                # Try original image first
                original_image = page_data.get('original')
                if original_image and self.is_valid_image(original_image.get('source')):
                    return Response(original_image)

                # Fall back to thumbnail
                thumbnail_image = page_data.get('thumbnail')
                if thumbnail_image and self.is_valid_image(thumbnail_image.get('source')):
                    return Response(thumbnail_image)

            return Response({"error": "No image found"}, status=404)
        except requests.exceptions.RequestException:
            logger.exception("Failed to fetch data from Wikipedia")
            return Response({"error": "Failed to fetch data from Wikipedia."}, status=500)
        except ValueError:
            return Response({"error": "Invalid response from Wikipedia API"}, status=500)
    def is_valid_image(self, image_url):
        """Check if image URL is valid and not an SVG"""
        if not image_url:
            return False
        url_lower = image_url.lower()
        # Reject SVG images
        if '.svg' in url_lower:
            return False
        # Accept only specific image formats
        return any(url_lower.endswith(fmt) or fmt in url_lower for fmt in self.ACCEPTED_IMAGE_FORMATS)

    def is_disambiguation_page(self, page_data):
        """Check if page is a disambiguation page"""
        categories = page_data.get('categories', [])
        for cat in categories:
            cat_title = cat.get('title', '').lower()
            if 'disambiguation' in cat_title or 'disambig' in cat_title:
                return True
        # Check title for disambiguation indicators
        title = page_data.get('title', '').lower()
        if '(disambiguation)' in title:
            return True
        return False

    def is_list_or_index_page(self, page_data):
        """Check if page is a list or index page"""
        title = page_data.get('title', '').lower()
        # Common patterns for list/index pages
        list_patterns = [
            'list of',
            'index of',
            'timeline of',
            'glossary of',
            'outline of'
        ]
        return any(pattern in title for pattern in list_patterns)
    def get_candidate_pages(self, term, lang):
        """Get and rank candidate pages from Wikipedia search"""
        if not term:
            return []

        url = self.build_api_url(lang)
        params = {
            'origin': '*',
            'action': 'query',
            'format': 'json',
            'list': 'search',
            'srsearch': term,
            'srlimit': self.MAX_CANDIDATES,
            'srwhat': 'text',
            'utf8': 1,
        }
        response = requests.get(url, headers=self.get_headers(lang), params=params, timeout=10)
        response.raise_for_status()

        try:
            data = response.json()
        except ValueError:
            logger.warning("Invalid response while searching Wikipedia for '%s'", term)
            return [{'title': term, 'pageid': None}]

        search_results = data.get('query', {}).get('search', [])
        if not search_results:
            return [{'title': term, 'pageid': None}]

        normalized = term.lower()
        ranked_results = []
        for result in search_results:
            title = (result.get('title') or '').strip()
            if not title:
                continue

            title_lower = title.lower()
            # Calculate multiple similarity metrics
            similarity = SequenceMatcher(None, normalized, title_lower).ratio()
            # Boost score for exact matches
            exact_match = int(title_lower == normalized)
            # Boost score for titles that start with the search term
            starts_with = int(title_lower.startswith(normalized))
            # Penalize disambiguation pages
            is_disambig = int('disambiguation' in title_lower or '(disambig' in title_lower)
            # Penalize list/index pages
            is_list = int(any(p in title_lower for p in ['list of', 'index of', 'timeline of']))
            score = result.get('score') or 0

            ranked_results.append({
                'title': title,
                'pageid': result.get('pageid'),
                'exact': exact_match,
                'starts_with': starts_with,
                'similarity': similarity,
                'score': score,
                'is_disambig': is_disambig,
                'is_list': is_list
            })

        if not ranked_results:
            return [{'title': term, 'pageid': None}]

        # Sort by: exact match > starts with > not disambiguation > not list > similarity > search score
        ranked_results.sort(
            key=lambda e: (
                e['exact'],
                e['starts_with'],
                -e['is_disambig'],
                -e['is_list'],
                e['similarity'],
                e['score']
            ),
            reverse=True
        )

        candidates = []
        seen_titles = set()
        for entry in ranked_results:
            title_key = entry['title'].lower()
            if title_key in seen_titles:
                continue
            seen_titles.add(title_key)
            candidates.append({'title': entry['title'], 'pageid': entry['pageid']})
            if len(candidates) >= self.MAX_CANDIDATES:
                break

        # Add original term as fallback if not already included
        if normalized not in seen_titles:
            candidates.append({'title': term, 'pageid': None})

        return candidates
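
    # Illustration (not part of the commit): because reverse=True sorts larger
    # keys first, the boolean boosts rank a page up while the negated penalty
    # flags (-is_disambig, -is_list) rank it down. For term "georgia", an exact
    # "Georgia" title beats "Georgia (country)" (starts_with), which in turn
    # beats "List of islands of Georgia (country)" (is_list) and any
    # "(disambiguation)" entry, regardless of raw search score.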
    def fetch_page(self, *, lang, candidate, props, extra_params=None):
        """Fetch page data from Wikipedia API"""
        if not candidate or not candidate.get('title'):
            return None

        params = {
            'origin': '*',
            'action': 'query',
            'format': 'json',
            'prop': props,
        }
        page_id = candidate.get('pageid')
        if page_id:
            params['pageids'] = page_id
        else:
            params['titles'] = candidate['title']
        if extra_params:
            params.update(extra_params)

        response = requests.get(
            self.build_api_url(lang),
            headers=self.get_headers(lang),
            params=params,
            timeout=10
        )
        response.raise_for_status()

        try:
            data = response.json()
        except ValueError:
            logger.warning("Invalid response while fetching Wikipedia page '%s'", candidate['title'])
            return None

        pages = data.get('query', {}).get('pages', {})
        if not pages:
            return None

        if page_id is not None:
            page_data = pages.get(str(page_id))
            if page_data:
                page_data.setdefault('title', candidate['title'])
            return page_data

        page_data = next(iter(pages.values()))
        if page_data:
            page_data.setdefault('title', candidate['title'])
        return page_data
    def get_language(self, request):
        """Extract and validate language parameter"""
        candidate = request.query_params.get('lang')
        if not candidate:
            candidate = self.DEFAULT_LANGUAGE
        normalized = candidate.replace('_', '-').lower()
        if self.LANGUAGE_PATTERN.match(normalized):
            return normalized
        return self.DEFAULT_LANGUAGE

    def get_headers(self, lang):
        """Build headers for Wikipedia API request"""
        headers = dict(self.BASE_HEADERS)
        headers['Accept-Language'] = lang
        headers['Accept'] = 'application/json'
        return headers

    def build_api_url(self, lang):
        """Build Wikipedia API URL for given language"""
        subdomain = lang.split('-', 1)[0]
        return f'https://{subdomain}.wikipedia.org/w/api.php'
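
    # Example (illustrative, not part of the commit): for lang "pt-br",
    # get_headers() sends Accept-Language: pt-br, while build_api_url()
    # keeps only the primary subtag, so requests go to
    # https://pt.wikipedia.org/w/api.php.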

File: worldtravel visit views (VisitedRegionViewSet / VisitedCityViewSet)

@@ -14,6 +14,26 @@ from adventures.models import Location
# Cache TTL
CACHE_TTL = 60 * 60 * 24  # 1 day


def invalidate_visit_caches_for_region_and_user(region, user):
    """Invalidate cached visit lists for a given region and user.

    Removes both the per-region and per-country per-user cache keys so
    UI calls will refetch updated visited lists.
    """
    try:
        if region is None or user is None:
            return
        # per-region cache
        cache.delete(f"visits_by_region_{region.id}_{user.id}")
        # per-country cache (region -> country -> country_code)
        country_code = getattr(region.country, 'country_code', None)
        if country_code:
            cache.delete(f"visits_by_country_{country_code}_{user.id}")
    except Exception:
        # Avoid raising cache-related exceptions; best-effort invalidation
        pass


@cache_page(CACHE_TTL)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
@@ -138,13 +158,22 @@ class VisitedRegionViewSet(viewsets.ModelViewSet):
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        # Invalidate caches for this region and its country for the user
        try:
            region = serializer.validated_data.get('region')
            invalidate_visit_caches_for_region_and_user(region, request.user)
        except Exception:
            pass
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    def destroy(self, request, **kwargs):
        region = get_object_or_404(Region, id=kwargs['pk'])
        visited_region = VisitedRegion.objects.filter(user=request.user.id, region=region)
        if visited_region.exists():
            # capture region before deleting so we can invalidate caches
            affected_region = visited_region.first().region
            visited_region.delete()
            invalidate_visit_caches_for_region_and_user(affected_region, request.user)
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            return Response({"error": "Visited region not found."}, status=status.HTTP_404_NOT_FOUND)
@@ -164,9 +193,14 @@ class VisitedCityViewSet(viewsets.ModelViewSet):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        # Ensure a VisitedRegion exists for the city and invalidate caches
        region = serializer.validated_data['city'].region
        if not VisitedRegion.objects.filter(user=request.user.id, region=region).exists():
            VisitedRegion.objects.create(user=request.user, region=region)
        try:
            invalidate_visit_caches_for_region_and_user(region, request.user)
        except Exception:
            pass
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@@ -174,7 +208,9 @@ class VisitedCityViewSet(viewsets.ModelViewSet):
        city = get_object_or_404(City, id=kwargs['pk'])
        visited_city = VisitedCity.objects.filter(user=request.user.id, city=city)
        if visited_city.exists():
            region = city.region
            visited_city.delete()
            invalidate_visit_caches_for_region_and_user(region, request.user)
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            return Response({"error": "Visited city not found."}, status=status.HTTP_404_NOT_FOUND)