fix(chat): sanitize error responses and add tool kwargs allowlist
Prevent API key and sensitive info leakage through exception messages:
- Replace str(exc) with generic error messages in all catch-all handlers
- Add server-side exception logging via logger.exception()
- Add ALLOWED_KWARGS per-tool allowlist to filter untrusted LLM kwargs
- Bound tool execution loop to MAX_TOOL_ITERATIONS=10
- Fix tool_call delta merge to use tool_call index
This commit is contained in:
@@ -1,9 +1,12 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
import litellm
|
||||
|
||||
from integrations.models import UserAPIKey
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
PROVIDER_MODELS = {
|
||||
"openai": "gpt-4o",
|
||||
"anthropic": "anthropic/claude-sonnet-4-20250514",
|
||||
@@ -138,5 +141,6 @@ async def stream_chat_completion(user, messages, provider, tools=None):
|
||||
yield f"data: {json.dumps(chunk_data)}\n\n"
|
||||
|
||||
yield "data: [DONE]\n\n"
|
||||
except Exception as exc:
|
||||
yield f"data: {json.dumps({'error': str(exc)})}\n\n"
|
||||
except Exception:
|
||||
logger.exception("LLM streaming error")
|
||||
yield f"data: {json.dumps({'error': 'An error occurred while processing your request. Please try again.'})}\n\n"
|
||||
|
||||
Reference in New Issue
Block a user