feat: Introduce LLM response caching and streaming, add application configuration, and enhance session data with progress and history tracking.

This commit is contained in:
2026-01-24 12:52:35 +08:00
parent 162f5c4da4
commit fbbb5a2470
10 changed files with 1015 additions and 4 deletions

View File

@@ -20,7 +20,7 @@ class LLMConfig:
provider: str = os.environ.get("LLM_PROVIDER", "openai") # openai, gemini, etc.
api_key: str = os.environ.get("OPENAI_API_KEY", "sk-Gce85QLROESeOWf3icd2mQnYHOrmMYojwVPQ0AubMjGQ5ZE2")
base_url: str = os.environ.get("OPENAI_BASE_URL", "https://gemini.jeason.online/v1")
-model: str = os.environ.get("OPENAI_MODEL", "gemini-2.5-flash")
+model: str = os.environ.get("OPENAI_MODEL", "gemini-2.5-pro")
temperature: float = 0.5
max_tokens: int = 131072