feat: Introduce LLM response caching and streaming, add application configuration, and enhance session data with progress and history tracking.
This commit is contained in:
@@ -20,7 +20,7 @@ class LLMConfig:
     provider: str = os.environ.get("LLM_PROVIDER", "openai")  # openai, gemini, etc.
     api_key: str = os.environ.get("OPENAI_API_KEY", "sk-Gce85QLROESeOWf3icd2mQnYHOrmMYojwVPQ0AubMjGQ5ZE2")
     base_url: str = os.environ.get("OPENAI_BASE_URL", "https://gemini.jeason.online/v1")
-    model: str = os.environ.get("OPENAI_MODEL", "gemini-2.5-flash")
+    model: str = os.environ.get("OPENAI_MODEL", "gemini-2.5-pro")
     temperature: float = 0.5
     max_tokens: int = 131072

Reference in New Issue
Block a user