feat: Introduce LLM response caching and streaming, add application configuration, and enhance session data with progress and history tracking.
This commit is contained in:
@@ -20,7 +20,7 @@ class LLMConfig:
|
||||
provider: str = os.environ.get("LLM_PROVIDER", "openai") # openai, gemini, etc.
|
||||
api_key: str = os.environ.get("OPENAI_API_KEY", "")  # never commit a real key; require it from the environment
|
||||
base_url: str = os.environ.get("OPENAI_BASE_URL", "http://127.0.0.1:9999/v1")
|
||||
model: str = os.environ.get("OPENAI_MODEL", "gemini-3-flash")
|
||||
model: str = os.environ.get("OPENAI_MODEL", "gemini-3-flash")
|
||||
temperature: float = 0.5
|
||||
max_tokens: int = 131072
|
||||
|
||||
|
||||
Reference in New Issue
Block a user