# .env.example
# This file contains all the environment variables needed to run the application.
# Copy this file to .env and fill in the values for your environment.
# SECURITY NOTE: this file is a template — never commit real credentials to version control.

# ============================================================================
# SERVER CONFIGURATION
# ============================================================================
# The host the web server will bind to (0.0.0.0 binds all interfaces).
SERVER_HOST=0.0.0.0

# The port for the main Flask web server.
SERVER_PORT=5001

# The port for the WebSocket server for real-time chat.
WEBSOCKET_PORT=8765

# Set to "True" for development to enable debug mode and auto-reloading.
# Set to "False" for production.
DEBUG_MODE=False

# Logging level for the application. Options: DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_LEVEL=INFO


# ============================================================================
# DATABASE CONFIGURATION
# ============================================================================
# The connection string for the primary database.
# Format for MySQL: mysql+pymysql://<user>:<password>@<host>:<port>/<dbname>?charset=utf8mb4
# Format for SQLite: sqlite:///./local_test.db

# Local SQLite (recommended for development and testing).
DATABASE_URL=sqlite:///./data/tsp_assistant.db

# Remote MySQL (production use; uncomment and fill in real credentials when needed).
# DATABASE_URL=mysql+pymysql://<user>:<password>@<host>/<dbname>?charset=utf8mb4

# ============================================================================
# LARGE LANGUAGE MODEL (LLM) CONFIGURATION
# ============================================================================
# The provider of the LLM. Supported: "qwen", "openai", "anthropic"
LLM_PROVIDER=qwen

# The API key for your chosen LLM provider. Keep real keys out of version control.
LLM_API_KEY=your-llm-api-key-here

# The base URL for the LLM API. This is often needed for OpenAI-compatible endpoints.
LLM_BASE_URL=https://dashscope.aliyuncs.com/compatible-mode/v1

# The specific model to use, e.g., "qwen-plus-latest", "gpt-3.5-turbo", "claude-3-sonnet-20240229"
LLM_MODEL=qwen-plus-latest

# The temperature for the model's responses (0.0 to 2.0).
LLM_TEMPERATURE=0.7

# The maximum number of tokens to generate in a response.
LLM_MAX_TOKENS=2000

# The timeout in seconds for API calls to the LLM.
LLM_TIMEOUT=30


# ============================================================================
# FEISHU (LARK) INTEGRATION CONFIGURATION
# ============================================================================
# The App ID of your Feishu enterprise application.
FEISHU_APP_ID=your-feishu-app-id

# The App Secret of your Feishu enterprise application. Never commit the real secret.
FEISHU_APP_SECRET=your-feishu-app-secret

# The Verification Token for validating event callbacks (if configured).
FEISHU_VERIFICATION_TOKEN=

# The Encrypt Key for decrypting event data (if configured).
FEISHU_ENCRYPT_KEY=

# The App Token of the Feishu multi-dimensional table document.
FEISHU_APP_TOKEN=your-feishu-app-token

# The ID of the Feishu multi-dimensional table for data synchronization.
FEISHU_TABLE_ID=your-feishu-table-id


# ============================================================================
# AI ACCURACY CONFIGURATION
# ============================================================================
# The similarity threshold (0.0 to 1.0) for auto-approving an AI suggestion.
AI_AUTO_APPROVE_THRESHOLD=0.95

# The similarity threshold below which the human-provided resolution is preferred.
AI_USE_HUMAN_RESOLUTION_THRESHOLD=0.90

# The similarity threshold for flagging a suggestion for manual review.
AI_MANUAL_REVIEW_THRESHOLD=0.80

# The default confidence score for an AI suggestion.
AI_SUGGESTION_CONFIDENCE=0.95

# The confidence score assigned when a human resolution is used.
AI_HUMAN_RESOLUTION_CONFIDENCE=0.90


# ============================================================================
# REDIS CACHE CONFIGURATION
# ============================================================================
# Redis server host (use localhost for local development).
REDIS_HOST=localhost

# Redis server port.
REDIS_PORT=6379

# Redis database number (0-15).
REDIS_DB=0

# Redis password (leave empty if no password).
REDIS_PASSWORD=

# Redis connection pool size.
REDIS_POOL_SIZE=10

# Redis cache default TTL in seconds (3600 = 1 hour).
REDIS_DEFAULT_TTL=3600

# Enable Redis cache (set to False to disable caching).
REDIS_ENABLED=True


# ============================================================================
# EMBEDDING CONFIGURATION (knowledge-base vector search — local model)
# ============================================================================
# Enable embedding-based semantic search (falls back to keyword matching if disabled).
EMBEDDING_ENABLED=True

# Local embedding model name (downloaded automatically from HuggingFace on first run).
# Recommended models:
#   BAAI/bge-small-zh-v1.5           (~95MB,  512-dim, good Chinese quality, ~150MB RAM)
#   BAAI/bge-base-zh-v1.5            (~400MB, 768-dim, better Chinese quality)
#   shibing624/text2vec-base-chinese (~400MB, 768-dim, specialized for Chinese)
EMBEDDING_MODEL=BAAI/bge-small-zh-v1.5

# Vector dimension (must match the chosen model).
EMBEDDING_DIMENSION=512

# Semantic search similarity threshold (0.0-1.0; higher is stricter).
EMBEDDING_SIMILARITY_THRESHOLD=0.5

# Embedding cache TTL in seconds (86400 = 1 day).
EMBEDDING_CACHE_TTL=86400