feat: optimize AI suggestion and workorder sync - support same-day multiple update numbering - insert new suggestions at top maintaining reverse chronological order - reference process history when generating suggestions - simplify prompts to avoid forcing log analysis - fix Chinese comment encoding issues

This commit is contained in:
赵杰 Jie Zhao (雄狮汽车科技)
2025-10-27 10:33:34 +08:00
parent 18d59b71cb
commit a4261ef06f
104 changed files with 14678 additions and 1675 deletions

107
.gitignore vendored
View File

@@ -1,107 +0,0 @@
# Python缓存文件
__pycache__/
*.py[cod]
*$py.class
*.so
# 分发/打包
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
*.manifest
*.spec
# 单元测试/覆盖率报告
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# 环境变量
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# IDE文件
.vscode/
.idea/
*.swp
*.swo
*~
# 操作系统文件
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# 日志文件
*.log
logs/
# 数据库文件(开发环境)
*.db
*.sqlite
*.sqlite3
# 备份文件
backups/
*.backup
*.bak
# 临时文件
*.tmp
*.temp
temp/
tmp/
# 部署相关
deploy_config.json
dev_deploy/
# 测试文件
test_*.py
*_test.py
test_sample.txt
# 文档草稿
note/
*问题修复*.md
*修复总结*.md
*使用指南*.md
# Excel文件除了模板
*.xlsx
!uploads/workorder_template.xlsx
# 配置文件(敏感信息)
config/local_config.py
.env.local

8
.idea/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,8 @@
# 默认忽略的文件
/shelf/
/workspace.xml
# 基于编辑器的 HTTP 客户端请求
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

12
.idea/dataSources.xml generated Normal file
View File

@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="DataSourceManagerImpl" format="xml" multifile-model="true">
<data-source source="LOCAL" name="@43.134.68.207" uuid="715b070d-f258-43df-a066-49e825a9b04f">
<driver-ref>mysql.8</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>com.mysql.cj.jdbc.Driver</jdbc-driver>
<jdbc-url>jdbc:mysql://43.134.68.207:3306</jdbc-url>
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
</component>
</project>

View File

@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

7
.idea/misc.xml generated Normal file
View File

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Black">
<option name="sdkName" value="Python 3.11 (tsp-assistant)" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (tsp-assistant)" project-jdk-type="Python SDK" />
</project>

8
.idea/modules.xml generated Normal file
View File

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/tsp-assistant.iml" filepath="$PROJECT_DIR$/.idea/tsp-assistant.iml" />
</modules>
</component>
</project>

14
.idea/tsp-assistant.iml generated Normal file
View File

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<excludeFolder url="file://$MODULE_DIR$/.venv" />
</content>
<orderEntry type="jdk" jdkName="Python 3.11 (tsp-assistant)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component>
</module>

6
.idea/vcs.xml generated Normal file
View File

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>

3
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,3 @@
{
"files.autoGuessEncoding": true
}

View File

@@ -1,57 +0,0 @@
# TSP智能助手Docker镜像 - 优化版本
FROM python:3.11-slim
# 设置工作目录
WORKDIR /app
# 设置环境变量
ENV PYTHONPATH=/app
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV PIP_NO_CACHE_DIR=1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
# 安装系统依赖
RUN apt-get update && apt-get install -y \
gcc \
g++ \
git \
curl \
wget \
vim \
htop \
procps \
&& rm -rf /var/lib/apt/lists/*
# 升级pip并安装wheel
RUN pip install --upgrade pip setuptools wheel
# 复制依赖文件
COPY requirements.txt .
# 安装Python依赖
RUN pip install --no-cache-dir -r requirements.txt
# 复制应用代码
COPY . .
# 创建必要目录
RUN mkdir -p logs data backups uploads config
# 设置权限
RUN chmod +x scripts/deploy.sh scripts/monitor.sh
# 创建非root用户
RUN useradd -m -u 1000 tspuser && \
chown -R tspuser:tspuser /app
USER tspuser
# 暴露端口
EXPOSE 5000 8765
# 健康检查
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:5000/api/health || exit 1
# 启动命令
CMD ["python", "start_dashboard.py"]

144
LLM配置统一说明.md Normal file
View File

@@ -0,0 +1,144 @@
# LLM配置统一管理说明
## 概述
本项目已将LLM配置统一管理,确保整个项目只在一个地方配置千问模型,所有地方都从统一配置获取。
## 配置架构
### 1. 核心配置文件:`config/llm_config.py`
这是**唯一的**LLM配置源定义了千问模型的所有配置
```python
QWEN_CONFIG = LLMConfig(
provider="qwen",
api_key="sk-c0dbefa1718d46eaa897199135066f00",
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model="qwen-plus-latest",
temperature=0.7,
max_tokens=2000
)
# 默认使用千问模型
DEFAULT_CONFIG = QWEN_CONFIG
```
### 2. 统一配置管理器:`src/config/unified_config.py`
统一配置管理器在初始化时自动从 `config/llm_config.py` 加载配置:
```python
def _load_default_llm_config(self) -> LLMConfig:
"""加载默认LLM配置"""
try:
from config.llm_config import DEFAULT_CONFIG
# 转换配置格式
return LLMConfig(...)
except Exception as e:
logger.warning(f"无法加载默认LLM配置使用内置默认值: {e}")
return LLMConfig()
```
### 3. 全局配置实例
通过 `get_config()` 函数获取全局配置实例:
```python
from src.config.unified_config import get_config
config = get_config()
llm_config = config.llm # 获取LLM配置
```
## 如何使用
### 在任何需要使用LLM的地方
```python
from src.config.unified_config import get_config
# 获取LLM配置
llm_config = get_config().llm
# 使用配置
print(f"Provider: {llm_config.provider}")
print(f"Model: {llm_config.model}")
print(f"API Key: {llm_config.api_key}")
```
### 示例:AI建议服务
```python
class AISuggestionService:
def __init__(self):
# 从统一配置管理器获取LLM配置
self.llm_config = get_config().llm
logger.info(f"使用LLM配置: {self.llm_config.provider} - {self.llm_config.model}")
```
## 配置优先级
1. **第一优先级**:统一配置管理器中的配置(可通过配置文件或环境变量设置)
2. **第二优先级**`config/llm_config.py` 中的 `DEFAULT_CONFIG`
3. **最后备选**:内置的默认值
## 修改配置
### 方法1:修改配置文件(推荐)
直接编辑 `config/llm_config.py`,修改API密钥或模型:
```python
QWEN_CONFIG = LLMConfig(
provider="qwen",
api_key="你的新API密钥", # 修改这里
model="qwen-max", # 或修改模型
...
)
```
### 方法2:通过统一配置文件
编辑 `config/unified_config.json`(如果存在):
```json
{
"llm": {
"provider": "qwen",
"api_key": "你的新API密钥",
"model": "qwen-plus-latest",
...
}
}
```
### 方法3:环境变量(可选)
```bash
export LLM_API_KEY="你的API密钥"
export LLM_MODEL="qwen-plus-latest"
```
## 优势
1. **单一配置源**:只需要在 `config/llm_config.py` 配置一次
2. **统一管理**:所有模块都通过统一配置管理器获取
3. **易于维护**:修改配置不需要修改多处代码
4. **自动同步**:修改配置后,所有使用该配置的地方自动更新
5. **向后兼容**:保留fallback机制,确保系统稳定运行
## 已更新的文件
- ✅ `config/llm_config.py` - 添加了 `get_default_llm_config()` 函数
- ✅ `src/config/unified_config.py` - 从 `config/llm_config.py` 加载默认配置
- ✅ `src/integrations/ai_suggestion_service.py` - 使用统一配置
- ✅ `src/agent/agent_assistant_core.py` - 使用统一配置
## 注意事项
- **不要**在代码中硬编码OpenAI或其他模型的配置
- **不要**直接从 `config/llm_config.py` 导入,除非作为fallback
- **总是**通过 `get_config().llm` 获取配置
- 修改配置后,请重启应用使配置生效

Binary file not shown.

Binary file not shown.

View File

@@ -21,7 +21,10 @@
"Has it been updated on the same day": "has_updated_same_day", "Has it been updated on the same day": "has_updated_same_day",
"Operating time": "operating_time", "Operating time": "operating_time",
"AI建议": "ai_suggestion", "AI建议": "ai_suggestion",
"Issue Start Time": "updated_at" "Issue Start Time": "updated_at",
"Wilfulness责任人<E4BBBB>?": "wilfulness",
"父<>?<3F>录": "parent_record",
"AI建<49>??": "ai_suggestion"
}, },
"field_aliases": { "field_aliases": {
"order_id": [ "order_id": [

View File

@@ -34,3 +34,27 @@ ANTHROPIC_CONFIG = LLMConfig(
# 默认使用千问模型 # 默认使用千问模型
DEFAULT_CONFIG = QWEN_CONFIG DEFAULT_CONFIG = QWEN_CONFIG
def get_default_llm_config() -> LLMConfig:
"""
获取默认的LLM配置
优先从统一配置管理器获取,如果失败则使用本地配置
"""
try:
from src.config.unified_config import get_config
config = get_config()
llm_dict = config.get_llm_config()
# 创建LLMConfig对象
return LLMConfig(
provider=llm_dict.get("provider", "qwen"),
api_key=llm_dict.get("api_key", ""),
base_url=llm_dict.get("base_url", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
model=llm_dict.get("model", "qwen-plus-latest"),
temperature=llm_dict.get("temperature", 0.7),
max_tokens=llm_dict.get("max_tokens", 2000)
)
except Exception:
# 如果统一配置不可用,使用本地配置
return DEFAULT_CONFIG

View File

@@ -0,0 +1,52 @@
{
"database": {
"url": "mysql+pymysql://tsp_assistant:password@43.134.68.207/tsp_assistant?charset=utf8mb4",
"pool_size": 10,
"max_overflow": 20,
"pool_timeout": 30,
"pool_recycle": 3600
},
"llm": {
"provider": "openai",
"api_key": "",
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"model": "qwen-turbo",
"temperature": 0.7,
"max_tokens": 2000,
"timeout": 30
},
"server": {
"host": "0.0.0.0",
"port": 5000,
"websocket_port": 8765,
"debug": false,
"log_level": "INFO"
},
"feishu": {
"app_id": "",
"app_secret": "",
"app_token": "",
"table_id": "",
"status": "active",
"sync_limit": 10,
"auto_sync_interval": 0
},
"ai_accuracy": {
"auto_approve_threshold": 0.95,
"use_human_resolution_threshold": 0.9,
"manual_review_threshold": 0.8,
"ai_suggestion_confidence": 0.95,
"human_resolution_confidence": 0.9,
"prefer_human_when_low_accuracy": true,
"enable_auto_approval": true,
"enable_human_fallback": true
},
"system": {
"backup_enabled": true,
"backup_interval": 24,
"max_backup_files": 7,
"cache_enabled": true,
"cache_ttl": 3600,
"monitoring_enabled": true
}
}

View File

@@ -3,5 +3,14 @@
"max_history": 10, "max_history": 10,
"refresh_interval": 10, "refresh_interval": 10,
"auto_monitoring": true, "auto_monitoring": true,
"agent_mode": true "agent_mode": true,
"api_provider": "openai",
"api_base_url": "",
"api_key": "",
"model_name": "qwen-turbo",
"model_temperature": 0.7,
"model_max_tokens": 1000,
"server_port": 5000,
"websocket_port": 8765,
"log_level": "INFO"
} }

View File

@@ -1,153 +0,0 @@
version: '3.8'
services:
tsp-assistant:
build:
context: .
dockerfile: Dockerfile
container_name: tsp_assistant
ports:
- "5000:5000"
- "8765:8765" # WebSocket端口
environment:
- PYTHONPATH=/app
- DATABASE_URL=mysql+pymysql://tsp_user:tsp_password@mysql:3306/tsp_assistant?charset=utf8mb4
- REDIS_URL=redis://redis:6379/0
- LOG_LEVEL=INFO
- TZ=Asia/Shanghai
volumes:
- ./data:/app/data
- ./logs:/app/logs
- ./backups:/app/backups
- ./uploads:/app/uploads
- ./config:/app/config
- tsp_db:/app
depends_on:
mysql:
condition: service_healthy
redis:
condition: service_healthy
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5000/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
networks:
- tsp_network
# MySQL数据库服务
mysql:
image: mysql:8.0
container_name: tsp_mysql
environment:
MYSQL_ROOT_PASSWORD: root123456
MYSQL_DATABASE: tsp_assistant
MYSQL_USER: tsp_user
MYSQL_PASSWORD: tsp_password
MYSQL_CHARACTER_SET_SERVER: utf8mb4
MYSQL_COLLATION_SERVER: utf8mb4_unicode_ci
ports:
- "3306:3306"
volumes:
- mysql_data:/var/lib/mysql
- ./init.sql:/docker-entrypoint-initdb.d/init.sql
restart: unless-stopped
command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-proot123456"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
networks:
- tsp_network
# Redis缓存服务
redis:
image: redis:7-alpine
container_name: tsp_redis
ports:
- "6379:6379"
volumes:
- redis_data:/data
restart: unless-stopped
command: redis-server --appendonly yes --requirepass redis123456
healthcheck:
test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
networks:
- tsp_network
# Nginx反向代理
nginx:
image: nginx:alpine
container_name: tsp_nginx
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
- ./ssl:/etc/nginx/ssl
- ./logs/nginx:/var/log/nginx
depends_on:
- tsp-assistant
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
networks:
- tsp_network
# 监控服务
prometheus:
image: prom/prometheus:latest
container_name: tsp_prometheus
ports:
- "9090:9090"
volumes:
- ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
restart: unless-stopped
networks:
- tsp_network
# Grafana仪表板
grafana:
image: grafana/grafana:latest
container_name: tsp_grafana
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin123456
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/provisioning:/etc/grafana/provisioning
restart: unless-stopped
networks:
- tsp_network
volumes:
tsp_db:
mysql_data:
redis_data:
prometheus_data:
grafana_data:
networks:
tsp_network:
driver: bridge

13517
logs/dashboard.log Normal file

File diff suppressed because one or more lines are too long

70
logs/tsp_assistant.log Normal file
View File

@@ -0,0 +1,70 @@
2025-09-19 18:26:27,748 - src.vehicle.vehicle_data_manager - INFO - 添加车辆数据成功: V001 - location
2025-09-19 18:26:27,752 - src.vehicle.vehicle_data_manager - INFO - 添加车辆数据成功: V001 - status
2025-09-19 18:26:27,756 - src.vehicle.vehicle_data_manager - INFO - 添加车辆数据成功: V001 - battery
2025-09-19 18:26:27,759 - src.vehicle.vehicle_data_manager - INFO - 添加车辆数据成功: V001 - engine
2025-09-19 18:26:27,764 - src.vehicle.vehicle_data_manager - INFO - 添加车辆数据成功: V002 - location
2025-09-19 18:26:27,768 - src.vehicle.vehicle_data_manager - INFO - 添加车辆数据成功: V002 - status
2025-09-19 18:26:27,772 - src.vehicle.vehicle_data_manager - INFO - 添加车辆数据成功: V002 - fault
2025-09-19 18:26:27,773 - src.vehicle.vehicle_data_manager - INFO - 示例车辆数据添加成功
2025-09-19 18:53:30,187 - sqlalchemy.pool.impl.QueuePool - ERROR - Exception during reset or similar
Traceback (most recent call last):
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\sqlalchemy\pool\base.py", line 985, in _finalize_fairy
fairy._reset(
~~~~~~~~~~~~^
pool,
^^^^^
...<2 lines>...
asyncio_safe=can_manipulate_connection,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\sqlalchemy\pool\base.py", line 1433, in _reset
pool._dialect.do_rollback(self)
~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\sqlalchemy\engine\default.py", line 711, in do_rollback
dbapi_connection.rollback()
~~~~~~~~~~~~~~~~~~~~~~~~~^^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 505, in rollback
self._read_ok_packet()
~~~~~~~~~~~~~~~~~~~~^^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 465, in _read_ok_packet
pkt = self._read_packet()
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 751, in _read_packet
packet_header = self._read_bytes(4)
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 789, in _read_bytes
data = self._rfile.read(num_bytes)
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.13_3.13.2032.0_x64__qbz5n2kfra8p0\Lib\socket.py", line 719, in readinto
return self._sock.recv_into(b)
~~~~~~~~~~~~~~~~~~~~^^^
KeyboardInterrupt
2025-09-19 18:54:31,332 - sqlalchemy.pool.impl.QueuePool - ERROR - Exception during reset or similar
Traceback (most recent call last):
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\sqlalchemy\pool\base.py", line 985, in _finalize_fairy
fairy._reset(
~~~~~~~~~~~~^
pool,
^^^^^
...<2 lines>...
asyncio_safe=can_manipulate_connection,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\sqlalchemy\pool\base.py", line 1433, in _reset
pool._dialect.do_rollback(self)
~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\sqlalchemy\engine\default.py", line 711, in do_rollback
dbapi_connection.rollback()
~~~~~~~~~~~~~~~~~~~~~~~~~^^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 505, in rollback
self._read_ok_packet()
~~~~~~~~~~~~~~~~~~~~^^
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 465, in _read_ok_packet
pkt = self._read_packet()
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 751, in _read_packet
packet_header = self._read_bytes(4)
File "C:\Users\Administrator.CHERY-NOT-8217.000\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\pymysql\connections.py", line 789, in _read_bytes
data = self._rfile.read(num_bytes)
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.13_3.13.2032.0_x64__qbz5n2kfra8p0\Lib\socket.py", line 719, in readinto
return self._sock.recv_into(b)
~~~~~~~~~~~~~~~~~~~~^^^
KeyboardInterrupt

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -59,21 +59,36 @@ class TSPAgentAssistantCore(TSPAssistant):
if llm_config: if llm_config:
self.llm_manager = LLMManager(llm_config) self.llm_manager = LLMManager(llm_config)
else: else:
# 使用默认配置 - 千问模型 # 从统一配置管理器获取LLM配置
try: try:
from config.llm_config import DEFAULT_CONFIG from src.config.unified_config import get_config
self.llm_manager = LLMManager(DEFAULT_CONFIG) unified_llm = get_config().llm
except ImportError: # 将统一配置的LLMConfig转换为agent需要的LLMConfig
# 如果配置文件不存在,使用内置配置 agent_llm_config = LLMConfig(
default_config = LLMConfig( provider=unified_llm.provider,
provider="openai", api_key=unified_llm.api_key,
api_key="sk-your-qwen-api-key-here", base_url=unified_llm.base_url,
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1", model=unified_llm.model,
model="qwen-turbo", temperature=unified_llm.temperature,
temperature=0.7, max_tokens=unified_llm.max_tokens
max_tokens=2000
) )
self.llm_manager = LLMManager(default_config) self.llm_manager = LLMManager(agent_llm_config)
except Exception as e:
logger.warning(f"无法从统一配置加载LLM配置使用config/llm_config.py: {e}")
try:
from config.llm_config import DEFAULT_CONFIG
self.llm_manager = LLMManager(DEFAULT_CONFIG)
except ImportError:
# 最后的fallback
default_config = LLMConfig(
provider="qwen",
api_key="",
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model="qwen-turbo",
temperature=0.7,
max_tokens=2000
)
self.llm_manager = LLMManager(default_config)
def get_agent_status(self) -> Dict[str, Any]: def get_agent_status(self) -> Dict[str, Any]:
"""获取Agent状态""" """获取Agent状态"""

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@@ -84,9 +84,9 @@ class UnifiedConfig:
self.config_dir = Path(config_dir) self.config_dir = Path(config_dir)
self.config_file = self.config_dir / "unified_config.json" self.config_file = self.config_dir / "unified_config.json"
# 默认配置 # 默认配置 - 从config/llm_config.py加载默认LLM配置
self.database = DatabaseConfig() self.database = DatabaseConfig()
self.llm = LLMConfig() self.llm = self._load_default_llm_config()
self.server = ServerConfig() self.server = ServerConfig()
self.feishu = FeishuConfig() self.feishu = FeishuConfig()
self.ai_accuracy = AIAccuracyConfig() self.ai_accuracy = AIAccuracyConfig()
@@ -95,6 +95,23 @@ class UnifiedConfig:
# 加载配置 # 加载配置
self.load_config() self.load_config()
def _load_default_llm_config(self) -> LLMConfig:
"""加载默认LLM配置"""
try:
from config.llm_config import DEFAULT_CONFIG
# 将config/llm_config.py中的配置转换为统一配置的格式
return LLMConfig(
provider=DEFAULT_CONFIG.provider,
api_key=DEFAULT_CONFIG.api_key,
base_url=DEFAULT_CONFIG.base_url,
model=DEFAULT_CONFIG.model,
temperature=DEFAULT_CONFIG.temperature,
max_tokens=DEFAULT_CONFIG.max_tokens
)
except Exception as e:
logger.warning(f"无法加载默认LLM配置使用内置默认值: {e}")
return LLMConfig()
def load_config(self): def load_config(self):
"""加载配置文件""" """加载配置文件"""
try: try:

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -288,9 +288,6 @@ class QueryOptimizer:
category_counts = Counter([wo.category for wo in workorders]) category_counts = Counter([wo.category for wo in workorders])
priority_counts = Counter([wo.priority for wo in workorders]) priority_counts = Counter([wo.priority for wo in workorders])
# 调试信息
logger.info(f"工单状态统计: {dict(status_counts)}")
logger.info(f"工单总数: {total}")
# 处理状态映射(支持中英文状态) # 处理状态映射(支持中英文状态)
status_mapping = { status_mapping = {
@@ -317,8 +314,6 @@ class QueryOptimizer:
if not mapped: if not mapped:
logger.warning(f"未映射的状态: '{status}' (数量: {count})") logger.warning(f"未映射的状态: '{status}' (数量: {count})")
# 调试信息
logger.info(f"映射后的状态统计: {mapped_counts}")
resolved_count = mapped_counts['resolved'] resolved_count = mapped_counts['resolved']
@@ -394,9 +389,6 @@ class QueryOptimizer:
if len(self.query_stats[query_name]) > 100: if len(self.query_stats[query_name]) > 100:
self.query_stats[query_name] = self.query_stats[query_name][-100:] self.query_stats[query_name] = self.query_stats[query_name][-100:]
# 记录慢查询
if query_time > self.slow_query_threshold:
logger.warning(f"慢查询检测: {query_name} 耗时 {query_time:.2f}s")
def get_query_performance_report(self) -> Dict[str, Any]: def get_query_performance_report(self) -> Dict[str, Any]:
"""获取查询性能报告""" """获取查询性能报告"""
@@ -438,7 +430,6 @@ class QueryOptimizer:
logger.warning(f"创建索引失败: {e}") logger.warning(f"创建索引失败: {e}")
session.commit() session.commit()
logger.info("数据库索引优化完成")
return True return True
except Exception as e: except Exception as e:

View File

@@ -75,7 +75,6 @@ class SystemOptimizer:
) )
self.redis_client.ping() self.redis_client.ping()
self.redis_connected = True self.redis_connected = True
logger.info("系统优化Redis连接成功")
except Exception as e: except Exception as e:
logger.debug(f"系统优化Redis连接失败: {e}") logger.debug(f"系统优化Redis连接失败: {e}")
self.redis_client = None self.redis_client = None
@@ -91,7 +90,6 @@ class SystemOptimizer:
monitor_thread = threading.Thread(target=self._monitor_system, daemon=True) monitor_thread = threading.Thread(target=self._monitor_system, daemon=True)
monitor_thread.start() monitor_thread.start()
logger.info("系统监控线程已启动")
except Exception as e: except Exception as e:
logger.error(f"启动监控线程失败: {e}") logger.error(f"启动监控线程失败: {e}")

View File

@@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-
""" """
实时对话管理器 实时对话管理器
提供实时对话功能集成知识库搜索和LLM回复 提供实时对话功能集成知识库搜索和LLM回复
@@ -109,7 +107,8 @@ class RealtimeChatManager:
assistant_response = self._generate_response( assistant_response = self._generate_response(
user_message, user_message,
knowledge_results, knowledge_results,
session["context"] session["context"],
session["work_order_id"]
) )
# 创建助手消息 # 创建助手消息
@@ -167,11 +166,14 @@ class RealtimeChatManager:
logger.error(f"搜索知识库失败: {e}") logger.error(f"搜索知识库失败: {e}")
return [] return []
def _generate_response(self, user_message: str, knowledge_results: List[Dict], context: List[Dict]) -> Dict[str, Any]: def _generate_response(self, user_message: str, knowledge_results: List[Dict], context: List[Dict], work_order_id: Optional[int] = None) -> Dict[str, Any]:
"""生成回复""" """生成回复"""
try: try:
# 检查是否有相关的工单AI建议
ai_suggestions = self._get_workorder_ai_suggestions(work_order_id)
# 构建提示词 # 构建提示词
prompt = self._build_chat_prompt(user_message, knowledge_results, context) prompt = self._build_chat_prompt(user_message, knowledge_results, context, ai_suggestions)
# 调用大模型 # 调用大模型
response = self.llm_client.chat_completion( response = self.llm_client.chat_completion(
@@ -184,24 +186,31 @@ class RealtimeChatManager:
content = response['choices'][0]['message']['content'] content = response['choices'][0]['message']['content']
confidence = self._calculate_confidence(knowledge_results, content) confidence = self._calculate_confidence(knowledge_results, content)
# 如果有AI建议在回复中包含
if ai_suggestions:
content = self._format_response_with_ai_suggestions(content, ai_suggestions)
return { return {
"content": content, "content": content,
"confidence": confidence "confidence": confidence,
"ai_suggestions": ai_suggestions
} }
else: else:
return { return {
"content": "抱歉,我暂时无法处理您的问题。请稍后再试或联系人工客服。", "content": "抱歉,我暂时无法处理您的问题。请稍后再试或联系人工客服。",
"confidence": 0.1 "confidence": 0.1,
"ai_suggestions": ai_suggestions
} }
except Exception as e: except Exception as e:
logger.error(f"生成回复失败: {e}") logger.error(f"生成回复失败: {e}")
return { return {
"content": "抱歉,系统出现错误,请稍后再试。", "content": "抱歉,系统出现错误,请稍后再试。",
"confidence": 0.1 "confidence": 0.1,
"ai_suggestions": []
} }
def _build_chat_prompt(self, user_message: str, knowledge_results: List[Dict], context: List[Dict]) -> str: def _build_chat_prompt(self, user_message: str, knowledge_results: List[Dict], context: List[Dict], ai_suggestions: List[str] = None) -> str:
"""构建聊天提示词""" """构建聊天提示词"""
prompt = f""" prompt = f"""
你是一个专业的奇瑞汽车客服助手。请根据用户的问题和提供的知识库信息,给出专业、友好的回复。 你是一个专业的奇瑞汽车客服助手。请根据用户的问题和提供的知识库信息,给出专业、友好的回复。
@@ -219,6 +228,12 @@ class RealtimeChatManager:
else: else:
prompt += "\n未找到相关知识库信息。\n" prompt += "\n未找到相关知识库信息。\n"
# 添加AI建议信息
if ai_suggestions:
prompt += "\n相关AI建议\n"
for suggestion in ai_suggestions:
prompt += f"- {suggestion}\n"
# 添加上下文 # 添加上下文
if context: if context:
prompt += "\n对话历史:\n" prompt += "\n对话历史:\n"
@@ -233,13 +248,72 @@ class RealtimeChatManager:
4. 如果问题需要进站处理,请明确说明 4. 如果问题需要进站处理,请明确说明
5. 回复要简洁明了,避免冗长 5. 回复要简洁明了,避免冗长
6. 如果涉及技术问题,要提供具体的操作步骤 6. 如果涉及技术问题,要提供具体的操作步骤
7. 始终以"您好"开头,以"如有其他问题,请随时联系"结尾
请直接给出回复内容,不要包含其他格式: 请直接给出回复内容,不要包含其他格式:
""" """
return prompt return prompt
def _get_workorder_ai_suggestions(self, work_order_id: Optional[int]) -> List[str]:
"""
获取工单的AI建议
Args:
work_order_id: 工单ID
Returns:
AI建议列表
"""
try:
if not work_order_id:
return []
with db_manager.get_session() as session:
# 查询工单的AI建议
from ..core.models import WorkOrderSuggestion
suggestions = session.query(WorkOrderSuggestion).filter(
WorkOrderSuggestion.work_order_id == work_order_id
).order_by(WorkOrderSuggestion.created_at.desc()).limit(3).all()
ai_suggestions = []
for suggestion in suggestions:
if suggestion.ai_suggestion:
ai_suggestions.append(suggestion.ai_suggestion)
return ai_suggestions
except Exception as e:
logger.error(f"获取工单AI建议失败: {e}")
return []
def _format_response_with_ai_suggestions(self, content: str, ai_suggestions: List[str]) -> str:
"""
在回复中格式化AI建议
Args:
content: 原始回复内容
ai_suggestions: AI建议列表
Returns:
包含AI建议的格式化回复
"""
try:
if not ai_suggestions:
return content
# 在回复末尾添加AI建议
formatted_content = content
formatted_content += "\n\n📋 **相关AI建议**\n"
for i, suggestion in enumerate(ai_suggestions, 1):
formatted_content += f"{i}. {suggestion}\n"
return formatted_content
except Exception as e:
logger.error(f"格式化AI建议失败: {e}")
return content
def _extract_vin(self, text: str) -> Optional[str]: def _extract_vin(self, text: str) -> Optional[str]:
"""从文本中提取VIN17位I/O/Q不使用常见校验""" """从文本中提取VIN17位I/O/Q不使用常见校验"""
try: try:

Binary file not shown.

View File

@@ -8,7 +8,7 @@ import logging
from typing import Dict, List, Optional, Any from typing import Dict, List, Optional, Any
from src.knowledge_base.knowledge_manager import KnowledgeManager from src.knowledge_base.knowledge_manager import KnowledgeManager
from src.vehicle.vehicle_data_manager import VehicleDataManager from src.vehicle.vehicle_data_manager import VehicleDataManager
from src.agent.llm_client import LLMManager, LLMConfig from src.config.unified_config import get_config # 使用统一配置管理器
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -19,94 +19,298 @@ class AISuggestionService:
self.knowledge_manager = KnowledgeManager() self.knowledge_manager = KnowledgeManager()
self.vehicle_manager = VehicleDataManager() self.vehicle_manager = VehicleDataManager()
# 初始化LLM客户端 # 从统一配置管理器获取LLM配置统一配置会自动从config/llm_config.py加载
try: self.llm_config = get_config().llm
llm_config = LLMConfig( logger.info(f"使用LLM配置: {self.llm_config.provider} - {self.llm_config.model}")
provider="openai",
api_key="your-api-key", # 这里需要从配置文件读取
model="gpt-3.5-turbo",
temperature=0.7,
max_tokens=1000
)
self.llm_manager = LLMManager(llm_config)
except Exception as e:
logger.warning(f"LLM客户端初始化失败: {e}")
self.llm_manager = None
def generate_suggestion(self, tr_description: str, vin: Optional[str] = None) -> str: def generate_suggestion(self, tr_description: str, process_history: Optional[str] = None, vin: Optional[str] = None) -> str:
""" """
生成AI建议 生成AI建议 - 参考处理过程记录生成建议
Args: Args:
tr_description: TR描述 tr_description: TR描述
process_history: 处理过程记录(可选,用于了解当前问题状态)
vin: 车架号(可选) vin: 车架号(可选)
Returns: Returns:
AI建议文本 AI建议文本
""" """
try: try:
# 1. 从知识库搜索相关信息 # 调用实时对话接口生成建议
knowledge_results = self.knowledge_manager.search_knowledge( from ..dialogue.realtime_chat import RealtimeChatManager
query=tr_description,
top_k=5
)
# 2. 如果有VIN查询车辆信息 chat_manager = RealtimeChatManager()
vehicle_info = ""
if vin:
try:
vehicle_data = self.vehicle_manager.get_latest_vehicle_data_by_vin(vin)
if vehicle_data:
vehicle_info = f"车辆信息:{vehicle_data.get('model', '未知车型')},里程:{vehicle_data.get('mileage', '未知')}km"
except Exception as e:
logger.warning(f"查询车辆信息失败: {e}")
# 3. 构建提示词 # 构建上下文信息
context_parts = [] context_info = ""
if process_history and process_history.strip():
context_info = f"""
已处理的步骤:
{process_history}"""
# 添加知识库信息 # 构建用户消息 - 要求生成简洁的简短建议
if knowledge_results: user_message = f"""请为以下问题提供精炼的技术支持操作建议:
knowledge_text = "\n".join([
f"- {item.get('question', '')}: {item.get('answer', '')}"
for item in knowledge_results
])
context_parts.append(f"相关知识库信息:\n{knowledge_text}")
# 添加车辆信息
if vehicle_info:
context_parts.append(vehicle_info)
context = "\n\n".join(context_parts) if context_parts else "无相关背景信息"
# 4. 生成AI建议
prompt = f"""
作为技术支持专家,请基于以下问题描述为工单提供专业的处理建议:
问题描述:{tr_description} 格式要求:
1. 用逗号连接,一句话表达,不要用序号或分行
2. 现状+步骤,语言精炼
3. 总长度控制在150字以内
相关背景信息 根据问题复杂程度选择结尾
{context} - 简单问题:给出具体操作步骤即可,不需要提日志分析
- 复杂问题:如远程操作无法解决,结尾才使用"建议邀请用户进站抓取日志分析"
请提供: 问题描述:{tr_description}{context_info}"""
1. 问题分析
2. 建议的解决步骤
3. 注意事项
4. 如果问题无法解决,建议的后续行动
请用中文回答,简洁明了。
"""
if self.llm_manager: # 创建会话
import asyncio session_id = chat_manager.create_session("ai_suggestion_service")
response = asyncio.run(self.llm_manager.generate(prompt))
return response # 调用实时对话接口
response = chat_manager.process_message(session_id, user_message)
if response and "content" in response:
content = response["content"]
# 记录原始内容用于调试
logger.info(f"AI生成原始内容: {content[:100]}...")
# 二次处理:替换默认建议(在清理前先替换)
content = self._post_process_suggestion(content)
# 清理并限制长度
cleaned = self._clean_response(content)
# 再次检查,确保替换生效
cleaned = self._post_process_suggestion(cleaned)
# 记录清理后的内容
logger.info(f"AI建议清理后: {cleaned[:100]}...")
return cleaned
else: else:
return "AI建议生成失败LLM客户端未初始化。" logger.error(f"AI建议生成失败response内容: {response}")
return "AI建议生成失败无法获取有效响应。"
except Exception as e: except Exception as e:
logger.error(f"生成AI建议失败: {e}") logger.error(f"生成AI建议失败: {e}")
return f"AI建议生成失败{str(e)}" return f"AI建议生成失败{str(e)}"
def _clean_response(self, content: str) -> str:
"""
清理AI建议内容使其简洁
Args:
content: 原始内容
Returns:
清理后的简洁内容(只做基本清理,保留原意)
"""
if not content or not content.strip():
return ""
# 移除多余的格式和提示词
cleaned = content.strip()
# 如果内容很短,直接返回
if len(cleaned) < 10:
return cleaned
# 移除常见的提示词开头(只移除一个)
prefixes = ["建议您", "您可以", "请尝试", "建议先", "建议"]
for prefix in prefixes:
if cleaned.startswith(prefix):
# 移除前缀,保留后面的内容
cleaned = cleaned[len(prefix):].strip()
if cleaned.startswith(('', '', '', ':')):
cleaned = cleaned[1:].strip()
break # 只处理第一个匹配的前缀
# 处理多行内容:只取第一段有效内容
lines = cleaned.split('\n')
filtered_lines = []
for line in lines:
line = line.strip()
# 跳过空行
if not line:
continue
# 跳过明显的提示词行
if any(p in line for p in ["请按照", "要求", "示例", "问题描述", "相关背景"]):
continue
# 检查是否以序号开头(如"1.", "一、", "1017:"等)
if len(line) > 2 and line[0].isdigit() and line[1] in ['.', '', '', ':']:
# 提取序号后的内容
for sep in ['. ', '', '', ':']:
if sep in line:
content_part = line.split(sep, 1)[1].strip()
if content_part and len(content_part) > 5: # 确保内容有意义
filtered_lines.append(content_part)
break
else:
filtered_lines.append(line)
# 如果已经有有效内容,停止处理
if filtered_lines:
break
# 合并内容
if filtered_lines:
cleaned = filtered_lines[0] #移除逗号分隔,只取第一段
else:
# 如果没有找到有效行,使用原来的第一行
cleaned = lines[0].strip() if lines and lines[0].strip() else cleaned
# 限制长度在150字以内确保精炼
if len(cleaned) > 150:
# 尝试在标点符号处截断
truncated = cleaned[:150]
for punct in ['', '', '', '.', ';', ',']:
pos = truncated.rfind(punct)
if pos > 100: # 在100字之后找到标点保留更多内容
cleaned = truncated[:pos + 1]
break
else:
cleaned = truncated
return cleaned
def _post_process_suggestion(self, content: str) -> str:
"""
二次处理建议内容:替换默认建议文案
Args:
content: 清理后的内容
Returns:
处理后的内容
"""
if not content or not content.strip():
return content
# 替换各种形式的"联系售后技术支持"为"邀请用户进站抓取日志分析"
replacements = [
("建议联系售后技术支持进一步排查", "建议邀请用户进站抓取日志分析"),
("联系售后技术支持进行进一步排查", "邀请用户进站抓取日志分析"),
("建议联系售后技术支持", "建议邀请用户进站抓取日志分析"),
("联系售后技术支持", "邀请用户进站抓取日志分析"),
("如问题仍未解决,建议联系售后技术支持进行进一步排查", "如问题仍未解决,建议邀请用户进站抓取日志分析"),
("若仍无效,建议联系售后技术支持进一步排查", "若仍无效,建议邀请用户进站抓取日志分析"),
("仍无效,建议联系售后技术支持", "仍无效,建议邀请用户进站抓取日志分析"),
]
result = content
for old_text, new_text in replacements:
if old_text in result:
result = result.replace(old_text, new_text)
logger.info(f"✓ 替换建议文案: '{old_text}' -> '{new_text}'")
# 如果没有任何替换,记录一下
if result == content:
logger.info(f"未找到需要替换的内容: {content[:100] if len(content) > 100 else content}")
return result
def _clean_and_validate_response(self, content: str) -> str:
    """Aggressively strip prompt echoes, courtesy filler and list markup
    from a raw LLM response, keeping only actionable instruction text.

    NOTE(review): this looks like a legacy cleaner superseded by
    _clean_response — confirm it still has callers before relying on it.

    Args:
        content: Raw response text from the model.

    Returns:
        The condensed suggestion, or the fixed fallback
        "建议邀请用户进站抓取日志分析" when fewer than 10 characters
        survive cleaning; on any unexpected error the original content is
        returned unchanged.
    """
    try:
        # Normalize surrounding whitespace before pattern stripping.
        cleaned = content.strip()
        # Literal fragments (prompt echoes, greetings, numbering) removed
        # wherever they occur in the text — not only at line starts.
        # NOTE(review): the two empty-string entries near the end look like
        # characters lost to an encoding bug (str.replace("", "") is a
        # no-op) — confirm the intended literals against history.
        prompt_patterns = [
            "作为技术支持专家",
            "请基于以下问题描述",
            "为工单提供专业的处理建议",
            "请提供:",
            "1. 问题分析",
            "2. 建议的解决步骤",
            "3. 注意事项",
            "4. 如果问题无法解决",
            "请用中文回答,简洁明了",
            "模拟LLM响应:",
            "问题描述:",
            "相关背景信息:",
            "无相关背景信息",
            "您好",
            "感谢您反馈问题",
            "关于您反馈的",
            "建议您先尝试以下操作:",
            "建议您",
            "您可以",
            "请尝试",
            "建议先",
            "建议",
            "操作:",
            "步骤:",
            "1.",
            "2.",
            "3.",
            "4.",
            "5.",
            "关于",
            "情况",
            "问题",
            "无法正常使用",
            "",
            "",
            "。。。。。。"
        ]
        for pattern in prompt_patterns:
            cleaned = cleaned.replace(pattern, "").strip()
        # Remove leftover doubled full stops.
        # NOTE(review): the first replace target is an empty string here
        # too — likely another mojibake casualty; confirm.
        cleaned = cleaned.replace("", "").replace("。。", "").strip()
        # Keep only lines that look like concrete actions: long enough,
        # free of courtesy phrases, and containing an action verb.
        lines = cleaned.split('\n')
        cleaned_lines = []
        for line in lines:
            line = line.strip()
            # Skip blank lines.
            if not line:
                continue
            # Strip list numbering; note this deletes every occurrence of
            # "1."…"9." anywhere in the line, not just a leading marker,
            # so version strings like "v1.2" would also be mangled.
            line = line.replace('1.', '').replace('2.', '').replace('3.', '').replace('4.', '').replace('5.', '').replace('6.', '').replace('7.', '').replace('8.', '').replace('9.', '').strip()
            # Keep the line only when it is substantive (>10 chars), not a
            # courtesy phrase, and mentions a concrete operation keyword.
            if line and len(line) > 10 and not any(courtesy in line for courtesy in ['您好', '感谢', '关于', '情况', '问题', '无法正常使用']):
                if any(keyword in line for keyword in ['检查', '确保', '重启', '尝试', '联系', '升级', '恢复', '设置', '配置', '确认', '观察', '重装']):
                    cleaned_lines.append(line)
        # Reassemble the surviving action lines.
        # NOTE(review): joined with the empty string — lines are
        # concatenated with no separator; a separator character may have
        # been lost to the same encoding bug. Confirm.
        if cleaned_lines:
            cleaned = ''.join(cleaned_lines)
        else:
            cleaned = cleaned.strip()
        # Final tidy-up of stray punctuation and spacing.
        cleaned = cleaned.replace("", "").replace("。。", "").replace(" ", " ").strip()
        # Too little text left — fall back to the standard suggestion.
        if len(cleaned) < 10:
            return "建议邀请用户进站抓取日志分析"
        return cleaned
    except Exception as e:
        # Best-effort cleaner: on failure, log and return the input as-is.
        logger.error(f"清理响应内容失败: {e}")
        return content
def batch_generate_suggestions(self, records: List[Dict[str, Any]], limit: int = 10) -> List[Dict[str, Any]]: def batch_generate_suggestions(self, records: List[Dict[str, Any]], limit: int = 10) -> List[Dict[str, Any]]:
""" """
批量生成AI建议 批量生成AI建议
@@ -118,29 +322,112 @@ class AISuggestionService:
Returns: Returns:
处理后的记录列表 处理后的记录列表
""" """
from datetime import datetime
processed_records = [] processed_records = []
now = datetime.now()
time_str = now.strftime("%m%d") # MMDD格式
for i, record in enumerate(records[:limit]): for i, record in enumerate(records[:limit]):
try: try:
tr_description = record.get("fields", {}).get("TR Description", "") fields = record.get("fields", {})
tr_description = fields.get("TR Description", "")
process_history = fields.get("处理过程", "") # 获取处理过程记录
existing_ai_suggestion = fields.get("AI建议", "") # 获取现有AI建议
vin = self._extract_vin_from_description(tr_description) vin = self._extract_vin_from_description(tr_description)
# 调试日志
logger.info(f"记录 {record.get('record_id', i)} - 现有AI建议长度: {len(existing_ai_suggestion) if existing_ai_suggestion else 0}")
if existing_ai_suggestion:
logger.info(f"记录 {record.get('record_id', i)} - 现有AI建议前100字符: {existing_ai_suggestion[:100]}")
if tr_description: if tr_description:
ai_suggestion = self.generate_suggestion(tr_description, vin) ai_suggestion = self.generate_suggestion(tr_description, process_history, vin)
record["ai_suggestion"] = ai_suggestion # 处理同一天多次更新的情况
logger.info(f"为记录 {record.get('record_id', i)} 生成AI建议") new_suggestion = self._format_ai_suggestion_with_numbering(
time_str, ai_suggestion, existing_ai_suggestion
)
record["ai_suggestion"] = new_suggestion
logger.info(f"为记录 {record.get('record_id', i)} 生成AI建议新建议长度: {len(new_suggestion)}")
else: else:
record["ai_suggestion"] = "无TR描述无法生成建议" record["ai_suggestion"] = f"{time_str}无TR描述无法生成建议"
processed_records.append(record) processed_records.append(record)
except Exception as e: except Exception as e:
logger.error(f"处理记录 {record.get('record_id', i)} 失败: {e}") logger.error(f"处理记录 {record.get('record_id', i)} 失败: {e}")
record["ai_suggestion"] = f"处理失败:{str(e)}" record["ai_suggestion"] = f"{time_str}处理失败:{str(e)}"
processed_records.append(record) processed_records.append(record)
return processed_records return processed_records
def _format_ai_suggestion_with_numbering(self, time_str: str, new_suggestion: str, existing_ai_suggestion: str) -> str:
"""
格式化AI建议支持同一天多次更新的编号
Args:
time_str: 时间字符串MMDD格式
new_suggestion: 新的建议内容
existing_ai_suggestion: 现有的AI建议
Returns:
格式化后的AI建议
"""
logger.info(f"_format_ai_suggestion_with_numbering 调用 - time_str={time_str}, existing长度={len(existing_ai_suggestion) if existing_ai_suggestion else 0}")
if not existing_ai_suggestion or not existing_ai_suggestion.strip():
# 如果没有现有建议,直接返回带时间戳的第一条
logger.info(f"没有现有建议,返回: {time_str}{new_suggestion[:50]}...")
return f"{time_str}{new_suggestion}"
# 检查是否已经有今天的时间戳
if time_str not in existing_ai_suggestion:
# 如果是新的一天,将新建议放在最前面
return f"{time_str}{new_suggestion}\n{existing_ai_suggestion}"
# 如果是同一天,需要找到最大的编号
lines = existing_ai_suggestion.split('\n')
max_number = 0
today_lines = []
other_lines = []
# 分离今天的记录和其他天的记录
for line in lines:
line = line.strip()
if not line:
continue
# 检查是否是今天的记录
if line.startswith(time_str):
today_lines.append(line)
# 查找当前日期后的编号格式1017-1, 1017-2等
if f"{time_str}-" in line:
try:
# 提取编号1017-1... -> 1
number_part = line.split(f"{time_str}-", 1)[1].split('', 1)[0]
number = int(number_part)
if number > max_number:
max_number = number
except (ValueError, IndexError):
pass
else:
other_lines.append(line)
# 生成带编号的新建议
new_number = max_number + 1
new_line = f"{time_str}-{new_number}{new_suggestion}"
# 将新建议放在同一天记录的最前面,与其他天的记录组合
today_lines.insert(0, new_line)
today_text = '\n'.join(today_lines)
# 组合:今天的记录(最新在前) + 其他天的记录
if other_lines:
other_text = '\n'.join(other_lines)
return f"{today_text}\n{other_text}"
else:
return today_text
def _extract_vin_from_description(self, description: str) -> Optional[str]: def _extract_vin_from_description(self, description: str) -> Optional[str]:
""" """
从描述中提取VIN 从描述中提取VIN

View File

@@ -13,6 +13,7 @@ from src.integrations.ai_suggestion_service import AISuggestionService
from src.integrations.flexible_field_mapper import FlexibleFieldMapper from src.integrations.flexible_field_mapper import FlexibleFieldMapper
from src.core.database import db_manager from src.core.database import db_manager
from src.core.models import WorkOrder from src.core.models import WorkOrder
# 工单状态和优先级枚举 # 工单状态和优先级枚举
class WorkOrderStatus: class WorkOrderStatus:
PENDING = "pending" PENDING = "pending"
@@ -51,48 +52,48 @@ class WorkOrderSyncService:
# 保留原有的字段映射作为默认配置(向后兼容) # 保留原有的字段映射作为默认配置(向后兼容)
self.field_mapping = { self.field_mapping = {
# 核心字段 # 核心字段
"TR Number": "order_id", # TR编号映射到工单号 "TR Number": "order_id",
"TR Description": "description", # TR描述作为详细描述 "TR Description": "description",
"Type of problem": "category", # 问题类型作为分类 "Type of problem": "category",
"TR Level": "priority", # TR Level作为优先级 "TR Level": "priority",
"TR Status": "status", # TR Status作为状态 "TR Status": "status",
"Source": "source", # 来源信息Mail, Telegram bot等 "Source": "source",
"Date creation": "created_at", # 创建日期 "Date creation": "created_at",
"处理过程": "solution", # 处理过程作为解决方案 "处理过程": "solution",
"TR tracking": "resolution", # TR跟踪作为解决方案详情 "TR tracking": "resolution",
# 扩展字段 # 扩展字段
"Created by": "created_by", # 创建人 "Created by": "created_by",
"Module模块": "module", # 模块 "Module模块": "module",
"Wilfulness责任人": "wilfulness", # 责任人 "Wilfulness责任人": "wilfulness",
"Date of close TR": "date_of_close", # 关闭日期 "Date of close TR": "date_of_close",
"Vehicle Type01": "vehicle_type", # 车型 "Vehicle Type01": "vehicle_type",
"VIN|sim": "vin_sim", # 车架号/SIM "VIN|sim": "vin_sim",
"App remote control version": "app_remote_control_version", # 应用远程控制版本 "App remote control version": "app_remote_control_version",
"HMI SW": "hmi_sw", # HMI软件版本 "HMI SW": "hmi_sw",
"父记录": "parent_record", # 父记录 "父记录": "parent_record",
"Has it been updated on the same day": "has_updated_same_day", # 是否同日更新 "Has it been updated on the same day": "has_updated_same_day",
"Operating time": "operating_time", # 操作时间 "Operating time": "operating_time",
# AI建议字段 # AI建议字段
"AI建议": "ai_suggestion", # AI建议字段 "AI建议": "ai_suggestion",
"Issue Start Time": "updated_at" # 问题开始时间作为更新时间 "Issue Start Time": "updated_at"
} }
# 将原有映射添加到灵活映射器中 # 将原有映射添加到灵活映射器中
self._init_flexible_mapper() self._init_flexible_mapper()
# 状态映射 - 根据飞书表格中的实际值 # 状态映射
self.status_mapping = { self.status_mapping = {
"close": WorkOrderStatus.CLOSED, # 已关闭 "close": WorkOrderStatus.CLOSED,
"temporary close": WorkOrderStatus.IN_PROGRESS, # 临时关闭对应处理中 "temporary close": WorkOrderStatus.IN_PROGRESS,
"OTA": WorkOrderStatus.IN_PROGRESS, # OTA状态对应处理中 "OTA": WorkOrderStatus.IN_PROGRESS,
"open": WorkOrderStatus.PENDING, # 开放状态对应待处理 "open": WorkOrderStatus.PENDING,
"pending": WorkOrderStatus.PENDING, # 待处理 "pending": WorkOrderStatus.PENDING,
"completed": WorkOrderStatus.COMPLETED # 已完成 "completed": WorkOrderStatus.COMPLETED
} }
# 优先级映射 - 根据飞书表格中的实际值 # 优先级映射
self.priority_mapping = { self.priority_mapping = {
"Low": WorkOrderPriority.LOW, "Low": WorkOrderPriority.LOW,
"Medium": WorkOrderPriority.MEDIUM, "Medium": WorkOrderPriority.MEDIUM,
@@ -106,59 +107,26 @@ class WorkOrderSyncService:
self.field_mapper.add_field_mapping(feishu_field, local_field) self.field_mapper.add_field_mapping(feishu_field, local_field)
def get_field_discovery_report(self, feishu_fields: Dict[str, Any]) -> Dict[str, Any]: def get_field_discovery_report(self, feishu_fields: Dict[str, Any]) -> Dict[str, Any]:
""" """获取字段发现报告"""
获取字段发现报告
Args:
feishu_fields: 飞书字段数据
Returns:
字段发现报告
"""
return self.field_mapper.discover_fields(feishu_fields) return self.field_mapper.discover_fields(feishu_fields)
def add_field_mapping(self, feishu_field: str, local_field: str, def add_field_mapping(self, feishu_field: str, local_field: str,
aliases: List[str] = None, patterns: List[str] = None, aliases: List[str] = None, patterns: List[str] = None,
priority: int = 3) -> bool: priority: int = 3) -> bool:
""" """添加字段映射"""
添加字段映射
Args:
feishu_field: 飞书字段名
local_field: 本地字段名
aliases: 别名列表
patterns: 模式列表
priority: 优先级
Returns:
是否添加成功
"""
return self.field_mapper.add_field_mapping(feishu_field, local_field, aliases, patterns, priority) return self.field_mapper.add_field_mapping(feishu_field, local_field, aliases, patterns, priority)
def remove_field_mapping(self, feishu_field: str) -> bool: def remove_field_mapping(self, feishu_field: str) -> bool:
""" """移除字段映射"""
移除字段映射
Args:
feishu_field: 飞书字段名
Returns:
是否移除成功
"""
return self.field_mapper.remove_field_mapping(feishu_field) return self.field_mapper.remove_field_mapping(feishu_field)
def get_mapping_status(self) -> Dict[str, Any]: def get_mapping_status(self) -> Dict[str, Any]:
""" """获取映射状态"""
获取映射状态
Returns:
映射状态信息
"""
return self.field_mapper.get_mapping_status() return self.field_mapper.get_mapping_status()
def sync_from_feishu(self, generate_ai_suggestions: bool = True, limit: int = 10) -> Dict[str, Any]: def sync_from_feishu(self, generate_ai_suggestions: bool = True, limit: int = 10) -> Dict[str, Any]:
""" """
从飞书同步数据到本地系统 从飞书同步工单数据到本地系统
Args: Args:
generate_ai_suggestions: 是否生成AI建议 generate_ai_suggestions: 是否生成AI建议
@@ -170,18 +138,26 @@ class WorkOrderSyncService:
try: try:
logger.info("开始从飞书同步工单数据...") logger.info("开始从飞书同步工单数据...")
# 获取飞书表格记录(限制数量) # 获取飞书表格记录
records = self.feishu_client.get_table_records(self.app_token, self.table_id, page_size=limit) records = self.feishu_client.get_table_records(self.app_token, self.table_id, page_size=limit)
if records.get("code") != 0: if records.get("code") != 0:
raise Exception(f"获取飞书记录失败: {records.get('msg', '未知错误')}") raise Exception(f"获取飞书记录失败: {records.get('msg', '未知错误')}")
items = records.get("data", {}).get("items", []) items = records.get("data", {}).get("items", [])
logger.info(f"从飞书获取 {len(items)} 条记录") logger.info(f"从飞书获取 {len(items)} 条记录")
# 生成AI建议 # 生成AI建议
if generate_ai_suggestions: if generate_ai_suggestions:
logger.info("开始生成AI建议...") logger.info("开始生成AI建议...")
# 调试:记录第一条记录的结构
if items and len(items) > 0:
logger.info(f"第一条记录结构示例: record_id={items[0].get('record_id')}, 有fields字段={('fields' in items[0])}")
if 'fields' in items[0]:
logger.info(f"第一条记录的fields示例: {list(items[0]['fields'].keys())[:5]}")
logger.info(f"第一条记录的AI建议字段内容: {items[0].get('fields', {}).get('AI建议', '')[:50] if items[0].get('fields', {}).get('AI建议') else ''}")
items = self.ai_service.batch_generate_suggestions(items, limit) items = self.ai_service.batch_generate_suggestions(items, limit)
# 将AI建议更新回飞书表格 # 将AI建议更新回飞书表格
@@ -215,7 +191,6 @@ class WorkOrderSyncService:
WorkOrder.feishu_record_id == feishu_id WorkOrder.feishu_record_id == feishu_id
).first() ).first()
# 转换为本地工单格式
workorder_data = self._convert_feishu_to_local(parsed_fields) workorder_data = self._convert_feishu_to_local(parsed_fields)
workorder_data["feishu_record_id"] = feishu_id workorder_data["feishu_record_id"] = feishu_id
@@ -264,37 +239,25 @@ class WorkOrderSyncService:
} }
def sync_to_feishu(self, workorder_id: int) -> Dict[str, Any]: def sync_to_feishu(self, workorder_id: int) -> Dict[str, Any]:
""" """将本地工单同步到飞书"""
将本地工单同步到飞书
Args:
workorder_id: 工单ID
Returns:
同步结果
"""
try: try:
with db_manager.get_session() as session: with db_manager.get_session() as session:
workorder = session.query(WorkOrder).filter(WorkOrder.id == workorder_id).first() workorder = session.query(WorkOrder).filter(WorkOrder.id == workorder_id).first()
if not workorder: if not workorder:
return {"success": False, "error": "工单不存在"} return {"success": False, "error": "工单不存在"}
# 转换为飞书格式
feishu_fields = self._convert_local_to_feishu(workorder) feishu_fields = self._convert_local_to_feishu(workorder)
if workorder.feishu_record_id: if workorder.feishu_record_id:
# 更新飞书记录
result = self.feishu_client.update_table_record( result = self.feishu_client.update_table_record(
self.app_token, self.table_id, workorder.feishu_record_id, feishu_fields self.app_token, self.table_id, workorder.feishu_record_id, feishu_fields
) )
else: else:
# 创建新飞书记录
result = self.feishu_client.create_table_record( result = self.feishu_client.create_table_record(
self.app_token, self.table_id, feishu_fields self.app_token, self.table_id, feishu_fields
) )
if result.get("code") == 0: if result.get("code") == 0:
# 保存飞书记录ID到本地
workorder.feishu_record_id = result["data"]["record"]["record_id"] workorder.feishu_record_id = result["data"]["record"]["record_id"]
session.commit() session.commit()
@@ -308,19 +271,10 @@ class WorkOrderSyncService:
return {"success": False, "error": str(e)} return {"success": False, "error": str(e)}
def create_workorder_from_feishu_record(self, record_id: str) -> Dict[str, Any]: def create_workorder_from_feishu_record(self, record_id: str) -> Dict[str, Any]:
""" """从飞书单条记录创建工单"""
从飞书单条记录创建工单
Args:
record_id: 飞书记录ID
Returns:
创建结果
"""
try: try:
logger.info(f"从飞书记录 {record_id} 创建工单") logger.info(f"从飞书记录 {record_id} 创建工单")
# 获取单条飞书记录
feishu_data = self.feishu_client.get_table_record( feishu_data = self.feishu_client.get_table_record(
self.app_token, self.app_token,
self.table_id, self.table_id,
@@ -341,12 +295,9 @@ class WorkOrderSyncService:
} }
fields = record.get("fields", {}) fields = record.get("fields", {})
# 转换为本地工单格式
local_data = self._convert_feishu_to_local(fields) local_data = self._convert_feishu_to_local(fields)
local_data["feishu_record_id"] = record_id local_data["feishu_record_id"] = record_id
# 检查是否已存在
existing_workorder = self._find_existing_workorder(record_id) existing_workorder = self._find_existing_workorder(record_id)
if existing_workorder: if existing_workorder:
@@ -355,7 +306,6 @@ class WorkOrderSyncService:
"message": f"工单已存在: {existing_workorder.order_id}" "message": f"工单已存在: {existing_workorder.order_id}"
} }
# 创建新工单
workorder = self._create_workorder(local_data) workorder = self._create_workorder(local_data)
return { return {
@@ -453,15 +403,12 @@ class WorkOrderSyncService:
"""将飞书字段转换为本地工单字段""" """将飞书字段转换为本地工单字段"""
logger.info(f"开始转换飞书字段: {feishu_fields}") logger.info(f"开始转换飞书字段: {feishu_fields}")
# 使用灵活映射器进行字段转换
local_data, conversion_stats = self.field_mapper.convert_fields(feishu_fields) local_data, conversion_stats = self.field_mapper.convert_fields(feishu_fields)
# 记录转换统计信息
logger.info(f"字段转换统计: 总字段 {conversion_stats['total_fields']}, " logger.info(f"字段转换统计: 总字段 {conversion_stats['total_fields']}, "
f"已映射 {conversion_stats['mapped_fields']}, " f"已映射 {conversion_stats['mapped_fields']}, "
f"未映射 {len(conversion_stats['unmapped_fields'])}") f"未映射 {len(conversion_stats['unmapped_fields'])}")
# 如果有未映射的字段,记录详细信息
if conversion_stats['unmapped_fields']: if conversion_stats['unmapped_fields']:
logger.warning(f"未映射字段: {conversion_stats['unmapped_fields']}") logger.warning(f"未映射字段: {conversion_stats['unmapped_fields']}")
for field in conversion_stats['unmapped_fields']: for field in conversion_stats['unmapped_fields']:
@@ -477,18 +424,15 @@ class WorkOrderSyncService:
local_data[local_field] = self.priority_mapping[value] local_data[local_field] = self.priority_mapping[value]
elif local_field in ["created_at", "updated_at", "date_of_close"] and value: elif local_field in ["created_at", "updated_at", "date_of_close"] and value:
try: try:
# 处理飞书时间戳(毫秒)
if isinstance(value, (int, float)): if isinstance(value, (int, float)):
# 飞书时间戳是毫秒,需要转换为秒
local_data[local_field] = datetime.fromtimestamp(value / 1000) local_data[local_field] = datetime.fromtimestamp(value / 1000)
else: else:
# 处理ISO格式时间字符串
local_data[local_field] = datetime.fromisoformat(value.replace('Z', '+00:00')) local_data[local_field] = datetime.fromisoformat(value.replace('Z', '+00:00'))
except Exception as e: except Exception as e:
logger.warning(f"时间字段转换失败: {e}, 使用当前时间") logger.warning(f"时间字段转换失败: {e}, 使用当前时间")
local_data[local_field] = datetime.now() local_data[local_field] = datetime.now()
# 生成标题 - 使用TR Number和问题类型 # 生成标题
tr_number = feishu_fields.get("TR Number", "") tr_number = feishu_fields.get("TR Number", "")
problem_type = feishu_fields.get("Type of problem", "") problem_type = feishu_fields.get("Type of problem", "")
if tr_number and problem_type: if tr_number and problem_type:
@@ -504,7 +448,7 @@ class WorkOrderSyncService:
if "priority" not in local_data: if "priority" not in local_data:
local_data["priority"] = WorkOrderPriority.MEDIUM local_data["priority"] = WorkOrderPriority.MEDIUM
if "category" not in local_data: if "category" not in local_data:
local_data["category"] = "Remote control" # 根据表格中最常见的问题类型 local_data["category"] = "Remote control"
return local_data return local_data
@@ -512,19 +456,15 @@ class WorkOrderSyncService:
"""将本地工单字段转换为飞书字段""" """将本地工单字段转换为飞书字段"""
feishu_fields = {} feishu_fields = {}
# 反向映射
reverse_mapping = {v: k for k, v in self.field_mapping.items()} reverse_mapping = {v: k for k, v in self.field_mapping.items()}
for local_field, feishu_field in reverse_mapping.items(): for local_field, feishu_field in reverse_mapping.items():
value = getattr(workorder, local_field, None) value = getattr(workorder, local_field, None)
if value is not None: if value is not None:
# 特殊字段处理
if local_field == "status": if local_field == "status":
# 反向状态映射
reverse_status = {v: k for k, v in self.status_mapping.items()} reverse_status = {v: k for k, v in self.status_mapping.items()}
value = reverse_status.get(value, str(value)) value = reverse_status.get(value, str(value))
elif local_field == "priority": elif local_field == "priority":
# 反向优先级映射
reverse_priority = {v: k for k, v in self.priority_mapping.items()} reverse_priority = {v: k for k, v in self.priority_mapping.items()}
value = reverse_priority.get(value, str(value)) value = reverse_priority.get(value, str(value))
elif local_field in ["created_at", "updated_at"] and isinstance(value, datetime): elif local_field in ["created_at", "updated_at"] and isinstance(value, datetime):

View File

@@ -39,8 +39,7 @@ class TSPAssistant:
self.token_monitor = TokenMonitor() self.token_monitor = TokenMonitor()
self.ai_success_monitor = AISuccessMonitor() self.ai_success_monitor = AISuccessMonitor()
self.system_optimizer = SystemOptimizer() self.system_optimizer = SystemOptimizer()
self.logger.info("TSP助手初始化完成")
def test_system(self) -> Dict[str, Any]: def test_system(self) -> Dict[str, Any]:
"""测试系统各个组件""" """测试系统各个组件"""

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -14,9 +14,10 @@ from src.web.error_handlers import handle_api_errors, create_error_response, cre
knowledge_bp = Blueprint('knowledge', __name__, url_prefix='/api/knowledge') knowledge_bp = Blueprint('knowledge', __name__, url_prefix='/api/knowledge')
def get_agent_assistant(): def get_agent_assistant():
"""获取Agent助手实例懒加载""" """获取Agent助手实例"""
global _agent_assistant global _agent_assistant
if '_agent_assistant' not in globals(): if '_agent_assistant' not in globals():
from src.agent_assistant import TSPAgentAssistant
_agent_assistant = TSPAgentAssistant() _agent_assistant = TSPAgentAssistant()
return _agent_assistant return _agent_assistant

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
服务管理器 服务管理器
统一管理各种服务的懒加载实例
""" """
from typing import Optional, Dict, Any from typing import Optional, Dict, Any
@@ -11,17 +10,14 @@ logger = logging.getLogger(__name__)
class ServiceManager: class ServiceManager:
"""服务管理器 - 统一管理各种服务的懒加载实例"""
def __init__(self): def __init__(self):
self._services: Dict[str, Any] = {} self._services: Dict[str, Any] = {}
def get_service(self, service_name: str, factory_func): def get_service(self, service_name: str, factory_func):
"""获取服务实例(懒加载)""" """获取服务实例"""
if service_name not in self._services: if service_name not in self._services:
try: try:
self._services[service_name] = factory_func() self._services[service_name] = factory_func()
logger.info(f"服务 {service_name} 已初始化")
except Exception as e: except Exception as e:
logger.error(f"初始化服务 {service_name} 失败: {e}") logger.error(f"初始化服务 {service_name} 失败: {e}")
raise raise
@@ -59,12 +55,11 @@ class ServiceManager:
"""清除指定服务实例""" """清除指定服务实例"""
if service_name in self._services: if service_name in self._services:
del self._services[service_name] del self._services[service_name]
logger.info(f"服务 {service_name} 已清除")
def clear_all_services(self): def clear_all_services(self):
"""清除所有服务实例""" """清除所有服务实例"""
self._services.clear() self._services.clear()
logger.info("所有服务实例已清除")
# 全局服务管理器实例 # 全局服务管理器实例

View File

@@ -259,10 +259,10 @@ class WebSocketServer:
): ):
await asyncio.Future() # 保持服务器运行 await asyncio.Future() # 保持服务器运行
def _process_request(self, path, request_headers): def _process_request(self, path, request):
"""处理HTTP请求支持CORS""" """处理HTTP请求支持CORS"""
# 检查是否是WebSocket升级请求 # 检查是否是WebSocket升级请求
if request_headers.get("Upgrade", "").lower() == "websocket": if request.headers.get("Upgrade", "").lower() == "websocket":
return None # 允许WebSocket连接 return None # 允许WebSocket连接
# 对于非WebSocket请求返回简单的HTML页面 # 对于非WebSocket请求返回简单的HTML页面

Some files were not shown because too many files have changed in this diff Show More