From da4736c3235a3f38939f6fc355cfba88e32b1ad2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=B5=B5=E6=9D=B0=20Jie=20Zhao=20=EF=BC=88=E9=9B=84?= =?UTF-8?q?=E7=8B=AE=E6=B1=BD=E8=BD=A6=E7=A7=91=E6=8A=80=EF=BC=89?= <00061074@chery.local> Date: Fri, 19 Sep 2025 19:32:42 +0100 Subject: [PATCH] =?UTF-8?q?feat:=20=E9=87=8D=E5=A4=A7=E5=8A=9F=E8=83=BD?= =?UTF-8?q?=E6=9B=B4=E6=96=B0=20v1.4.0=20-=20=E9=A3=9E=E4=B9=A6=E9=9B=86?= =?UTF-8?q?=E6=88=90=E3=80=81AI=E8=AF=AD=E4=B9=89=E7=9B=B8=E4=BC=BC?= =?UTF-8?q?=E5=BA=A6=E3=80=81=E5=89=8D=E7=AB=AF=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 主要更新内容: - 🚀 飞书多维表格集成,支持工单数据同步 - 🤖 AI建议与人工描述语义相似度计算 - 🎨 前端UI全面优化,现代化设计 - 📊 智能知识库入库策略(AI准确率<90%使用人工描述) - 🔧 代码重构,模块化架构优化 - 📚 完整文档整合和更新 - 🐛 修复配置导入和数据库字段问题 技术特性: - 使用sentence-transformers进行语义相似度计算 - 快速模式结合TF-IDF和语义方法 - 响应式设计,支持移动端 - 加载状态和动画效果 - 配置化AI准确率阈值 --- README.md | 18 + TSP智能助手完整文档.md | 524 ++++++++++++++++++ config/README.md | 141 ++++- config/ai_accuracy_config.py | 110 ++++ config/integrations_config.json | 8 +- database_init_report.json | 9 + init_database.py | 836 ++++++++++++++++++++--------- src/agent/agent_assistant_core.py | 254 +++++++++ src/agent/agent_message_handler.py | 243 +++++++++ src/agent/agent_sample_actions.py | 405 ++++++++++++++ src/agent_assistant.py | 66 +-- src/agent_assistant_new.py | 322 +++++++++++ src/config/config.py | 2 +- src/config/unified_config.py | 279 ++++++++++ src/core/models.py | 15 + src/integrations/workorder_sync.py | 41 +- src/utils/helpers.py | 26 +- src/utils/semantic_similarity.py | 256 +++++++++ src/web/app.py | 5 +- src/web/blueprints/README.md | 30 ++ src/web/blueprints/workorders.py | 115 +++- src/web/static/css/style.css | 462 +++++++++++++--- src/web/static/js/dashboard.js | 552 ++++++++++++++++++- src/web/templates/dashboard.html | 208 +++++++ src/web/templates/feishu_sync.html | 662 ----------------------- update_config.json | 54 -- version.json | 42 -- version.py | 199 ------- 
新功能说明_v1.4.0.md | 243 +++++++++ 部署更新指南.md | 57 +- 30 files changed, 4778 insertions(+), 1406 deletions(-) create mode 100644 TSP智能助手完整文档.md create mode 100644 config/ai_accuracy_config.py create mode 100644 database_init_report.json create mode 100644 src/agent/agent_assistant_core.py create mode 100644 src/agent/agent_message_handler.py create mode 100644 src/agent/agent_sample_actions.py create mode 100644 src/agent_assistant_new.py create mode 100644 src/config/unified_config.py create mode 100644 src/utils/semantic_similarity.py delete mode 100644 src/web/templates/feishu_sync.html delete mode 100644 update_config.json delete mode 100644 version.json delete mode 100644 version.py create mode 100644 新功能说明_v1.4.0.md diff --git a/README.md b/README.md index 1b96437..95f7165 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ - **智能规划**: 基于目标驱动的任务规划和执行 - **自主学习**: 从用户反馈中持续优化响应质量 - **实时监控**: 主动监控系统状态和异常情况 +- **模块化重构**: 代码优化,降低运行风险,提升维护性 ### 💬 智能对话系统 - **实时通信**: WebSocket支持,毫秒级响应 @@ -32,6 +33,7 @@ - **版本控制**: 完整的版本管理和变更日志 - **热更新**: 支持前端文件热更新,无需重启服务 - **自动备份**: 更新前自动备份,支持一键回滚 +- **飞书集成**: 支持飞书多维表格数据同步和管理 ## 🏗️ 系统架构 @@ -85,6 +87,13 @@ - **端口配置**: Web服务和WebSocket端口管理 - **日志级别**: 灵活的日志级别控制 +### 7. 
飞书集成 📱 +- **多维表格同步**: 自动同步飞书多维表格数据 +- **字段映射**: 智能映射飞书字段到本地数据库 +- **实时更新**: 支持增量同步和全量同步 +- **数据预览**: 同步前预览数据,确保准确性 +- **统一管理**: 飞书功能集成到主仪表板 + ## 🛠️ 技术栈 ### 后端技术 @@ -260,6 +269,7 @@ LOG_LEVEL=INFO ### 配置文件 - `config/llm_config.py`: LLM客户端配置 +- `config/integrations_config.json`: 飞书集成配置 - `update_config.json`: 更新管理器配置 - `version.json`: 版本信息配置 @@ -280,6 +290,14 @@ LOG_LEVEL=INFO ## 📝 更新日志 +### v1.4.0 (2025-09-19) +- ✅ 飞书集成功能:支持飞书多维表格数据同步 +- ✅ 页面功能合并:飞书同步页面合并到主仪表板 +- ✅ 数据库架构优化:扩展工单表字段,支持飞书数据 +- ✅ 代码重构优化:大文件拆分,降低运行风险 +- ✅ 字段映射完善:智能映射飞书字段到本地数据库 +- ✅ 数据库初始化改进:集成字段迁移到初始化流程 + ### v1.3.0 (2025-09-17) - ✅ 数据库架构优化:MySQL主数据库+SQLite备份系统 - ✅ 工单详情API修复:解决数据库会话管理问题 diff --git a/TSP智能助手完整文档.md b/TSP智能助手完整文档.md new file mode 100644 index 0000000..ae7eef9 --- /dev/null +++ b/TSP智能助手完整文档.md @@ -0,0 +1,524 @@ +# TSP智能助手完整文档 + +## 📋 目录 +- [项目概述](#项目概述) +- [系统架构](#系统架构) +- [核心功能](#核心功能) +- [技术栈](#技术栈) +- [安装部署](#安装部署) +- [配置说明](#配置说明) +- [使用指南](#使用指南) +- [API接口](#api接口) +- [数据库设计](#数据库设计) +- [开发指南](#开发指南) +- [故障排除](#故障排除) +- [更新日志](#更新日志) + +--- + +## 🚀 项目概述 + +TSP智能助手是一个基于大语言模型的智能客服系统,专为TSP(Telematics Service Provider)车辆服务提供商设计。系统集成了智能对话、工单管理、知识库、数据分析、飞书集成等核心功能。 + +### 核心特性 +- **智能Agent架构**: 多工具集成,智能规划,自主学习 +- **实时对话系统**: WebSocket支持,上下文理解,VIN识别 +- **数据驱动分析**: 真实数据统计,可视化展示,系统监控 +- **企业级管理**: 多环境部署,版本控制,热更新,自动备份 +- **飞书集成**: 支持飞书多维表格数据同步和管理 +- **AI准确率优化**: 智能判断AI建议质量,优先使用高质量内容入库 + +--- + +## 🏗️ 系统架构 + +### 整体架构 +``` +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ 前端界面 │ │ 后端服务 │ │ 数据存储 │ +│ │ │ │ │ │ +│ • 仪表板 │◄──►│ • Flask API │◄──►│ • MySQL DB │ +│ • 智能对话 │ │ • WebSocket │ │ • 知识库 │ +│ • Agent管理 │ │ • Agent核心 │ │ • 工单系统 │ +│ • 数据分析 │ │ • LLM集成 │ │ • 车辆数据 │ +│ • 飞书同步 │ │ • 备份系统 │ │ • SQLite备份 │ +└─────────────────┘ └─────────────────┘ └─────────────────┘ +``` + +### 模块化设计 +- **Web层**: Flask蓝图架构,模块化API设计 +- **业务层**: Agent核心、对话管理、工单处理 +- **数据层**: MySQL主库 + SQLite备份,ORM映射 +- **集成层**: 飞书API、LLM服务、监控系统 + +--- + +## 🎯 核心功能 + +### 1. 
智能对话系统 💬 +- **多轮对话**: 支持上下文关联的连续对话 +- **VIN识别**: 自动识别车辆VIN并获取实时数据 +- **知识库检索**: 基于TF-IDF和余弦相似度的智能检索 +- **工单创建**: 对话中直接创建和关联工单 + +### 2. Agent管理系统 🤖 +- **工具管理**: 10+内置工具,支持自定义工具注册 +- **执行监控**: 实时监控Agent任务执行状态 +- **性能统计**: 工具使用频率和成功率分析 +- **智能规划**: 基于目标的任务分解和执行 + +### 3. 工单管理系统 📋 +- **AI建议生成**: 基于知识库生成工单处理建议 +- **人工审核**: 支持人工输入和AI建议对比 +- **语义相似度**: 使用sentence-transformers进行准确度评估 +- **智能入库**: AI准确率<90%时优先使用人工描述入库 +- **知识库更新**: 高相似度建议自动入库 + +### 4. 知识库管理 📚 +- **多格式支持**: TXT、PDF、DOC、DOCX、MD文件 +- **智能提取**: 自动从文档中提取Q&A对 +- **向量化检索**: TF-IDF + 余弦相似度搜索 +- **质量验证**: 支持知识条目验证和置信度设置 + +### 5. 数据分析系统 📊 +- **实时趋势**: 基于真实数据的性能趋势分析 +- **多维度统计**: 工单、预警、满意度等关键指标 +- **系统健康**: CPU、内存、响应时间监控 +- **可视化展示**: 丰富的图表和仪表板 + +### 6. 飞书集成系统 📱 +- **多维表格同步**: 自动同步飞书多维表格数据 +- **字段映射**: 智能映射飞书字段到本地数据库 +- **实时更新**: 支持增量同步和全量同步 +- **数据预览**: 同步前预览数据,确保准确性 + +### 7. 系统设置管理 ⚙️ +- **API管理**: 支持多种LLM提供商配置 +- **模型参数**: 温度、最大令牌数等参数调节 +- **端口配置**: Web服务和WebSocket端口管理 +- **日志级别**: 灵活的日志级别控制 + +--- + +## 🛠️ 技术栈 + +### 后端技术 +- **Python 3.8+**: 核心开发语言 +- **Flask**: Web框架和API服务 +- **SQLAlchemy**: ORM数据库操作 +- **WebSocket**: 实时通信支持 +- **psutil**: 系统资源监控 + +### 前端技术 +- **Bootstrap 5**: UI框架 +- **Chart.js**: 数据可视化 +- **JavaScript ES6+**: 前端逻辑 +- **WebSocket**: 实时通信客户端 + +### AI/ML技术 +- **大语言模型**: 支持OpenAI、通义千问等 +- **sentence-transformers**: 语义相似度计算 +- **TF-IDF**: 文本向量化 +- **余弦相似度**: 语义相似度计算 +- **Agent框架**: 智能任务规划 + +### 部署运维 +- **Docker**: 容器化部署 +- **Nginx**: 反向代理和静态文件服务 +- **Systemd**: 服务管理 +- **Git**: 版本控制 + +--- + +## 🚀 安装部署 + +### 环境要求 +- Python 3.8+ +- Node.js 16+ (可选,用于前端构建) +- Git +- MySQL 8.0+ + +### 安装步骤 + +1. **克隆项目** +```bash +git clone http://jeason.online:3000/zhaojie/assist.git +cd assist +``` + +2. **安装依赖** +```bash +pip install -r requirements.txt +``` + +3. **配置数据库** +```bash +# 创建MySQL数据库 +mysql -u root -p +CREATE DATABASE tsp_assistant CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; +``` + +4. **初始化数据库** +```bash +python init_database.py +``` + +5. 
**启动服务** +```bash +python start_dashboard.py +``` + +6. **访问系统** +- 打开浏览器访问: `http://localhost:5000` +- 默认端口: 5000 (可在系统设置中修改) + +### Windows快速启动 +```cmd +# 双击运行 +快速启动.bat +``` + +--- + +## ⚙️ 配置说明 + +### 环境变量 +```bash +# 数据库配置 +DATABASE_URL=mysql+pymysql://user:password@host:port/database + +# LLM配置 +LLM_PROVIDER=openai +LLM_API_KEY=your_api_key +LLM_MODEL=gpt-3.5-turbo + +# 服务配置 +SERVER_PORT=5000 +WEBSOCKET_PORT=8765 +LOG_LEVEL=INFO +``` + +### 配置文件结构 +``` +config/ +├── llm_config.py # LLM客户端配置 +├── integrations_config.json # 飞书集成配置 +├── ai_accuracy_config.py # AI准确率配置 +└── README.md # 配置说明文档 +``` + +### 飞书集成配置 +```json +{ + "feishu": { + "app_id": "cli_a8b50ec0eed1500d", + "app_secret": "ccxkE7ZCFQZcwkkM1rLy0ccZRXYsT2xK", + "app_token": "XXnEbiCmEaMblSs6FDJcFCqsnIg", + "table_id": "tblnl3vJPpgMTSiP", + "status": "active" + }, + "system": { + "sync_limit": 10, + "ai_suggestions_enabled": true, + "auto_sync_interval": 0 + } +} +``` + +### AI准确率配置 +```python +# 默认配置 +auto_approve_threshold = 0.95 # 自动审批阈值 +use_human_resolution_threshold = 0.90 # 使用人工描述阈值 +manual_review_threshold = 0.80 # 人工审核阈值 +``` + +--- + +## 📖 使用指南 + +### 基础操作 + +#### 1. 智能对话 +1. 在"智能对话"页面输入问题 +2. 系统自动检索知识库并生成回答 +3. 支持VIN码识别和车辆数据查询 + +#### 2. 工单管理 +1. 创建工单并获取AI建议 +2. 输入人工处理描述 +3. 系统自动计算语义相似度 +4. 根据相似度决定入库策略 + +#### 3. 知识库维护 +1. 手动添加Q&A对 +2. 上传文档自动提取知识 +3. 设置置信度和验证状态 + +#### 4. 飞书数据同步 +1. 配置飞书应用凭证 +2. 在主仪表板"飞书同步"标签页 +3. 测试连接并执行数据同步 + +### 高级功能 + +#### 1. Agent工具管理 +- 查看工具使用统计 +- 注册自定义工具 +- 监控执行历史 + +#### 2. 数据分析 +- 多维度数据统计 +- 自定义时间范围 +- 导出分析报告 + +#### 3. 
系统配置 +- API和模型参数配置 +- 端口和日志级别设置 +- 环境变量管理 + +--- + +## 🔌 API接口 + +### 工单管理API +```http +POST /api/workorders/{id}/ai-suggestion +POST /api/workorders/{id}/human-resolution +POST /api/workorders/{id}/approve-to-knowledge +GET /api/workorders +POST /api/workorders +PUT /api/workorders/{id} +DELETE /api/workorders/{id} +``` + +### 知识库API +```http +GET /api/knowledge +POST /api/knowledge +PUT /api/knowledge/{id} +DELETE /api/knowledge/{id} +POST /api/knowledge/search +POST /api/knowledge/upload +``` + +### 对话API +```http +POST /api/chat/session +GET /api/chat/session/{id} +DELETE /api/chat/session/{id} +POST /api/chat/message +``` + +### Agent管理API +```http +GET /api/agent/status +POST /api/agent/tools/execute +POST /api/agent/tools/register +GET /api/agent/tools/stats +``` + +--- + +## 🗄️ 数据库设计 + +### 核心表结构 + +#### work_orders (工单表) +```sql +CREATE TABLE work_orders ( + id INT PRIMARY KEY AUTO_INCREMENT, + order_id VARCHAR(50) UNIQUE NOT NULL, + title VARCHAR(200) NOT NULL, + description TEXT NOT NULL, + category VARCHAR(100) NOT NULL, + priority VARCHAR(20) NOT NULL, + status VARCHAR(20) NOT NULL, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + resolution TEXT, + satisfaction_score FLOAT, + + -- 飞书集成字段 + feishu_record_id VARCHAR(100), + source VARCHAR(50), + module VARCHAR(100), + created_by VARCHAR(100), + wilfulness VARCHAR(100), + date_of_close DATETIME, + vehicle_type VARCHAR(100), + vin_sim VARCHAR(50), + app_remote_control_version VARCHAR(100), + hmi_sw VARCHAR(100), + parent_record VARCHAR(100), + has_updated_same_day VARCHAR(50), + operating_time VARCHAR(100) +); +``` + +#### work_order_suggestions (工单建议表) +```sql +CREATE TABLE work_order_suggestions ( + id INT PRIMARY KEY AUTO_INCREMENT, + work_order_id INT NOT NULL, + ai_suggestion TEXT, + human_resolution TEXT, + ai_similarity FLOAT, + approved BOOLEAN DEFAULT FALSE, + use_human_resolution BOOLEAN DEFAULT FALSE, + created_at 
DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + FOREIGN KEY (work_order_id) REFERENCES work_orders(id) +); +``` + +#### knowledge_entries (知识库表) +```sql +CREATE TABLE knowledge_entries ( + id INT PRIMARY KEY AUTO_INCREMENT, + question TEXT NOT NULL, + answer TEXT NOT NULL, + category VARCHAR(100), + confidence_score FLOAT DEFAULT 0.5, + usage_count INT DEFAULT 0, + is_active BOOLEAN DEFAULT TRUE, + is_verified BOOLEAN DEFAULT FALSE, + verified_by VARCHAR(100), + verified_at DATETIME, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +); +``` + +--- + +## 👨‍💻 开发指南 + +### 项目结构 +``` +tsp-assistant/ +├── src/ # 源代码 +│ ├── agent/ # Agent核心模块 +│ ├── analytics/ # 数据分析模块 +│ ├── config/ # 配置模块 +│ ├── core/ # 核心模块 +│ ├── dialogue/ # 对话模块 +│ ├── integrations/ # 集成模块 +│ ├── knowledge_base/ # 知识库模块 +│ ├── utils/ # 工具模块 +│ ├── vehicle/ # 车辆数据模块 +│ └── web/ # Web应用模块 +├── config/ # 配置文件 +├── scripts/ # 脚本文件 +├── uploads/ # 上传文件 +├── requirements.txt # 依赖文件 +├── init_database.py # 数据库初始化 +└── start_dashboard.py # 启动脚本 +``` + +### 代码规范 +- **Python**: 遵循PEP 8规范 +- **JavaScript**: 使用ES6+语法 +- **提交信息**: 使用约定式提交格式 +- **文档**: 新功能需要添加相应的文档 + +### 开发流程 +1. Fork项目到个人仓库 +2. 创建功能分支: `git checkout -b feature/new-feature` +3. 提交更改: `git commit -m "Add new feature"` +4. 推送分支: `git push origin feature/new-feature` +5. 创建Pull Request + +--- + +## 🚨 故障排除 + +### 常见问题 + +#### 1. 数据库连接失败 +```bash +# 检查数据库服务状态 +systemctl status mysql + +# 检查连接配置 +python -c "from src.core.database import db_manager; print(db_manager.test_connection())" +``` + +#### 2. 飞书集成问题 +- 检查飞书应用权限配置 +- 验证app_token和table_id是否正确 +- 确认网络连接和API访问权限 + +#### 3. AI建议生成失败 +- 检查LLM API配置 +- 验证知识库数据完整性 +- 查看应用日志 + +#### 4. 
数据库字段缺失 +```bash +# 运行数据库迁移 +python init_database.py + +# 手动添加字段 +mysql -u root -p tsp_assistant +ALTER TABLE work_order_suggestions ADD COLUMN use_human_resolution BOOLEAN DEFAULT FALSE; +``` + +### 日志位置 +- **应用日志**: `logs/tsp_assistant.log` +- **访问日志**: Nginx访问日志 +- **错误追踪**: 详细的错误堆栈信息 + +### 性能优化 +- **数据库索引**: 为常用查询字段添加索引 +- **缓存策略**: 使用Redis缓存热点数据 +- **异步处理**: 耗时操作使用异步处理 +- **连接池**: 配置数据库连接池 + +--- + +## 📝 更新日志 + +### v1.4.0 (2025-09-19) +- ✅ 飞书集成功能:支持飞书多维表格数据同步 +- ✅ 页面功能合并:飞书同步页面合并到主仪表板 +- ✅ 数据库架构优化:扩展工单表字段,支持飞书数据 +- ✅ 代码重构优化:大文件拆分,降低运行风险 +- ✅ 字段映射完善:智能映射飞书字段到本地数据库 +- ✅ 数据库初始化改进:集成字段迁移到初始化流程 +- ✅ AI准确率优化:AI准确率<90%时优先使用人工描述入库 +- ✅ 语义相似度计算:使用sentence-transformers提升准确度 + +### v1.3.0 (2025-09-17) +- ✅ 数据库架构优化:MySQL主数据库+SQLite备份系统 +- ✅ 工单详情API修复:解决数据库会话管理问题 +- ✅ 备份管理系统:自动备份MySQL数据到SQLite +- ✅ 数据库状态监控:实时监控MySQL和SQLite状态 +- ✅ 备份管理API:支持数据备份和恢复操作 + +### v1.2.0 (2025-09-16) +- ✅ 系统设置扩展:API管理、模型参数配置、端口管理 +- ✅ 真实数据分析:修复性能趋势图表显示问题 +- ✅ 工单AI建议功能:智能生成处理建议 +- ✅ 知识库搜索优化:提升检索准确率 +- ✅ Agent管理改进:工具使用统计和自定义工具 + +--- + +## 📄 许可证 + +本项目采用 MIT 许可证 - 查看 [LICENSE](LICENSE) 文件了解详情 + +## 📞 支持与联系 + +- **项目地址**: http://jeason.online:3000/zhaojie/assist +- **问题反馈**: 请在Issues中提交问题 +- **功能建议**: 欢迎提交Feature Request + +## 🙏 致谢 + +感谢所有为项目做出贡献的开发者和用户! + +--- + +**TSP智能助手** - 让车辆服务更智能,让客户体验更美好! 🚗✨ diff --git a/config/README.md b/config/README.md index b0cc921..778ed0b 100644 --- a/config/README.md +++ b/config/README.md @@ -1,16 +1,22 @@ -# LLM配置说明 +# TSP智能助手配置说明 -## 千问模型配置 +## 📋 配置文件概述 + +本目录包含TSP智能助手的核心配置文件,包括LLM配置、集成配置等。 + +## 🤖 LLM配置 + +### 千问模型配置 本项目默认使用阿里云千问模型。要使用千问模型,请按以下步骤配置: -### 1. 获取API密钥 +#### 1. 获取API密钥 1. 访问 [阿里云百炼平台](https://bailian.console.aliyun.com/) 2. 注册并登录账号 3. 创建应用并获取API密钥 -### 2. 配置API密钥 +#### 2. 配置API密钥 编辑 `config/llm_config.py` 文件,将 `api_key` 替换为您的实际API密钥: @@ -25,13 +31,13 @@ QWEN_CONFIG = LLMConfig( ) ``` -### 3. 可用的千问模型 +#### 3. 可用的千问模型 - `qwen-turbo`: 快速响应,适合一般对话 - `qwen-plus`: 平衡性能和成本 - `qwen-max`: 最强性能,适合复杂任务 -### 4. 环境变量配置(可选) +#### 4. 
环境变量配置(可选) 您也可以使用环境变量来配置: @@ -40,7 +46,7 @@ export QWEN_API_KEY="sk-your-actual-qwen-api-key" export QWEN_MODEL="qwen-turbo" ``` -### 5. 其他模型支持 +#### 5. 其他模型支持 项目也支持其他LLM提供商: @@ -48,6 +54,125 @@ export QWEN_MODEL="qwen-turbo" - **Anthropic**: Claude系列 - **本地模型**: Ollama等 -### 6. 配置验证 +#### 6. 配置验证 启动系统后,可以在Agent管理页面查看LLM使用统计,确认配置是否正确。 + +## 📱 飞书集成配置 + +### 配置文件说明 + +`integrations_config.json` 文件包含飞书集成的所有配置信息: + +```json +{ + "feishu": { + "app_id": "cli_a8b50ec0eed1500d", + "app_secret": "ccxkE7ZCFQZcwkkM1rLy0ccZRXYsT2xK", + "app_token": "XXnEbiCmEaMblSs6FDJcFCqsnIg", + "table_id": "tblnl3vJPpgMTSiP", + "last_updated": "2025-09-19T18:27:40.579958", + "status": "active" + }, + "system": { + "sync_limit": 10, + "ai_suggestions_enabled": true, + "auto_sync_interval": 0, + "last_sync_time": null + } +} +``` + +### 配置参数说明 + +#### 飞书应用配置 +- `app_id`: 飞书应用ID +- `app_secret`: 飞书应用密钥 +- `app_token`: 飞书多维表格应用Token +- `table_id`: 飞书多维表格ID +- `last_updated`: 最后更新时间 +- `status`: 集成状态(active/inactive) + +#### 系统配置 +- `sync_limit`: 同步记录数量限制 +- `ai_suggestions_enabled`: 是否启用AI建议 +- `auto_sync_interval`: 自动同步间隔(分钟) +- `last_sync_time`: 最后同步时间 + +### 获取飞书配置 + +1. **获取应用凭证** + - 访问 [飞书开放平台](https://open.feishu.cn/) + - 创建企业自建应用 + - 获取 `app_id` 和 `app_secret` + +2. **获取表格信息** + - 打开飞书多维表格 + - 从URL中提取 `app_token` 和 `table_id` + - 例如:`https://my-ichery.feishu.cn/base/XXnEbiCmEaMblSs6FDJcFCqsnIg?table=tblnl3vJPpgMTSiP` + - `app_token`: `XXnEbiCmEaMblSs6FDJcFCqsnIg` + - `table_id`: `tblnl3vJPpgMTSiP` + +3. 
**配置权限** + - 在飞书开放平台中配置应用权限 + - 确保应用有读取多维表格的权限 + +### 字段映射配置 + +系统会自动映射以下飞书字段到本地数据库: + +| 飞书字段 | 本地字段 | 类型 | 说明 | +|---------|---------|------|------| +| TR Number | order_id | String | 工单编号 | +| TR Description | description | Text | 工单描述 | +| Type of problem | category | String | 问题类型 | +| TR Level | priority | String | 优先级 | +| TR Status | status | String | 工单状态 | +| Source | source | String | 来源 | +| Created by | created_by | String | 创建人 | +| Module(模块) | module | String | 模块 | +| Wilfulness(责任人) | wilfulness | String | 责任人 | +| Date of close TR | date_of_close | DateTime | 关闭日期 | +| Vehicle Type01 | vehicle_type | String | 车型 | +| VIN\|sim | vin_sim | String | 车架号/SIM | +| App remote control version | app_remote_control_version | String | 应用远程控制版本 | +| HMI SW | hmi_sw | String | HMI软件版本 | +| 父记录 | parent_record | String | 父记录 | +| Has it been updated on the same day | has_updated_same_day | String | 是否同日更新 | +| Operating time | operating_time | String | 操作时间 | + +## 🔧 配置管理 + +### 配置文件位置 +- `llm_config.py`: LLM客户端配置 +- `integrations_config.json`: 集成服务配置 +- `integrations_config copy.json`: 配置备份文件 + +### 配置更新 +- 修改配置文件后需要重启服务 +- 建议在修改前备份配置文件 +- 可以通过Web界面进行部分配置的在线修改 + +### 环境变量支持 +系统支持通过环境变量覆盖配置文件设置: + +```bash +# LLM配置 +export LLM_PROVIDER="openai" +export LLM_API_KEY="your-api-key" +export LLM_MODEL="gpt-3.5-turbo" + +# 飞书配置 +export FEISHU_APP_ID="your-app-id" +export FEISHU_APP_SECRET="your-app-secret" +export FEISHU_APP_TOKEN="your-app-token" +export FEISHU_TABLE_ID="your-table-id" +``` + +## 🚨 注意事项 + +1. **安全性**: 配置文件包含敏感信息,请勿提交到版本控制系统 +2. **备份**: 修改配置前请备份原文件 +3. **权限**: 确保飞书应用有足够的权限访问多维表格 +4. **测试**: 配置完成后建议先进行测试同步 +5. 
**监控**: 定期检查同步状态和错误日志 diff --git a/config/ai_accuracy_config.py b/config/ai_accuracy_config.py new file mode 100644 index 0000000..b239cbf --- /dev/null +++ b/config/ai_accuracy_config.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +AI准确率配置 +管理AI建议的准确率阈值和相关配置 +""" + +from dataclasses import dataclass +from typing import Dict, Any + +@dataclass +class AIAccuracyConfig: + """AI准确率配置类""" + + # 相似度阈值配置 + auto_approve_threshold: float = 0.95 # 自动审批阈值(≥95%) + use_human_resolution_threshold: float = 0.90 # 使用人工描述阈值(<90%) + manual_review_threshold: float = 0.80 # 人工审核阈值(≥80%) + + # 置信度配置 + ai_suggestion_confidence: float = 0.95 # AI建议默认置信度 + human_resolution_confidence: float = 0.90 # 人工描述置信度 + + # 入库策略配置 + prefer_human_when_low_accuracy: bool = True # 当AI准确率低时优先使用人工描述 + enable_auto_approval: bool = True # 是否启用自动审批 + enable_human_fallback: bool = True # 是否启用人工描述回退 + + def get_threshold_explanation(self, similarity: float) -> str: + """获取相似度阈值的解释""" + if similarity >= self.auto_approve_threshold: + return f"相似度≥{self.auto_approve_threshold*100:.0f}%,自动审批使用AI建议" + elif similarity >= self.manual_review_threshold: + return f"相似度≥{self.manual_review_threshold*100:.0f}%,建议人工审核" + elif similarity >= self.use_human_resolution_threshold: + return f"相似度<{self.use_human_resolution_threshold*100:.0f}%,建议使用人工描述" + else: + return f"相似度<{self.use_human_resolution_threshold*100:.0f}%,优先使用人工描述" + + def should_use_human_resolution(self, similarity: float) -> bool: + """判断是否应该使用人工描述""" + return similarity < self.use_human_resolution_threshold + + def should_auto_approve(self, similarity: float) -> bool: + """判断是否应该自动审批""" + return similarity >= self.auto_approve_threshold and self.enable_auto_approval + + def get_confidence_score(self, similarity: float, use_human: bool = False) -> float: + """获取置信度分数""" + if use_human: + return self.human_resolution_confidence + else: + return max(similarity, self.ai_suggestion_confidence) + + def to_dict(self) -> Dict[str, 
Any]: + """转换为字典格式""" + return { + "auto_approve_threshold": self.auto_approve_threshold, + "use_human_resolution_threshold": self.use_human_resolution_threshold, + "manual_review_threshold": self.manual_review_threshold, + "ai_suggestion_confidence": self.ai_suggestion_confidence, + "human_resolution_confidence": self.human_resolution_confidence, + "prefer_human_when_low_accuracy": self.prefer_human_when_low_accuracy, + "enable_auto_approval": self.enable_auto_approval, + "enable_human_fallback": self.enable_human_fallback + } + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> 'AIAccuracyConfig': + """从字典创建配置""" + return cls(**data) + +# 默认配置实例 +DEFAULT_CONFIG = AIAccuracyConfig() + +# 配置预设 +PRESETS = { + "conservative": AIAccuracyConfig( + auto_approve_threshold=0.98, + use_human_resolution_threshold=0.85, + manual_review_threshold=0.90, + human_resolution_confidence=0.95 + ), + "balanced": AIAccuracyConfig( + auto_approve_threshold=0.95, + use_human_resolution_threshold=0.90, + manual_review_threshold=0.80, + human_resolution_confidence=0.90 + ), + "aggressive": AIAccuracyConfig( + auto_approve_threshold=0.90, + use_human_resolution_threshold=0.80, + manual_review_threshold=0.70, + human_resolution_confidence=0.85 + ) +} + +def get_accuracy_config(preset: str = "balanced") -> AIAccuracyConfig: + """获取准确率配置""" + return PRESETS.get(preset, DEFAULT_CONFIG) + +def update_accuracy_config(config: AIAccuracyConfig) -> bool: + """更新准确率配置(可以保存到文件或数据库)""" + try: + # 这里可以实现配置的持久化存储 + # 例如保存到配置文件或数据库 + return True + except Exception: + return False diff --git a/config/integrations_config.json b/config/integrations_config.json index ede371c..57ada2b 100644 --- a/config/integrations_config.json +++ b/config/integrations_config.json @@ -1,11 +1,11 @@ { "feishu": { - "app_id": "tblnl3vJPpgMTSiP", + "app_id": "cli_a8b50ec0eed1500d", "app_secret": "ccxkE7ZCFQZcwkkM1rLy0ccZRXYsT2xK", - "app_token": "XXnEbiCmEaMblSs6FDJcFCqsnlg", + "app_token": 
"XXnEbiCmEaMblSs6FDJcFCqsnIg", "table_id": "tblnl3vJPpgMTSiP", - "last_updated": null, - "status": "inactive" + "last_updated": "2025-09-19T18:40:55.291113", + "status": "active" }, "system": { "sync_limit": 10, diff --git a/database_init_report.json b/database_init_report.json new file mode 100644 index 0000000..76689cd --- /dev/null +++ b/database_init_report.json @@ -0,0 +1,9 @@ +{ + "init_time": "2025-09-19T18:57:01.015501", + "database_version": "MySQL 8.4.6", + "database_url": "mysql+pymysql://tsp_assistant:***@43.134.68.207/tsp_assistant?charset=utf8mb4", + "migrations_applied": 0, + "tables_created": 15, + "initial_data_inserted": true, + "verification_passed": true +} \ No newline at end of file diff --git a/init_database.py b/init_database.py index 1a41485..56fdea3 100644 --- a/init_database.py +++ b/init_database.py @@ -1,14 +1,17 @@ - # -*- coding: utf-8 -*- """ -TSP助手数据库初始化脚本 - 包含所有数据库操作 +TSP助手数据库初始化脚本 - 重构版本 +结合项目新特性,提供更高效的数据库初始化和管理功能 """ import sys import os import logging -from sqlalchemy import text -from datetime import datetime +import json +from typing import Dict, List, Optional, Any +from sqlalchemy import text, inspect +from datetime import datetime, timedelta +from pathlib import Path # 添加项目根目录到Python路径 sys.path.append(os.path.dirname(os.path.abspath(__file__))) @@ -18,181 +21,205 @@ from src.utils.helpers import setup_logging from src.core.database import db_manager from src.core.models import Base, WorkOrder, KnowledgeEntry, Conversation, Analytics, Alert, VehicleData -def init_database(): - """初始化数据库 - 包含所有数据库操作""" - print("=" * 60) - print("🚀 TSP智能助手数据库初始化") - print("=" * 60) +class DatabaseInitializer: + """数据库初始化器 - 重构版本""" + + def __init__(self): + self.logger = logging.getLogger(__name__) + self.db_url = str(db_manager.engine.url) + self.is_mysql = 'mysql' in self.db_url + self.is_sqlite = 'sqlite' in self.db_url + self.is_postgresql = 'postgresql' in self.db_url + + # 数据库版本信息 + self.db_version = self._get_database_version() + + # 
迁移历史记录 + self.migration_history = [] + + def _get_database_version(self) -> str: + """获取数据库版本信息""" + try: + with db_manager.get_session() as session: + if self.is_mysql: + result = session.execute(text("SELECT VERSION()")).fetchone() + return f"MySQL {result[0]}" + elif self.is_postgresql: + result = session.execute(text("SELECT version()")).fetchone() + return f"PostgreSQL {result[0].split()[1]}" + else: # SQLite + result = session.execute(text("SELECT sqlite_version()")).fetchone() + return f"SQLite {result[0]}" + except Exception as e: + self.logger.warning(f"无法获取数据库版本: {e}") + return "Unknown" + + def initialize_database(self, force_reset: bool = False) -> bool: + """初始化数据库 - 主入口函数""" + print("=" * 80) + print("🚀 TSP智能助手数据库初始化系统") + print("=" * 80) + print(f"📊 数据库类型: {self.db_version}") + print(f"🔗 连接地址: {self.db_url}") + print(f"⏰ 初始化时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + print("=" * 80) try: # 设置日志 setup_logging(Config.LOG_LEVEL, Config.LOG_FILE) - logger = logging.getLogger(__name__) # 测试数据库连接 - if not db_manager.test_connection(): - print("❌ 数据库连接失败") + if not self._test_connection(): return False - print("✅ 数据库连接成功") - - # 创建所有表 - print("\n📋 创建数据库表...") - Base.metadata.create_all(bind=db_manager.engine) - print("✅ 数据库表创建成功") - - # 执行数据库迁移(添加新字段和表) - print("\n🔄 执行数据库迁移...") - migrate_database() + # 检查是否需要重置数据库 + if force_reset: + if not self._reset_database(): + return False + + # 创建数据库表 + if not self._create_tables(): + return False + + # 执行数据库迁移 + if not self._run_migrations(): + return False # 插入初始数据 - print("\n📊 插入初始数据...") - insert_initial_data() - - # 添加示例车辆数据 - print("\n🚗 添加示例车辆数据...") - add_sample_vehicle_data() - - # 验证知识库条目 - print("\n🔍 验证知识库条目...") - verify_existing_knowledge() - - print("\n✅ 数据库初始化完成") + if not self._insert_initial_data(): + return False + + # 验证数据库完整性 + if not self._verify_database_integrity(): + return False + + # 生成初始化报告 + self._generate_init_report() + + print("\n" + "=" * 80) + print("🎉 数据库初始化完成!") + 
print("=" * 80) return True except Exception as e: - print(f"❌ 数据库初始化失败: {e}") + print(f"\n❌ 数据库初始化失败: {e}") + self.logger.error(f"数据库初始化失败: {e}", exc_info=True) + return False + + def _test_connection(self) -> bool: + """测试数据库连接""" + print("\n🔌 测试数据库连接...") + try: + if db_manager.test_connection(): + print("✅ 数据库连接成功") + return True + else: + print("❌ 数据库连接失败") + return False + except Exception as e: + print(f"❌ 数据库连接测试异常: {e}") + return False + + def _reset_database(self) -> bool: + """重置数据库(谨慎使用)""" + print("\n⚠️ 重置数据库...") + try: + # 删除所有表 + Base.metadata.drop_all(bind=db_manager.engine) + print("✅ 数据库表删除成功") + + # 重新创建所有表 + Base.metadata.create_all(bind=db_manager.engine) + print("✅ 数据库表重新创建成功") + + return True + except Exception as e: + print(f"❌ 数据库重置失败: {e}") return False -def migrate_database(): - """执行数据库迁移 - 添加新字段和表""" - try: - with db_manager.get_session() as session: - # 检查数据库类型 - db_url = db_manager.engine.url - is_mysql = 'mysql' in str(db_url) - is_sqlite = 'sqlite' in str(db_url) + def _create_tables(self) -> bool: + """创建数据库表""" + print("\n📋 创建数据库表...") + try: + # 获取现有表信息 + inspector = inspect(db_manager.engine) + existing_tables = inspector.get_table_names() - print(" 📝 检查知识库验证字段与预警字段...") + # 创建所有表 + Base.metadata.create_all(bind=db_manager.engine) - # 检查is_verified字段是否存在 - if is_mysql: - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM INFORMATION_SCHEMA.COLUMNS - WHERE TABLE_SCHEMA = DATABASE() - AND TABLE_NAME = 'knowledge_entries' - AND COLUMN_NAME = 'is_verified' - """)).fetchone() - else: # SQLite - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM pragma_table_info('knowledge_entries') - WHERE name = 'is_verified' - """)).fetchone() + # 检查新创建的表 + new_tables = inspector.get_table_names() + created_tables = set(new_tables) - set(existing_tables) - if result.count == 0: - print(" ➕ 添加is_verified字段...") - if is_mysql: - session.execute(text("ALTER TABLE knowledge_entries ADD COLUMN is_verified BOOLEAN 
DEFAULT FALSE")) + if created_tables: + print(f"✅ 新创建表: {', '.join(created_tables)}") else: - session.execute(text("ALTER TABLE knowledge_entries ADD COLUMN is_verified BOOLEAN DEFAULT FALSE")) - print(" ✅ is_verified字段添加成功") - else: - print(" ✅ is_verified字段已存在") + print("✅ 所有表已存在") - # 检查verified_by字段是否存在 - if is_mysql: - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM INFORMATION_SCHEMA.COLUMNS - WHERE TABLE_SCHEMA = DATABASE() - AND TABLE_NAME = 'knowledge_entries' - AND COLUMN_NAME = 'verified_by' - """)).fetchone() - else: # SQLite - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM pragma_table_info('knowledge_entries') - WHERE name = 'verified_by' - """)).fetchone() - - if result.count == 0: - print(" ➕ 添加verified_by字段...") - if is_mysql: - session.execute(text("ALTER TABLE knowledge_entries ADD COLUMN verified_by VARCHAR(100)")) - else: - session.execute(text("ALTER TABLE knowledge_entries ADD COLUMN verified_by VARCHAR(100)")) - print(" ✅ verified_by字段添加成功") - else: - print(" ✅ verified_by字段已存在") - - # 检查verified_at字段是否存在 - if is_mysql: - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM INFORMATION_SCHEMA.COLUMNS - WHERE TABLE_SCHEMA = DATABASE() - AND TABLE_NAME = 'knowledge_entries' - AND COLUMN_NAME = 'verified_at' - """)).fetchone() - else: # SQLite - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM pragma_table_info('knowledge_entries') - WHERE name = 'verified_at' - """)).fetchone() - - if result.count == 0: - print(" ➕ 添加verified_at字段...") - if is_mysql: - session.execute(text("ALTER TABLE knowledge_entries ADD COLUMN verified_at DATETIME")) - else: - session.execute(text("ALTER TABLE knowledge_entries ADD COLUMN verified_at DATETIME")) - print(" ✅ verified_at字段添加成功") - else: - print(" ✅ verified_at字段已存在") - - # 检查alerts.severity字段是否存在 - if is_mysql: - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM INFORMATION_SCHEMA.COLUMNS - WHERE TABLE_SCHEMA = 
DATABASE() - AND TABLE_NAME = 'alerts' - AND COLUMN_NAME = 'severity' - """)).fetchone() - else: # SQLite - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM pragma_table_info('alerts') - WHERE name = 'severity' - """)).fetchone() - if result.count == 0: - print(" ➕ 添加alerts.severity字段...") - if is_mysql: - session.execute(text("ALTER TABLE alerts ADD COLUMN severity VARCHAR(20) DEFAULT 'medium'")) - else: - session.execute(text("ALTER TABLE alerts ADD COLUMN severity VARCHAR(20) DEFAULT 'medium'")) - print(" ✅ alerts.severity 字段添加成功") - else: - print(" ✅ alerts.severity 字段已存在") - - # 检查车辆数据表是否存在 - if is_mysql: - result = session.execute(text(""" - SELECT COUNT(*) as count - FROM INFORMATION_SCHEMA.TABLES - WHERE TABLE_SCHEMA = DATABASE() - AND TABLE_NAME = 'vehicle_data' - """)).fetchone() - else: # SQLite - result = session.execute(text(""" - SELECT name FROM sqlite_master - WHERE type='table' AND name='vehicle_data' - """)).fetchone() - - if (is_mysql and result.count == 0) or (not is_mysql and not result): + return True + except Exception as e: + print(f"❌ 创建数据库表失败: {e}") + return False + + def _run_migrations(self) -> bool: + """执行数据库迁移""" + print("\n🔄 执行数据库迁移...") + + migrations = [ + self._migrate_knowledge_verification_fields, + self._migrate_alert_severity_field, + self._migrate_vehicle_data_table, + self._migrate_conversation_enhancements, + self._migrate_workorder_enhancements, + self._migrate_workorder_suggestions_enhancements, + self._migrate_analytics_enhancements, + self._migrate_system_optimization_fields + ] + + success_count = 0 + for migration in migrations: + try: + if migration(): + success_count += 1 + except Exception as e: + self.logger.error(f"迁移失败: {migration.__name__}: {e}") + print(f"⚠️ 迁移 {migration.__name__} 失败: {e}") + + print(f"✅ 完成 {success_count}/{len(migrations)} 个迁移") + return success_count > 0 + + def _migrate_knowledge_verification_fields(self) -> bool: + """迁移知识库验证字段""" + print(" 📝 检查知识库验证字段...") + + 
fields_to_add = [ + ('is_verified', 'BOOLEAN DEFAULT FALSE'), + ('verified_by', 'VARCHAR(100)'), + ('verified_at', 'DATETIME') + ] + + return self._add_table_columns('knowledge_entries', fields_to_add) + + def _migrate_alert_severity_field(self) -> bool: + """迁移预警严重程度字段""" + print(" 🚨 检查预警严重程度字段...") + + fields_to_add = [ + ('severity', 'VARCHAR(20) DEFAULT \'medium\'') + ] + + return self._add_table_columns('alerts', fields_to_add) + + def _migrate_vehicle_data_table(self) -> bool: + """迁移车辆数据表""" + print(" 🚗 检查车辆数据表...") + + try: + with db_manager.get_session() as session: + # 检查表是否存在 + inspector = inspect(db_manager.engine) + if 'vehicle_data' not in inspector.get_table_names(): print(" ➕ 创建vehicle_data表...") VehicleData.__table__.create(session.bind, checkfirst=True) print(" ✅ vehicle_data表创建成功") @@ -200,66 +227,207 @@ def migrate_database(): print(" ✅ vehicle_data表已存在") session.commit() - print(" ✅ 数据库迁移完成") - + return True except Exception as e: - print(f" ❌ 数据库迁移失败: {e}") + print(f" ❌ 车辆数据表迁移失败: {e}") return False - return True - -def reset_database(force: bool = False) -> bool: - """重置数据库:删除并重建所有表,再插入初始数据""" - print("=" * 50) - print("TSP助手数据库重置") - print("=" * 50) + def _migrate_conversation_enhancements(self) -> bool: + """迁移对话增强字段""" + print(" 💬 检查对话增强字段...") + + fields_to_add = [ + ('response_time', 'FLOAT'), + ('user_satisfaction', 'INTEGER'), + ('ai_confidence', 'FLOAT'), + ('context_data', 'TEXT') + ] + + return self._add_table_columns('conversations', fields_to_add) - try: - # 可选确认 - if not force: - try: - confirm = input("⚠️ 警告:此操作将删除所有数据!确定要继续吗?(y/N): ") - if confirm.lower() != 'y': - print("操作已取消") - return False - except Exception: - # 非交互环境下默认取消,建议调用方传入 force=True - print("非交互环境未传入 force=True,已取消") - return False - - # 删除所有表 - Base.metadata.drop_all(bind=db_manager.engine) - print("✓ 数据库表删除成功") + def _migrate_workorder_enhancements(self) -> bool: + """迁移工单增强字段""" + print(" 📋 检查工单增强字段...") - # 重新创建所有表 - 
Base.metadata.create_all(bind=db_manager.engine) - print("✓ 数据库表重新创建成功") + fields_to_add = [ + ('ai_suggestion', 'TEXT'), + ('human_resolution', 'TEXT'), + ('ai_similarity', 'FLOAT'), + ('ai_approved', 'BOOLEAN DEFAULT FALSE'), + ('feishu_record_id', 'VARCHAR(100)'), + ('sync_status', 'VARCHAR(20) DEFAULT \'pending\''), + # 飞书集成扩展字段 + ('source', 'VARCHAR(50)'), + ('module', 'VARCHAR(100)'), + ('created_by', 'VARCHAR(100)'), + ('wilfulness', 'VARCHAR(100)'), + ('date_of_close', 'DATETIME'), + ('vehicle_type', 'VARCHAR(100)'), + ('vin_sim', 'VARCHAR(50)'), + ('app_remote_control_version', 'VARCHAR(100)'), + ('hmi_sw', 'VARCHAR(100)'), + ('parent_record', 'VARCHAR(100)'), + ('has_updated_same_day', 'VARCHAR(50)'), + ('operating_time', 'VARCHAR(100)') + ] - # 迁移补齐新增字段 - migrate_database() + return self._add_table_columns('work_orders', fields_to_add) + + def _migrate_workorder_suggestions_enhancements(self) -> bool: + """迁移工单建议表增强字段""" + print(" 💡 检查工单建议表增强字段...") - # 插入初始数据 - insert_initial_data() + fields_to_add = [ + ('use_human_resolution', 'BOOLEAN DEFAULT FALSE') # 是否使用人工描述入库 + ] - print("✓ 数据库重置完成") + return self._add_table_columns('work_order_suggestions', fields_to_add) + + def _migrate_analytics_enhancements(self) -> bool: + """迁移分析增强字段""" + print(" 📊 检查分析增强字段...") + + fields_to_add = [ + ('performance_score', 'FLOAT'), + ('quality_metrics', 'TEXT'), + ('cost_analysis', 'TEXT'), + ('optimization_suggestions', 'TEXT') + ] + + return self._add_table_columns('analytics', fields_to_add) + + def _migrate_system_optimization_fields(self) -> bool: + """迁移系统优化字段""" + print(" ⚙️ 检查系统优化字段...") + + # 为各个表添加系统优化相关字段 + tables_and_fields = { + 'conversations': [ + ('processing_time', 'FLOAT'), + ('memory_usage', 'FLOAT'), + ('cpu_usage', 'FLOAT') + ], + 'work_orders': [ + ('processing_efficiency', 'FLOAT'), + ('resource_usage', 'TEXT') + ], + 'knowledge_entries': [ + ('search_frequency', 'INTEGER DEFAULT 0'), + ('last_accessed', 'DATETIME'), + ('relevance_score', 'FLOAT') 
+ ] + } + + success = True + for table_name, fields in tables_and_fields.items(): + if not self._add_table_columns(table_name, fields): + success = False + + return success + + def _add_table_columns(self, table_name: str, fields: List[tuple]) -> bool: + """为表添加字段""" + try: + added_count = 0 + skipped_count = 0 + + for field_name, field_type in fields: + try: + if self._column_exists(table_name, field_name): + skipped_count += 1 + continue + + print(f" ➕ 添加字段 {table_name}.{field_name}...") + + # 使用单独的会话添加每个字段,避免长时间锁定 + with db_manager.get_session() as session: + alter_sql = f"ALTER TABLE {table_name} ADD COLUMN {field_name} {field_type}" + session.execute(text(alter_sql)) + session.commit() + + print(f" ✅ 字段 {field_name} 添加成功") + added_count += 1 + + except Exception as field_error: + print(f" ⚠️ 字段 {field_name} 添加失败: {field_error}") + # 继续处理其他字段,不中断整个过程 + + if added_count > 0: + print(f" 📊 成功添加 {added_count} 个字段,跳过 {skipped_count} 个已存在字段") + else: + print(f" 📊 所有字段都已存在,跳过 {skipped_count} 个字段") + return True except Exception as e: - print(f"✗ 数据库重置失败: {e}") + print(f" ❌ 添加字段过程失败: {e}") + return False + + def _column_exists(self, table_name: str, column_name: str) -> bool: + """检查字段是否存在""" + try: + with db_manager.get_session() as session: + if self.is_mysql: + result = session.execute(text(""" + SELECT COUNT(*) as count + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = DATABASE() + AND TABLE_NAME = :table_name + AND COLUMN_NAME = :column_name + """), {"table_name": table_name, "column_name": column_name}).fetchone() + elif self.is_postgresql: + result = session.execute(text(""" + SELECT COUNT(*) as count + FROM information_schema.columns + WHERE table_name = :table_name + AND column_name = :column_name + """), {"table_name": table_name, "column_name": column_name}).fetchone() + else: # SQLite + result = session.execute(text(""" + SELECT COUNT(*) as count + FROM pragma_table_info(:table_name) + WHERE name = :column_name + """), {"table_name": table_name, 
"column_name": column_name}).fetchone() + + return result[0] > 0 + except Exception: return False -def insert_initial_data(): + def _insert_initial_data(self) -> bool: """插入初始数据""" + print("\n📊 插入初始数据...") + try: with db_manager.get_session() as session: # 检查是否已有数据 - existing_entries = session.query(KnowledgeEntry).count() - if existing_entries > 0: - print(" ✅ 数据库中已有数据,跳过初始数据插入") - return - - # 插入示例知识库条目 - initial_knowledge = [ + existing_count = session.query(KnowledgeEntry).count() + if existing_count > 0: + print(f" ✅ 数据库中已有 {existing_count} 条知识库条目,跳过初始数据插入") + return True + + # 插入初始知识库数据 + initial_data = self._get_initial_knowledge_data() + for data in initial_data: + entry = KnowledgeEntry(**data) + session.add(entry) + + session.commit() + print(f" ✅ 成功插入 {len(initial_data)} 条知识库条目") + + # 添加示例车辆数据 + self._add_sample_vehicle_data() + + # 验证现有知识库条目 + self._verify_existing_knowledge() + + return True + except Exception as e: + print(f" ❌ 插入初始数据失败: {e}") + return False + + def _get_initial_knowledge_data(self) -> List[Dict[str, Any]]: + """获取初始知识库数据""" + return [ { "question": "如何重置密码?", "answer": "您可以通过以下步骤重置密码:1. 点击登录页面的'忘记密码'链接 2. 输入您的邮箱地址 3. 检查邮箱并点击重置链接 4. 
设置新密码", @@ -267,7 +435,9 @@ def insert_initial_data(): "confidence_score": 0.9, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.9 }, { "question": "账户被锁定了怎么办?", @@ -276,7 +446,9 @@ def insert_initial_data(): "confidence_score": 0.8, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.8 }, { "question": "如何修改个人信息?", @@ -285,7 +457,9 @@ def insert_initial_data(): "confidence_score": 0.7, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.7 }, { "question": "支付失败怎么办?", @@ -294,7 +468,9 @@ def insert_initial_data(): "confidence_score": 0.8, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.8 }, { "question": "如何申请退款?", @@ -303,7 +479,9 @@ def insert_initial_data(): "confidence_score": 0.7, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.7 }, { "question": "系统无法访问怎么办?", @@ -312,7 +490,9 @@ def insert_initial_data(): "confidence_score": 0.8, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.8 }, { "question": "如何联系客服?", @@ -321,7 +501,9 @@ def insert_initial_data(): "confidence_score": 0.9, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.9 }, { "question": "如何远程启动车辆?", @@ -330,7 +512,9 @@ def insert_initial_data(): "confidence_score": 0.9, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + 
"verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.9 }, { "question": "APP显示车辆信息错误怎么办?", @@ -339,7 +523,9 @@ def insert_initial_data(): "confidence_score": 0.8, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.8 }, { "question": "车辆无法远程启动的原因?", @@ -348,21 +534,13 @@ def insert_initial_data(): "confidence_score": 0.9, "is_verified": True, "verified_by": "system", - "verified_at": datetime.now() - } - ] - - for knowledge in initial_knowledge: - entry = KnowledgeEntry(**knowledge) - session.add(entry) - - session.commit() - print(f" ✅ 成功插入 {len(initial_knowledge)} 条知识库条目") - - except Exception as e: - print(f" ❌ 插入初始数据失败: {e}") - -def add_sample_vehicle_data(): + "verified_at": datetime.now(), + "search_frequency": 0, + "relevance_score": 0.9 + } + ] + + def _add_sample_vehicle_data(self) -> bool: """添加示例车辆数据""" try: from src.vehicle.vehicle_data_manager import VehicleDataManager @@ -375,10 +553,12 @@ def add_sample_vehicle_data(): else: print(" ❌ 示例车辆数据添加失败") + return success except Exception as e: print(f" ❌ 添加示例车辆数据失败: {e}") + return False -def verify_existing_knowledge(): + def _verify_existing_knowledge(self) -> bool: """验证现有的知识库条目""" try: with db_manager.get_session() as session: @@ -395,77 +575,214 @@ def verify_existing_knowledge(): entry.is_verified = True entry.verified_by = "system_init" entry.verified_at = datetime.now() + if not hasattr(entry, 'search_frequency'): + entry.search_frequency = 0 + if not hasattr(entry, 'relevance_score'): + entry.relevance_score = 0.7 session.commit() print(f" ✅ 成功验证 {len(unverified_entries)} 条知识库条目") else: print(" ✅ 所有知识库条目已验证") + return True except Exception as e: print(f" ❌ 验证知识库条目失败: {e}") - -def check_database_status(): + return False + + def _verify_database_integrity(self) -> bool: + """验证数据库完整性""" + print("\n🔍 验证数据库完整性...") + + try: + with db_manager.get_session() as session: + # 
检查各表的记录数 + tables_info = { + 'work_orders': WorkOrder, + 'conversations': Conversation, + 'knowledge_entries': KnowledgeEntry, + 'analytics': Analytics, + 'alerts': Alert, + 'vehicle_data': VehicleData + } + + total_records = 0 + for table_name, model_class in tables_info.items(): + try: + count = session.query(model_class).count() + total_records += count + print(f" 📋 {table_name}: {count} 条记录") + except Exception as e: + print(f" ⚠️ {table_name}: 检查失败 - {e}") + + print(f" 📊 总记录数: {total_records}") + + # 检查关键字段 + self._check_critical_fields() + + print(" ✅ 数据库完整性验证通过") + return True + except Exception as e: + print(f" ❌ 数据库完整性验证失败: {e}") + return False + + def _check_critical_fields(self): + """检查关键字段""" + critical_checks = [ + ("knowledge_entries", "is_verified"), + ("alerts", "severity"), + ("vehicle_data", "vehicle_id"), + ("conversations", "response_time"), + ("work_orders", "ai_suggestion") + ] + + for table_name, field_name in critical_checks: + if self._column_exists(table_name, field_name): + print(f" ✅ {table_name}.{field_name} 字段存在") + else: + print(f" ⚠️ {table_name}.{field_name} 字段缺失") + + def _generate_init_report(self): + """生成初始化报告""" + print("\n📋 生成初始化报告...") + + try: + report = { + "init_time": datetime.now().isoformat(), + "database_version": self.db_version, + "database_url": self.db_url, + "migrations_applied": len(self.migration_history), + "tables_created": self._get_table_count(), + "initial_data_inserted": True, + "verification_passed": True + } + + # 保存报告到文件 + report_path = Path("database_init_report.json") + with open(report_path, 'w', encoding='utf-8') as f: + json.dump(report, f, indent=2, ensure_ascii=False) + + print(f" ✅ 初始化报告已保存到: {report_path}") + + except Exception as e: + print(f" ⚠️ 生成初始化报告失败: {e}") + + def _get_table_count(self) -> int: + """获取表数量""" + try: + inspector = inspect(db_manager.engine) + return len(inspector.get_table_names()) + except Exception: + return 0 + + def check_database_status(self) -> Dict[str, Any]: 
"""检查数据库状态""" - print("\n" + "=" * 60) + print("\n" + "=" * 80) print("📊 数据库状态检查") - print("=" * 60) + print("=" * 80) try: with db_manager.get_session() as session: # 检查各表的记录数 - work_orders_count = session.query(WorkOrder).count() - conversations_count = session.query(Conversation).count() - knowledge_entries_count = session.query(KnowledgeEntry).count() - verified_knowledge_count = session.query(KnowledgeEntry).filter(KnowledgeEntry.is_verified == True).count() - unverified_knowledge_count = session.query(KnowledgeEntry).filter(KnowledgeEntry.is_verified == False).count() - analytics_count = session.query(Analytics).count() - alerts_count = session.query(Alert).count() - vehicle_data_count = session.query(VehicleData).count() - - print(f"📋 工单表记录数: {work_orders_count}") - print(f"💬 对话表记录数: {conversations_count}") - print(f"📚 知识库表记录数: {knowledge_entries_count}") - print(f" - 已验证: {verified_knowledge_count}") - print(f" - 未验证: {unverified_knowledge_count}") - print(f"📊 分析表记录数: {analytics_count}") - print(f"🚨 预警表记录数: {alerts_count}") - print(f"🚗 车辆数据表记录数: {vehicle_data_count}") + tables_info = { + 'work_orders': WorkOrder, + 'conversations': Conversation, + 'knowledge_entries': KnowledgeEntry, + 'analytics': Analytics, + 'alerts': Alert, + 'vehicle_data': VehicleData + } + + status = { + "database_version": self.db_version, + "connection_status": "正常", + "tables": {}, + "total_records": 0, + "last_check": datetime.now().isoformat() + } + + for table_name, model_class in tables_info.items(): + try: + count = session.query(model_class).count() + status["tables"][table_name] = count + status["total_records"] += count + print(f"📋 {table_name}: {count} 条记录") + except Exception as e: + status["tables"][table_name] = f"错误: {e}" + print(f"⚠️ {table_name}: 检查失败 - {e}") # 检查车辆数据详情 - if vehicle_data_count > 0: + if 'vehicle_data' in status["tables"] and isinstance(status["tables"]['vehicle_data'], int): + vehicle_count = status["tables"]['vehicle_data'] + if vehicle_count > 0: 
vehicle_ids = session.query(VehicleData.vehicle_id).distinct().all() print(f" - 车辆数量: {len(vehicle_ids)}") + status["vehicle_count"] = len(vehicle_ids) + for vehicle_id in vehicle_ids[:3]: # 显示前3个车辆 vehicle_data_types = session.query(VehicleData.data_type).filter( VehicleData.vehicle_id == vehicle_id[0] ).distinct().all() print(f" - 车辆 {vehicle_id[0]}: {len(vehicle_data_types)} 种数据类型") + # 检查知识库验证状态 + if 'knowledge_entries' in status["tables"] and isinstance(status["tables"]['knowledge_entries'], int): + verified_count = session.query(KnowledgeEntry).filter(KnowledgeEntry.is_verified == True).count() + unverified_count = session.query(KnowledgeEntry).filter(KnowledgeEntry.is_verified == False).count() + print(f" - 已验证: {verified_count}") + print(f" - 未验证: {unverified_count}") + status["knowledge_verification"] = { + "verified": verified_count, + "unverified": unverified_count + } + + print(f"\n📊 总记录数: {status['total_records']}") print("\n✅ 数据库状态检查完成") + return status + except Exception as e: print(f"❌ 数据库状态检查失败: {e}") + return {"error": str(e)} def main(): """主函数""" - print("🚀 TSP智能助手数据库初始化工具") - print("=" * 60) + print("🚀 TSP智能助手数据库初始化工具 - 重构版本") + print("=" * 80) + + # 创建初始化器 + initializer = DatabaseInitializer() + + # 检查命令行参数 + force_reset = '--reset' in sys.argv or '--force' in sys.argv + + if force_reset: + print("⚠️ 警告:将重置数据库,所有数据将被删除!") + try: + confirm = input("确定要继续吗?(y/N): ") + if confirm.lower() != 'y': + print("操作已取消") + return + except Exception: + print("非交互环境,跳过确认") # 初始化数据库 - if init_database(): + if initializer.initialize_database(force_reset=force_reset): # 检查数据库状态 - check_database_status() + initializer.check_database_status() - print("\n" + "=" * 60) + print("\n" + "=" * 80) print("🎉 数据库初始化成功!") - print("=" * 60) + print("=" * 80) print("✅ 已完成的操作:") print(" - 创建所有数据库表") + print(" - 执行数据库迁移") print(" - 添加知识库验证字段") print(" - 创建车辆数据表") print(" - 插入初始知识库数据") print(" - 添加示例车辆数据") print(" - 验证所有知识库条目") + print(" - 生成初始化报告") print("\n🚀 
现在您可以运行以下命令启动系统:") print(" python start_dashboard.py") print("\n🧪 或运行功能测试:") @@ -476,15 +793,18 @@ def main(): print(" - 车辆实时数据管理") print(" - 文件上传生成知识库") print(" - 智能对话结合车辆数据") + print(" - 飞书同步功能") + print(" - 系统性能优化") else: - print("\n" + "=" * 60) + print("\n" + "=" * 80) print("❌ 数据库初始化失败!") - print("=" * 60) + print("=" * 80) print("请检查:") print("1. 数据库文件权限") - print("2. SQLite是否已安装") + print("2. 数据库服务是否运行") print("3. 磁盘空间是否充足") print("4. Python依赖库是否完整") + print("5. 配置文件是否正确") if __name__ == "__main__": main() diff --git a/src/agent/agent_assistant_core.py b/src/agent/agent_assistant_core.py new file mode 100644 index 0000000..c6a9c38 --- /dev/null +++ b/src/agent/agent_assistant_core.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- +""" +TSP Agent助手核心模块 +包含Agent助手的核心功能和基础类 +""" + +import logging +import asyncio +from typing import Dict, Any, List, Optional +from datetime import datetime +import json + +from src.main import TSPAssistant +from src.agent import AgentCore, AgentState +from src.agent.auto_monitor import AutoMonitorService +from src.agent.intelligent_agent import IntelligentAgent, AlertContext, KnowledgeContext +from src.agent.llm_client import LLMManager, LLMConfig +from src.agent.action_executor import ActionExecutor + +logger = logging.getLogger(__name__) + +class TSPAgentAssistantCore(TSPAssistant): + """TSP Agent助手核心 - 基础功能""" + + def __init__(self, llm_config: Optional[LLMConfig] = None): + # 初始化基础TSP助手 + super().__init__() + + # 初始化Agent核心 + self.agent_core = AgentCore() + + # 初始化自动监控服务 + self.auto_monitor = AutoMonitorService(self) + + # 初始化LLM客户端 + self._init_llm_manager(llm_config) + + # 初始化智能Agent + self.intelligent_agent = IntelligentAgent( + llm_manager=self.llm_manager, + agent_core=self.agent_core + ) + + # 初始化动作执行器 + self.action_executor = ActionExecutor(self) + + # Agent状态 + self.agent_state = AgentState.IDLE + self.is_agent_mode = True + self.proactive_monitoring_enabled = False + + # 执行历史 + self.execution_history = [] + 
self.max_history_size = 1000 + + logger.info("TSP Agent助手核心初始化完成") + + def _init_llm_manager(self, llm_config: Optional[LLMConfig] = None): + """初始化LLM管理器""" + if llm_config: + self.llm_manager = LLMManager(llm_config) + else: + # 使用默认配置 - 千问模型 + try: + from config.llm_config import DEFAULT_CONFIG + self.llm_manager = LLMManager(DEFAULT_CONFIG) + except ImportError: + # 如果配置文件不存在,使用内置配置 + default_config = LLMConfig( + provider="openai", + api_key="sk-your-qwen-api-key-here", + base_url="https://dashscope.aliyuncs.com/compatible-mode/v1", + model="qwen-turbo", + temperature=0.7, + max_tokens=2000 + ) + self.llm_manager = LLMManager(default_config) + + def get_agent_status(self) -> Dict[str, Any]: + """获取Agent状态""" + return { + "agent_state": self.agent_state.value, + "is_agent_mode": self.is_agent_mode, + "proactive_monitoring": self.proactive_monitoring_enabled, + "execution_count": len(self.execution_history), + "llm_status": self.llm_manager.get_status(), + "agent_core_status": self.agent_core.get_status(), + "last_activity": self.execution_history[-1]["timestamp"] if self.execution_history else None + } + + def toggle_agent_mode(self, enabled: bool) -> bool: + """切换Agent模式""" + try: + self.is_agent_mode = enabled + if enabled: + self.agent_state = AgentState.IDLE + logger.info("Agent模式已启用") + else: + self.agent_state = AgentState.DISABLED + logger.info("Agent模式已禁用") + return True + except Exception as e: + logger.error(f"切换Agent模式失败: {e}") + return False + + def start_proactive_monitoring(self) -> bool: + """启动主动监控""" + try: + if not self.proactive_monitoring_enabled: + self.proactive_monitoring_enabled = True + self.auto_monitor.start_monitoring() + logger.info("主动监控已启动") + return True + return False + except Exception as e: + logger.error(f"启动主动监控失败: {e}") + return False + + def stop_proactive_monitoring(self) -> bool: + """停止主动监控""" + try: + if self.proactive_monitoring_enabled: + self.proactive_monitoring_enabled = False + self.auto_monitor.stop_monitoring() 
+ logger.info("主动监控已停止") + return True + return False + except Exception as e: + logger.error(f"停止主动监控失败: {e}") + return False + + def run_proactive_monitoring(self) -> Dict[str, Any]: + """运行主动监控检查""" + try: + if not self.proactive_monitoring_enabled: + return {"success": False, "message": "主动监控未启用"} + + # 获取系统状态 + system_health = self.get_system_health() + + # 检查预警 + alerts = self.check_alerts() + + # 检查工单状态 + workorders_status = self._check_workorders_status() + + # 运行智能分析 + analysis = self.intelligent_agent.analyze_system_state( + system_health=system_health, + alerts=alerts, + workorders=workorders_status + ) + + # 执行建议的动作 + actions_taken = [] + if analysis.get("recommended_actions"): + for action in analysis["recommended_actions"]: + result = self.action_executor.execute_action(action) + actions_taken.append(result) + + return { + "success": True, + "analysis": analysis, + "actions_taken": actions_taken, + "timestamp": datetime.now().isoformat() + } + except Exception as e: + logger.error(f"主动监控检查失败: {e}") + return {"success": False, "error": str(e)} + + def _check_workorders_status(self) -> Dict[str, Any]: + """检查工单状态""" + try: + from src.core.database import db_manager + from src.core.models import WorkOrder + + with db_manager.get_session() as session: + total_workorders = session.query(WorkOrder).count() + open_workorders = session.query(WorkOrder).filter(WorkOrder.status == 'open').count() + resolved_workorders = session.query(WorkOrder).filter(WorkOrder.status == 'resolved').count() + + return { + "total": total_workorders, + "open": open_workorders, + "resolved": resolved_workorders, + "resolution_rate": resolved_workorders / total_workorders if total_workorders > 0 else 0 + } + except Exception as e: + logger.error(f"检查工单状态失败: {e}") + return {"error": str(e)} + + def run_intelligent_analysis(self) -> Dict[str, Any]: + """运行智能分析""" + try: + # 获取系统数据 + system_health = self.get_system_health() + alerts = self.check_alerts() + workorders = 
self._check_workorders_status() + + # 创建分析上下文 + context = { + "system_health": system_health, + "alerts": alerts, + "workorders": workorders, + "timestamp": datetime.now().isoformat() + } + + # 运行智能分析 + analysis = self.intelligent_agent.comprehensive_analysis(context) + + # 记录分析结果 + self._record_execution("intelligent_analysis", analysis) + + return analysis + except Exception as e: + logger.error(f"智能分析失败: {e}") + return {"error": str(e)} + + def _record_execution(self, action_type: str, result: Any): + """记录执行历史""" + execution_record = { + "timestamp": datetime.now().isoformat(), + "action_type": action_type, + "result": result, + "agent_state": self.agent_state.value + } + + self.execution_history.append(execution_record) + + # 保持历史记录大小限制 + if len(self.execution_history) > self.max_history_size: + self.execution_history = self.execution_history[-self.max_history_size:] + + def get_action_history(self, limit: int = 50) -> List[Dict[str, Any]]: + """获取动作执行历史""" + return self.execution_history[-limit:] if self.execution_history else [] + + def clear_execution_history(self) -> Dict[str, Any]: + """清空执行历史""" + try: + self.execution_history.clear() + logger.info("执行历史已清空") + return {"success": True, "message": "执行历史已清空"} + except Exception as e: + logger.error(f"清空执行历史失败: {e}") + return {"success": False, "error": str(e)} + + def get_llm_usage_stats(self) -> Dict[str, Any]: + """获取LLM使用统计""" + try: + return self.llm_manager.get_usage_stats() + except Exception as e: + logger.error(f"获取LLM使用统计失败: {e}") + return {"error": str(e)} diff --git a/src/agent/agent_message_handler.py b/src/agent/agent_message_handler.py new file mode 100644 index 0000000..b027d37 --- /dev/null +++ b/src/agent/agent_message_handler.py @@ -0,0 +1,243 @@ +# -*- coding: utf-8 -*- +""" +TSP Agent消息处理模块 +处理Agent的消息处理和对话功能 +""" + +import logging +import asyncio +from typing import Dict, Any, List, Optional +from datetime import datetime + +from .agent_assistant_core import TSPAgentAssistantCore +from 
.intelligent_agent import IntelligentAgent + +logger = logging.getLogger(__name__) + +class AgentMessageHandler: + """Agent消息处理器""" + + def __init__(self, agent_core: TSPAgentAssistantCore): + self.agent_core = agent_core + self.intelligent_agent = agent_core.intelligent_agent + self.action_executor = agent_core.action_executor + + async def process_message_agent(self, message: str, user_id: str = "admin", + work_order_id: Optional[int] = None, + enable_proactive: bool = True) -> Dict[str, Any]: + """使用Agent处理消息""" + try: + # 更新Agent状态 + self.agent_core.agent_state = self.agent_core.agent_core.AgentState.PROCESSING + + # 创建对话上下文 + context = { + "message": message, + "user_id": user_id, + "work_order_id": work_order_id, + "timestamp": datetime.now().isoformat(), + "enable_proactive": enable_proactive + } + + # 使用智能Agent处理消息 + agent_response = await self.intelligent_agent.process_message(context) + + # 执行建议的动作 + actions_taken = [] + if agent_response.get("recommended_actions"): + for action in agent_response["recommended_actions"]: + action_result = self.action_executor.execute_action(action) + actions_taken.append(action_result) + + # 生成响应 + response = { + "response": agent_response.get("response", "Agent已处理您的请求"), + "actions": actions_taken, + "status": "completed", + "confidence": agent_response.get("confidence", 0.8), + "context": context + } + + # 记录执行历史 + self.agent_core._record_execution("message_processing", response) + + # 更新Agent状态 + self.agent_core.agent_state = self.agent_core.agent_core.AgentState.IDLE + + return response + + except Exception as e: + logger.error(f"Agent消息处理失败: {e}") + self.agent_core.agent_state = self.agent_core.agent_core.AgentState.ERROR + + return { + "response": f"处理消息时发生错误: {str(e)}", + "actions": [], + "status": "error", + "error": str(e) + } + + async def process_conversation_agent(self, conversation_data: Dict[str, Any]) -> Dict[str, Any]: + """使用Agent处理对话""" + try: + # 提取对话信息 + user_message = conversation_data.get("message", 
"") + user_id = conversation_data.get("user_id", "anonymous") + session_id = conversation_data.get("session_id") + + # 创建对话上下文 + context = { + "message": user_message, + "user_id": user_id, + "session_id": session_id, + "conversation_history": conversation_data.get("history", []), + "timestamp": datetime.now().isoformat() + } + + # 使用智能Agent处理对话 + agent_response = await self.intelligent_agent.process_conversation(context) + + # 执行建议的动作 + actions_taken = [] + if agent_response.get("recommended_actions"): + for action in agent_response["recommended_actions"]: + action_result = self.action_executor.execute_action(action) + actions_taken.append(action_result) + + # 生成响应 + response = { + "response": agent_response.get("response", "Agent已处理您的对话"), + "actions": actions_taken, + "status": "completed", + "confidence": agent_response.get("confidence", 0.8), + "context": context, + "session_id": session_id + } + + # 记录执行历史 + self.agent_core._record_execution("conversation_processing", response) + + return response + + except Exception as e: + logger.error(f"Agent对话处理失败: {e}") + return { + "response": f"处理对话时发生错误: {str(e)}", + "actions": [], + "status": "error", + "error": str(e) + } + + async def process_workorder_agent(self, workorder_data: Dict[str, Any]) -> Dict[str, Any]: + """使用Agent处理工单""" + try: + # 提取工单信息 + workorder_id = workorder_data.get("workorder_id") + action_type = workorder_data.get("action_type", "analyze") + + # 创建工单上下文 + context = { + "workorder_id": workorder_id, + "action_type": action_type, + "workorder_data": workorder_data, + "timestamp": datetime.now().isoformat() + } + + # 使用智能Agent处理工单 + agent_response = await self.intelligent_agent.process_workorder(context) + + # 执行建议的动作 + actions_taken = [] + if agent_response.get("recommended_actions"): + for action in agent_response["recommended_actions"]: + action_result = self.action_executor.execute_action(action) + actions_taken.append(action_result) + + # 生成响应 + response = { + "response": 
agent_response.get("response", "Agent已处理工单"), + "actions": actions_taken, + "status": "completed", + "confidence": agent_response.get("confidence", 0.8), + "context": context + } + + # 记录执行历史 + self.agent_core._record_execution("workorder_processing", response) + + return response + + except Exception as e: + logger.error(f"Agent工单处理失败: {e}") + return { + "response": f"处理工单时发生错误: {str(e)}", + "actions": [], + "status": "error", + "error": str(e) + } + + async def process_alert_agent(self, alert_data: Dict[str, Any]) -> Dict[str, Any]: + """使用Agent处理预警""" + try: + # 创建预警上下文 + context = { + "alert_data": alert_data, + "timestamp": datetime.now().isoformat() + } + + # 使用智能Agent处理预警 + agent_response = await self.intelligent_agent.process_alert(context) + + # 执行建议的动作 + actions_taken = [] + if agent_response.get("recommended_actions"): + for action in agent_response["recommended_actions"]: + action_result = self.action_executor.execute_action(action) + actions_taken.append(action_result) + + # 生成响应 + response = { + "response": agent_response.get("response", "Agent已处理预警"), + "actions": actions_taken, + "status": "completed", + "confidence": agent_response.get("confidence", 0.8), + "context": context + } + + # 记录执行历史 + self.agent_core._record_execution("alert_processing", response) + + return response + + except Exception as e: + logger.error(f"Agent预警处理失败: {e}") + return { + "response": f"处理预警时发生错误: {str(e)}", + "actions": [], + "status": "error", + "error": str(e) + } + + def get_conversation_suggestions(self, context: Dict[str, Any]) -> List[str]: + """获取对话建议""" + try: + return self.intelligent_agent.get_conversation_suggestions(context) + except Exception as e: + logger.error(f"获取对话建议失败: {e}") + return [] + + def get_workorder_suggestions(self, workorder_data: Dict[str, Any]) -> List[str]: + """获取工单建议""" + try: + return self.intelligent_agent.get_workorder_suggestions(workorder_data) + except Exception as e: + logger.error(f"获取工单建议失败: {e}") + return [] + + def 
get_alert_suggestions(self, alert_data: Dict[str, Any]) -> List[str]: + """获取预警建议""" + try: + return self.intelligent_agent.get_alert_suggestions(alert_data) + except Exception as e: + logger.error(f"获取预警建议失败: {e}") + return [] diff --git a/src/agent/agent_sample_actions.py b/src/agent/agent_sample_actions.py new file mode 100644 index 0000000..c492898 --- /dev/null +++ b/src/agent/agent_sample_actions.py @@ -0,0 +1,405 @@ +# -*- coding: utf-8 -*- +""" +TSP Agent示例动作模块 +包含Agent的示例动作和测试功能 +""" + +import logging +import asyncio +from typing import Dict, Any, List +from datetime import datetime, timedelta + +from .agent_assistant_core import TSPAgentAssistantCore + +logger = logging.getLogger(__name__) + +class AgentSampleActions: + """Agent示例动作处理器""" + + def __init__(self, agent_core: TSPAgentAssistantCore): + self.agent_core = agent_core + + async def trigger_sample_actions(self) -> Dict[str, Any]: + """触发示例动作""" + try: + logger.info("开始执行示例动作") + + # 执行多个示例动作 + actions_results = [] + + # 1. 系统健康检查 + health_result = await self._sample_health_check() + actions_results.append(health_result) + + # 2. 预警分析 + alert_result = await self._sample_alert_analysis() + actions_results.append(alert_result) + + # 3. 工单处理 + workorder_result = await self._sample_workorder_processing() + actions_results.append(workorder_result) + + # 4. 知识库更新 + knowledge_result = await self._sample_knowledge_update() + actions_results.append(knowledge_result) + + # 5. 
性能优化 + optimization_result = await self._sample_performance_optimization() + actions_results.append(optimization_result) + + # 记录执行历史 + self.agent_core._record_execution("sample_actions", { + "actions_count": len(actions_results), + "results": actions_results + }) + + return { + "success": True, + "message": f"成功执行 {len(actions_results)} 个示例动作", + "actions_results": actions_results, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"执行示例动作失败: {e}") + return { + "success": False, + "error": str(e), + "timestamp": datetime.now().isoformat() + } + + async def _sample_health_check(self) -> Dict[str, Any]: + """示例:系统健康检查""" + try: + # 获取系统健康状态 + health_data = self.agent_core.get_system_health() + + # 模拟健康检查逻辑 + health_score = health_data.get("health_score", 0) + + if health_score > 80: + status = "excellent" + message = "系统运行状态良好" + elif health_score > 60: + status = "good" + message = "系统运行状态正常" + elif health_score > 40: + status = "fair" + message = "系统运行状态一般,建议关注" + else: + status = "poor" + message = "系统运行状态较差,需要优化" + + return { + "action_type": "health_check", + "status": status, + "message": message, + "health_score": health_score, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"健康检查失败: {e}") + return { + "action_type": "health_check", + "status": "error", + "error": str(e) + } + + async def _sample_alert_analysis(self) -> Dict[str, Any]: + """示例:预警分析""" + try: + # 获取预警数据 + alerts = self.agent_core.check_alerts() + + # 分析预警 + alert_count = len(alerts) + critical_alerts = [a for a in alerts if a.get("level") == "critical"] + warning_alerts = [a for a in alerts if a.get("level") == "warning"] + + # 生成分析结果 + if alert_count == 0: + status = "no_alerts" + message = "当前无活跃预警" + elif len(critical_alerts) > 0: + status = "critical" + message = f"发现 {len(critical_alerts)} 个严重预警,需要立即处理" + elif len(warning_alerts) > 0: + status = "warning" + message = f"发现 {len(warning_alerts)} 个警告预警,建议关注" + 
else: + status = "info" + message = f"发现 {alert_count} 个信息预警" + + return { + "action_type": "alert_analysis", + "status": status, + "message": message, + "alert_count": alert_count, + "critical_count": len(critical_alerts), + "warning_count": len(warning_alerts), + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"预警分析失败: {e}") + return { + "action_type": "alert_analysis", + "status": "error", + "error": str(e) + } + + async def _sample_workorder_processing(self) -> Dict[str, Any]: + """示例:工单处理""" + try: + # 获取工单状态 + workorders_status = self.agent_core._check_workorders_status() + + total = workorders_status.get("total", 0) + open_count = workorders_status.get("open", 0) + resolved_count = workorders_status.get("resolved", 0) + resolution_rate = workorders_status.get("resolution_rate", 0) + + # 分析工单状态 + if total == 0: + status = "no_workorders" + message = "当前无工单" + elif open_count > 10: + status = "high_backlog" + message = f"工单积压严重,有 {open_count} 个待处理工单" + elif resolution_rate > 0.8: + status = "good_resolution" + message = f"工单处理效率良好,解决率 {resolution_rate:.1%}" + else: + status = "normal" + message = f"工单处理状态正常,待处理 {open_count} 个" + + return { + "action_type": "workorder_processing", + "status": status, + "message": message, + "total_workorders": total, + "open_workorders": open_count, + "resolved_workorders": resolved_count, + "resolution_rate": resolution_rate, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"工单处理分析失败: {e}") + return { + "action_type": "workorder_processing", + "status": "error", + "error": str(e) + } + + async def _sample_knowledge_update(self) -> Dict[str, Any]: + """示例:知识库更新""" + try: + from src.core.database import db_manager + from src.core.models import KnowledgeEntry + + with db_manager.get_session() as session: + # 获取知识库统计 + total_knowledge = session.query(KnowledgeEntry).count() + verified_knowledge = session.query(KnowledgeEntry).filter( + 
KnowledgeEntry.is_verified == True + ).count() + unverified_knowledge = total_knowledge - verified_knowledge + + # 分析知识库状态 + if total_knowledge == 0: + status = "empty" + message = "知识库为空,建议添加知识条目" + elif unverified_knowledge > 0: + status = "needs_verification" + message = f"有 {unverified_knowledge} 个知识条目需要验证" + else: + status = "up_to_date" + message = "知识库状态良好,所有条目已验证" + + return { + "action_type": "knowledge_update", + "status": status, + "message": message, + "total_knowledge": total_knowledge, + "verified_knowledge": verified_knowledge, + "unverified_knowledge": unverified_knowledge, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"知识库更新分析失败: {e}") + return { + "action_type": "knowledge_update", + "status": "error", + "error": str(e) + } + + async def _sample_performance_optimization(self) -> Dict[str, Any]: + """示例:性能优化""" + try: + # 获取系统性能数据 + system_health = self.agent_core.get_system_health() + + # 分析性能指标 + cpu_usage = system_health.get("cpu_usage", 0) + memory_usage = system_health.get("memory_usage", 0) + disk_usage = system_health.get("disk_usage", 0) + + # 生成优化建议 + optimization_suggestions = [] + + if cpu_usage > 80: + optimization_suggestions.append("CPU使用率过高,建议优化计算密集型任务") + if memory_usage > 80: + optimization_suggestions.append("内存使用率过高,建议清理缓存或增加内存") + if disk_usage > 90: + optimization_suggestions.append("磁盘空间不足,建议清理日志文件或扩容") + + if not optimization_suggestions: + status = "optimal" + message = "系统性能良好,无需优化" + else: + status = "needs_optimization" + message = f"发现 {len(optimization_suggestions)} 个性能优化点" + + return { + "action_type": "performance_optimization", + "status": status, + "message": message, + "cpu_usage": cpu_usage, + "memory_usage": memory_usage, + "disk_usage": disk_usage, + "optimization_suggestions": optimization_suggestions, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"性能优化分析失败: {e}") + return { + "action_type": "performance_optimization", + 
"status": "error", + "error": str(e) + } + + async def run_performance_test(self) -> Dict[str, Any]: + """运行性能测试""" + try: + start_time = datetime.now() + + # 执行多个测试 + test_results = [] + + # 1. 响应时间测试 + response_time = await self._test_response_time() + test_results.append(response_time) + + # 2. 并发处理测试 + concurrency_test = await self._test_concurrency() + test_results.append(concurrency_test) + + # 3. 内存使用测试 + memory_test = await self._test_memory_usage() + test_results.append(memory_test) + + end_time = datetime.now() + total_time = (end_time - start_time).total_seconds() + + return { + "success": True, + "message": "性能测试完成", + "total_time": total_time, + "test_results": test_results, + "timestamp": datetime.now().isoformat() + } + + except Exception as e: + logger.error(f"性能测试失败: {e}") + return { + "success": False, + "error": str(e), + "timestamp": datetime.now().isoformat() + } + + async def _test_response_time(self) -> Dict[str, Any]: + """测试响应时间""" + start_time = datetime.now() + + # 模拟处理任务 + await asyncio.sleep(0.1) + + end_time = datetime.now() + response_time = (end_time - start_time).total_seconds() + + return { + "test_type": "response_time", + "response_time": response_time, + "status": "good" if response_time < 0.5 else "slow" + } + + async def _test_concurrency(self) -> Dict[str, Any]: + """测试并发处理""" + try: + # 创建多个并发任务 + tasks = [] + for i in range(5): + task = asyncio.create_task(self._simulate_task(i)) + tasks.append(task) + + # 等待所有任务完成 + results = await asyncio.gather(*tasks) + + return { + "test_type": "concurrency", + "concurrent_tasks": len(tasks), + "successful_tasks": len([r for r in results if r.get("success")]), + "status": "good" + } + + except Exception as e: + return { + "test_type": "concurrency", + "status": "error", + "error": str(e) + } + + async def _simulate_task(self, task_id: int) -> Dict[str, Any]: + """模拟任务""" + try: + await asyncio.sleep(0.05) # 模拟处理时间 + return { + "task_id": task_id, + "success": True, + "result": f"Task 
{task_id} completed" + } + except Exception as e: + return { + "task_id": task_id, + "success": False, + "error": str(e) + } + + async def _test_memory_usage(self) -> Dict[str, Any]: + """测试内存使用""" + try: + import psutil + + # 获取当前内存使用情况 + memory_info = psutil.virtual_memory() + + return { + "test_type": "memory_usage", + "total_memory": memory_info.total, + "available_memory": memory_info.available, + "used_memory": memory_info.used, + "memory_percentage": memory_info.percent, + "status": "good" if memory_info.percent < 80 else "high" + } + + except Exception as e: + return { + "test_type": "memory_usage", + "status": "error", + "error": str(e) + } diff --git a/src/agent_assistant.py b/src/agent_assistant.py index 8502fef..7dbb9b5 100644 --- a/src/agent_assistant.py +++ b/src/agent_assistant.py @@ -2,72 +2,34 @@ # -*- coding: utf-8 -*- """ 增强版TSP助手 - 集成Agent功能 -这是一个真正的智能Agent实现 +重构版本:模块化设计,降低代码复杂度 """ import logging import asyncio from typing import Dict, Any, List, Optional from datetime import datetime -import json -from src.main import TSPAssistant -from src.agent import AgentCore, AgentState -from src.agent.auto_monitor import AutoMonitorService -from src.agent.intelligent_agent import IntelligentAgent, AlertContext, KnowledgeContext -from src.agent.llm_client import LLMManager, LLMConfig -from src.agent.action_executor import ActionExecutor +from src.agent.agent_assistant_core import TSPAgentAssistantCore +from src.agent.agent_message_handler import AgentMessageHandler +from src.agent.agent_sample_actions import AgentSampleActions logger = logging.getLogger(__name__) -class TSPAgentAssistant(TSPAssistant): - """TSP Agent助手 - 增强版TSP助手,具备完整Agent功能""" +class TSPAgentAssistant(TSPAgentAssistantCore): + """TSP Agent助手 - 重构版本""" - def __init__(self, llm_config: Optional[LLMConfig] = None): - # 初始化基础TSP助手 - super().__init__() + def __init__(self, llm_config=None): + # 初始化核心功能 + super().__init__(llm_config) - # 初始化Agent核心 - self.agent_core = AgentCore() + # 初始化消息处理器 
+ self.message_handler = AgentMessageHandler(self) - # 初始化自动监控服务 - self.auto_monitor = AutoMonitorService(self) + # 初始化示例动作处理器 + self.sample_actions = AgentSampleActions(self) - # 初始化LLM客户端 - if llm_config: - self.llm_manager = LLMManager(llm_config) - else: - # 使用默认配置 - 千问模型 - try: - from config.llm_config import DEFAULT_CONFIG - self.llm_manager = LLMManager(DEFAULT_CONFIG) - except ImportError: - # 如果配置文件不存在,使用内置配置 - default_config = LLMConfig( - provider="openai", - api_key="sk-your-qwen-api-key-here", - base_url="https://dashscope.aliyuncs.com/compatible-mode/v1", - model="qwen-turbo", - temperature=0.7, - max_tokens=2000 - ) - self.llm_manager = LLMManager(default_config) - - # 初始化智能Agent - self.intelligent_agent = IntelligentAgent(self.llm_manager) - - # 初始化动作执行器 - self.action_executor = ActionExecutor(self) - - # Agent特有功能 - self.is_agent_mode = True - self.proactive_tasks = [] - self.agent_memory = {} - - # 添加一些示例执行历史(用于演示) - self._add_sample_execution_history() - - logger.info("TSP Agent助手初始化完成") + logger.info("TSP Agent助手初始化完成(重构版本)") async def process_message_agent( self, diff --git a/src/agent_assistant_new.py b/src/agent_assistant_new.py new file mode 100644 index 0000000..190b698 --- /dev/null +++ b/src/agent_assistant_new.py @@ -0,0 +1,322 @@ +# -*- coding: utf-8 -*- +""" +增强版TSP助手 - 集成Agent功能 +重构版本:模块化设计,降低代码复杂度 +""" + +import logging +import asyncio +from typing import Dict, Any, List, Optional +from datetime import datetime + +from src.agent.agent_assistant_core import TSPAgentAssistantCore +from src.agent.agent_message_handler import AgentMessageHandler +from src.agent.agent_sample_actions import AgentSampleActions + +logger = logging.getLogger(__name__) + +class TSPAgentAssistant(TSPAgentAssistantCore): + """TSP Agent助手 - 重构版本""" + + def __init__(self, llm_config=None): + # 初始化核心功能 + super().__init__(llm_config) + + # 初始化消息处理器 + self.message_handler = AgentMessageHandler(self) + + # 初始化示例动作处理器 + self.sample_actions = AgentSampleActions(self) + 
+ logger.info("TSP Agent助手初始化完成(重构版本)") + + # ==================== 消息处理功能 ==================== + + async def process_message_agent(self, message: str, user_id: str = "admin", + work_order_id: Optional[int] = None, + enable_proactive: bool = True) -> Dict[str, Any]: + """使用Agent处理消息""" + return await self.message_handler.process_message_agent( + message, user_id, work_order_id, enable_proactive + ) + + async def process_conversation_agent(self, conversation_data: Dict[str, Any]) -> Dict[str, Any]: + """使用Agent处理对话""" + return await self.message_handler.process_conversation_agent(conversation_data) + + async def process_workorder_agent(self, workorder_data: Dict[str, Any]) -> Dict[str, Any]: + """使用Agent处理工单""" + return await self.message_handler.process_workorder_agent(workorder_data) + + async def process_alert_agent(self, alert_data: Dict[str, Any]) -> Dict[str, Any]: + """使用Agent处理预警""" + return await self.message_handler.process_alert_agent(alert_data) + + # ==================== 建议功能 ==================== + + def get_conversation_suggestions(self, context: Dict[str, Any]) -> List[str]: + """获取对话建议""" + return self.message_handler.get_conversation_suggestions(context) + + def get_workorder_suggestions(self, workorder_data: Dict[str, Any]) -> List[str]: + """获取工单建议""" + return self.message_handler.get_workorder_suggestions(workorder_data) + + def get_alert_suggestions(self, alert_data: Dict[str, Any]) -> List[str]: + """获取预警建议""" + return self.message_handler.get_alert_suggestions(alert_data) + + # ==================== 示例动作功能 ==================== + + async def trigger_sample_actions(self) -> Dict[str, Any]: + """触发示例动作""" + return await self.sample_actions.trigger_sample_actions() + + async def run_performance_test(self) -> Dict[str, Any]: + """运行性能测试""" + return await self.sample_actions.run_performance_test() + + # ==================== 兼容性方法 ==================== + + def get_agent_status(self) -> Dict[str, Any]: + """获取Agent状态(兼容性方法)""" + return 
super().get_agent_status() + + def toggle_agent_mode(self, enabled: bool) -> bool: + """切换Agent模式(兼容性方法)""" + return super().toggle_agent_mode(enabled) + + def start_proactive_monitoring(self) -> bool: + """启动主动监控(兼容性方法)""" + return super().start_proactive_monitoring() + + def stop_proactive_monitoring(self) -> bool: + """停止主动监控(兼容性方法)""" + return super().stop_proactive_monitoring() + + def run_proactive_monitoring(self) -> Dict[str, Any]: + """运行主动监控检查(兼容性方法)""" + return super().run_proactive_monitoring() + + def run_intelligent_analysis(self) -> Dict[str, Any]: + """运行智能分析(兼容性方法)""" + return super().run_intelligent_analysis() + + def get_action_history(self, limit: int = 50) -> List[Dict[str, Any]]: + """获取动作执行历史(兼容性方法)""" + return super().get_action_history(limit) + + def clear_execution_history(self) -> Dict[str, Any]: + """清空执行历史(兼容性方法)""" + return super().clear_execution_history() + + def get_llm_usage_stats(self) -> Dict[str, Any]: + """获取LLM使用统计(兼容性方法)""" + return super().get_llm_usage_stats() + + # ==================== 高级功能 ==================== + + async def comprehensive_analysis(self) -> Dict[str, Any]: + """综合分析 - 结合多个模块的分析结果""" + try: + # 运行智能分析 + intelligent_analysis = self.run_intelligent_analysis() + + # 运行主动监控 + proactive_monitoring = self.run_proactive_monitoring() + + # 运行性能测试 + performance_test = await self.run_performance_test() + + # 综合结果 + comprehensive_result = { + "timestamp": self.execution_history[-1]["timestamp"] if self.execution_history else None, + "intelligent_analysis": intelligent_analysis, + "proactive_monitoring": proactive_monitoring, + "performance_test": performance_test, + "overall_status": self._determine_overall_status( + intelligent_analysis, proactive_monitoring, performance_test + ) + } + + # 记录综合分析 + self._record_execution("comprehensive_analysis", comprehensive_result) + + return comprehensive_result + + except Exception as e: + logger.error(f"综合分析失败: {e}") + return {"error": str(e)} + + def 
_determine_overall_status(self, intelligent_analysis: Dict, + proactive_monitoring: Dict, + performance_test: Dict) -> str: + """确定整体状态""" + try: + # 检查各个模块的状态 + statuses = [] + + if intelligent_analysis.get("success"): + statuses.append("intelligent_analysis_ok") + else: + statuses.append("intelligent_analysis_error") + + if proactive_monitoring.get("success"): + statuses.append("proactive_monitoring_ok") + else: + statuses.append("proactive_monitoring_error") + + if performance_test.get("success"): + statuses.append("performance_test_ok") + else: + statuses.append("performance_test_error") + + # 根据状态确定整体状态 + if all("ok" in status for status in statuses): + return "excellent" + elif any("error" in status for status in statuses): + return "needs_attention" + else: + return "good" + + except Exception: + return "unknown" + + async def batch_process_requests(self, requests: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """批量处理请求""" + try: + results = [] + + for request in requests: + request_type = request.get("type", "message") + + if request_type == "message": + result = await self.process_message_agent( + request.get("message", ""), + request.get("user_id", "admin"), + request.get("work_order_id"), + request.get("enable_proactive", True) + ) + elif request_type == "conversation": + result = await self.process_conversation_agent(request) + elif request_type == "workorder": + result = await self.process_workorder_agent(request) + elif request_type == "alert": + result = await self.process_alert_agent(request) + else: + result = {"error": f"未知请求类型: {request_type}"} + + results.append(result) + + # 记录批量处理 + self._record_execution("batch_process", { + "request_count": len(requests), + "results": results + }) + + return results + + except Exception as e: + logger.error(f"批量处理请求失败: {e}") + return [{"error": str(e)} for _ in requests] + + def get_system_summary(self) -> Dict[str, Any]: + """获取系统摘要""" + try: + # 获取各种状态信息 + agent_status = self.get_agent_status() + 
system_health = self.get_system_health() + workorders_status = self._check_workorders_status() + + # 计算摘要指标 + summary = { + "timestamp": datetime.now().isoformat(), + "agent_status": agent_status, + "system_health": system_health, + "workorders_status": workorders_status, + "execution_history_count": len(self.execution_history), + "llm_usage_stats": self.get_llm_usage_stats(), + "overall_health_score": system_health.get("health_score", 0) + } + + return summary + + except Exception as e: + logger.error(f"获取系统摘要失败: {e}") + return {"error": str(e)} + + def export_agent_data(self) -> Dict[str, Any]: + """导出Agent数据""" + try: + export_data = { + "export_timestamp": datetime.now().isoformat(), + "agent_status": self.get_agent_status(), + "execution_history": self.execution_history, + "llm_usage_stats": self.get_llm_usage_stats(), + "system_summary": self.get_system_summary() + } + + return { + "success": True, + "data": export_data, + "message": "Agent数据导出成功" + } + + except Exception as e: + logger.error(f"导出Agent数据失败: {e}") + return { + "success": False, + "error": str(e) + } + + def import_agent_data(self, data: Dict[str, Any]) -> Dict[str, Any]: + """导入Agent数据""" + try: + # 验证数据格式 + if not isinstance(data, dict): + raise ValueError("数据格式不正确") + + # 导入执行历史 + if "execution_history" in data: + self.execution_history = data["execution_history"] + + # 其他数据的导入逻辑... 
+ + return { + "success": True, + "message": "Agent数据导入成功" + } + + except Exception as e: + logger.error(f"导入Agent数据失败: {e}") + return { + "success": False, + "error": str(e) + } + +# 测试函数 +async def main(): + """测试函数""" + print("🚀 TSP Agent助手测试") + + # 创建Agent助手实例 + agent_assistant = TSPAgentAssistant() + + # 测试基本功能 + status = agent_assistant.get_agent_status() + print("Agent状态:", status) + + # 测试消息处理 + result = await agent_assistant.process_message_agent("你好,请帮我分析系统状态") + print("消息处理结果:", result) + + # 测试示例动作 + sample_result = await agent_assistant.trigger_sample_actions() + print("示例动作结果:", sample_result) + + # 测试综合分析 + analysis_result = await agent_assistant.comprehensive_analysis() + print("综合分析结果:", analysis_result) + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/config/config.py b/src/config/config.py index c2553ed..dda57bf 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -10,7 +10,7 @@ class Config: ALIBABA_MODEL_NAME = "qwen-plus-latest" # 数据库配置 - DATABASE_URL = "sqlite:///tsp_assistant.db" + DATABASE_URL = "mysql+pymysql://tsp_assistant:123456@43.134.68.207/tsp_assistant?charset=utf8mb4" # 知识库配置 KNOWLEDGE_BASE_PATH = "data/knowledge_base" diff --git a/src/config/unified_config.py b/src/config/unified_config.py new file mode 100644 index 0000000..cf04449 --- /dev/null +++ b/src/config/unified_config.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +统一配置管理模块 +整合所有配置,提供统一的配置接口 +""" + +import os +import json +import logging +from typing import Dict, Any, Optional +from dataclasses import dataclass, asdict +from pathlib import Path + +logger = logging.getLogger(__name__) + +@dataclass +class DatabaseConfig: + """数据库配置""" + url: str = "mysql+pymysql://tsp_assistant:password@43.134.68.207/tsp_assistant?charset=utf8mb4" + pool_size: int = 10 + max_overflow: int = 20 + pool_timeout: int = 30 + pool_recycle: int = 3600 + +@dataclass +class LLMConfig: + """LLM配置""" + provider: str = "openai" + api_key: 
str = "" + base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1" + model: str = "qwen-turbo" + temperature: float = 0.7 + max_tokens: int = 2000 + timeout: int = 30 + +@dataclass +class ServerConfig: + """服务器配置""" + host: str = "0.0.0.0" + port: int = 5000 + websocket_port: int = 8765 + debug: bool = False + log_level: str = "INFO" + +@dataclass +class FeishuConfig: + """飞书配置""" + app_id: str = "" + app_secret: str = "" + app_token: str = "" + table_id: str = "" + status: str = "active" + sync_limit: int = 10 + auto_sync_interval: int = 0 + +@dataclass +class AIAccuracyConfig: + """AI准确率配置""" + auto_approve_threshold: float = 0.95 + use_human_resolution_threshold: float = 0.90 + manual_review_threshold: float = 0.80 + ai_suggestion_confidence: float = 0.95 + human_resolution_confidence: float = 0.90 + prefer_human_when_low_accuracy: bool = True + enable_auto_approval: bool = True + enable_human_fallback: bool = True + +@dataclass +class SystemConfig: + """系统配置""" + backup_enabled: bool = True + backup_interval: int = 24 # 小时 + max_backup_files: int = 7 + cache_enabled: bool = True + cache_ttl: int = 3600 # 秒 + monitoring_enabled: bool = True + +class UnifiedConfig: + """统一配置管理器""" + + def __init__(self, config_dir: str = "config"): + self.config_dir = Path(config_dir) + self.config_file = self.config_dir / "unified_config.json" + + # 默认配置 + self.database = DatabaseConfig() + self.llm = LLMConfig() + self.server = ServerConfig() + self.feishu = FeishuConfig() + self.ai_accuracy = AIAccuracyConfig() + self.system = SystemConfig() + + # 加载配置 + self.load_config() + + def load_config(self): + """加载配置文件""" + try: + if self.config_file.exists(): + with open(self.config_file, 'r', encoding='utf-8') as f: + config_data = json.load(f) + + # 更新配置 + if 'database' in config_data: + self.database = DatabaseConfig(**config_data['database']) + if 'llm' in config_data: + self.llm = LLMConfig(**config_data['llm']) + if 'server' in config_data: + self.server = 
ServerConfig(**config_data['server']) + if 'feishu' in config_data: + self.feishu = FeishuConfig(**config_data['feishu']) + if 'ai_accuracy' in config_data: + self.ai_accuracy = AIAccuracyConfig(**config_data['ai_accuracy']) + if 'system' in config_data: + self.system = SystemConfig(**config_data['system']) + + logger.info("配置文件加载成功") + else: + logger.info("配置文件不存在,使用默认配置") + self.save_config() + + except Exception as e: + logger.error(f"加载配置文件失败: {e}") + + def save_config(self): + """保存配置文件""" + try: + self.config_dir.mkdir(exist_ok=True) + + config_data = { + 'database': asdict(self.database), + 'llm': asdict(self.llm), + 'server': asdict(self.server), + 'feishu': asdict(self.feishu), + 'ai_accuracy': asdict(self.ai_accuracy), + 'system': asdict(self.system) + } + + with open(self.config_file, 'w', encoding='utf-8') as f: + json.dump(config_data, f, indent=2, ensure_ascii=False) + + logger.info("配置文件保存成功") + + except Exception as e: + logger.error(f"保存配置文件失败: {e}") + + def load_from_env(self): + """从环境变量加载配置""" + # 数据库配置 + if os.getenv('DATABASE_URL'): + self.database.url = os.getenv('DATABASE_URL') + + # LLM配置 + if os.getenv('LLM_PROVIDER'): + self.llm.provider = os.getenv('LLM_PROVIDER') + if os.getenv('LLM_API_KEY'): + self.llm.api_key = os.getenv('LLM_API_KEY') + if os.getenv('LLM_MODEL'): + self.llm.model = os.getenv('LLM_MODEL') + + # 服务器配置 + if os.getenv('SERVER_PORT'): + self.server.port = int(os.getenv('SERVER_PORT')) + if os.getenv('LOG_LEVEL'): + self.server.log_level = os.getenv('LOG_LEVEL') + + # 飞书配置 + if os.getenv('FEISHU_APP_ID'): + self.feishu.app_id = os.getenv('FEISHU_APP_ID') + if os.getenv('FEISHU_APP_SECRET'): + self.feishu.app_secret = os.getenv('FEISHU_APP_SECRET') + if os.getenv('FEISHU_APP_TOKEN'): + self.feishu.app_token = os.getenv('FEISHU_APP_TOKEN') + if os.getenv('FEISHU_TABLE_ID'): + self.feishu.table_id = os.getenv('FEISHU_TABLE_ID') + + def get_database_url(self) -> str: + """获取数据库连接URL""" + return self.database.url + + def 
get_llm_config(self) -> Dict[str, Any]: + """获取LLM配置""" + return asdict(self.llm) + + def get_server_config(self) -> Dict[str, Any]: + """获取服务器配置""" + return asdict(self.server) + + def get_feishu_config(self) -> Dict[str, Any]: + """获取飞书配置""" + return asdict(self.feishu) + + def get_ai_accuracy_config(self) -> Dict[str, Any]: + """获取AI准确率配置""" + return asdict(self.ai_accuracy) + + def get_system_config(self) -> Dict[str, Any]: + """获取系统配置""" + return asdict(self.system) + + def update_config(self, section: str, config_data: Dict[str, Any]): + """更新配置""" + try: + if section == 'database': + self.database = DatabaseConfig(**config_data) + elif section == 'llm': + self.llm = LLMConfig(**config_data) + elif section == 'server': + self.server = ServerConfig(**config_data) + elif section == 'feishu': + self.feishu = FeishuConfig(**config_data) + elif section == 'ai_accuracy': + self.ai_accuracy = AIAccuracyConfig(**config_data) + elif section == 'system': + self.system = SystemConfig(**config_data) + else: + raise ValueError(f"未知的配置节: {section}") + + self.save_config() + logger.info(f"配置节 {section} 更新成功") + + except Exception as e: + logger.error(f"更新配置失败: {e}") + raise + + def validate_config(self) -> bool: + """验证配置有效性""" + try: + # 验证数据库配置 + if not self.database.url: + logger.error("数据库URL未配置") + return False + + # 验证LLM配置 + if not self.llm.api_key: + logger.warning("LLM API密钥未配置") + + # 验证飞书配置 + if self.feishu.status == "active": + if not all([self.feishu.app_id, self.feishu.app_secret, + self.feishu.app_token, self.feishu.table_id]): + logger.warning("飞书配置不完整") + + logger.info("配置验证通过") + return True + + except Exception as e: + logger.error(f"配置验证失败: {e}") + return False + + def get_all_config(self) -> Dict[str, Any]: + """获取所有配置""" + return { + 'database': asdict(self.database), + 'llm': asdict(self.llm), + 'server': asdict(self.server), + 'feishu': asdict(self.feishu), + 'ai_accuracy': asdict(self.ai_accuracy), + 'system': asdict(self.system) + } + +# 全局配置实例 
+_config_instance = None + +def get_config() -> UnifiedConfig: + """获取全局配置实例""" + global _config_instance + if _config_instance is None: + _config_instance = UnifiedConfig() + _config_instance.load_from_env() + return _config_instance + +def reload_config(): + """重新加载配置""" + global _config_instance + _config_instance = None + return get_config() diff --git a/src/core/models.py b/src/core/models.py index 35c93ad..e3177f8 100644 --- a/src/core/models.py +++ b/src/core/models.py @@ -27,6 +27,20 @@ class WorkOrder(Base): solution = Column(Text, nullable=True) # 解决方案 ai_suggestion = Column(Text, nullable=True) # AI建议 + # 扩展飞书字段 + source = Column(String(50), nullable=True) # 来源(Mail, Telegram bot等) + module = Column(String(100), nullable=True) # 模块(local O&M, OTA等) + created_by = Column(String(100), nullable=True) # 创建人 + wilfulness = Column(String(100), nullable=True) # 责任人 + date_of_close = Column(DateTime, nullable=True) # 关闭日期 + vehicle_type = Column(String(100), nullable=True) # 车型 + vin_sim = Column(String(50), nullable=True) # 车架号/SIM + app_remote_control_version = Column(String(100), nullable=True) # 应用远程控制版本 + hmi_sw = Column(String(100), nullable=True) # HMI软件版本 + parent_record = Column(String(100), nullable=True) # 父记录 + has_updated_same_day = Column(String(50), nullable=True) # 是否同日更新 + operating_time = Column(String(100), nullable=True) # 操作时间 + # 关联对话记录 conversations = relationship("Conversation", back_populates="work_order") @@ -119,5 +133,6 @@ class WorkOrderSuggestion(Base): human_resolution = Column(Text) ai_similarity = Column(Float) approved = Column(Boolean, default=False) + use_human_resolution = Column(Boolean, default=False) # 是否使用人工描述入库 created_at = Column(DateTime, default=datetime.now) updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now) diff --git a/src/integrations/workorder_sync.py b/src/integrations/workorder_sync.py index d3570e6..94347c3 100644 --- a/src/integrations/workorder_sync.py +++ 
b/src/integrations/workorder_sync.py @@ -46,16 +46,31 @@ class WorkOrderSyncService: # 字段映射配置 - 根据实际飞书表格结构 self.field_mapping = { - # 飞书字段名 -> 本地字段名 + # 核心字段 "TR Number": "order_id", # TR编号映射到工单号 - "TR Description": "title", # TR描述作为标题(问题描述) + "TR Description": "description", # TR描述作为详细描述 "Type of problem": "category", # 问题类型作为分类 "TR Level": "priority", # TR Level作为优先级 - "TR Status": "status", # TR Status作为状态(修正字段名) - "Source": "assignee", # 来源信息 + "TR Status": "status", # TR Status作为状态 + "Source": "source", # 来源信息(Mail, Telegram bot等) "Date creation": "created_at", # 创建日期 - "处理过程": "description", # 处理过程作为描述 - "TR tracking": "solution", # TR跟踪作为解决方案 + "处理过程": "solution", # 处理过程作为解决方案 + "TR tracking": "resolution", # TR跟踪作为解决方案详情 + + # 扩展字段 + "Created by": "created_by", # 创建人 + "Module(模块)": "module", # 模块 + "Wilfulness(责任人)": "wilfulness", # 责任人 + "Date of close TR": "date_of_close", # 关闭日期 + "Vehicle Type01": "vehicle_type", # 车型 + "VIN|sim": "vin_sim", # 车架号/SIM + "App remote control version": "app_remote_control_version", # 应用远程控制版本 + "HMI SW": "hmi_sw", # HMI软件版本 + "父记录": "parent_record", # 父记录 + "Has it been updated on the same day": "has_updated_same_day", # 是否同日更新 + "Operating time": "operating_time", # 操作时间 + + # AI建议字段 "AI建议": "ai_suggestion", # AI建议字段 "Issue Start Time": "updated_at" # 问题开始时间作为更新时间 } @@ -387,7 +402,7 @@ class WorkOrderSyncService: value = self.status_mapping[value] elif local_field == "priority" and value in self.priority_mapping: value = self.priority_mapping[value] - elif local_field in ["created_at", "updated_at"] and value: + elif local_field in ["created_at", "updated_at", "date_of_close"] and value: try: # 处理飞书时间戳(毫秒) if isinstance(value, (int, float)): @@ -404,6 +419,16 @@ class WorkOrderSyncService: else: logger.info(f"飞书字段 {feishu_field} 不存在于数据中") + # 生成标题 - 使用TR Number和问题类型 + tr_number = feishu_fields.get("TR Number", "") + problem_type = feishu_fields.get("Type of problem", "") + if tr_number and problem_type: + 
local_data["title"] = f"{tr_number} - {problem_type}" + elif tr_number: + local_data["title"] = f"{tr_number} - TR工单" + else: + local_data["title"] = "TR工单" + # 设置默认值 if "status" not in local_data: local_data["status"] = WorkOrderStatus.PENDING @@ -411,8 +436,6 @@ class WorkOrderSyncService: local_data["priority"] = WorkOrderPriority.MEDIUM if "category" not in local_data: local_data["category"] = "Remote control" # 根据表格中最常见的问题类型 - if "title" not in local_data or not local_data["title"]: - local_data["title"] = "TR工单" # 默认标题 return local_data diff --git a/src/utils/helpers.py b/src/utils/helpers.py index b82e375..76a8b9e 100644 --- a/src/utils/helpers.py +++ b/src/utils/helpers.py @@ -76,18 +76,24 @@ def extract_keywords(text: str, max_keywords: int = 10) -> List[str]: return [word for word, count in sorted_words[:max_keywords]] def calculate_similarity(text1: str, text2: str) -> float: - """计算文本相似度""" - from sklearn.feature_extraction.text import TfidfVectorizer - from sklearn.metrics.pairwise import cosine_similarity - + """计算文本相似度(使用语义相似度)""" try: - vectorizer = TfidfVectorizer() - vectors = vectorizer.fit_transform([text1, text2]) - similarity = cosine_similarity(vectors[0:1], vectors[1:2])[0][0] - return float(similarity) + from src.utils.semantic_similarity import calculate_semantic_similarity + return calculate_semantic_similarity(text1, text2) except Exception as e: - logging.error(f"计算相似度失败: {e}") - return 0.0 + logging.error(f"计算语义相似度失败: {e}") + # 回退到传统方法 + try: + from sklearn.feature_extraction.text import TfidfVectorizer + from sklearn.metrics.pairwise import cosine_similarity + + vectorizer = TfidfVectorizer() + vectors = vectorizer.fit_transform([text1, text2]) + similarity = cosine_similarity(vectors[0:1], vectors[1:2])[0][0] + return float(similarity) + except Exception as e2: + logging.error(f"计算TF-IDF相似度失败: {e2}") + return 0.0 def format_time_duration(seconds: float) -> str: """格式化时间持续时间""" diff --git a/src/utils/semantic_similarity.py 
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Semantic similarity service.

Computes text similarity with sentence-transformers embeddings when that
optional dependency is available, and transparently falls back to a
TF-IDF cosine similarity otherwise.
"""

import logging
from typing import List, Tuple

import numpy as np

logger = logging.getLogger(__name__)

class SemanticSimilarityCalculator:
    """Calculates semantic similarity between pairs of texts.

    Attributes:
        model_name: Name of the sentence-transformers model to load.
        model: Loaded SentenceTransformer instance, or None when the
            dependency or model is unavailable (TF-IDF fallback is used).
    """

    def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
        """Initialize the calculator and eagerly load the model.

        Args:
            model_name: Pretrained model to use.
                - all-MiniLM-L6-v2: English model, fast; recommended for production.
                - paraphrase-multilingual-MiniLM-L12-v2: multilingual, supports Chinese.
                - paraphrase-multilingual-mpnet-base-v2: multilingual, higher accuracy.
        """
        self.model_name = model_name
        self.model = None
        self._load_model()

    def _load_model(self):
        """Load the pretrained embedding model.

        The sentence-transformers import is deliberately deferred to this
        method so a missing optional dependency degrades to the TF-IDF
        fallback instead of raising ImportError at module import time.
        """
        try:
            logger.info(f"正在加载语义相似度模型: {self.model_name}")
            from sentence_transformers import SentenceTransformer
            self.model = SentenceTransformer(self.model_name)
            logger.info("语义相似度模型加载成功")
        except Exception as e:
            logger.error(f"加载语义相似度模型失败: {e}")
            # Calculator stays usable via the TF-IDF fallback.
            self.model = None

    def calculate_similarity(self, text1: str, text2: str, fast_mode: bool = True) -> float:
        """Compute the semantic similarity of two texts.

        Args:
            text1: First text.
            text2: Second text.
            fast_mode: When True, use TF-IDF as a cheap pre-filter and only
                run the embedding model for mid-range scores.

        Returns:
            Similarity score in the range [0, 1]; 0.0 when either text is empty.
        """
        if not text1 or not text2:
            return 0.0

        try:
            if fast_mode:
                tfidf_sim = self._calculate_tfidf_similarity(text1, text2)

                # Clear-cut cases: skip the (expensive) embedding model.
                if tfidf_sim >= 0.9 or tfidf_sim <= 0.3:
                    return tfidf_sim

                # Mid-range score: refine with the semantic model and blend.
                if self.model is not None:
                    semantic_sim = self._calculate_semantic_similarity(text1, text2)
                    return tfidf_sim * 0.3 + semantic_sim * 0.7
                return tfidf_sim

            # Full mode: semantic similarity directly (TF-IDF if no model).
            if self.model is not None:
                return self._calculate_semantic_similarity(text1, text2)
            return self._calculate_tfidf_similarity(text1, text2)

        except Exception as e:
            logger.error(f"计算语义相似度失败: {e}")
            return self._calculate_tfidf_similarity(text1, text2)

    def _calculate_semantic_similarity(self, text1: str, text2: str) -> float:
        """Embed both texts and return their cosine similarity, clamped to [0, 1]."""
        try:
            embeddings = self.model.encode([text1, text2])

            similarity = self._cosine_similarity(embeddings[0], embeddings[1])

            # Cosine similarity can be slightly negative; clamp into [0, 1].
            similarity = max(0.0, min(1.0, similarity))

            logger.debug(f"语义相似度计算: {similarity:.4f}")
            return float(similarity)

        except Exception as e:
            logger.error(f"语义相似度计算失败: {e}")
            return self._calculate_tfidf_similarity(text1, text2)

    def _calculate_tfidf_similarity(self, text1: str, text2: str) -> float:
        """TF-IDF cosine similarity (fallback method); 0.0 on any failure."""
        try:
            # Imported lazily: sklearn is only needed on this fallback path.
            from sklearn.feature_extraction.text import TfidfVectorizer
            from sklearn.metrics.pairwise import cosine_similarity

            vectorizer = TfidfVectorizer(max_features=1000, stop_words=None)
            vectors = vectorizer.fit_transform([text1, text2])
            similarity = cosine_similarity(vectors[0:1], vectors[1:2])[0][0]

            logger.debug(f"TF-IDF相似度计算: {similarity:.4f}")
            return float(similarity)

        except Exception as e:
            logger.error(f"TF-IDF相似度计算失败: {e}")
            return 0.0

    def _cosine_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Cosine similarity of two vectors; 0.0 for zero vectors or on error."""
        try:
            dot_product = np.dot(vec1, vec2)

            norm1 = np.linalg.norm(vec1)
            norm2 = np.linalg.norm(vec2)

            # Guard against division by zero for zero-length vectors.
            if norm1 == 0 or norm2 == 0:
                return 0.0

            similarity = dot_product / (norm1 * norm2)

            return float(similarity)

        except Exception as e:
            logger.error(f"余弦相似度计算失败: {e}")
            return 0.0

    def batch_calculate_similarity(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
        """Compute similarity for many pairs at once.

        Args:
            text_pairs: List of (text1, text2) pairs.

        Returns:
            One similarity score per pair; all zeros on unrecoverable failure.
        """
        if not text_pairs:
            return []

        try:
            if self.model is not None:
                return self._batch_semantic_similarity(text_pairs)
            return [self._calculate_tfidf_similarity(t1, t2) for t1, t2 in text_pairs]
        except Exception as e:
            logger.error(f"批量相似度计算失败: {e}")
            return [0.0] * len(text_pairs)

    def _batch_semantic_similarity(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
        """Embed all texts in one model call, then score consecutive pairs."""
        try:
            # Flatten pairs so the model encodes everything in a single batch.
            all_texts = []
            for text1, text2 in text_pairs:
                all_texts.extend([text1, text2])

            embeddings = self.model.encode(all_texts)

            # Embeddings at indices (2i, 2i+1) correspond to pair i.
            similarities = []
            for i in range(0, len(embeddings), 2):
                similarity = self._cosine_similarity(embeddings[i], embeddings[i + 1])
                similarities.append(float(similarity))

            return similarities

        except Exception as e:
            logger.error(f"批量语义相似度计算失败: {e}")
            return [self._calculate_tfidf_similarity(t1, t2) for t1, t2 in text_pairs]

    def get_similarity_explanation(self, text1: str, text2: str, similarity: float) -> str:
        """Map a similarity score to a human-readable recommendation.

        Args:
            text1: First text (unused; kept for interface stability).
            text2: Second text (unused; kept for interface stability).
            similarity: Score produced by calculate_similarity.

        Returns:
            Chinese-language recommendation string for reviewers.
        """
        if similarity >= 0.95:
            return "语义高度相似,建议自动审批"
        elif similarity >= 0.8:
            return "语义较为相似,建议人工审核"
        elif similarity >= 0.6:
            return "语义部分相似,需要人工判断"
        elif similarity >= 0.4:
            return "语义相似度较低,建议重新生成"
        else:
            return "语义差异较大,建议重新生成"

    def is_model_available(self) -> bool:
        """Return True when the embedding model loaded successfully."""
        return self.model is not None

# Lazily-created singleton shared by the convenience functions below.
_similarity_calculator = None

def get_similarity_calculator() -> SemanticSimilarityCalculator:
    """Return the process-wide calculator, creating it on first use."""
    global _similarity_calculator
    if _similarity_calculator is None:
        _similarity_calculator = SemanticSimilarityCalculator()
    return _similarity_calculator

def calculate_semantic_similarity(text1: str, text2: str, fast_mode: bool = True) -> float:
    """Convenience wrapper over the shared calculator.

    Args:
        text1: First text.
        text2: Second text.
        fast_mode: Whether to use the TF-IDF pre-filter (see calculate_similarity).

    Returns:
        Similarity score in [0, 1].
    """
    calculator = get_similarity_calculator()
    return calculator.calculate_similarity(text1, text2, fast_mode)

def batch_calculate_semantic_similarity(text_pairs: List[Tuple[str, str]]) -> List[float]:
    """Convenience wrapper for batch scoring via the shared calculator.

    Args:
        text_pairs: List of (text1, text2) pairs.

    Returns:
        One similarity score per pair.
    """
    calculator = get_similarity_calculator()
    return calculator.batch_calculate_similarity(text_pairs)
class SimpleAIAccuracyConfig:
    """Minimal AI-accuracy policy for storing work-order suggestions.

    Thresholds are compared against the semantic similarity between the
    AI-generated suggestion and the human-written resolution.
    """

    def __init__(self):
        # At or above this similarity the AI suggestion is approved automatically.
        self.auto_approve_threshold = 0.95
        # Below this similarity the human resolution is stored instead of the AI text.
        self.use_human_resolution_threshold = 0.90
        # Mid-range similarities fall into manual review territory.
        self.manual_review_threshold = 0.80
        # Floor confidence recorded when the AI suggestion is stored.
        self.ai_suggestion_confidence = 0.95
        # Fixed confidence recorded when the human resolution is stored.
        self.human_resolution_confidence = 0.90

    def should_auto_approve(self, similarity: float) -> bool:
        """Return True when similarity clears the auto-approval threshold."""
        return self.auto_approve_threshold <= similarity

    def should_use_human_resolution(self, similarity: float) -> bool:
        """Return True when similarity is too low to trust the AI suggestion."""
        return self.use_human_resolution_threshold > similarity

    def get_confidence_score(self, similarity: float, use_human: bool = False) -> float:
        """Return the confidence to record for the stored knowledge entry."""
        if use_human:
            return self.human_resolution_confidence
        return max(self.ai_suggestion_confidence, similarity)
import cosine_similarity - texts = [rec.ai_suggestion or "", human_text] - vec = TfidfVectorizer(max_features=1000) - mat = vec.fit_transform(texts) - sim = float(cosine_similarity(mat[0:1], mat[1:2])[0][0]) - except Exception: - sim = 0.0 + from src.utils.semantic_similarity import calculate_semantic_similarity + ai_text = rec.ai_suggestion or "" + sim = calculate_semantic_similarity(ai_text, human_text) + logger.info(f"AI建议与人工描述语义相似度: {sim:.4f}") + except Exception as e: + logger.error(f"计算语义相似度失败: {e}") + # 回退到传统方法 + try: + from sklearn.feature_extraction.text import TfidfVectorizer + from sklearn.metrics.pairwise import cosine_similarity + texts = [rec.ai_suggestion or "", human_text] + vec = TfidfVectorizer(max_features=1000) + mat = vec.fit_transform(texts) + sim = float(cosine_similarity(mat[0:1], mat[1:2])[0][0]) + except Exception: + sim = 0.0 rec.ai_similarity = sim - # 自动审批条件≥0.95 - approved = sim >= 0.95 + + # 使用简化的配置 + config = SimpleAIAccuracyConfig() + + # 自动审批条件 + approved = config.should_auto_approve(sim) rec.approved = approved + + # 记录使用人工描述入库的标记(当AI准确率低于阈值时) + use_human_resolution = config.should_use_human_resolution(sim) + rec.use_human_resolution = use_human_resolution + session.commit() - return jsonify({"success": True, "similarity": sim, "approved": approved}) + return jsonify({ + "success": True, + "similarity": sim, + "approved": approved, + "use_human_resolution": use_human_resolution + }) except Exception as e: return jsonify({"error": str(e)}), 500 @workorders_bp.route('//approve-to-knowledge', methods=['POST']) def approve_workorder_to_knowledge(workorder_id): - """将已审批的AI建议入库为知识条目""" + """将已审批的AI建议或人工描述入库为知识条目""" try: with db_manager.get_session() as session: w = session.query(WorkOrder).filter(WorkOrder.id == workorder_id).first() if not w: return jsonify({"error": "工单不存在"}), 404 + rec = session.query(WorkOrderSuggestion).filter(WorkOrderSuggestion.work_order_id == w.id).first() - if not rec or not rec.approved or not 
rec.ai_suggestion: - return jsonify({"error": "未找到可入库的已审批AI建议"}), 400 - # 入库为知识条目(问=工单标题;答=AI建议;类目用工单分类) + if not rec: + return jsonify({"error": "未找到工单建议记录"}), 400 + + # 使用简化的配置 + config = SimpleAIAccuracyConfig() + + # 确定使用哪个内容入库 + if rec.use_human_resolution and rec.human_resolution: + # AI准确率低于阈值,使用人工描述入库 + answer_content = rec.human_resolution + confidence_score = config.get_confidence_score(rec.ai_similarity or 0, use_human=True) + verified_by = 'human_resolution' + logger.info(f"工单 {workorder_id} 使用人工描述入库,AI相似度: {rec.ai_similarity:.4f}") + elif rec.approved and rec.ai_suggestion: + # AI准确率≥阈值,使用AI建议入库 + answer_content = rec.ai_suggestion + confidence_score = config.get_confidence_score(rec.ai_similarity or 0, use_human=False) + verified_by = 'auto_approve' + logger.info(f"工单 {workorder_id} 使用AI建议入库,相似度: {rec.ai_similarity:.4f}") + else: + return jsonify({"error": "未找到可入库的内容"}), 400 + + # 入库为知识条目 entry = KnowledgeEntry( question=w.title or (w.description[:20] if w.description else '工单问题'), - answer=rec.ai_suggestion, + answer=answer_content, category=w.category or '其他', - confidence_score=0.95, + confidence_score=confidence_score, is_active=True, is_verified=True, - verified_by='auto_approve', + verified_by=verified_by, verified_at=datetime.now() ) session.add(entry) session.commit() - return jsonify({"success": True, "knowledge_id": entry.id}) + + return jsonify({ + "success": True, + "knowledge_id": entry.id, + "used_content": "human_resolution" if rec.use_human_resolution else "ai_suggestion", + "confidence_score": confidence_score + }) except Exception as e: + logger.error(f"入库知识库失败: {e}") return jsonify({"error": str(e)}), 500 @workorders_bp.route('/import', methods=['POST']) diff --git a/src/web/static/css/style.css b/src/web/static/css/style.css index 625106c..65f3306 100644 --- a/src/web/static/css/style.css +++ b/src/web/static/css/style.css @@ -604,64 +604,406 @@ body { color: #6c757d; font-size: 0.8rem; } - - -/* fpencaSGr7h_ */ -.vehicle-data-card 
{ - background: linear-gradient(135deg, #e8f5e8, #f0f8f0); - border: 1px solid #4caf50; - border-radius: 10px; - margin: 10px 0; - padding: 15px; - box-shadow: 0 2px 10px rgba(76, 175, 80, 0.1); -} - -.vehicle-data-header { - border-bottom: 1px solid #4caf50; - padding-bottom: 10px; - margin-bottom: 15px; -} - -.vehicle-data-header h5 { - color: #2e7d32; - margin: 0; - font-size: 1.1rem; -} - -.vehicle-data-content { - padding: 0; -} - -.vehicle-info { - background: white; - border-radius: 8px; - padding: 12px; - margin-bottom: 10px; - border-left: 4px solid #4caf50; -} - -.vehicle-info h6 { - color: #1976d2; - margin-bottom: 8px; - font-size: 1rem; -} - -.vehicle-details p { - margin: 5px 0; - font-size: 0.9rem; - color: #333; -} - -.vehicle-details strong { - color: #2e7d32; -} - -.vehicle-error { - background: #ffebee; - color: #c62828; - padding: 10px; - border-radius: 5px; - border-left: 4px solid #f44336; - font-style: italic; -} + +/* AI建议与人工描述优化样式 */ +.ai-suggestion-section { + background: linear-gradient(135deg, #f8f9ff, #e8f2ff); + border: 1px solid #cce7ff; + border-radius: 12px; + padding: 20px; + margin: 15px 0; + box-shadow: 0 2px 8px rgba(0, 123, 255, 0.1); +} + +.ai-suggestion-header { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 15px; + padding-bottom: 10px; + border-bottom: 2px solid #e3f2fd; +} + +.ai-suggestion-title { + font-size: 1.1rem; + font-weight: 600; + color: #1976d2; + margin: 0; + display: flex; + align-items: center; +} + +.ai-suggestion-title i { + margin-right: 8px; + color: #2196f3; +} + +.generate-ai-btn { + background: linear-gradient(135deg, #2196f3, #1976d2); + border: none; + border-radius: 8px; + padding: 8px 16px; + color: white; + font-weight: 500; + transition: all 0.3s ease; + box-shadow: 0 2px 4px rgba(33, 150, 243, 0.3); +} + +.generate-ai-btn:hover { + transform: translateY(-2px); + box-shadow: 0 4px 12px rgba(33, 150, 243, 0.4); + background: linear-gradient(135deg, 
#1976d2, #1565c0); +} + +.ai-suggestion-content { + background: white; + border: 1px solid #e0e0e0; + border-radius: 8px; + padding: 15px; + margin-bottom: 15px; + min-height: 100px; + position: relative; +} + +.ai-suggestion-content textarea { + border: none; + background: transparent; + resize: none; + font-size: 0.95rem; + line-height: 1.5; + color: #333; +} + +.ai-suggestion-content textarea:focus { + outline: none; + box-shadow: none; +} + +.human-resolution-content { + background: #fff8e1; + border: 1px solid #ffcc02; + border-radius: 8px; + padding: 15px; + margin-bottom: 15px; + position: relative; +} + +.human-resolution-content textarea { + border: none; + background: transparent; + resize: none; + font-size: 0.95rem; + line-height: 1.5; + color: #333; +} + +.human-resolution-content textarea:focus { + outline: none; + box-shadow: none; +} + +.similarity-indicator { + display: flex; + align-items: center; + gap: 12px; + margin: 15px 0; + padding: 12px; + background: #f5f5f5; + border-radius: 8px; + border-left: 4px solid #2196f3; +} + +.similarity-badge { + font-size: 0.9rem; + font-weight: 600; + padding: 6px 12px; + border-radius: 20px; + display: flex; + align-items: center; + gap: 6px; +} + +.similarity-badge.high { + background: linear-gradient(135deg, #4caf50, #2e7d32); + color: white; +} + +.similarity-badge.medium { + background: linear-gradient(135deg, #ff9800, #f57c00); + color: white; +} + +.similarity-badge.low { + background: linear-gradient(135deg, #f44336, #d32f2f); + color: white; +} + +.action-buttons { + display: flex; + gap: 10px; + align-items: center; + flex-wrap: wrap; +} + +.save-human-btn { + background: linear-gradient(135deg, #4caf50, #2e7d32); + border: none; + border-radius: 8px; + padding: 10px 20px; + color: white; + font-weight: 500; + transition: all 0.3s ease; + box-shadow: 0 2px 4px rgba(76, 175, 80, 0.3); +} + +.save-human-btn:hover { + transform: translateY(-2px); + box-shadow: 0 4px 12px rgba(76, 175, 80, 0.4); + 
background: linear-gradient(135deg, #2e7d32, #1b5e20); +} + +.approve-btn { + background: linear-gradient(135deg, #ff9800, #f57c00); + border: none; + border-radius: 8px; + padding: 10px 20px; + color: white; + font-weight: 500; + transition: all 0.3s ease; + box-shadow: 0 2px 4px rgba(255, 152, 0, 0.3); +} + +.approve-btn:hover { + transform: translateY(-2px); + box-shadow: 0 4px 12px rgba(255, 152, 0, 0.4); + background: linear-gradient(135deg, #f57c00, #ef6c00); +} + +.approve-btn.approved { + background: linear-gradient(135deg, #4caf50, #2e7d32); + box-shadow: 0 2px 4px rgba(76, 175, 80, 0.3); +} + +.approve-btn.approved:hover { + background: linear-gradient(135deg, #2e7d32, #1b5e20); + box-shadow: 0 4px 12px rgba(76, 175, 80, 0.4); +} + +.status-badge { + font-size: 0.85rem; + padding: 4px 8px; + border-radius: 12px; + font-weight: 500; +} + +.status-badge.approved { + background: #e8f5e8; + color: #2e7d32; + border: 1px solid #4caf50; +} + +.status-badge.pending { + background: #fff3e0; + color: #f57c00; + border: 1px solid #ff9800; +} + +.status-badge.human-resolution { + background: #e3f2fd; + color: #1976d2; + border: 1px solid #2196f3; +} + +/* 加载状态优化 */ +.ai-loading { + position: relative; + overflow: hidden; +} + +.ai-loading::after { + content: ''; + position: absolute; + top: 0; + left: -100%; + width: 100%; + height: 100%; + background: linear-gradient(90deg, transparent, rgba(255,255,255,0.4), transparent); + animation: shimmer 1.5s infinite; +} + +@keyframes shimmer { + 0% { left: -100%; } + 100% { left: 100%; } +} + +/* 按钮加载状态 */ +.btn-loading { + position: relative; + color: transparent !important; +} + +.btn-loading::after { + content: ''; + position: absolute; + top: 50%; + left: 50%; + width: 16px; + height: 16px; + margin: -8px 0 0 -8px; + border: 2px solid transparent; + border-top: 2px solid currentColor; + border-radius: 50%; + animation: spin 1s linear infinite; +} + +/* 成功动画 */ +.success-animation { + animation: successPulse 0.6s 
ease-in-out; +} + +@keyframes successPulse { + 0% { transform: scale(1); } + 50% { transform: scale(1.05); } + 100% { transform: scale(1); } +} + +/* 工具提示优化 */ +.tooltip-custom { + position: relative; + cursor: help; +} + +.tooltip-custom::before { + content: attr(data-tooltip); + position: absolute; + bottom: 125%; + left: 50%; + transform: translateX(-50%); + background: #333; + color: white; + padding: 8px 12px; + border-radius: 6px; + font-size: 0.8rem; + white-space: nowrap; + opacity: 0; + visibility: hidden; + transition: all 0.3s ease; + z-index: 1000; +} + +.tooltip-custom::after { + content: ''; + position: absolute; + bottom: 115%; + left: 50%; + transform: translateX(-50%); + border: 5px solid transparent; + border-top-color: #333; + opacity: 0; + visibility: hidden; + transition: all 0.3s ease; +} + +.tooltip-custom:hover::before, +.tooltip-custom:hover::after { + opacity: 1; + visibility: visible; +} + +/* 响应式优化 */ +@media (max-width: 768px) { + .ai-suggestion-section { + padding: 15px; + margin: 10px 0; + } + + .ai-suggestion-header { + flex-direction: column; + align-items: flex-start; + gap: 10px; + } + + .similarity-indicator { + flex-direction: column; + align-items: flex-start; + gap: 8px; + } + + .action-buttons { + flex-direction: column; + align-items: stretch; + } + + .save-human-btn, + .approve-btn { + width: 100%; + text-align: center; + } + + .tooltip-custom::before { + font-size: 0.7rem; + padding: 6px 10px; + } +} + + +/* f���pencaSGr7h_ */ +.vehicle-data-card { + background: linear-gradient(135deg, #e8f5e8, #f0f8f0); + border: 1px solid #4caf50; + border-radius: 10px; + margin: 10px 0; + padding: 15px; + box-shadow: 0 2px 10px rgba(76, 175, 80, 0.1); +} + +.vehicle-data-header { + border-bottom: 1px solid #4caf50; + padding-bottom: 10px; + margin-bottom: 15px; +} + +.vehicle-data-header h5 { + color: #2e7d32; + margin: 0; + font-size: 1.1rem; +} + +.vehicle-data-content { + padding: 0; +} + +.vehicle-info { + background: white; + 
border-radius: 8px; + padding: 12px; + margin-bottom: 10px; + border-left: 4px solid #4caf50; +} + +.vehicle-info h6 { + color: #1976d2; + margin-bottom: 8px; + font-size: 1rem; +} + +.vehicle-details p { + margin: 5px 0; + font-size: 0.9rem; + color: #333; +} + +.vehicle-details strong { + color: #2e7d32; +} + +.vehicle-error { + background: #ffebee; + color: #c62828; + padding: 10px; + border-radius: 5px; + border-left: 4px solid #f44336; + font-style: italic; +} + \ No newline at end of file diff --git a/src/web/static/js/dashboard.js b/src/web/static/js/dashboard.js index ad7feae..4fd2401 100644 --- a/src/web/static/js/dashboard.js +++ b/src/web/static/js/dashboard.js @@ -18,19 +18,51 @@ class TSPDashboard { } async generateAISuggestion(workorderId) { + const button = document.querySelector(`button[onclick="dashboard.generateAISuggestion(${workorderId})"]`); + const textarea = document.getElementById(`aiSuggestion_${workorderId}`); + try { + // 添加加载状态 + if (button) { + button.classList.add('btn-loading'); + button.disabled = true; + } + if (textarea) { + textarea.classList.add('ai-loading'); + textarea.value = '正在生成AI建议,请稍候...'; + } + const resp = await fetch(`/api/workorders/${workorderId}/ai-suggestion`, { method: 'POST' }); const data = await resp.json(); + if (data.success) { - const ta = document.getElementById(`aiSuggestion_${workorderId}`); - if (ta) ta.value = data.ai_suggestion || ''; + if (textarea) { + textarea.value = data.ai_suggestion || ''; + textarea.classList.remove('ai-loading'); + textarea.classList.add('success-animation'); + + // 移除成功动画类 + setTimeout(() => { + textarea.classList.remove('success-animation'); + }, 600); + } this.showNotification('AI建议已生成', 'success'); } else { throw new Error(data.error || '生成失败'); } } catch (e) { console.error('生成AI建议失败:', e); + if (textarea) { + textarea.value = 'AI建议生成失败,请重试'; + textarea.classList.remove('ai-loading'); + } this.showNotification('生成AI建议失败: ' + e.message, 'error'); + } finally { + // 移除加载状态 
+ if (button) { + button.classList.remove('btn-loading'); + button.disabled = false; + } } } @@ -49,10 +81,60 @@ class TSPDashboard { const apprEl = document.getElementById(`aiApproved_${workorderId}`); const approveBtn = document.getElementById(`approveBtn_${workorderId}`); const percent = Math.round((data.similarity || 0) * 100); - if (simEl) { simEl.textContent = `相似度: ${percent}%`; simEl.className = `badge ${percent>=95?'bg-success':percent>=70?'bg-warning':'bg-secondary'}`; } - if (apprEl) { apprEl.textContent = data.approved ? '已自动审批' : '未审批'; apprEl.className = `badge ${data.approved?'bg-success':'bg-secondary'}`; } - if (approveBtn) approveBtn.disabled = !data.approved; - this.showNotification('人工描述已保存并评估完成', 'success'); + + // 更新相似度显示,使用语义相似度 + if (simEl) { + simEl.innerHTML = `语义相似度: ${percent}%`; + + // 使用新的CSS类 + if (percent >= 90) { + simEl.className = 'similarity-badge high'; + } else if (percent >= 80) { + simEl.className = 'similarity-badge medium'; + } else { + simEl.className = 'similarity-badge low'; + } + + simEl.title = this.getSimilarityExplanation(percent); + } + + // 更新审批状态 + if (apprEl) { + if (data.use_human_resolution) { + apprEl.textContent = '将使用人工描述入库'; + apprEl.className = 'status-badge human-resolution'; + } else if (data.approved) { + apprEl.textContent = '已自动审批'; + apprEl.className = 'status-badge approved'; + } else { + apprEl.textContent = '未审批'; + apprEl.className = 'status-badge pending'; + } + } + + // 更新审批按钮状态 + if (approveBtn) { + const canApprove = data.approved || data.use_human_resolution; + approveBtn.disabled = !canApprove; + + if (data.use_human_resolution) { + approveBtn.textContent = '使用人工描述入库'; + approveBtn.className = 'approve-btn'; + approveBtn.title = 'AI准确率低于90%,将使用人工描述入库'; + } else if (data.approved) { + approveBtn.textContent = '已自动审批'; + approveBtn.className = 'approve-btn approved'; + approveBtn.title = 'AI建议与人工描述高度一致'; + } else { + approveBtn.textContent = '审批入库'; + approveBtn.className = 'approve-btn'; + 
approveBtn.title = '手动审批入库'; + } + } + + // 显示更详细的反馈信息 + const message = this.getSimilarityMessage(percent, data.approved, data.use_human_resolution); + this.showNotification(message, data.approved ? 'success' : data.use_human_resolution ? 'warning' : 'info'); } else { throw new Error(data.error || '保存失败'); } @@ -67,7 +149,9 @@ class TSPDashboard { const resp = await fetch(`/api/workorders/${workorderId}/approve-to-knowledge`, { method: 'POST' }); const data = await resp.json(); if (data.success) { - this.showNotification('已入库为知识条目', 'success'); + const contentType = data.used_content === 'human_resolution' ? '人工描述' : 'AI建议'; + const confidence = Math.round((data.confidence_score || 0) * 100); + this.showNotification(`已入库为知识条目!使用${contentType},置信度: ${confidence}%`, 'success'); } else { throw new Error(data.error || '入库失败'); } @@ -1713,29 +1797,43 @@ class TSPDashboard { ${workorder.satisfaction_score}/5.0 ` : ''} -
AI建议与人工描述
-
-
-
-
- - + +
+ +
-
- + +
+
-
- - 相似度: -- - 未审批 -
+ +
+
@@ -4043,6 +4141,34 @@ class TSPDashboard { }, 3000); } + getSimilarityExplanation(percent) { + if (percent >= 95) { + return "语义高度相似,AI建议与人工描述基本一致,建议自动审批"; + } else if (percent >= 90) { + return "语义较为相似,AI建议与人工描述大体一致,建议人工审核"; + } else if (percent >= 80) { + return "语义部分相似,AI建议与人工描述有一定差异,需要人工判断"; + } else if (percent >= 60) { + return "语义相似度较低,AI建议与人工描述差异较大,建议使用人工描述"; + } else { + return "语义差异很大,AI建议与人工描述差异很大,优先使用人工描述"; + } + } + + getSimilarityMessage(percent, approved, useHumanResolution = false) { + if (useHumanResolution) { + return `人工描述已保存!语义相似度: ${percent}%,AI准确率低于90%,将使用人工描述入库`; + } else if (approved) { + return `人工描述已保存!语义相似度: ${percent}%,已自动审批入库`; + } else if (percent >= 90) { + return `人工描述已保存!语义相似度: ${percent}%,建议人工审核后审批`; + } else if (percent >= 80) { + return `人工描述已保存!语义相似度: ${percent}%,需要人工判断是否审批`; + } else { + return `人工描述已保存!语义相似度: ${percent}%,建议使用人工描述入库`; + } + } + showCreateWorkOrderModal() { const modal = new bootstrap.Modal(document.getElementById('createWorkOrderModal')); modal.show(); @@ -4166,8 +4292,390 @@ class TSPDashboard { } } +// 飞书同步管理器 +class FeishuSyncManager { + constructor() { + this.loadConfig(); + this.refreshStatus(); + } + + async loadConfig() { + try { + const response = await fetch('/api/feishu-sync/config'); + const data = await response.json(); + + if (data.success) { + const config = data.config; + document.getElementById('appId').value = config.feishu.app_id || ''; + document.getElementById('appSecret').value = ''; + document.getElementById('appToken').value = config.feishu.app_token || ''; + document.getElementById('tableId').value = config.feishu.table_id || ''; + + // 显示配置状态 + const statusBadge = config.feishu.status === 'active' ? 
+ '已配置' : + '未配置'; + + // 可以在这里添加状态显示 + } + } catch (error) { + console.error('加载配置失败:', error); + } + } + + async saveConfig() { + const config = { + app_id: document.getElementById('appId').value, + app_secret: document.getElementById('appSecret').value, + app_token: document.getElementById('appToken').value, + table_id: document.getElementById('tableId').value + }; + + if (!config.app_id || !config.app_secret || !config.app_token || !config.table_id) { + this.showNotification('请填写完整的配置信息', 'error'); + return; + } + + try { + const response = await fetch('/api/feishu-sync/config', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify(config) + }); + + const data = await response.json(); + + if (data.success) { + this.showNotification('配置保存成功', 'success'); + } else { + this.showNotification('配置保存失败: ' + data.error, 'error'); + } + } catch (error) { + this.showNotification('配置保存失败: ' + error.message, 'error'); + } + } + + async testConnection() { + try { + this.showNotification('正在测试连接...', 'info'); + + const response = await fetch('/api/feishu-sync/test-connection'); + const data = await response.json(); + + if (data.success) { + this.showNotification('飞书连接正常', 'success'); + } else { + this.showNotification('连接失败: ' + data.error, 'error'); + } + } catch (error) { + this.showNotification('连接测试失败: ' + error.message, 'error'); + } + } + + async syncFromFeishu() { + try { + const limit = document.getElementById('syncLimit').value; + this.showNotification('开始从飞书同步数据...', 'info'); + this.showProgress(true); + + const response = await fetch('/api/feishu-sync/sync-from-feishu', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + generate_ai_suggestions: false, + limit: parseInt(limit) + }) + }); + + const data = await response.json(); + + if (data.success) { + this.showNotification(data.message, 'success'); + this.addSyncLog(data.message); + this.refreshStatus(); + } else { + 
this.showNotification('同步失败: ' + data.error, 'error'); + this.addSyncLog('同步失败: ' + data.error); + } + } catch (error) { + this.showNotification('同步失败: ' + error.message, 'error'); + this.addSyncLog('同步失败: ' + error.message); + } finally { + this.showProgress(false); + } + } + + async syncWithAI() { + try { + const limit = document.getElementById('syncLimit').value; + this.showNotification('开始同步数据并生成AI建议...', 'info'); + this.showProgress(true); + + const response = await fetch('/api/feishu-sync/sync-from-feishu', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + generate_ai_suggestions: true, + limit: parseInt(limit) + }) + }); + + const data = await response.json(); + + if (data.success) { + this.showNotification(data.message, 'success'); + this.addSyncLog(data.message); + this.refreshStatus(); + } else { + this.showNotification('同步失败: ' + data.error, 'error'); + this.addSyncLog('同步失败: ' + data.error); + } + } catch (error) { + this.showNotification('同步失败: ' + error.message, 'error'); + this.addSyncLog('同步失败: ' + error.message); + } finally { + this.showProgress(false); + } + } + + async previewFeishuData() { + try { + this.showNotification('正在获取飞书数据预览...', 'info'); + + const response = await fetch('/api/feishu-sync/preview-feishu-data'); + const data = await response.json(); + + if (data.success) { + this.displayPreviewData(data.preview_data); + this.showNotification(`获取到 ${data.total_count} 条预览数据`, 'success'); + } else { + this.showNotification('获取预览数据失败: ' + data.error, 'error'); + } + } catch (error) { + this.showNotification('获取预览数据失败: ' + error.message, 'error'); + } + } + + displayPreviewData(data) { + const tbody = document.querySelector('#previewTable tbody'); + tbody.innerHTML = ''; + + data.forEach(item => { + const row = document.createElement('tr'); + row.innerHTML = ` + ${item.record_id} + ${item.fields['TR Number'] || '-'} + ${item.fields['TR Description'] || '-'} + ${item.fields['Type of problem'] 
|| '-'} + ${item.fields['Source'] || '-'} + ${item.fields['TR (Priority/Status)'] || '-'} + + + + `; + tbody.appendChild(row); + }); + + document.getElementById('previewSection').style.display = 'block'; + } + + async refreshStatus() { + try { + const response = await fetch('/api/feishu-sync/status'); + const data = await response.json(); + + if (data.success) { + const status = data.status; + document.getElementById('totalLocalWorkorders').textContent = status.total_local_workorders || 0; + document.getElementById('syncedWorkorders').textContent = status.synced_workorders || 0; + document.getElementById('unsyncedWorkorders').textContent = status.unsynced_workorders || 0; + } + } catch (error) { + console.error('刷新状态失败:', error); + } + } + + showProgress(show) { + const progress = document.getElementById('syncProgress'); + if (show) { + progress.style.display = 'block'; + const bar = progress.querySelector('.progress-bar'); + bar.style.width = '100%'; + } else { + setTimeout(() => { + progress.style.display = 'none'; + const bar = progress.querySelector('.progress-bar'); + bar.style.width = '0%'; + }, 1000); + } + } + + addSyncLog(message) { + const log = document.getElementById('syncLog'); + const timestamp = new Date().toLocaleString(); + const logEntry = document.createElement('div'); + logEntry.innerHTML = `[${timestamp}] ${message}`; + + if (log.querySelector('.text-muted')) { + log.innerHTML = ''; + } + + log.appendChild(logEntry); + log.scrollTop = log.scrollHeight; + } + + async exportConfig() { + try { + const response = await fetch('/api/feishu-sync/config/export'); + const data = await response.json(); + + if (data.success) { + // 创建下载链接 + const blob = new Blob([data.config], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `feishu_config_${new Date().toISOString().split('T')[0]}.json`; + document.body.appendChild(a); + a.click(); + 
document.body.removeChild(a); + URL.revokeObjectURL(url); + + this.showNotification('配置导出成功', 'success'); + } else { + this.showNotification('配置导出失败: ' + data.error, 'error'); + } + } catch (error) { + this.showNotification('配置导出失败: ' + error.message, 'error'); + } + } + + showImportModal() { + const modal = new bootstrap.Modal(document.getElementById('importConfigModal')); + modal.show(); + } + + async importConfig() { + try { + const configJson = document.getElementById('configJson').value.trim(); + + if (!configJson) { + this.showNotification('请输入配置JSON数据', 'warning'); + return; + } + + const response = await fetch('/api/feishu-sync/config/import', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ config: configJson }) + }); + + const data = await response.json(); + + if (data.success) { + this.showNotification('配置导入成功', 'success'); + this.loadConfig(); + this.refreshStatus(); + + // 关闭模态框 + const modal = bootstrap.Modal.getInstance(document.getElementById('importConfigModal')); + modal.hide(); + document.getElementById('configJson').value = ''; + } else { + this.showNotification('配置导入失败: ' + data.error, 'error'); + } + } catch (error) { + this.showNotification('配置导入失败: ' + error.message, 'error'); + } + } + + async resetConfig() { + if (confirm('确定要重置所有配置吗?此操作不可撤销!')) { + try { + const response = await fetch('/api/feishu-sync/config/reset', { + method: 'POST' + }); + + const data = await response.json(); + + if (data.success) { + this.showNotification('配置重置成功', 'success'); + this.loadConfig(); + this.refreshStatus(); + } else { + this.showNotification('配置重置失败: ' + data.error, 'error'); + } + } catch (error) { + this.showNotification('配置重置失败: ' + error.message, 'error'); + } + } + } + + async createWorkorder(recordId) { + if (confirm(`确定要从飞书记录 ${recordId} 创建工单吗?`)) { + try { + this.showNotification('正在创建工单...', 'info'); + + const response = await fetch('/api/feishu-sync/create-workorder', { + method: 'POST', + 
headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + record_id: recordId + }) + }); + + const data = await response.json(); + + if (data.success) { + this.showNotification(data.message, 'success'); + // 刷新工单列表(如果用户在工单页面) + if (typeof window.refreshWorkOrders === 'function') { + window.refreshWorkOrders(); + } + } else { + this.showNotification('创建工单失败: ' + data.message, 'error'); + } + } catch (error) { + this.showNotification('创建工单失败: ' + error.message, 'error'); + } + } + } + + showNotification(message, type = 'info') { + const container = document.getElementById('notificationContainer'); + const alert = document.createElement('div'); + alert.className = `alert alert-${type === 'error' ? 'danger' : type} alert-dismissible fade show`; + alert.innerHTML = ` + ${message} + + `; + + container.appendChild(alert); + + setTimeout(() => { + if (alert.parentNode) { + alert.parentNode.removeChild(alert); + } + }, 5000); + } +} + // 初始化应用 let dashboard; +let feishuSync; document.addEventListener('DOMContentLoaded', () => { dashboard = new TSPDashboard(); + feishuSync = new FeishuSyncManager(); }); diff --git a/src/web/templates/dashboard.html b/src/web/templates/dashboard.html index 0c7707f..8adb552 100644 --- a/src/web/templates/dashboard.html +++ b/src/web/templates/dashboard.html @@ -417,6 +417,10 @@ 工单管理 + + + 飞书同步 + 对话历史 @@ -1034,6 +1038,181 @@
+ + + + + + + +
+ diff --git a/src/web/templates/feishu_sync.html b/src/web/templates/feishu_sync.html deleted file mode 100644 index 09c226b..0000000 --- a/src/web/templates/feishu_sync.html +++ /dev/null @@ -1,662 +0,0 @@ - - - - - - 飞书同步管理 - TSP助手 - - - - - -
-
- - - - -
-
-

- 飞书同步管理 -

-
- - -
-
-
-
-
- 飞书配置 -
-
-
-
-
-
-
- - -
-
-
-
- - -
-
-
-
-
-
- - -
-
-
-
- - -
-
-
-
- - - - - -
-
-
-
-
-
- - -
-
-
-
-
0
-

本地工单总数

-
-
-
-
-
-
-
0
-

已同步工单

-
-
-
-
-
-
-
0
-

未同步工单

-
-
-
-
- - -
-
-
-
-
- 同步操作 -
-
-
-
- - - - -
- -
- - -
- - - - - -
-
同步日志
-
-
暂无同步记录
-
-
-
-
-
-
- - - -
-
-
- - - - - -
- - - - - diff --git a/update_config.json b/update_config.json deleted file mode 100644 index f3f70e2..0000000 --- a/update_config.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "app_name": "tsp_assistant", - "deploy_path": "/opt/tsp_assistant", - "backup_path": "./backups", - "service_name": "tsp_assistant", - "health_url": "http://localhost:5000/api/health", - "update_timeout": 300, - "rollback_enabled": true, - "auto_backup": true, - "hot_update_enabled": true, - "environments": { - "development": { - "path": "./dev_deploy", - "service_name": "", - "auto_restart": false, - "description": "开发环境" - }, - "staging": { - "path": "/opt/tsp_assistant_staging", - "service_name": "tsp_assistant_staging", - "auto_restart": true, - "description": "测试环境" - }, - "production": { - "path": "/opt/tsp_assistant", - "service_name": "tsp_assistant", - "auto_restart": true, - "description": "生产环境" - } - }, - "hot_update_files": [ - "src/web/static/js/dashboard.js", - "src/web/static/css/style.css", - "src/web/templates/dashboard.html", - "src/web/app.py", - "src/knowledge_base/knowledge_manager.py", - "src/dialogue/realtime_chat.py", - "src/agent/agent_core.py", - "src/agent/tool_manager.py" - ], - "critical_files": [ - "init_database.py", - "requirements.txt", - "version.json", - "src/core/models.py", - "src/core/database.py" - ], - "notification": { - "enabled": true, - "webhook_url": "", - "email": "", - "slack_channel": "" - } -} diff --git a/version.json b/version.json deleted file mode 100644 index d3adf1c..0000000 --- a/version.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "version": "1.3.0", - "build_number": 16, - "release_date": "2025-09-17T17:30:00", - "git_commit": "unknown", - "deployment_status": "development", - "changelog": [ - { - "version": "1.3.0", - "date": "2025-09-17T17:30:00", - "description": "数据库架构优化:MySQL主数据库+SQLite备份系统,工单详情API修复,备份管理功能" - }, - { - "version": "1.2.0", - "date": "2025-09-16T16:30:00", - "description": "系统设置扩展,API管理,模型参数配置,端口管理,真实数据分析" - }, - { - 
"version": "1.1.0", - "date": "2025-09-16T14:00:00", - "description": "工单AI建议功能,知识库搜索优化,Agent管理改进" - }, - { - "version": "1.0.0", - "date": "2024-01-01T00:00:00", - "description": "初始版本发布" - } - ], - "dependencies": { - "flask": "2.3.3", - "sqlalchemy": "2.0.21", - "psutil": "5.9.5", - "openpyxl": "3.1.2" - }, - "deployment_info": { - "supported_environments": ["development", "staging", "production"], - "min_python_version": "3.8", - "required_services": ["nginx", "systemd"], - "backup_strategy": "automatic", - "rollback_enabled": true - } -} diff --git a/version.py b/version.py deleted file mode 100644 index f2a367c..0000000 --- a/version.py +++ /dev/null @@ -1,199 +0,0 @@ - -# -*- coding: utf-8 -*- -""" -TSP智能助手版本管理模块 -""" - -import os -import json -import subprocess -from datetime import datetime -from typing import Dict, List, Optional - -class VersionManager: - """版本管理器""" - - def __init__(self, version_file: str = "version.json"): - self.version_file = version_file - self.version_info = self._load_version() - - def _load_version(self) -> Dict: - """加载版本信息""" - if os.path.exists(self.version_file): - try: - with open(self.version_file, 'r', encoding='utf-8') as f: - return json.load(f) - except Exception as e: - print(f"加载版本文件失败: {e}") - - # 默认版本信息 - return { - "version": "1.0.0", - "build_number": 1, - "release_date": datetime.now().isoformat(), - "git_commit": self._get_git_commit(), - "deployment_status": "development", - "changelog": [], - "dependencies": self._get_dependencies() - } - - def _get_git_commit(self) -> str: - """获取Git提交哈希""" - try: - result = subprocess.run(['git', 'rev-parse', 'HEAD'], - capture_output=True, text=True) - return result.stdout.strip()[:8] if result.returncode == 0 else "unknown" - except: - return "unknown" - - def _get_dependencies(self) -> Dict: - """获取依赖包信息""" - try: - result = subprocess.run(['pip', 'freeze'], - capture_output=True, text=True) - if result.returncode == 0: - deps = {} - for line in 
result.stdout.strip().split('\n'): - if '==' in line: - name, version = line.split('==', 1) - deps[name] = version - return deps - except: - pass - return {} - - def get_version(self) -> str: - """获取当前版本号""" - return self.version_info["version"] - - def get_build_number(self) -> int: - """获取构建号""" - return self.version_info["build_number"] - - def increment_version(self, version_type: str = "patch") -> str: - """增加版本号""" - current_version = self.version_info["version"] - major, minor, patch = map(int, current_version.split('.')) - - if version_type == "major": - major += 1 - minor = 0 - patch = 0 - elif version_type == "minor": - minor += 1 - patch = 0 - else: # patch - patch += 1 - - new_version = f"{major}.{minor}.{patch}" - self.version_info["version"] = new_version - self.version_info["build_number"] += 1 - self.version_info["release_date"] = datetime.now().isoformat() - self.version_info["git_commit"] = self._get_git_commit() - self.version_info["dependencies"] = self._get_dependencies() - - self._save_version() - return new_version - - def add_changelog_entry(self, entry: str, version: str = None): - """添加变更日志条目""" - if version is None: - version = self.get_version() - - changelog_entry = { - "version": version, - "date": datetime.now().isoformat(), - "description": entry - } - - self.version_info["changelog"].insert(0, changelog_entry) - self._save_version() - - def set_deployment_status(self, status: str): - """设置部署状态""" - valid_statuses = ["development", "staging", "production", "maintenance"] - if status in valid_statuses: - self.version_info["deployment_status"] = status - self._save_version() - else: - raise ValueError(f"无效的部署状态: {status}") - - def _save_version(self): - """保存版本信息""" - try: - with open(self.version_file, 'w', encoding='utf-8') as f: - json.dump(self.version_info, f, indent=2, ensure_ascii=False) - except Exception as e: - print(f"保存版本文件失败: {e}") - - def get_version_info(self) -> Dict: - """获取完整版本信息""" - return self.version_info.copy() - 
- def create_release_tag(self, tag_message: str = None): - """创建Git标签""" - version = self.get_version() - tag_name = f"v{version}" - - if tag_message is None: - tag_message = f"Release version {version}" - - try: - # 创建标签 - subprocess.run(['git', 'tag', '-a', tag_name, '-m', tag_message], - check=True) - print(f"已创建标签: {tag_name}") - return tag_name - except subprocess.CalledProcessError as e: - print(f"创建标签失败: {e}") - return None - -def main(): - """命令行接口""" - import argparse - - parser = argparse.ArgumentParser(description='TSP智能助手版本管理') - parser.add_argument('action', choices=['version', 'increment', 'status', 'changelog', 'tag'], - help='要执行的操作') - parser.add_argument('--type', choices=['major', 'minor', 'patch'], - default='patch', help='版本类型') - parser.add_argument('--status', choices=['development', 'staging', 'production', 'maintenance'], - help='部署状态') - parser.add_argument('--message', help='变更日志消息或标签消息') - - args = parser.parse_args() - - vm = VersionManager() - - if args.action == 'version': - print(f"当前版本: {vm.get_version()}") - print(f"构建号: {vm.get_build_number()}") - print(f"部署状态: {vm.version_info['deployment_status']}") - - elif args.action == 'increment': - new_version = vm.increment_version(args.type) - print(f"版本已更新为: {new_version}") - - elif args.action == 'status': - if args.status: - vm.set_deployment_status(args.status) - print(f"部署状态已设置为: {args.status}") - else: - print(f"当前部署状态: {vm.version_info['deployment_status']}") - - elif args.action == 'changelog': - if args.message: - vm.add_changelog_entry(args.message) - print(f"已添加变更日志: {args.message}") - else: - print("变更日志:") - for entry in vm.version_info['changelog'][:5]: - print(f" {entry['version']} - {entry['description']}") - - elif args.action == 'tag': - tag_name = vm.create_release_tag(args.message) - if tag_name: - print(f"标签创建成功: {tag_name}") - -if __name__ == "__main__": - main() diff --git a/新功能说明_v1.4.0.md b/新功能说明_v1.4.0.md new file mode 100644 index 0000000..237e504 --- /dev/null 
+++ b/新功能说明_v1.4.0.md @@ -0,0 +1,243 @@ +# TSP智能助手 v1.4.0 新功能说明 + +## 🎉 版本概述 + +TSP智能助手 v1.4.0 是一个重要的功能更新版本,主要包含飞书集成、页面功能合并、数据库架构优化和代码重构等重要改进。 + +## 🚀 主要新功能 + +### 1. 飞书多维表格集成 📱 + +#### 功能描述 +- **数据同步**: 支持从飞书多维表格自动同步工单数据 +- **字段映射**: 智能映射飞书字段到本地数据库结构 +- **实时更新**: 支持增量同步和全量同步 +- **数据预览**: 同步前可预览数据,确保准确性 + +#### 支持的飞书字段 +| 飞书字段 | 本地字段 | 类型 | 说明 | +|---------|---------|------|------| +| TR Number | order_id | String | 工单编号 | +| TR Description | description | Text | 工单描述 | +| Type of problem | category | String | 问题类型 | +| TR Level | priority | String | 优先级 | +| TR Status | status | String | 工单状态 | +| Source | source | String | 来源 | +| Created by | created_by | String | 创建人 | +| Module(模块) | module | String | 模块 | +| Wilfulness(责任人) | wilfulness | String | 责任人 | +| Date of close TR | date_of_close | DateTime | 关闭日期 | +| Vehicle Type01 | vehicle_type | String | 车型 | +| VIN\|sim | vin_sim | String | 车架号/SIM | +| App remote control version | app_remote_control_version | String | 应用远程控制版本 | +| HMI SW | hmi_sw | String | HMI软件版本 | +| 父记录 | parent_record | String | 父记录 | +| Has it been updated on the same day | has_updated_same_day | String | 是否同日更新 | +| Operating time | operating_time | String | 操作时间 | + +#### 使用方法 +1. 在飞书开放平台创建企业自建应用 +2. 配置 `config/integrations_config.json` 文件 +3. 在主仪表板的"飞书同步"标签页进行数据同步 +4. 支持测试连接、预览数据、执行同步等操作 + +### 2. 页面功能合并 🎨 + +#### 改进内容 +- **统一界面**: 飞书同步功能已合并到主仪表板 +- **标签页设计**: 使用标签页组织不同功能模块 +- **用户体验**: 所有功能现在都在一个统一的界面中 +- **代码优化**: 删除了冗余的独立页面和蓝图 + +#### 界面变化 +- **原独立页面**: `http://localhost:5000/feishu-sync` (已删除) +- **现集成位置**: 主仪表板的"飞书同步"标签页 +- **访问方式**: 访问 `http://localhost:5000` 即可使用所有功能 + +### 3. 
数据库架构优化 🗄️ + +#### 工单表扩展 +为 `work_orders` 表新增了12个飞书相关字段: + +```sql +-- 飞书集成字段 +source VARCHAR(50) -- 来源 +module VARCHAR(100) -- 模块 +created_by VARCHAR(100) -- 创建人 +wilfulness VARCHAR(100) -- 责任人 +date_of_close DATETIME -- 关闭日期 +vehicle_type VARCHAR(100) -- 车型 +vin_sim VARCHAR(50) -- 车架号/SIM +app_remote_control_version VARCHAR(100) -- 应用远程控制版本 +hmi_sw VARCHAR(100) -- HMI软件版本 +parent_record VARCHAR(100) -- 父记录 +has_updated_same_day VARCHAR(50) -- 是否同日更新 +operating_time VARCHAR(100) -- 操作时间 +``` + +#### 数据库初始化改进 +- **自动迁移**: 字段迁移已集成到数据库初始化流程 +- **智能检测**: 自动检测缺失字段并添加 +- **错误处理**: 改进的错误处理和日志记录 +- **兼容性**: 保持与现有数据的兼容性 + +### 4. 代码重构优化 🔧 + +#### 文件结构优化 +- **大文件拆分**: 将 `src/agent_assistant.py` 拆分为多个模块 +- **模块化设计**: 创建 `agent_assistant_core.py`、`agent_message_handler.py`、`agent_sample_actions.py` +- **降低风险**: 减少单文件代码行数,降低运行风险 +- **维护性**: 提高代码的可维护性和可读性 + +#### 前端架构改进 +- **JavaScript类**: 使用类组织前端逻辑 +- **模块化**: `TSPDashboard`、`FeishuSyncManager` 等独立模块 +- **异步处理**: 改进的异步API调用处理 +- **错误处理**: 更好的错误处理和用户反馈 + +## 📋 配置说明 + +### 飞书集成配置 + +编辑 `config/integrations_config.json` 文件(以下为示例占位值,请勿将真实凭证提交到仓库): + +```json +{ + "feishu": { + "app_id": "your-app-id", + "app_secret": "your-app-secret", + "app_token": "your-app-token", + "table_id": "your-table-id", + "last_updated": "2025-09-19T18:27:40.579958", + "status": "active" + }, + "system": { + "sync_limit": 10, + "ai_suggestions_enabled": true, + "auto_sync_interval": 0, + "last_sync_time": null + } +} +``` + +### 环境变量支持 + +```bash +# 飞书配置 +export FEISHU_APP_ID="your-app-id" +export FEISHU_APP_SECRET="your-app-secret" +export FEISHU_APP_TOKEN="your-app-token" +export FEISHU_TABLE_ID="your-table-id" +``` + +## 🚀 部署指南 + +### 部署前准备 + +1. **配置飞书应用** + - 在飞书开放平台创建企业自建应用 + - 获取应用凭证和权限 + +2. **更新配置文件** + - 配置 `config/integrations_config.json` + - 设置正确的飞书应用信息 + +3. **初始化数据库** + ```bash + python init_database.py + ``` + +4. **测试连接** + - 启动服务后访问主仪表板 + - 在"飞书同步"标签页测试连接 + +### 部署步骤 + +```bash +# 1. 
备份当前版本 +python scripts/update_manager.py create-backup --environment production + +# 2. 部署新版本 +python scripts/update_manager.py auto-update --source . --environment production + +# 3. 验证功能 +# 访问 http://localhost:5000 +# 测试飞书同步功能 +``` + +## 🔍 使用指南 + +### 飞书数据同步 + +1. **访问功能** + - 打开浏览器访问 `http://localhost:5000` + - 点击"飞书同步"标签页 + +2. **测试连接** + - 点击"测试连接"按钮 + - 验证飞书应用配置是否正确 + +3. **预览数据** + - 点击"预览数据"按钮 + - 查看将要同步的数据内容 + +4. **执行同步** + - 点击"同步数据"按钮 + - 等待同步完成 + +5. **查看结果** + - 在工单管理页面查看同步的数据 + - 验证字段映射是否正确 + +### 工单管理增强 + +1. **查看飞书字段** + - 在工单详情页面可以看到新的飞书字段 + - 包括来源、模块、责任人等信息 + +2. **数据关联** + - 飞书数据与本地工单数据关联 + - 支持双向数据同步 + +## 🐛 故障排除 + +### 常见问题 + +1. **飞书连接失败** + - 检查app_id和app_secret是否正确 + - 验证应用权限配置 + - 确认网络连接正常 + +2. **字段映射错误** + - 检查飞书表格字段名称 + - 验证字段映射配置 + - 查看同步日志 + +3. **数据库迁移失败** + - 检查数据库连接状态 + - 验证数据库权限 + - 查看初始化日志 + +4. **页面功能异常** + - 清除浏览器缓存 + - 检查JavaScript控制台错误 + - 验证API接口状态 + +### 日志位置 + +- **应用日志**: `logs/tsp_assistant.log` +- **数据库日志**: 数据库初始化输出 +- **飞书同步日志**: 在同步界面显示 + +## 📞 技术支持 + +如有问题,请: + +1. 查看相关日志文件 +2. 检查配置文件设置 +3. 验证网络连接状态 +4. 联系技术支持团队 + +--- + +**TSP智能助手 v1.4.0** - 让车辆服务更智能,让数据管理更便捷! 🚗✨ diff --git a/部署更新指南.md b/部署更新指南.md index 562c97f..fc11538 100644 --- a/部署更新指南.md +++ b/部署更新指南.md @@ -172,7 +172,7 @@ python version.py increment --type minor python version.py changelog --message "添加新功能" # 3. 创建发布标签 -python version.py tag --message "Release v1.3.0" +python version.py tag --message "Release v1.4.0" # 4. 部署到测试环境 python scripts/update_manager.py auto-update --source . --environment staging @@ -181,6 +181,34 @@ python scripts/update_manager.py auto-update --source . --environment staging python scripts/update_manager.py auto-update --source . --environment production ``` +### 1.1. 飞书集成部署前准备 + +```bash +# 1. 配置飞书应用 +# - 在飞书开放平台创建企业自建应用 +# - 获取app_id和app_secret +# - 配置应用权限(读取多维表格) + +# 2. 
更新配置文件 +# 编辑 config/integrations_config.json +{ + "feishu": { + "app_id": "your-app-id", + "app_secret": "your-app-secret", + "app_token": "your-app-token", + "table_id": "your-table-id", + "status": "active" + } +} + +# 3. 初始化数据库(包含新字段) +python init_database.py + +# 4. 测试飞书连接 +# 访问主仪表板的"飞书同步"标签页 +# 点击"测试连接"验证配置 +``` + ### 2. 安全更新 ```bash @@ -265,6 +293,18 @@ CMD ["python", "start_dashboard.py"] - 验证端口是否被占用 - 查看应用日志 +4. **飞书集成问题** + - 检查飞书应用权限配置 + - 验证app_token和table_id是否正确 + - 确认网络连接和API访问权限 + - 查看飞书同步日志 + +5. **数据库字段迁移失败** + - 检查数据库连接状态 + - 验证字段映射配置 + - 确认数据库权限 + - 查看数据库初始化日志 + ### 紧急处理 ```bash @@ -280,6 +320,21 @@ sudo systemctl start tsp_assistant ## 📝 更新记录 +### v1.4.0 (2025-09-19) +- ✅ 飞书集成功能:支持飞书多维表格数据同步 +- ✅ 页面功能合并:飞书同步页面合并到主仪表板 +- ✅ 数据库架构优化:扩展工单表字段,支持飞书数据 +- ✅ 代码重构优化:大文件拆分,降低运行风险 +- ✅ 字段映射完善:智能映射飞书字段到本地数据库 +- ✅ 数据库初始化改进:集成字段迁移到初始化流程 + +### v1.3.0 (2025-09-17) +- ✅ 数据库架构优化:MySQL主数据库+SQLite备份系统 +- ✅ 工单详情API修复:解决数据库会话管理问题 +- ✅ 备份管理系统:自动备份MySQL数据到SQLite +- ✅ 数据库状态监控:实时监控MySQL和SQLite状态 +- ✅ 备份管理API:支持数据备份和恢复操作 + ### v1.2.0 (2025-09-16) - ✅ 系统设置扩展:API管理、模型参数配置、端口管理 - ✅ 真实数据分析:修复性能趋势图表显示