feat: 重大功能更新 v1.4.0 - 飞书集成、AI语义相似度、前端优化
主要更新内容: - 🚀 飞书多维表格集成,支持工单数据同步 - 🤖 AI建议与人工描述语义相似度计算 - 🎨 前端UI全面优化,现代化设计 - 📊 智能知识库入库策略(AI准确率<90%使用人工描述) - 🔧 代码重构,模块化架构优化 - 📚 完整文档整合和更新 - 🐛 修复配置导入和数据库字段问题 技术特性: - 使用sentence-transformers进行语义相似度计算 - 快速模式结合TF-IDF和语义方法 - 响应式设计,支持移动端 - 加载状态和动画效果 - 配置化AI准确率阈值
This commit is contained in:
254
src/agent/agent_assistant_core.py
Normal file
254
src/agent/agent_assistant_core.py
Normal file
@@ -0,0 +1,254 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
TSP Agent助手核心模块
|
||||
包含Agent助手的核心功能和基础类
|
||||
"""
|
||||
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
from src.main import TSPAssistant
|
||||
from src.agent import AgentCore, AgentState
|
||||
from src.agent.auto_monitor import AutoMonitorService
|
||||
from src.agent.intelligent_agent import IntelligentAgent, AlertContext, KnowledgeContext
|
||||
from src.agent.llm_client import LLMManager, LLMConfig
|
||||
from src.agent.action_executor import ActionExecutor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class TSPAgentAssistantCore(TSPAssistant):
    """Core of the TSP agent assistant.

    Layers agent capabilities on top of the base ``TSPAssistant``: an
    ``AgentCore`` state machine, an auto-monitoring service, an LLM-backed
    intelligent agent, an action executor, and a bounded execution history
    used for status reporting.
    """

    def __init__(self, llm_config: Optional[LLMConfig] = None):
        # The base TSP assistant must be initialised first; the
        # sub-components below receive `self` and may rely on it.
        super().__init__()

        # Agent state machine and background monitoring service.
        self.agent_core = AgentCore()
        self.auto_monitor = AutoMonitorService(self)

        # LLM client (explicit config > project config file > built-in fallback).
        self._init_llm_manager(llm_config)

        # LLM-driven analysis / decision component.
        self.intelligent_agent = IntelligentAgent(
            llm_manager=self.llm_manager,
            agent_core=self.agent_core,
        )

        # Executes actions recommended by the intelligent agent.
        self.action_executor = ActionExecutor(self)

        # Runtime flags.
        self.agent_state = AgentState.IDLE
        self.is_agent_mode = True
        self.proactive_monitoring_enabled = False

        # Bounded execution history (see _record_execution).
        self.execution_history: List[Dict[str, Any]] = []
        self.max_history_size = 1000

        logger.info("TSP Agent助手核心初始化完成")

    def _init_llm_manager(self, llm_config: Optional[LLMConfig] = None) -> None:
        """Initialise ``self.llm_manager``.

        Resolution order: explicit ``llm_config`` argument, then the
        project's ``config.llm_config.DEFAULT_CONFIG``, then a built-in
        Qwen (DashScope OpenAI-compatible) fallback. The fallback API key
        is read from the environment (QWEN_API_KEY / DASHSCOPE_API_KEY)
        instead of being hard-coded, so real credentials never need to
        live in source control; the old placeholder remains the last
        resort so behavior without any env var is unchanged.
        """
        if llm_config:
            self.llm_manager = LLMManager(llm_config)
            return

        try:
            from config.llm_config import DEFAULT_CONFIG
            self.llm_manager = LLMManager(DEFAULT_CONFIG)
        except ImportError:
            # Config file absent: fall back to a built-in configuration.
            import os
            api_key = (
                os.environ.get("QWEN_API_KEY")
                or os.environ.get("DASHSCOPE_API_KEY")
                or "sk-your-qwen-api-key-here"
            )
            default_config = LLMConfig(
                provider="openai",
                api_key=api_key,
                base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
                model="qwen-turbo",
                temperature=0.7,
                max_tokens=2000,
            )
            self.llm_manager = LLMManager(default_config)

    def get_agent_status(self) -> Dict[str, Any]:
        """Return a snapshot of the agent's runtime status."""
        return {
            "agent_state": self.agent_state.value,
            "is_agent_mode": self.is_agent_mode,
            "proactive_monitoring": self.proactive_monitoring_enabled,
            "execution_count": len(self.execution_history),
            "llm_status": self.llm_manager.get_status(),
            "agent_core_status": self.agent_core.get_status(),
            "last_activity": self.execution_history[-1]["timestamp"] if self.execution_history else None,
        }

    def toggle_agent_mode(self, enabled: bool) -> bool:
        """Enable or disable agent mode; return True on success."""
        try:
            self.is_agent_mode = enabled
            if enabled:
                self.agent_state = AgentState.IDLE
                logger.info("Agent模式已启用")
            else:
                self.agent_state = AgentState.DISABLED
                logger.info("Agent模式已禁用")
            return True
        except Exception as e:
            logger.error(f"切换Agent模式失败: {e}")
            return False

    def start_proactive_monitoring(self) -> bool:
        """Start proactive monitoring; return False when already running."""
        try:
            if not self.proactive_monitoring_enabled:
                self.proactive_monitoring_enabled = True
                self.auto_monitor.start_monitoring()
                logger.info("主动监控已启动")
                return True
            return False
        except Exception as e:
            logger.error(f"启动主动监控失败: {e}")
            return False

    def stop_proactive_monitoring(self) -> bool:
        """Stop proactive monitoring; return False when not running."""
        try:
            if self.proactive_monitoring_enabled:
                self.proactive_monitoring_enabled = False
                self.auto_monitor.stop_monitoring()
                logger.info("主动监控已停止")
                return True
            return False
        except Exception as e:
            logger.error(f"停止主动监控失败: {e}")
            return False

    def run_proactive_monitoring(self) -> Dict[str, Any]:
        """Run one proactive-monitoring pass: gather health, alerts and
        workorder state, run the intelligent analysis, and execute any
        recommended actions.
        """
        try:
            if not self.proactive_monitoring_enabled:
                return {"success": False, "message": "主动监控未启用"}

            system_health = self.get_system_health()
            alerts = self.check_alerts()
            workorders_status = self._check_workorders_status()

            analysis = self.intelligent_agent.analyze_system_state(
                system_health=system_health,
                alerts=alerts,
                workorders=workorders_status,
            )

            # Execute whatever follow-up actions the analysis recommends.
            actions_taken = []
            if analysis.get("recommended_actions"):
                for action in analysis["recommended_actions"]:
                    actions_taken.append(self.action_executor.execute_action(action))

            return {
                "success": True,
                "analysis": analysis,
                "actions_taken": actions_taken,
                "timestamp": datetime.now().isoformat(),
            }
        except Exception as e:
            logger.error(f"主动监控检查失败: {e}")
            return {"success": False, "error": str(e)}

    def _check_workorders_status(self) -> Dict[str, Any]:
        """Summarise workorder counts and the resolution rate from the DB.

        Returns ``{"error": ...}`` on failure instead of raising.
        """
        try:
            from src.core.database import db_manager
            from src.core.models import WorkOrder

            with db_manager.get_session() as session:
                total_workorders = session.query(WorkOrder).count()
                open_workorders = session.query(WorkOrder).filter(WorkOrder.status == 'open').count()
                resolved_workorders = session.query(WorkOrder).filter(WorkOrder.status == 'resolved').count()

                return {
                    "total": total_workorders,
                    "open": open_workorders,
                    "resolved": resolved_workorders,
                    # Guard against division by zero on an empty table.
                    "resolution_rate": resolved_workorders / total_workorders if total_workorders > 0 else 0,
                }
        except Exception as e:
            logger.error(f"检查工单状态失败: {e}")
            return {"error": str(e)}

    def run_intelligent_analysis(self) -> Dict[str, Any]:
        """Run a comprehensive LLM analysis over the current system state
        and record the result in the execution history.
        """
        try:
            context = {
                "system_health": self.get_system_health(),
                "alerts": self.check_alerts(),
                "workorders": self._check_workorders_status(),
                "timestamp": datetime.now().isoformat(),
            }

            analysis = self.intelligent_agent.comprehensive_analysis(context)
            self._record_execution("intelligent_analysis", analysis)
            return analysis
        except Exception as e:
            logger.error(f"智能分析失败: {e}")
            return {"error": str(e)}

    def _record_execution(self, action_type: str, result: Any) -> None:
        """Append one record to the execution history, enforcing the cap."""
        self.execution_history.append({
            "timestamp": datetime.now().isoformat(),
            "action_type": action_type,
            "result": result,
            "agent_state": self.agent_state.value,
        })

        # Drop the oldest entries beyond max_history_size.
        if len(self.execution_history) > self.max_history_size:
            self.execution_history = self.execution_history[-self.max_history_size:]

    def get_action_history(self, limit: int = 50) -> List[Dict[str, Any]]:
        """Return the most recent ``limit`` execution records (newest last)."""
        return self.execution_history[-limit:] if self.execution_history else []

    def clear_execution_history(self) -> Dict[str, Any]:
        """Clear the execution history; return a success/error payload."""
        try:
            self.execution_history.clear()
            logger.info("执行历史已清空")
            return {"success": True, "message": "执行历史已清空"}
        except Exception as e:
            logger.error(f"清空执行历史失败: {e}")
            return {"success": False, "error": str(e)}

    def get_llm_usage_stats(self) -> Dict[str, Any]:
        """Return usage statistics from the LLM manager."""
        try:
            return self.llm_manager.get_usage_stats()
        except Exception as e:
            logger.error(f"获取LLM使用统计失败: {e}")
            return {"error": str(e)}
||||
243
src/agent/agent_message_handler.py
Normal file
243
src/agent/agent_message_handler.py
Normal file
@@ -0,0 +1,243 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
TSP Agent消息处理模块
|
||||
处理Agent的消息处理和对话功能
|
||||
"""
|
||||
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
|
||||
from .agent_assistant_core import TSPAgentAssistantCore
|
||||
from .intelligent_agent import IntelligentAgent
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentMessageHandler:
    """Routes messages, conversations, workorders and alerts through the
    intelligent agent and executes any recommended actions.

    All entry points share the same pipeline: build a context dict, ask
    the intelligent agent, execute recommended actions, assemble a
    response and record it in the owning core's execution history.
    """

    def __init__(self, agent_core: "TSPAgentAssistantCore"):
        self.agent_core = agent_core
        self.intelligent_agent = agent_core.intelligent_agent
        self.action_executor = agent_core.action_executor

    def _set_agent_state(self, state_name: str) -> None:
        """Set the owning core's agent state by ``AgentState`` member name.

        Bug fix: the original wrote
        ``self.agent_core.agent_core.AgentState.PROCESSING`` — but
        ``AgentState`` is a module-level enum (imported in
        agent_assistant_core via ``from src.agent import AgentCore,
        AgentState``), not an attribute of the ``AgentCore`` instance, so
        every state transition raised ``AttributeError``.
        """
        from src.agent import AgentState
        self.agent_core.agent_state = getattr(AgentState, state_name)

    def _execute_recommended_actions(self, agent_response: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Execute each action the agent recommended; return their results.

        Shared by all four process_* entry points (previously duplicated).
        """
        actions_taken = []
        for action in agent_response.get("recommended_actions") or []:
            actions_taken.append(self.action_executor.execute_action(action))
        return actions_taken

    async def process_message_agent(self, message: str, user_id: str = "admin",
                                    work_order_id: Optional[int] = None,
                                    enable_proactive: bool = True) -> Dict[str, Any]:
        """Handle a single user message with the intelligent agent.

        Transitions the agent state PROCESSING -> IDLE (ERROR on failure)
        and records the response in the execution history.
        """
        try:
            self._set_agent_state("PROCESSING")

            context = {
                "message": message,
                "user_id": user_id,
                "work_order_id": work_order_id,
                "timestamp": datetime.now().isoformat(),
                "enable_proactive": enable_proactive,
            }

            agent_response = await self.intelligent_agent.process_message(context)
            actions_taken = self._execute_recommended_actions(agent_response)

            response = {
                "response": agent_response.get("response", "Agent已处理您的请求"),
                "actions": actions_taken,
                "status": "completed",
                "confidence": agent_response.get("confidence", 0.8),
                "context": context,
            }

            self.agent_core._record_execution("message_processing", response)
            self._set_agent_state("IDLE")
            return response

        except Exception as e:
            logger.error(f"Agent消息处理失败: {e}")
            self._set_agent_state("ERROR")
            return {
                "response": f"处理消息时发生错误: {str(e)}",
                "actions": [],
                "status": "error",
                "error": str(e),
            }

    async def process_conversation_agent(self, conversation_data: Dict[str, Any]) -> Dict[str, Any]:
        """Handle one conversation turn (message plus history/session info)."""
        try:
            user_message = conversation_data.get("message", "")
            user_id = conversation_data.get("user_id", "anonymous")
            session_id = conversation_data.get("session_id")

            context = {
                "message": user_message,
                "user_id": user_id,
                "session_id": session_id,
                "conversation_history": conversation_data.get("history", []),
                "timestamp": datetime.now().isoformat(),
            }

            agent_response = await self.intelligent_agent.process_conversation(context)
            actions_taken = self._execute_recommended_actions(agent_response)

            response = {
                "response": agent_response.get("response", "Agent已处理您的对话"),
                "actions": actions_taken,
                "status": "completed",
                "confidence": agent_response.get("confidence", 0.8),
                "context": context,
                "session_id": session_id,
            }

            self.agent_core._record_execution("conversation_processing", response)
            return response

        except Exception as e:
            logger.error(f"Agent对话处理失败: {e}")
            return {
                "response": f"处理对话时发生错误: {str(e)}",
                "actions": [],
                "status": "error",
                "error": str(e),
            }

    async def process_workorder_agent(self, workorder_data: Dict[str, Any]) -> Dict[str, Any]:
        """Handle a workorder (default action type: "analyze")."""
        try:
            context = {
                "workorder_id": workorder_data.get("workorder_id"),
                "action_type": workorder_data.get("action_type", "analyze"),
                "workorder_data": workorder_data,
                "timestamp": datetime.now().isoformat(),
            }

            agent_response = await self.intelligent_agent.process_workorder(context)
            actions_taken = self._execute_recommended_actions(agent_response)

            response = {
                "response": agent_response.get("response", "Agent已处理工单"),
                "actions": actions_taken,
                "status": "completed",
                "confidence": agent_response.get("confidence", 0.8),
                "context": context,
            }

            self.agent_core._record_execution("workorder_processing", response)
            return response

        except Exception as e:
            logger.error(f"Agent工单处理失败: {e}")
            return {
                "response": f"处理工单时发生错误: {str(e)}",
                "actions": [],
                "status": "error",
                "error": str(e),
            }

    async def process_alert_agent(self, alert_data: Dict[str, Any]) -> Dict[str, Any]:
        """Handle an alert event."""
        try:
            context = {
                "alert_data": alert_data,
                "timestamp": datetime.now().isoformat(),
            }

            agent_response = await self.intelligent_agent.process_alert(context)
            actions_taken = self._execute_recommended_actions(agent_response)

            response = {
                "response": agent_response.get("response", "Agent已处理预警"),
                "actions": actions_taken,
                "status": "completed",
                "confidence": agent_response.get("confidence", 0.8),
                "context": context,
            }

            self.agent_core._record_execution("alert_processing", response)
            return response

        except Exception as e:
            logger.error(f"Agent预警处理失败: {e}")
            return {
                "response": f"处理预警时发生错误: {str(e)}",
                "actions": [],
                "status": "error",
                "error": str(e),
            }

    def get_conversation_suggestions(self, context: Dict[str, Any]) -> List[str]:
        """Return conversation suggestions; empty list on failure."""
        try:
            return self.intelligent_agent.get_conversation_suggestions(context)
        except Exception as e:
            logger.error(f"获取对话建议失败: {e}")
            return []

    def get_workorder_suggestions(self, workorder_data: Dict[str, Any]) -> List[str]:
        """Return workorder suggestions; empty list on failure."""
        try:
            return self.intelligent_agent.get_workorder_suggestions(workorder_data)
        except Exception as e:
            logger.error(f"获取工单建议失败: {e}")
            return []

    def get_alert_suggestions(self, alert_data: Dict[str, Any]) -> List[str]:
        """Return alert suggestions; empty list on failure."""
        try:
            return self.intelligent_agent.get_alert_suggestions(alert_data)
        except Exception as e:
            logger.error(f"获取预警建议失败: {e}")
            return []
||||
405
src/agent/agent_sample_actions.py
Normal file
405
src/agent/agent_sample_actions.py
Normal file
@@ -0,0 +1,405 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
TSP Agent示例动作模块
|
||||
包含Agent的示例动作和测试功能
|
||||
"""
|
||||
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import Dict, Any, List
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from .agent_assistant_core import TSPAgentAssistantCore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentSampleActions:
    """Demo/sample actions and lightweight performance tests for the agent.

    Every ``_sample_*`` coroutine returns a payload dict with an
    ``action_type`` key and either a computed status or an error; the
    shared error shape is produced by ``_action_error`` (previously
    duplicated in every method).
    """

    def __init__(self, agent_core: "TSPAgentAssistantCore"):
        self.agent_core = agent_core

    @staticmethod
    def _action_error(action_type: str, exc: Exception) -> Dict[str, Any]:
        """Uniform error payload for a failed sample action."""
        return {
            "action_type": action_type,
            "status": "error",
            "error": str(exc),
        }

    async def trigger_sample_actions(self) -> Dict[str, Any]:
        """Run all five sample actions sequentially and record the results."""
        try:
            logger.info("开始执行示例动作")

            actions_results = []

            # 1. System health check.
            actions_results.append(await self._sample_health_check())
            # 2. Alert analysis.
            actions_results.append(await self._sample_alert_analysis())
            # 3. Workorder processing.
            actions_results.append(await self._sample_workorder_processing())
            # 4. Knowledge-base update.
            actions_results.append(await self._sample_knowledge_update())
            # 5. Performance optimisation.
            actions_results.append(await self._sample_performance_optimization())

            self.agent_core._record_execution("sample_actions", {
                "actions_count": len(actions_results),
                "results": actions_results,
            })

            return {
                "success": True,
                "message": f"成功执行 {len(actions_results)} 个示例动作",
                "actions_results": actions_results,
                "timestamp": datetime.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"执行示例动作失败: {e}")
            return {
                "success": False,
                "error": str(e),
                "timestamp": datetime.now().isoformat(),
            }

    async def _sample_health_check(self) -> Dict[str, Any]:
        """Sample: bucket the system health score into a status label."""
        try:
            health_data = self.agent_core.get_system_health()
            health_score = health_data.get("health_score", 0)

            # Thresholds: >80 excellent, >60 good, >40 fair, else poor.
            if health_score > 80:
                status = "excellent"
                message = "系统运行状态良好"
            elif health_score > 60:
                status = "good"
                message = "系统运行状态正常"
            elif health_score > 40:
                status = "fair"
                message = "系统运行状态一般,建议关注"
            else:
                status = "poor"
                message = "系统运行状态较差,需要优化"

            return {
                "action_type": "health_check",
                "status": status,
                "message": message,
                "health_score": health_score,
                "timestamp": datetime.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"健康检查失败: {e}")
            return self._action_error("health_check", e)

    async def _sample_alert_analysis(self) -> Dict[str, Any]:
        """Sample: count alerts by level and classify the overall state."""
        try:
            alerts = self.agent_core.check_alerts()

            alert_count = len(alerts)
            critical_alerts = [a for a in alerts if a.get("level") == "critical"]
            warning_alerts = [a for a in alerts if a.get("level") == "warning"]

            # Critical outranks warning outranks info.
            if alert_count == 0:
                status = "no_alerts"
                message = "当前无活跃预警"
            elif len(critical_alerts) > 0:
                status = "critical"
                message = f"发现 {len(critical_alerts)} 个严重预警,需要立即处理"
            elif len(warning_alerts) > 0:
                status = "warning"
                message = f"发现 {len(warning_alerts)} 个警告预警,建议关注"
            else:
                status = "info"
                message = f"发现 {alert_count} 个信息预警"

            return {
                "action_type": "alert_analysis",
                "status": status,
                "message": message,
                "alert_count": alert_count,
                "critical_count": len(critical_alerts),
                "warning_count": len(warning_alerts),
                "timestamp": datetime.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"预警分析失败: {e}")
            return self._action_error("alert_analysis", e)

    async def _sample_workorder_processing(self) -> Dict[str, Any]:
        """Sample: classify the workorder backlog/resolution state."""
        try:
            workorders_status = self.agent_core._check_workorders_status()

            total = workorders_status.get("total", 0)
            open_count = workorders_status.get("open", 0)
            resolved_count = workorders_status.get("resolved", 0)
            resolution_rate = workorders_status.get("resolution_rate", 0)

            # Backlog (>10 open) is flagged before the resolution rate.
            if total == 0:
                status = "no_workorders"
                message = "当前无工单"
            elif open_count > 10:
                status = "high_backlog"
                message = f"工单积压严重,有 {open_count} 个待处理工单"
            elif resolution_rate > 0.8:
                status = "good_resolution"
                message = f"工单处理效率良好,解决率 {resolution_rate:.1%}"
            else:
                status = "normal"
                message = f"工单处理状态正常,待处理 {open_count} 个"

            return {
                "action_type": "workorder_processing",
                "status": status,
                "message": message,
                "total_workorders": total,
                "open_workorders": open_count,
                "resolved_workorders": resolved_count,
                "resolution_rate": resolution_rate,
                "timestamp": datetime.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"工单处理分析失败: {e}")
            return self._action_error("workorder_processing", e)

    async def _sample_knowledge_update(self) -> Dict[str, Any]:
        """Sample: report verification coverage of the knowledge base."""
        try:
            from src.core.database import db_manager
            from src.core.models import KnowledgeEntry

            with db_manager.get_session() as session:
                total_knowledge = session.query(KnowledgeEntry).count()
                verified_knowledge = session.query(KnowledgeEntry).filter(
                    KnowledgeEntry.is_verified == True
                ).count()
                unverified_knowledge = total_knowledge - verified_knowledge

                if total_knowledge == 0:
                    status = "empty"
                    message = "知识库为空,建议添加知识条目"
                elif unverified_knowledge > 0:
                    status = "needs_verification"
                    message = f"有 {unverified_knowledge} 个知识条目需要验证"
                else:
                    status = "up_to_date"
                    message = "知识库状态良好,所有条目已验证"

                return {
                    "action_type": "knowledge_update",
                    "status": status,
                    "message": message,
                    "total_knowledge": total_knowledge,
                    "verified_knowledge": verified_knowledge,
                    "unverified_knowledge": unverified_knowledge,
                    "timestamp": datetime.now().isoformat(),
                }

        except Exception as e:
            logger.error(f"知识库更新分析失败: {e}")
            return self._action_error("knowledge_update", e)

    async def _sample_performance_optimization(self) -> Dict[str, Any]:
        """Sample: derive optimisation suggestions from resource usage."""
        try:
            system_health = self.agent_core.get_system_health()

            cpu_usage = system_health.get("cpu_usage", 0)
            memory_usage = system_health.get("memory_usage", 0)
            disk_usage = system_health.get("disk_usage", 0)

            # Per-resource thresholds: CPU/memory 80%, disk 90%.
            optimization_suggestions = []
            if cpu_usage > 80:
                optimization_suggestions.append("CPU使用率过高,建议优化计算密集型任务")
            if memory_usage > 80:
                optimization_suggestions.append("内存使用率过高,建议清理缓存或增加内存")
            if disk_usage > 90:
                optimization_suggestions.append("磁盘空间不足,建议清理日志文件或扩容")

            if not optimization_suggestions:
                status = "optimal"
                message = "系统性能良好,无需优化"
            else:
                status = "needs_optimization"
                message = f"发现 {len(optimization_suggestions)} 个性能优化点"

            return {
                "action_type": "performance_optimization",
                "status": status,
                "message": message,
                "cpu_usage": cpu_usage,
                "memory_usage": memory_usage,
                "disk_usage": disk_usage,
                "optimization_suggestions": optimization_suggestions,
                "timestamp": datetime.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"性能优化分析失败: {e}")
            return self._action_error("performance_optimization", e)

    async def run_performance_test(self) -> Dict[str, Any]:
        """Run the three performance tests and report total wall time."""
        try:
            start_time = datetime.now()

            test_results = [
                await self._test_response_time(),
                await self._test_concurrency(),
                await self._test_memory_usage(),
            ]

            total_time = (datetime.now() - start_time).total_seconds()

            return {
                "success": True,
                "message": "性能测试完成",
                "total_time": total_time,
                "test_results": test_results,
                "timestamp": datetime.now().isoformat(),
            }

        except Exception as e:
            logger.error(f"性能测试失败: {e}")
            return {
                "success": False,
                "error": str(e),
                "timestamp": datetime.now().isoformat(),
            }

    async def _test_response_time(self) -> Dict[str, Any]:
        """Measure a simulated 0.1s task; <0.5s counts as "good"."""
        start_time = datetime.now()

        # Simulated workload.
        await asyncio.sleep(0.1)

        response_time = (datetime.now() - start_time).total_seconds()

        return {
            "test_type": "response_time",
            "response_time": response_time,
            "status": "good" if response_time < 0.5 else "slow",
        }

    async def _test_concurrency(self) -> Dict[str, Any]:
        """Run 5 simulated tasks concurrently and count the successes."""
        try:
            tasks = [asyncio.create_task(self._simulate_task(i)) for i in range(5)]
            results = await asyncio.gather(*tasks)

            return {
                "test_type": "concurrency",
                "concurrent_tasks": len(tasks),
                "successful_tasks": len([r for r in results if r.get("success")]),
                "status": "good",
            }

        except Exception as e:
            return {
                "test_type": "concurrency",
                "status": "error",
                "error": str(e),
            }

    async def _simulate_task(self, task_id: int) -> Dict[str, Any]:
        """Simulate one short (0.05s) asynchronous task."""
        try:
            await asyncio.sleep(0.05)  # simulated processing time
            return {
                "task_id": task_id,
                "success": True,
                "result": f"Task {task_id} completed",
            }
        except Exception as e:
            return {
                "task_id": task_id,
                "success": False,
                "error": str(e),
            }

    async def _test_memory_usage(self) -> Dict[str, Any]:
        """Report virtual-memory usage via psutil; >=80% counts as "high".

        psutil is imported lazily; if it is missing, the broad except
        converts the ImportError into an error payload (best-effort).
        """
        try:
            import psutil

            memory_info = psutil.virtual_memory()

            return {
                "test_type": "memory_usage",
                "total_memory": memory_info.total,
                "available_memory": memory_info.available,
                "used_memory": memory_info.used,
                "memory_percentage": memory_info.percent,
                "status": "good" if memory_info.percent < 80 else "high",
            }

        except Exception as e:
            return {
                "test_type": "memory_usage",
                "status": "error",
                "error": str(e),
            }
||||
@@ -2,72 +2,34 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
增强版TSP助手 - 集成Agent功能
|
||||
这是一个真正的智能Agent实现
|
||||
重构版本:模块化设计,降低代码复杂度
|
||||
"""
|
||||
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
from src.main import TSPAssistant
|
||||
from src.agent import AgentCore, AgentState
|
||||
from src.agent.auto_monitor import AutoMonitorService
|
||||
from src.agent.intelligent_agent import IntelligentAgent, AlertContext, KnowledgeContext
|
||||
from src.agent.llm_client import LLMManager, LLMConfig
|
||||
from src.agent.action_executor import ActionExecutor
|
||||
from src.agent.agent_assistant_core import TSPAgentAssistantCore
|
||||
from src.agent.agent_message_handler import AgentMessageHandler
|
||||
from src.agent.agent_sample_actions import AgentSampleActions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class TSPAgentAssistant(TSPAssistant):
|
||||
"""TSP Agent助手 - 增强版TSP助手,具备完整Agent功能"""
|
||||
class TSPAgentAssistant(TSPAgentAssistantCore):
|
||||
"""TSP Agent助手 - 重构版本"""
|
||||
|
||||
def __init__(self, llm_config: Optional[LLMConfig] = None):
|
||||
# 初始化基础TSP助手
|
||||
super().__init__()
|
||||
def __init__(self, llm_config=None):
|
||||
# 初始化核心功能
|
||||
super().__init__(llm_config)
|
||||
|
||||
# 初始化Agent核心
|
||||
self.agent_core = AgentCore()
|
||||
# 初始化消息处理器
|
||||
self.message_handler = AgentMessageHandler(self)
|
||||
|
||||
# 初始化自动监控服务
|
||||
self.auto_monitor = AutoMonitorService(self)
|
||||
# 初始化示例动作处理器
|
||||
self.sample_actions = AgentSampleActions(self)
|
||||
|
||||
# 初始化LLM客户端
|
||||
if llm_config:
|
||||
self.llm_manager = LLMManager(llm_config)
|
||||
else:
|
||||
# 使用默认配置 - 千问模型
|
||||
try:
|
||||
from config.llm_config import DEFAULT_CONFIG
|
||||
self.llm_manager = LLMManager(DEFAULT_CONFIG)
|
||||
except ImportError:
|
||||
# 如果配置文件不存在,使用内置配置
|
||||
default_config = LLMConfig(
|
||||
provider="openai",
|
||||
api_key="sk-your-qwen-api-key-here",
|
||||
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
|
||||
model="qwen-turbo",
|
||||
temperature=0.7,
|
||||
max_tokens=2000
|
||||
)
|
||||
self.llm_manager = LLMManager(default_config)
|
||||
|
||||
# 初始化智能Agent
|
||||
self.intelligent_agent = IntelligentAgent(self.llm_manager)
|
||||
|
||||
# 初始化动作执行器
|
||||
self.action_executor = ActionExecutor(self)
|
||||
|
||||
# Agent特有功能
|
||||
self.is_agent_mode = True
|
||||
self.proactive_tasks = []
|
||||
self.agent_memory = {}
|
||||
|
||||
# 添加一些示例执行历史(用于演示)
|
||||
self._add_sample_execution_history()
|
||||
|
||||
logger.info("TSP Agent助手初始化完成")
|
||||
logger.info("TSP Agent助手初始化完成(重构版本)")
|
||||
|
||||
async def process_message_agent(
|
||||
self,
|
||||
|
||||
322
src/agent_assistant_new.py
Normal file
322
src/agent_assistant_new.py
Normal file
@@ -0,0 +1,322 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
增强版TSP助手 - 集成Agent功能
|
||||
重构版本:模块化设计,降低代码复杂度
|
||||
"""
|
||||
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
|
||||
from src.agent.agent_assistant_core import TSPAgentAssistantCore
|
||||
from src.agent.agent_message_handler import AgentMessageHandler
|
||||
from src.agent.agent_sample_actions import AgentSampleActions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class TSPAgentAssistant(TSPAgentAssistantCore):
|
||||
"""TSP Agent助手 - 重构版本"""
|
||||
|
||||
def __init__(self, llm_config=None):
|
||||
# 初始化核心功能
|
||||
super().__init__(llm_config)
|
||||
|
||||
# 初始化消息处理器
|
||||
self.message_handler = AgentMessageHandler(self)
|
||||
|
||||
# 初始化示例动作处理器
|
||||
self.sample_actions = AgentSampleActions(self)
|
||||
|
||||
logger.info("TSP Agent助手初始化完成(重构版本)")
|
||||
|
||||
# ==================== 消息处理功能 ====================
|
||||
|
||||
async def process_message_agent(self, message: str, user_id: str = "admin",
|
||||
work_order_id: Optional[int] = None,
|
||||
enable_proactive: bool = True) -> Dict[str, Any]:
|
||||
"""使用Agent处理消息"""
|
||||
return await self.message_handler.process_message_agent(
|
||||
message, user_id, work_order_id, enable_proactive
|
||||
)
|
||||
|
||||
async def process_conversation_agent(self, conversation_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""使用Agent处理对话"""
|
||||
return await self.message_handler.process_conversation_agent(conversation_data)
|
||||
|
||||
async def process_workorder_agent(self, workorder_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""使用Agent处理工单"""
|
||||
return await self.message_handler.process_workorder_agent(workorder_data)
|
||||
|
||||
async def process_alert_agent(self, alert_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""使用Agent处理预警"""
|
||||
return await self.message_handler.process_alert_agent(alert_data)
|
||||
|
||||
# ==================== 建议功能 ====================
|
||||
|
||||
def get_conversation_suggestions(self, context: Dict[str, Any]) -> List[str]:
|
||||
"""获取对话建议"""
|
||||
return self.message_handler.get_conversation_suggestions(context)
|
||||
|
||||
def get_workorder_suggestions(self, workorder_data: Dict[str, Any]) -> List[str]:
|
||||
"""获取工单建议"""
|
||||
return self.message_handler.get_workorder_suggestions(workorder_data)
|
||||
|
||||
def get_alert_suggestions(self, alert_data: Dict[str, Any]) -> List[str]:
|
||||
"""获取预警建议"""
|
||||
return self.message_handler.get_alert_suggestions(alert_data)
|
||||
|
||||
# ==================== 示例动作功能 ====================
|
||||
|
||||
async def trigger_sample_actions(self) -> Dict[str, Any]:
|
||||
"""触发示例动作"""
|
||||
return await self.sample_actions.trigger_sample_actions()
|
||||
|
||||
async def run_performance_test(self) -> Dict[str, Any]:
|
||||
"""运行性能测试"""
|
||||
return await self.sample_actions.run_performance_test()
|
||||
|
||||
# ==================== 兼容性方法 ====================
|
||||
|
||||
def get_agent_status(self) -> Dict[str, Any]:
|
||||
"""获取Agent状态(兼容性方法)"""
|
||||
return super().get_agent_status()
|
||||
|
||||
def toggle_agent_mode(self, enabled: bool) -> bool:
|
||||
"""切换Agent模式(兼容性方法)"""
|
||||
return super().toggle_agent_mode(enabled)
|
||||
|
||||
def start_proactive_monitoring(self) -> bool:
|
||||
"""启动主动监控(兼容性方法)"""
|
||||
return super().start_proactive_monitoring()
|
||||
|
||||
def stop_proactive_monitoring(self) -> bool:
|
||||
"""停止主动监控(兼容性方法)"""
|
||||
return super().stop_proactive_monitoring()
|
||||
|
||||
def run_proactive_monitoring(self) -> Dict[str, Any]:
|
||||
"""运行主动监控检查(兼容性方法)"""
|
||||
return super().run_proactive_monitoring()
|
||||
|
||||
def run_intelligent_analysis(self) -> Dict[str, Any]:
|
||||
"""运行智能分析(兼容性方法)"""
|
||||
return super().run_intelligent_analysis()
|
||||
|
||||
def get_action_history(self, limit: int = 50) -> List[Dict[str, Any]]:
|
||||
"""获取动作执行历史(兼容性方法)"""
|
||||
return super().get_action_history(limit)
|
||||
|
||||
def clear_execution_history(self) -> Dict[str, Any]:
|
||||
"""清空执行历史(兼容性方法)"""
|
||||
return super().clear_execution_history()
|
||||
|
||||
def get_llm_usage_stats(self) -> Dict[str, Any]:
|
||||
"""获取LLM使用统计(兼容性方法)"""
|
||||
return super().get_llm_usage_stats()
|
||||
|
||||
# ==================== 高级功能 ====================
|
||||
|
||||
async def comprehensive_analysis(self) -> Dict[str, Any]:
|
||||
"""综合分析 - 结合多个模块的分析结果"""
|
||||
try:
|
||||
# 运行智能分析
|
||||
intelligent_analysis = self.run_intelligent_analysis()
|
||||
|
||||
# 运行主动监控
|
||||
proactive_monitoring = self.run_proactive_monitoring()
|
||||
|
||||
# 运行性能测试
|
||||
performance_test = await self.run_performance_test()
|
||||
|
||||
# 综合结果
|
||||
comprehensive_result = {
|
||||
"timestamp": self.execution_history[-1]["timestamp"] if self.execution_history else None,
|
||||
"intelligent_analysis": intelligent_analysis,
|
||||
"proactive_monitoring": proactive_monitoring,
|
||||
"performance_test": performance_test,
|
||||
"overall_status": self._determine_overall_status(
|
||||
intelligent_analysis, proactive_monitoring, performance_test
|
||||
)
|
||||
}
|
||||
|
||||
# 记录综合分析
|
||||
self._record_execution("comprehensive_analysis", comprehensive_result)
|
||||
|
||||
return comprehensive_result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"综合分析失败: {e}")
|
||||
return {"error": str(e)}
|
||||
|
||||
def _determine_overall_status(self, intelligent_analysis: Dict,
|
||||
proactive_monitoring: Dict,
|
||||
performance_test: Dict) -> str:
|
||||
"""确定整体状态"""
|
||||
try:
|
||||
# 检查各个模块的状态
|
||||
statuses = []
|
||||
|
||||
if intelligent_analysis.get("success"):
|
||||
statuses.append("intelligent_analysis_ok")
|
||||
else:
|
||||
statuses.append("intelligent_analysis_error")
|
||||
|
||||
if proactive_monitoring.get("success"):
|
||||
statuses.append("proactive_monitoring_ok")
|
||||
else:
|
||||
statuses.append("proactive_monitoring_error")
|
||||
|
||||
if performance_test.get("success"):
|
||||
statuses.append("performance_test_ok")
|
||||
else:
|
||||
statuses.append("performance_test_error")
|
||||
|
||||
# 根据状态确定整体状态
|
||||
if all("ok" in status for status in statuses):
|
||||
return "excellent"
|
||||
elif any("error" in status for status in statuses):
|
||||
return "needs_attention"
|
||||
else:
|
||||
return "good"
|
||||
|
||||
except Exception:
|
||||
return "unknown"
|
||||
|
||||
async def batch_process_requests(self, requests: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""批量处理请求"""
|
||||
try:
|
||||
results = []
|
||||
|
||||
for request in requests:
|
||||
request_type = request.get("type", "message")
|
||||
|
||||
if request_type == "message":
|
||||
result = await self.process_message_agent(
|
||||
request.get("message", ""),
|
||||
request.get("user_id", "admin"),
|
||||
request.get("work_order_id"),
|
||||
request.get("enable_proactive", True)
|
||||
)
|
||||
elif request_type == "conversation":
|
||||
result = await self.process_conversation_agent(request)
|
||||
elif request_type == "workorder":
|
||||
result = await self.process_workorder_agent(request)
|
||||
elif request_type == "alert":
|
||||
result = await self.process_alert_agent(request)
|
||||
else:
|
||||
result = {"error": f"未知请求类型: {request_type}"}
|
||||
|
||||
results.append(result)
|
||||
|
||||
# 记录批量处理
|
||||
self._record_execution("batch_process", {
|
||||
"request_count": len(requests),
|
||||
"results": results
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"批量处理请求失败: {e}")
|
||||
return [{"error": str(e)} for _ in requests]
|
||||
|
||||
def get_system_summary(self) -> Dict[str, Any]:
|
||||
"""获取系统摘要"""
|
||||
try:
|
||||
# 获取各种状态信息
|
||||
agent_status = self.get_agent_status()
|
||||
system_health = self.get_system_health()
|
||||
workorders_status = self._check_workorders_status()
|
||||
|
||||
# 计算摘要指标
|
||||
summary = {
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"agent_status": agent_status,
|
||||
"system_health": system_health,
|
||||
"workorders_status": workorders_status,
|
||||
"execution_history_count": len(self.execution_history),
|
||||
"llm_usage_stats": self.get_llm_usage_stats(),
|
||||
"overall_health_score": system_health.get("health_score", 0)
|
||||
}
|
||||
|
||||
return summary
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"获取系统摘要失败: {e}")
|
||||
return {"error": str(e)}
|
||||
|
||||
def export_agent_data(self) -> Dict[str, Any]:
|
||||
"""导出Agent数据"""
|
||||
try:
|
||||
export_data = {
|
||||
"export_timestamp": datetime.now().isoformat(),
|
||||
"agent_status": self.get_agent_status(),
|
||||
"execution_history": self.execution_history,
|
||||
"llm_usage_stats": self.get_llm_usage_stats(),
|
||||
"system_summary": self.get_system_summary()
|
||||
}
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"data": export_data,
|
||||
"message": "Agent数据导出成功"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"导出Agent数据失败: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
def import_agent_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""导入Agent数据"""
|
||||
try:
|
||||
# 验证数据格式
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError("数据格式不正确")
|
||||
|
||||
# 导入执行历史
|
||||
if "execution_history" in data:
|
||||
self.execution_history = data["execution_history"]
|
||||
|
||||
# 其他数据的导入逻辑...
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Agent数据导入成功"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"导入Agent数据失败: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
# 测试函数
|
||||
async def main():
|
||||
"""测试函数"""
|
||||
print("🚀 TSP Agent助手测试")
|
||||
|
||||
# 创建Agent助手实例
|
||||
agent_assistant = TSPAgentAssistant()
|
||||
|
||||
# 测试基本功能
|
||||
status = agent_assistant.get_agent_status()
|
||||
print("Agent状态:", status)
|
||||
|
||||
# 测试消息处理
|
||||
result = await agent_assistant.process_message_agent("你好,请帮我分析系统状态")
|
||||
print("消息处理结果:", result)
|
||||
|
||||
# 测试示例动作
|
||||
sample_result = await agent_assistant.trigger_sample_actions()
|
||||
print("示例动作结果:", sample_result)
|
||||
|
||||
# 测试综合分析
|
||||
analysis_result = await agent_assistant.comprehensive_analysis()
|
||||
print("综合分析结果:", analysis_result)
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
@@ -10,7 +10,7 @@ class Config:
|
||||
ALIBABA_MODEL_NAME = "qwen-plus-latest"
|
||||
|
||||
# 数据库配置
|
||||
DATABASE_URL = "sqlite:///tsp_assistant.db"
|
||||
DATABASE_URL = "mysql+pymysql://tsp_assistant:123456@43.134.68.207/tsp_assistant?charset=utf8mb4"
|
||||
|
||||
# 知识库配置
|
||||
KNOWLEDGE_BASE_PATH = "data/knowledge_base"
|
||||
|
||||
279
src/config/unified_config.py
Normal file
279
src/config/unified_config.py
Normal file
@@ -0,0 +1,279 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
统一配置管理模块
|
||||
整合所有配置,提供统一的配置接口
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from typing import Dict, Any, Optional
|
||||
from dataclasses import dataclass, asdict
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
|
||||
class DatabaseConfig:
|
||||
"""数据库配置"""
|
||||
url: str = "mysql+pymysql://tsp_assistant:password@43.134.68.207/tsp_assistant?charset=utf8mb4"
|
||||
pool_size: int = 10
|
||||
max_overflow: int = 20
|
||||
pool_timeout: int = 30
|
||||
pool_recycle: int = 3600
|
||||
|
||||
@dataclass
|
||||
class LLMConfig:
|
||||
"""LLM配置"""
|
||||
provider: str = "openai"
|
||||
api_key: str = ""
|
||||
base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
model: str = "qwen-turbo"
|
||||
temperature: float = 0.7
|
||||
max_tokens: int = 2000
|
||||
timeout: int = 30
|
||||
|
||||
@dataclass
|
||||
class ServerConfig:
|
||||
"""服务器配置"""
|
||||
host: str = "0.0.0.0"
|
||||
port: int = 5000
|
||||
websocket_port: int = 8765
|
||||
debug: bool = False
|
||||
log_level: str = "INFO"
|
||||
|
||||
@dataclass
|
||||
class FeishuConfig:
|
||||
"""飞书配置"""
|
||||
app_id: str = ""
|
||||
app_secret: str = ""
|
||||
app_token: str = ""
|
||||
table_id: str = ""
|
||||
status: str = "active"
|
||||
sync_limit: int = 10
|
||||
auto_sync_interval: int = 0
|
||||
|
||||
@dataclass
|
||||
class AIAccuracyConfig:
|
||||
"""AI准确率配置"""
|
||||
auto_approve_threshold: float = 0.95
|
||||
use_human_resolution_threshold: float = 0.90
|
||||
manual_review_threshold: float = 0.80
|
||||
ai_suggestion_confidence: float = 0.95
|
||||
human_resolution_confidence: float = 0.90
|
||||
prefer_human_when_low_accuracy: bool = True
|
||||
enable_auto_approval: bool = True
|
||||
enable_human_fallback: bool = True
|
||||
|
||||
@dataclass
|
||||
class SystemConfig:
|
||||
"""系统配置"""
|
||||
backup_enabled: bool = True
|
||||
backup_interval: int = 24 # 小时
|
||||
max_backup_files: int = 7
|
||||
cache_enabled: bool = True
|
||||
cache_ttl: int = 3600 # 秒
|
||||
monitoring_enabled: bool = True
|
||||
|
||||
class UnifiedConfig:
|
||||
"""统一配置管理器"""
|
||||
|
||||
def __init__(self, config_dir: str = "config"):
|
||||
self.config_dir = Path(config_dir)
|
||||
self.config_file = self.config_dir / "unified_config.json"
|
||||
|
||||
# 默认配置
|
||||
self.database = DatabaseConfig()
|
||||
self.llm = LLMConfig()
|
||||
self.server = ServerConfig()
|
||||
self.feishu = FeishuConfig()
|
||||
self.ai_accuracy = AIAccuracyConfig()
|
||||
self.system = SystemConfig()
|
||||
|
||||
# 加载配置
|
||||
self.load_config()
|
||||
|
||||
def load_config(self):
|
||||
"""加载配置文件"""
|
||||
try:
|
||||
if self.config_file.exists():
|
||||
with open(self.config_file, 'r', encoding='utf-8') as f:
|
||||
config_data = json.load(f)
|
||||
|
||||
# 更新配置
|
||||
if 'database' in config_data:
|
||||
self.database = DatabaseConfig(**config_data['database'])
|
||||
if 'llm' in config_data:
|
||||
self.llm = LLMConfig(**config_data['llm'])
|
||||
if 'server' in config_data:
|
||||
self.server = ServerConfig(**config_data['server'])
|
||||
if 'feishu' in config_data:
|
||||
self.feishu = FeishuConfig(**config_data['feishu'])
|
||||
if 'ai_accuracy' in config_data:
|
||||
self.ai_accuracy = AIAccuracyConfig(**config_data['ai_accuracy'])
|
||||
if 'system' in config_data:
|
||||
self.system = SystemConfig(**config_data['system'])
|
||||
|
||||
logger.info("配置文件加载成功")
|
||||
else:
|
||||
logger.info("配置文件不存在,使用默认配置")
|
||||
self.save_config()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"加载配置文件失败: {e}")
|
||||
|
||||
def save_config(self):
|
||||
"""保存配置文件"""
|
||||
try:
|
||||
self.config_dir.mkdir(exist_ok=True)
|
||||
|
||||
config_data = {
|
||||
'database': asdict(self.database),
|
||||
'llm': asdict(self.llm),
|
||||
'server': asdict(self.server),
|
||||
'feishu': asdict(self.feishu),
|
||||
'ai_accuracy': asdict(self.ai_accuracy),
|
||||
'system': asdict(self.system)
|
||||
}
|
||||
|
||||
with open(self.config_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(config_data, f, indent=2, ensure_ascii=False)
|
||||
|
||||
logger.info("配置文件保存成功")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"保存配置文件失败: {e}")
|
||||
|
||||
def load_from_env(self):
|
||||
"""从环境变量加载配置"""
|
||||
# 数据库配置
|
||||
if os.getenv('DATABASE_URL'):
|
||||
self.database.url = os.getenv('DATABASE_URL')
|
||||
|
||||
# LLM配置
|
||||
if os.getenv('LLM_PROVIDER'):
|
||||
self.llm.provider = os.getenv('LLM_PROVIDER')
|
||||
if os.getenv('LLM_API_KEY'):
|
||||
self.llm.api_key = os.getenv('LLM_API_KEY')
|
||||
if os.getenv('LLM_MODEL'):
|
||||
self.llm.model = os.getenv('LLM_MODEL')
|
||||
|
||||
# 服务器配置
|
||||
if os.getenv('SERVER_PORT'):
|
||||
self.server.port = int(os.getenv('SERVER_PORT'))
|
||||
if os.getenv('LOG_LEVEL'):
|
||||
self.server.log_level = os.getenv('LOG_LEVEL')
|
||||
|
||||
# 飞书配置
|
||||
if os.getenv('FEISHU_APP_ID'):
|
||||
self.feishu.app_id = os.getenv('FEISHU_APP_ID')
|
||||
if os.getenv('FEISHU_APP_SECRET'):
|
||||
self.feishu.app_secret = os.getenv('FEISHU_APP_SECRET')
|
||||
if os.getenv('FEISHU_APP_TOKEN'):
|
||||
self.feishu.app_token = os.getenv('FEISHU_APP_TOKEN')
|
||||
if os.getenv('FEISHU_TABLE_ID'):
|
||||
self.feishu.table_id = os.getenv('FEISHU_TABLE_ID')
|
||||
|
||||
def get_database_url(self) -> str:
|
||||
"""获取数据库连接URL"""
|
||||
return self.database.url
|
||||
|
||||
def get_llm_config(self) -> Dict[str, Any]:
|
||||
"""获取LLM配置"""
|
||||
return asdict(self.llm)
|
||||
|
||||
def get_server_config(self) -> Dict[str, Any]:
|
||||
"""获取服务器配置"""
|
||||
return asdict(self.server)
|
||||
|
||||
def get_feishu_config(self) -> Dict[str, Any]:
|
||||
"""获取飞书配置"""
|
||||
return asdict(self.feishu)
|
||||
|
||||
def get_ai_accuracy_config(self) -> Dict[str, Any]:
|
||||
"""获取AI准确率配置"""
|
||||
return asdict(self.ai_accuracy)
|
||||
|
||||
def get_system_config(self) -> Dict[str, Any]:
|
||||
"""获取系统配置"""
|
||||
return asdict(self.system)
|
||||
|
||||
def update_config(self, section: str, config_data: Dict[str, Any]):
|
||||
"""更新配置"""
|
||||
try:
|
||||
if section == 'database':
|
||||
self.database = DatabaseConfig(**config_data)
|
||||
elif section == 'llm':
|
||||
self.llm = LLMConfig(**config_data)
|
||||
elif section == 'server':
|
||||
self.server = ServerConfig(**config_data)
|
||||
elif section == 'feishu':
|
||||
self.feishu = FeishuConfig(**config_data)
|
||||
elif section == 'ai_accuracy':
|
||||
self.ai_accuracy = AIAccuracyConfig(**config_data)
|
||||
elif section == 'system':
|
||||
self.system = SystemConfig(**config_data)
|
||||
else:
|
||||
raise ValueError(f"未知的配置节: {section}")
|
||||
|
||||
self.save_config()
|
||||
logger.info(f"配置节 {section} 更新成功")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"更新配置失败: {e}")
|
||||
raise
|
||||
|
||||
def validate_config(self) -> bool:
|
||||
"""验证配置有效性"""
|
||||
try:
|
||||
# 验证数据库配置
|
||||
if not self.database.url:
|
||||
logger.error("数据库URL未配置")
|
||||
return False
|
||||
|
||||
# 验证LLM配置
|
||||
if not self.llm.api_key:
|
||||
logger.warning("LLM API密钥未配置")
|
||||
|
||||
# 验证飞书配置
|
||||
if self.feishu.status == "active":
|
||||
if not all([self.feishu.app_id, self.feishu.app_secret,
|
||||
self.feishu.app_token, self.feishu.table_id]):
|
||||
logger.warning("飞书配置不完整")
|
||||
|
||||
logger.info("配置验证通过")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"配置验证失败: {e}")
|
||||
return False
|
||||
|
||||
def get_all_config(self) -> Dict[str, Any]:
|
||||
"""获取所有配置"""
|
||||
return {
|
||||
'database': asdict(self.database),
|
||||
'llm': asdict(self.llm),
|
||||
'server': asdict(self.server),
|
||||
'feishu': asdict(self.feishu),
|
||||
'ai_accuracy': asdict(self.ai_accuracy),
|
||||
'system': asdict(self.system)
|
||||
}
|
||||
|
||||
# 全局配置实例
|
||||
_config_instance = None
|
||||
|
||||
def get_config() -> UnifiedConfig:
|
||||
"""获取全局配置实例"""
|
||||
global _config_instance
|
||||
if _config_instance is None:
|
||||
_config_instance = UnifiedConfig()
|
||||
_config_instance.load_from_env()
|
||||
return _config_instance
|
||||
|
||||
def reload_config():
|
||||
"""重新加载配置"""
|
||||
global _config_instance
|
||||
_config_instance = None
|
||||
return get_config()
|
||||
@@ -27,6 +27,20 @@ class WorkOrder(Base):
|
||||
solution = Column(Text, nullable=True) # 解决方案
|
||||
ai_suggestion = Column(Text, nullable=True) # AI建议
|
||||
|
||||
# 扩展飞书字段
|
||||
source = Column(String(50), nullable=True) # 来源(Mail, Telegram bot等)
|
||||
module = Column(String(100), nullable=True) # 模块(local O&M, OTA等)
|
||||
created_by = Column(String(100), nullable=True) # 创建人
|
||||
wilfulness = Column(String(100), nullable=True) # 责任人
|
||||
date_of_close = Column(DateTime, nullable=True) # 关闭日期
|
||||
vehicle_type = Column(String(100), nullable=True) # 车型
|
||||
vin_sim = Column(String(50), nullable=True) # 车架号/SIM
|
||||
app_remote_control_version = Column(String(100), nullable=True) # 应用远程控制版本
|
||||
hmi_sw = Column(String(100), nullable=True) # HMI软件版本
|
||||
parent_record = Column(String(100), nullable=True) # 父记录
|
||||
has_updated_same_day = Column(String(50), nullable=True) # 是否同日更新
|
||||
operating_time = Column(String(100), nullable=True) # 操作时间
|
||||
|
||||
# 关联对话记录
|
||||
conversations = relationship("Conversation", back_populates="work_order")
|
||||
|
||||
@@ -119,5 +133,6 @@ class WorkOrderSuggestion(Base):
|
||||
human_resolution = Column(Text)
|
||||
ai_similarity = Column(Float)
|
||||
approved = Column(Boolean, default=False)
|
||||
use_human_resolution = Column(Boolean, default=False) # 是否使用人工描述入库
|
||||
created_at = Column(DateTime, default=datetime.now)
|
||||
updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
|
||||
|
||||
@@ -46,16 +46,31 @@ class WorkOrderSyncService:
|
||||
|
||||
# 字段映射配置 - 根据实际飞书表格结构
|
||||
self.field_mapping = {
|
||||
# 飞书字段名 -> 本地字段名
|
||||
# 核心字段
|
||||
"TR Number": "order_id", # TR编号映射到工单号
|
||||
"TR Description": "title", # TR描述作为标题(问题描述)
|
||||
"TR Description": "description", # TR描述作为详细描述
|
||||
"Type of problem": "category", # 问题类型作为分类
|
||||
"TR Level": "priority", # TR Level作为优先级
|
||||
"TR Status": "status", # TR Status作为状态(修正字段名)
|
||||
"Source": "assignee", # 来源信息
|
||||
"TR Status": "status", # TR Status作为状态
|
||||
"Source": "source", # 来源信息(Mail, Telegram bot等)
|
||||
"Date creation": "created_at", # 创建日期
|
||||
"处理过程": "description", # 处理过程作为描述
|
||||
"TR tracking": "solution", # TR跟踪作为解决方案
|
||||
"处理过程": "solution", # 处理过程作为解决方案
|
||||
"TR tracking": "resolution", # TR跟踪作为解决方案详情
|
||||
|
||||
# 扩展字段
|
||||
"Created by": "created_by", # 创建人
|
||||
"Module(模块)": "module", # 模块
|
||||
"Wilfulness(责任人)": "wilfulness", # 责任人
|
||||
"Date of close TR": "date_of_close", # 关闭日期
|
||||
"Vehicle Type01": "vehicle_type", # 车型
|
||||
"VIN|sim": "vin_sim", # 车架号/SIM
|
||||
"App remote control version": "app_remote_control_version", # 应用远程控制版本
|
||||
"HMI SW": "hmi_sw", # HMI软件版本
|
||||
"父记录": "parent_record", # 父记录
|
||||
"Has it been updated on the same day": "has_updated_same_day", # 是否同日更新
|
||||
"Operating time": "operating_time", # 操作时间
|
||||
|
||||
# AI建议字段
|
||||
"AI建议": "ai_suggestion", # AI建议字段
|
||||
"Issue Start Time": "updated_at" # 问题开始时间作为更新时间
|
||||
}
|
||||
@@ -387,7 +402,7 @@ class WorkOrderSyncService:
|
||||
value = self.status_mapping[value]
|
||||
elif local_field == "priority" and value in self.priority_mapping:
|
||||
value = self.priority_mapping[value]
|
||||
elif local_field in ["created_at", "updated_at"] and value:
|
||||
elif local_field in ["created_at", "updated_at", "date_of_close"] and value:
|
||||
try:
|
||||
# 处理飞书时间戳(毫秒)
|
||||
if isinstance(value, (int, float)):
|
||||
@@ -404,6 +419,16 @@ class WorkOrderSyncService:
|
||||
else:
|
||||
logger.info(f"飞书字段 {feishu_field} 不存在于数据中")
|
||||
|
||||
# 生成标题 - 使用TR Number和问题类型
|
||||
tr_number = feishu_fields.get("TR Number", "")
|
||||
problem_type = feishu_fields.get("Type of problem", "")
|
||||
if tr_number and problem_type:
|
||||
local_data["title"] = f"{tr_number} - {problem_type}"
|
||||
elif tr_number:
|
||||
local_data["title"] = f"{tr_number} - TR工单"
|
||||
else:
|
||||
local_data["title"] = "TR工单"
|
||||
|
||||
# 设置默认值
|
||||
if "status" not in local_data:
|
||||
local_data["status"] = WorkOrderStatus.PENDING
|
||||
@@ -411,8 +436,6 @@ class WorkOrderSyncService:
|
||||
local_data["priority"] = WorkOrderPriority.MEDIUM
|
||||
if "category" not in local_data:
|
||||
local_data["category"] = "Remote control" # 根据表格中最常见的问题类型
|
||||
if "title" not in local_data or not local_data["title"]:
|
||||
local_data["title"] = "TR工单" # 默认标题
|
||||
|
||||
return local_data
|
||||
|
||||
|
||||
@@ -76,18 +76,24 @@ def extract_keywords(text: str, max_keywords: int = 10) -> List[str]:
|
||||
return [word for word, count in sorted_words[:max_keywords]]
|
||||
|
||||
def calculate_similarity(text1: str, text2: str) -> float:
|
||||
"""计算文本相似度"""
|
||||
from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
|
||||
"""计算文本相似度(使用语义相似度)"""
|
||||
try:
|
||||
vectorizer = TfidfVectorizer()
|
||||
vectors = vectorizer.fit_transform([text1, text2])
|
||||
similarity = cosine_similarity(vectors[0:1], vectors[1:2])[0][0]
|
||||
return float(similarity)
|
||||
from src.utils.semantic_similarity import calculate_semantic_similarity
|
||||
return calculate_semantic_similarity(text1, text2)
|
||||
except Exception as e:
|
||||
logging.error(f"计算相似度失败: {e}")
|
||||
return 0.0
|
||||
logging.error(f"计算语义相似度失败: {e}")
|
||||
# 回退到传统方法
|
||||
try:
|
||||
from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
|
||||
vectorizer = TfidfVectorizer()
|
||||
vectors = vectorizer.fit_transform([text1, text2])
|
||||
similarity = cosine_similarity(vectors[0:1], vectors[1:2])[0][0]
|
||||
return float(similarity)
|
||||
except Exception as e2:
|
||||
logging.error(f"计算TF-IDF相似度失败: {e2}")
|
||||
return 0.0
|
||||
|
||||
def format_time_duration(seconds: float) -> str:
|
||||
"""格式化时间持续时间"""
|
||||
|
||||
256
src/utils/semantic_similarity.py
Normal file
256
src/utils/semantic_similarity.py
Normal file
@@ -0,0 +1,256 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
语义相似度计算服务
|
||||
使用sentence-transformers进行更准确的语义相似度计算
|
||||
"""
|
||||
|
||||
import logging
|
||||
import numpy as np
|
||||
from typing import List, Tuple, Optional
|
||||
from sentence_transformers import SentenceTransformer
|
||||
import torch
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class SemanticSimilarityCalculator:
|
||||
"""语义相似度计算器"""
|
||||
|
||||
def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
|
||||
"""
|
||||
初始化语义相似度计算器
|
||||
|
||||
Args:
|
||||
model_name: 使用的预训练模型名称
|
||||
- all-MiniLM-L6-v2: 英文模型,速度快,推荐用于生产环境
|
||||
- paraphrase-multilingual-MiniLM-L12-v2: 多语言模型,支持中文
|
||||
- paraphrase-multilingual-mpnet-base-v2: 多语言模型,精度高
|
||||
"""
|
||||
self.model_name = model_name
|
||||
self.model = None
|
||||
self._load_model()
|
||||
|
||||
def _load_model(self):
|
||||
"""加载预训练模型"""
|
||||
try:
|
||||
logger.info(f"正在加载语义相似度模型: {self.model_name}")
|
||||
self.model = SentenceTransformer(self.model_name)
|
||||
logger.info("语义相似度模型加载成功")
|
||||
except Exception as e:
|
||||
logger.error(f"加载语义相似度模型失败: {e}")
|
||||
# 回退到简单模型
|
||||
self.model = None
|
||||
|
||||
def calculate_similarity(self, text1: str, text2: str, fast_mode: bool = True) -> float:
|
||||
"""
|
||||
计算两个文本的语义相似度
|
||||
|
||||
Args:
|
||||
text1: 第一个文本
|
||||
text2: 第二个文本
|
||||
fast_mode: 是否使用快速模式(结合传统方法)
|
||||
|
||||
Returns:
|
||||
相似度分数 (0-1之间)
|
||||
"""
|
||||
if not text1 or not text2:
|
||||
return 0.0
|
||||
|
||||
try:
|
||||
# 快速模式:先使用传统方法快速筛选
|
||||
if fast_mode:
|
||||
tfidf_sim = self._calculate_tfidf_similarity(text1, text2)
|
||||
|
||||
# 如果传统方法相似度很高或很低,直接返回
|
||||
if tfidf_sim >= 0.9:
|
||||
return tfidf_sim
|
||||
elif tfidf_sim <= 0.3:
|
||||
return tfidf_sim
|
||||
|
||||
# 中等相似度时,使用语义方法进行精确计算
|
||||
if self.model is not None:
|
||||
semantic_sim = self._calculate_semantic_similarity(text1, text2)
|
||||
# 结合两种方法的结果
|
||||
return (tfidf_sim * 0.3 + semantic_sim * 0.7)
|
||||
else:
|
||||
return tfidf_sim
|
||||
|
||||
# 完整模式:直接使用语义相似度
|
||||
if self.model is not None:
|
||||
return self._calculate_semantic_similarity(text1, text2)
|
||||
else:
|
||||
return self._calculate_tfidf_similarity(text1, text2)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"计算语义相似度失败: {e}")
|
||||
return self._calculate_tfidf_similarity(text1, text2)
|
||||
|
||||
def _calculate_semantic_similarity(self, text1: str, text2: str) -> float:
|
||||
"""使用sentence-transformers计算语义相似度"""
|
||||
try:
|
||||
# 获取文本嵌入向量
|
||||
embeddings = self.model.encode([text1, text2])
|
||||
|
||||
# 计算余弦相似度
|
||||
similarity = self._cosine_similarity(embeddings[0], embeddings[1])
|
||||
|
||||
# 确保结果在0-1范围内
|
||||
similarity = max(0.0, min(1.0, similarity))
|
||||
|
||||
logger.debug(f"语义相似度计算: {similarity:.4f}")
|
||||
return float(similarity)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"语义相似度计算失败: {e}")
|
||||
return self._calculate_tfidf_similarity(text1, text2)
|
||||
|
||||
def _calculate_tfidf_similarity(self, text1: str, text2: str) -> float:
|
||||
"""使用TF-IDF计算相似度(回退方法)"""
|
||||
try:
|
||||
from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
|
||||
vectorizer = TfidfVectorizer(max_features=1000, stop_words=None)
|
||||
vectors = vectorizer.fit_transform([text1, text2])
|
||||
similarity = cosine_similarity(vectors[0:1], vectors[1:2])[0][0]
|
||||
|
||||
logger.debug(f"TF-IDF相似度计算: {similarity:.4f}")
|
||||
return float(similarity)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"TF-IDF相似度计算失败: {e}")
|
||||
return 0.0
|
||||
|
||||
def _cosine_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
|
||||
"""计算余弦相似度"""
|
||||
try:
|
||||
# 计算点积
|
||||
dot_product = np.dot(vec1, vec2)
|
||||
|
||||
# 计算向量的模长
|
||||
norm1 = np.linalg.norm(vec1)
|
||||
norm2 = np.linalg.norm(vec2)
|
||||
|
||||
# 避免除零错误
|
||||
if norm1 == 0 or norm2 == 0:
|
||||
return 0.0
|
||||
|
||||
# 计算余弦相似度
|
||||
similarity = dot_product / (norm1 * norm2)
|
||||
|
||||
return float(similarity)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"余弦相似度计算失败: {e}")
|
||||
return 0.0
|
||||
|
||||
def batch_calculate_similarity(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
|
||||
"""
|
||||
批量计算相似度
|
||||
|
||||
Args:
|
||||
text_pairs: 文本对列表 [(text1, text2), ...]
|
||||
|
||||
Returns:
|
||||
相似度分数列表
|
||||
"""
|
||||
if not text_pairs:
|
||||
return []
|
||||
|
||||
try:
|
||||
if self.model is not None:
|
||||
return self._batch_semantic_similarity(text_pairs)
|
||||
else:
|
||||
return [self._calculate_tfidf_similarity(t1, t2) for t1, t2 in text_pairs]
|
||||
except Exception as e:
|
||||
logger.error(f"批量相似度计算失败: {e}")
|
||||
return [0.0] * len(text_pairs)
|
||||
|
||||
def _batch_semantic_similarity(self, text_pairs: List[Tuple[str, str]]) -> List[float]:
|
||||
"""批量计算语义相似度"""
|
||||
try:
|
||||
# 提取所有文本
|
||||
all_texts = []
|
||||
for text1, text2 in text_pairs:
|
||||
all_texts.extend([text1, text2])
|
||||
|
||||
# 批量获取嵌入向量
|
||||
embeddings = self.model.encode(all_texts)
|
||||
|
||||
# 计算每对的相似度
|
||||
similarities = []
|
||||
for i in range(0, len(embeddings), 2):
|
||||
similarity = self._cosine_similarity(embeddings[i], embeddings[i+1])
|
||||
similarities.append(float(similarity))
|
||||
|
||||
return similarities
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"批量语义相似度计算失败: {e}")
|
||||
return [self._calculate_tfidf_similarity(t1, t2) for t1, t2 in text_pairs]
|
||||
|
||||
def get_similarity_explanation(self, text1: str, text2: str, similarity: float) -> str:
|
||||
"""
|
||||
获取相似度解释
|
||||
|
||||
Args:
|
||||
text1: 第一个文本
|
||||
text2: 第二个文本
|
||||
similarity: 相似度分数
|
||||
|
||||
Returns:
|
||||
相似度解释文本
|
||||
"""
|
||||
if similarity >= 0.95:
|
||||
return "语义高度相似,建议自动审批"
|
||||
elif similarity >= 0.8:
|
||||
return "语义较为相似,建议人工审核"
|
||||
elif similarity >= 0.6:
|
||||
return "语义部分相似,需要人工判断"
|
||||
elif similarity >= 0.4:
|
||||
return "语义相似度较低,建议重新生成"
|
||||
else:
|
||||
return "语义差异较大,建议重新生成"
|
||||
|
||||
def is_model_available(self) -> bool:
|
||||
"""检查模型是否可用"""
|
||||
return self.model is not None
|
||||
|
||||
# 全局实例
|
||||
_similarity_calculator = None
|
||||
|
||||
def get_similarity_calculator() -> SemanticSimilarityCalculator:
|
||||
"""获取全局相似度计算器实例"""
|
||||
global _similarity_calculator
|
||||
if _similarity_calculator is None:
|
||||
_similarity_calculator = SemanticSimilarityCalculator()
|
||||
return _similarity_calculator
|
||||
|
||||
def calculate_semantic_similarity(text1: str, text2: str, fast_mode: bool = True) -> float:
|
||||
"""
|
||||
计算语义相似度的便捷函数
|
||||
|
||||
Args:
|
||||
text1: 第一个文本
|
||||
text2: 第二个文本
|
||||
fast_mode: 是否使用快速模式
|
||||
|
||||
Returns:
|
||||
相似度分数 (0-1之间)
|
||||
"""
|
||||
calculator = get_similarity_calculator()
|
||||
return calculator.calculate_similarity(text1, text2, fast_mode)
|
||||
|
||||
def batch_calculate_semantic_similarity(text_pairs: List[Tuple[str, str]]) -> List[float]:
|
||||
"""
|
||||
批量计算语义相似度的便捷函数
|
||||
|
||||
Args:
|
||||
text_pairs: 文本对列表
|
||||
|
||||
Returns:
|
||||
相似度分数列表
|
||||
"""
|
||||
calculator = get_similarity_calculator()
|
||||
return calculator.batch_calculate_similarity(text_pairs)
|
||||
@@ -755,10 +755,7 @@ def test_model_response():
|
||||
except Exception as e:
|
||||
return jsonify({"success": False, "error": str(e)}), 500
|
||||
|
||||
@app.route('/feishu-sync')
|
||||
def feishu_sync():
|
||||
"""飞书同步管理页面"""
|
||||
return render_template('feishu_sync.html')
|
||||
# 飞书同步功能已合并到主页面,不再需要单独的路由
|
||||
|
||||
if __name__ == '__main__':
|
||||
import time
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
- `knowledge.py`: 知识库管理相关API
|
||||
- `monitoring.py`: 监控相关API
|
||||
- `system.py`: 系统管理相关API
|
||||
- ~~`feishu_sync.py`~~: 已合并到主仪表板(删除)
|
||||
|
||||
## 蓝图模块说明
|
||||
|
||||
@@ -59,6 +60,12 @@
|
||||
- `/api/backup/*` - 数据备份
|
||||
- `/api/database/status` - 数据库状态
|
||||
|
||||
### 7. 飞书集成功能(已合并)
|
||||
- **原独立页面**: `http://localhost:5000/feishu-sync`
|
||||
- **现集成位置**: 主仪表板的"飞书同步"标签页
|
||||
- **功能**: 飞书多维表格数据同步和管理
|
||||
- **API端点**: 通过主应用路由提供
|
||||
|
||||
## 优势
|
||||
|
||||
1. **模块化**: 每个功能模块独立,便于维护
|
||||
@@ -96,7 +103,16 @@ src/web/
|
||||
│ ├── system.py # 系统管理
|
||||
│ └── README.md # 架构说明
|
||||
├── static/ # 静态文件
|
||||
│ ├── css/
|
||||
│ │ └── style.css # 样式文件(包含飞书集成样式)
|
||||
│ └── js/
|
||||
│ ├── dashboard.js # 仪表板逻辑(包含飞书同步功能)
|
||||
│ ├── chat.js # 对话功能
|
||||
│ └── app.js # 应用主逻辑
|
||||
└── templates/ # 模板文件
|
||||
├── dashboard.html # 主仪表板(包含飞书同步标签页)
|
||||
├── chat.html # 对话页面
|
||||
└── index.html # 首页
|
||||
```
|
||||
|
||||
## 注意事项
|
||||
@@ -106,3 +122,17 @@ src/web/
|
||||
3. 懒加载模式避免启动时的重复初始化
|
||||
4. 错误处理统一在蓝图内部进行
|
||||
5. 保持与原有API接口的兼容性
|
||||
6. 飞书集成功能已从独立蓝图合并到主仪表板
|
||||
7. 前端JavaScript类管理不同功能模块(TSPDashboard、FeishuSyncManager等)
|
||||
|
||||
## 最新更新 (v1.4.0)
|
||||
|
||||
### 功能合并
|
||||
- **飞书同步页面合并**: 原独立的飞书同步页面已合并到主仪表板
|
||||
- **统一用户体验**: 所有功能现在都在一个统一的界面中
|
||||
- **代码优化**: 删除了冗余的蓝图和模板文件
|
||||
|
||||
### 架构改进
|
||||
- **前端模块化**: JavaScript代码按功能模块组织
|
||||
- **数据库扩展**: 工单表新增12个飞书相关字段
|
||||
- **字段映射**: 智能映射飞书字段到本地数据库结构
|
||||
|
||||
@@ -6,11 +6,36 @@
|
||||
|
||||
import os
|
||||
import pandas as pd
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from flask import Blueprint, request, jsonify, send_file
|
||||
from werkzeug.utils import secure_filename
|
||||
from sqlalchemy import text
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 简化的AI准确率配置类
|
||||
class SimpleAIAccuracyConfig:
|
||||
"""简化的AI准确率配置"""
|
||||
def __init__(self):
|
||||
self.auto_approve_threshold = 0.95
|
||||
self.use_human_resolution_threshold = 0.90
|
||||
self.manual_review_threshold = 0.80
|
||||
self.ai_suggestion_confidence = 0.95
|
||||
self.human_resolution_confidence = 0.90
|
||||
|
||||
def should_auto_approve(self, similarity: float) -> bool:
|
||||
return similarity >= self.auto_approve_threshold
|
||||
|
||||
def should_use_human_resolution(self, similarity: float) -> bool:
|
||||
return similarity < self.use_human_resolution_threshold
|
||||
|
||||
def get_confidence_score(self, similarity: float, use_human: bool = False) -> float:
|
||||
if use_human:
|
||||
return self.human_resolution_confidence
|
||||
else:
|
||||
return max(similarity, self.ai_suggestion_confidence)
|
||||
|
||||
from src.main import TSPAssistant
|
||||
from src.core.database import db_manager
|
||||
from src.core.models import WorkOrder, Conversation, WorkOrderSuggestion, KnowledgeEntry
|
||||
@@ -250,51 +275,101 @@ def save_workorder_human_resolution(workorder_id):
|
||||
rec = WorkOrderSuggestion(work_order_id=w.id)
|
||||
session.add(rec)
|
||||
rec.human_resolution = human_text
|
||||
# 计算相似度(使用简单cosine TF-IDF,避免外部服务依赖)
|
||||
# 计算语义相似度(使用sentence-transformers进行更准确的语义比较)
|
||||
try:
|
||||
from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
texts = [rec.ai_suggestion or "", human_text]
|
||||
vec = TfidfVectorizer(max_features=1000)
|
||||
mat = vec.fit_transform(texts)
|
||||
sim = float(cosine_similarity(mat[0:1], mat[1:2])[0][0])
|
||||
except Exception:
|
||||
sim = 0.0
|
||||
from src.utils.semantic_similarity import calculate_semantic_similarity
|
||||
ai_text = rec.ai_suggestion or ""
|
||||
sim = calculate_semantic_similarity(ai_text, human_text)
|
||||
logger.info(f"AI建议与人工描述语义相似度: {sim:.4f}")
|
||||
except Exception as e:
|
||||
logger.error(f"计算语义相似度失败: {e}")
|
||||
# 回退到传统方法
|
||||
try:
|
||||
from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
from sklearn.metrics.pairwise import cosine_similarity
|
||||
texts = [rec.ai_suggestion or "", human_text]
|
||||
vec = TfidfVectorizer(max_features=1000)
|
||||
mat = vec.fit_transform(texts)
|
||||
sim = float(cosine_similarity(mat[0:1], mat[1:2])[0][0])
|
||||
except Exception:
|
||||
sim = 0.0
|
||||
rec.ai_similarity = sim
|
||||
# 自动审批条件≥0.95
|
||||
approved = sim >= 0.95
|
||||
|
||||
# 使用简化的配置
|
||||
config = SimpleAIAccuracyConfig()
|
||||
|
||||
# 自动审批条件
|
||||
approved = config.should_auto_approve(sim)
|
||||
rec.approved = approved
|
||||
|
||||
# 记录使用人工描述入库的标记(当AI准确率低于阈值时)
|
||||
use_human_resolution = config.should_use_human_resolution(sim)
|
||||
rec.use_human_resolution = use_human_resolution
|
||||
|
||||
session.commit()
|
||||
return jsonify({"success": True, "similarity": sim, "approved": approved})
|
||||
return jsonify({
|
||||
"success": True,
|
||||
"similarity": sim,
|
||||
"approved": approved,
|
||||
"use_human_resolution": use_human_resolution
|
||||
})
|
||||
except Exception as e:
|
||||
return jsonify({"error": str(e)}), 500
|
||||
|
||||
@workorders_bp.route('/<int:workorder_id>/approve-to-knowledge', methods=['POST'])
|
||||
def approve_workorder_to_knowledge(workorder_id):
|
||||
"""将已审批的AI建议入库为知识条目"""
|
||||
"""将已审批的AI建议或人工描述入库为知识条目"""
|
||||
try:
|
||||
with db_manager.get_session() as session:
|
||||
w = session.query(WorkOrder).filter(WorkOrder.id == workorder_id).first()
|
||||
if not w:
|
||||
return jsonify({"error": "工单不存在"}), 404
|
||||
|
||||
rec = session.query(WorkOrderSuggestion).filter(WorkOrderSuggestion.work_order_id == w.id).first()
|
||||
if not rec or not rec.approved or not rec.ai_suggestion:
|
||||
return jsonify({"error": "未找到可入库的已审批AI建议"}), 400
|
||||
# 入库为知识条目(问=工单标题;答=AI建议;类目用工单分类)
|
||||
if not rec:
|
||||
return jsonify({"error": "未找到工单建议记录"}), 400
|
||||
|
||||
# 使用简化的配置
|
||||
config = SimpleAIAccuracyConfig()
|
||||
|
||||
# 确定使用哪个内容入库
|
||||
if rec.use_human_resolution and rec.human_resolution:
|
||||
# AI准确率低于阈值,使用人工描述入库
|
||||
answer_content = rec.human_resolution
|
||||
confidence_score = config.get_confidence_score(rec.ai_similarity or 0, use_human=True)
|
||||
verified_by = 'human_resolution'
|
||||
logger.info(f"工单 {workorder_id} 使用人工描述入库,AI相似度: {rec.ai_similarity:.4f}")
|
||||
elif rec.approved and rec.ai_suggestion:
|
||||
# AI准确率≥阈值,使用AI建议入库
|
||||
answer_content = rec.ai_suggestion
|
||||
confidence_score = config.get_confidence_score(rec.ai_similarity or 0, use_human=False)
|
||||
verified_by = 'auto_approve'
|
||||
logger.info(f"工单 {workorder_id} 使用AI建议入库,相似度: {rec.ai_similarity:.4f}")
|
||||
else:
|
||||
return jsonify({"error": "未找到可入库的内容"}), 400
|
||||
|
||||
# 入库为知识条目
|
||||
entry = KnowledgeEntry(
|
||||
question=w.title or (w.description[:20] if w.description else '工单问题'),
|
||||
answer=rec.ai_suggestion,
|
||||
answer=answer_content,
|
||||
category=w.category or '其他',
|
||||
confidence_score=0.95,
|
||||
confidence_score=confidence_score,
|
||||
is_active=True,
|
||||
is_verified=True,
|
||||
verified_by='auto_approve',
|
||||
verified_by=verified_by,
|
||||
verified_at=datetime.now()
|
||||
)
|
||||
session.add(entry)
|
||||
session.commit()
|
||||
return jsonify({"success": True, "knowledge_id": entry.id})
|
||||
|
||||
return jsonify({
|
||||
"success": True,
|
||||
"knowledge_id": entry.id,
|
||||
"used_content": "human_resolution" if rec.use_human_resolution else "ai_suggestion",
|
||||
"confidence_score": confidence_score
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"入库知识库失败: {e}")
|
||||
return jsonify({"error": str(e)}), 500
|
||||
|
||||
@workorders_bp.route('/import', methods=['POST'])
|
||||
|
||||
@@ -604,64 +604,406 @@ body {
|
||||
color: #6c757d;
|
||||
font-size: 0.8rem;
|
||||
}
|
||||
|
||||
| ||||