From ad396e429485621799d5da41de613be36b34343b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=B5=B5=E6=9D=B0=20Jie=20Zhao=20=EF=BC=88=E9=9B=84?= =?UTF-8?q?=E7=8B=AE=E6=B1=BD=E8=BD=A6=E7=A7=91=E6=8A=80=EF=BC=89?= <00061074@chery.local> Date: Thu, 18 Sep 2025 20:37:27 +0100 Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=87=8D=E5=A4=8D=E5=88=9D?= =?UTF-8?q?=E5=A7=8B=E5=8C=96=E9=97=AE=E9=A2=98=20-=20=E7=BB=9F=E4=B8=80Re?= =?UTF-8?q?dis=E8=BF=9E=E6=8E=A5=E7=AE=A1=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 主要修复: 1. 创建统一Redis连接管理器 (src/core/redis_manager.py) - 单例模式管理所有Redis连接 - 懒加载连接,避免重复初始化 - 线程安全的连接管理 2. 更新所有Redis使用模块 - TokenMonitor: 使用统一Redis管理器 - AISuccessMonitor: 移除重复Redis连接代码 - SystemOptimizer: 统一Redis连接管理 - ConversationHistoryManager: 使用统一Redis管理器 3. 修复DialogueManager重复初始化 - 使用懒加载属性(@property)避免重复创建监控器 - 只有在实际使用时才创建实例 4. 优化启动性能 - 避免重复的Redis连接创建 - 消除重复的TSP助手初始化 - 减少启动时的日志输出 技术改进: - 单例模式Redis管理器 - 懒加载组件初始化 - 统一连接管理 - 线程安全设计 解决启动卡顿问题,提升系统响应速度 --- src/agent/tool_manager.py | 4 +- src/analytics/ai_success_monitor.py | 25 ++- src/analytics/token_monitor.py | 76 ++++---- src/core/redis_manager.py | 81 -------- src/core/system_optimizer.py | 91 +++++---- src/dialogue/conversation_history.py | 42 +++-- src/dialogue/dialogue_manager.py | 61 +----- .../knowledge_manager_singleton.py | 41 ---- src/main.py | 4 +- src/web/blueprints/alerts.py | 5 +- src/web/blueprints/knowledge.py | 8 +- src/web/blueprints/monitoring.py | 5 +- src/web/blueprints/workorders.py | 5 +- 更新说明_重复初始化修复.md | 176 ------------------ 14 files changed, 155 insertions(+), 469 deletions(-) delete mode 100644 src/core/redis_manager.py delete mode 100644 src/knowledge_base/knowledge_manager_singleton.py delete mode 100644 更新说明_重复初始化修复.md diff --git a/src/agent/tool_manager.py b/src/agent/tool_manager.py index 9da298b..0453d5e 100644 --- a/src/agent/tool_manager.py +++ b/src/agent/tool_manager.py @@ -187,8 +187,8 @@ class ToolManager: async def _search_knowledge_tool(self, query: str, top_k: int = 3, **kwargs) -> Dict[str, Any]: """搜索知识库工具""" try: - from ..knowledge_base.knowledge_manager_singleton import knowledge_manager_singleton - knowledge_manager = knowledge_manager_singleton.get_knowledge_manager() + from ..knowledge_base.knowledge_manager import KnowledgeManager + knowledge_manager = KnowledgeManager() results = knowledge_manager.search_knowledge(query, top_k) diff --git a/src/analytics/ai_success_monitor.py b/src/analytics/ai_success_monitor.py index ca2cb48..f2fc9b0 100644 --- a/src/analytics/ai_success_monitor.py +++ b/src/analytics/ai_success_monitor.py @@ -10,11 +10,11 @@ from typing import Dict, List, Optional, Any, Tuple from datetime import datetime, timedelta from dataclasses import dataclass from collections import defaultdict +import redis import time from ..core.database import db_manager from ..core.models import Alert -from ..core.redis_manager import redis_manager from ..config.config import Config logger = logging.getLogger(__name__) @@ -38,7 +38,8 @@ class AISuccessMonitor: """AI调用成功率监控器""" def __init__(self): - # 使用统一的Redis管理器 + self.redis_client = None + self._init_redis() # 监控阈值 self.thresholds = { @@ -57,9 +58,23 @@ class AISuccessMonitor: "poor": {"success_rate": 0.85, "response_time": 12.0} } - def _get_redis_client(self): - """获取Redis客户端""" - return redis_manager.get_connection() + def _init_redis(self): + """初始化Redis连接""" + try: + self.redis_client = redis.Redis( + host='43.134.68.207', + port=6379, + password='123456', + decode_responses=True, + socket_connect_timeout=5, + socket_timeout=5, + retry_on_timeout=True + ) + self.redis_client.ping() + logger.info("AI成功率监控Redis连接成功") + except Exception as e: + logger.error(f"AI成功率监控Redis连接失败: {e}") + self.redis_client = None def record_api_call( self, diff --git a/src/analytics/token_monitor.py b/src/analytics/token_monitor.py index 616d42d..fd03d41 100644 --- a/src/analytics/token_monitor.py +++ b/src/analytics/token_monitor.py @@ -10,9 +10,10 @@ from typing import Dict, List, Optional, Any, Tuple from datetime import datetime, timedelta from dataclasses import dataclass from collections import defaultdict +import redis + from ..core.database import db_manager from ..core.models import Conversation -from ..core.redis_manager import redis_manager from ..config.config import Config logger = logging.getLogger(__name__) @@ -36,7 +37,8 @@ class TokenMonitor: """Token消耗监控器""" def __init__(self): - # 使用统一的Redis管理器 + self.redis_client = None + self._init_redis() # Token价格配置(每1000个token的价格,单位:元) self.token_prices = { @@ -62,9 +64,23 @@ class TokenMonitor: "error_rate_threshold": 0.1 # 错误率阈值 } - def _get_redis_client(self): - """获取Redis客户端""" - return redis_manager.get_connection() + def _init_redis(self): + """初始化Redis连接""" + try: + self.redis_client = redis.Redis( + host='43.134.68.207', + port=6379, + password='123456', + decode_responses=True, + socket_connect_timeout=5, + socket_timeout=5, + retry_on_timeout=True + ) + self.redis_client.ping() + logger.info("Token监控Redis连接成功") + except Exception as e: + logger.error(f"Token监控Redis连接失败: {e}") + self.redis_client = None def record_token_usage( self, @@ -125,9 +141,7 @@ class TokenMonitor: def _save_to_redis(self, usage: TokenUsage): """保存到Redis""" - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return try: @@ -147,24 +161,24 @@ class TokenMonitor: } # 保存到多个键 - redis_client.zadd( + self.redis_client.zadd( "token_usage:daily", {json.dumps(usage_data, ensure_ascii=False): timestamp} ) - redis_client.zadd( + self.redis_client.zadd( f"token_usage:user:{usage.user_id}", {json.dumps(usage_data, ensure_ascii=False): timestamp} ) if usage.work_order_id: - redis_client.zadd( + self.redis_client.zadd( f"token_usage:work_order:{usage.work_order_id}", {json.dumps(usage_data, ensure_ascii=False): timestamp} ) # 设置过期时间(保留30天) - redis_client.expire("token_usage:daily", 30 * 24 * 3600) + self.redis_client.expire("token_usage:daily", 30 * 24 * 3600) except Exception as e: logger.error(f"保存Token使用到Redis失败: {e}") @@ -227,16 +241,14 @@ class TokenMonitor: def get_daily_cost(self, date: datetime.date) -> float: """获取指定日期的成本""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return 0.0 start_time = datetime.combine(date, datetime.min.time()).timestamp() end_time = datetime.combine(date, datetime.max.time()).timestamp() # 从Redis获取当日数据 - usage_records = redis_client.zrangebyscore( + usage_records = self.redis_client.zrangebyscore( "token_usage:daily", start_time, end_time, @@ -260,9 +272,7 @@ class TokenMonitor: def get_hourly_cost(self, timestamp: datetime) -> float: """获取指定小时的成本""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return 0.0 # 获取当前小时的数据 @@ -272,7 +282,7 @@ class TokenMonitor: start_time = hour_start.timestamp() end_time = hour_end.timestamp() - usage_records = redis_client.zrangebyscore( + usage_records = self.redis_client.zrangebyscore( "token_usage:daily", start_time, end_time, @@ -296,15 +306,13 @@ class TokenMonitor: def get_user_token_stats(self, user_id: str, days: int = 7) -> Dict[str, Any]: """获取用户Token使用统计""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return {} end_time = datetime.now().timestamp() start_time = (datetime.now() - timedelta(days=days)).timestamp() - usage_records = redis_client.zrangebyscore( + usage_records = self.redis_client.zrangebyscore( f"token_usage:user:{user_id}", start_time, end_time, @@ -370,15 +378,13 @@ class TokenMonitor: def get_system_token_stats(self, days: int = 7) -> Dict[str, Any]: """获取系统Token使用统计""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return {} end_time = datetime.now().timestamp() start_time = (datetime.now() - timedelta(days=days)).timestamp() - usage_records = redis_client.zrangebyscore( + usage_records = self.redis_client.zrangebyscore( "token_usage:daily", start_time, end_time, @@ -460,29 +466,27 @@ class TokenMonitor: def cleanup_old_data(self, days: int = 30) -> int: """清理旧数据""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return 0 cutoff_time = (datetime.now() - timedelta(days=days)).timestamp() # 清理每日数据 - removed_count = redis_client.zremrangebyscore( + removed_count = self.redis_client.zremrangebyscore( "token_usage:daily", 0, cutoff_time ) # 清理用户数据 - user_keys = redis_client.keys("token_usage:user:*") + user_keys = self.redis_client.keys("token_usage:user:*") for key in user_keys: - redis_client.zremrangebyscore(key, 0, cutoff_time) + self.redis_client.zremrangebyscore(key, 0, cutoff_time) # 清理工单数据 - work_order_keys = redis_client.keys("token_usage:work_order:*") + work_order_keys = self.redis_client.keys("token_usage:work_order:*") for key in work_order_keys: - redis_client.zremrangebyscore(key, 0, cutoff_time) + self.redis_client.zremrangebyscore(key, 0, cutoff_time) logger.info(f"清理Token监控数据成功: 数量={removed_count}") return removed_count diff --git a/src/core/redis_manager.py b/src/core/redis_manager.py deleted file mode 100644 index c88c780..0000000 --- a/src/core/redis_manager.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Redis连接管理器 -统一管理所有Redis连接,避免重复连接 -""" - -import redis -import logging -import threading -from typing import Optional - -logger = logging.getLogger(__name__) - -class RedisManager: - """Redis连接管理器(单例模式)""" - - _instance = None - _lock = threading.Lock() - - def __new__(cls): - if cls._instance is None: - with cls._lock: - if cls._instance is None: - cls._instance = super().__new__(cls) - cls._instance._initialized = False - return cls._instance - - def __init__(self): - if self._initialized: - return - - self.redis_client = None - self.connected = False - self.connection_lock = threading.Lock() - self._initialized = True - - # Redis配置 - self.config = { - 'host': '43.134.68.207', - 'port': 6379, - 'password': '123456', - 'decode_responses': True, - 'socket_connect_timeout': 2, - 'socket_timeout': 2, - 'retry_on_timeout': True - } - - def get_connection(self) -> Optional[redis.Redis]: - """获取Redis连接(懒加载)""" - with self.connection_lock: - if not self.connected: - try: - self.redis_client = redis.Redis(**self.config) - self.redis_client.ping() - self.connected = True - logger.info("Redis连接成功") - except Exception as e: - logger.debug(f"Redis连接失败: {e}") - self.redis_client = None - self.connected = False - - return self.redis_client - - def is_connected(self) -> bool: - """检查Redis是否已连接""" - return self.connected and self.redis_client is not None - - def close_connection(self): - """关闭Redis连接""" - with self.connection_lock: - if self.redis_client: - try: - self.redis_client.close() - except Exception as e: - logger.debug(f"关闭Redis连接失败: {e}") - finally: - self.redis_client = None - self.connected = False - -# 全局Redis管理器实例 -redis_manager = RedisManager() diff --git a/src/core/system_optimizer.py b/src/core/system_optimizer.py index 9d3461a..132a14d 100644 --- a/src/core/system_optimizer.py +++ b/src/core/system_optimizer.py @@ -11,9 +11,10 @@ from typing import Dict, List, Optional, Any from datetime import datetime, timedelta from collections import defaultdict, deque import psutil +import redis + from ..config.config import Config from .database import db_manager -from .redis_manager import redis_manager logger = logging.getLogger(__name__) @@ -21,7 +22,7 @@ class SystemOptimizer: """系统优化器""" def __init__(self): - # 使用统一的Redis管理器 + self.redis_client = None self._init_redis() # 性能监控 @@ -54,9 +55,30 @@ class SystemOptimizer: # 延迟启动监控线程(避免启动时阻塞) threading.Timer(5.0, self._start_monitoring).start() - def _get_redis_client(self): - """获取Redis客户端""" - return redis_manager.get_connection() + def _init_redis(self): + """初始化Redis连接(延迟连接)""" + self.redis_client = None + self.redis_connected = False + + def _ensure_redis_connection(self): + """确保Redis连接""" + if not self.redis_connected: + try: + self.redis_client = redis.Redis( + host='43.134.68.207', + port=6379, + password='123456', + decode_responses=True, + socket_connect_timeout=2, + socket_timeout=2, + retry_on_timeout=True + ) + self.redis_client.ping() + self.redis_connected = True + logger.info("系统优化Redis连接成功") + except Exception as e: + logger.debug(f"系统优化Redis连接失败: {e}") + self.redis_client = None def _start_monitoring(self): """启动监控线程""" @@ -125,13 +147,12 @@ class SystemOptimizer: self.performance_metrics.append(metrics) # 保存到Redis - redis_client = self._get_redis_client() - if redis_client: - redis_client.lpush( + if self.redis_client: + self.redis_client.lpush( "system_metrics", str(metrics) ) - redis_client.ltrim("system_metrics", 0, 999) # 保留最近1000条 + self.redis_client.ltrim("system_metrics", 0, 999) # 保留最近1000条 except Exception as e: logger.error(f"收集系统指标失败: {e}") @@ -226,9 +247,7 @@ class SystemOptimizer: def check_rate_limit(self, user_id: str) -> bool: """检查用户请求频率限制""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return True # Redis不可用时允许请求 now = datetime.now() @@ -237,32 +256,32 @@ class SystemOptimizer: day_key = f"rate_limit:{user_id}:{now.strftime('%Y%m%d')}" # 检查每分钟限制 - minute_count = redis_client.get(minute_key) or 0 + minute_count = self.redis_client.get(minute_key) or 0 if int(minute_count) >= self.rate_limits["per_minute"]: logger.warning(f"用户 {user_id} 触发每分钟频率限制") return False # 检查每小时限制 - hour_count = redis_client.get(hour_key) or 0 + hour_count = self.redis_client.get(hour_key) or 0 if int(hour_count) >= self.rate_limits["per_hour"]: logger.warning(f"用户 {user_id} 触发每小时频率限制") return False # 检查每日限制 - day_count = redis_client.get(day_key) or 0 + day_count = self.redis_client.get(day_key) or 0 if int(day_count) >= self.rate_limits["per_day"]: logger.warning(f"用户 {user_id} 触发每日频率限制") return False # 增加计数 - redis_client.incr(minute_key) - redis_client.incr(hour_key) - redis_client.incr(day_key) + self.redis_client.incr(minute_key) + self.redis_client.incr(hour_key) + self.redis_client.incr(day_key) # 设置过期时间 - redis_client.expire(minute_key, 60) - redis_client.expire(hour_key, 3600) - redis_client.expire(day_key, 86400) + self.redis_client.expire(minute_key, 60) + self.redis_client.expire(hour_key, 3600) + self.redis_client.expire(day_key, 86400) return True @@ -312,9 +331,7 @@ class SystemOptimizer: def check_cost_limit(self, estimated_cost: float) -> bool: """检查成本限制""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return True # Redis不可用时允许请求 now = datetime.now() @@ -327,24 +344,24 @@ class SystemOptimizer: return False # 检查每小时成本 - hour_cost = float(redis_client.get(hour_key) or 0) + hour_cost = float(self.redis_client.get(hour_key) or 0) if hour_cost + estimated_cost > self.cost_limits["hourly"]: logger.warning(f"每小时成本超限: {hour_cost + estimated_cost:.4f} > {self.cost_limits['hourly']}") return False # 检查每日成本 - day_cost = float(redis_client.get(day_key) or 0) + day_cost = float(self.redis_client.get(day_key) or 0) if day_cost + estimated_cost > self.cost_limits["daily"]: logger.warning(f"每日成本超限: {day_cost + estimated_cost:.4f} > {self.cost_limits['daily']}") return False # 增加成本计数 - redis_client.incrbyfloat(hour_key, estimated_cost) - redis_client.incrbyfloat(day_key, estimated_cost) + self.redis_client.incrbyfloat(hour_key, estimated_cost) + self.redis_client.incrbyfloat(day_key, estimated_cost) # 设置过期时间 - redis_client.expire(hour_key, 3600) - redis_client.expire(day_key, 86400) + self.redis_client.expire(hour_key, 3600) + self.redis_client.expire(day_key, 86400) return True @@ -445,29 +462,27 @@ class SystemOptimizer: def cleanup_old_metrics(self, days: int = 7) -> int: """清理旧指标数据""" try: - redis_client = self._get_redis_client() - - if not redis_client: + if not self.redis_client: return 0 cutoff_time = (datetime.now() - timedelta(days=days)).timestamp() # 清理系统指标 - removed_count = redis_client.zremrangebyscore( + removed_count = self.redis_client.zremrangebyscore( "system_metrics", 0, cutoff_time ) # 清理频率限制数据 - rate_limit_keys = redis_client.keys("rate_limit:*") + rate_limit_keys = self.redis_client.keys("rate_limit:*") for key in rate_limit_keys: - redis_client.delete(key) + self.redis_client.delete(key) # 清理成本限制数据 - cost_limit_keys = redis_client.keys("cost_limit:*") + cost_limit_keys = self.redis_client.keys("cost_limit:*") for key in cost_limit_keys: - redis_client.delete(key) + self.redis_client.delete(key) logger.info(f"清理系统优化数据成功: 数量={removed_count}") return removed_count diff --git a/src/dialogue/conversation_history.py b/src/dialogue/conversation_history.py index 7126240..794b12d 100644 --- a/src/dialogue/conversation_history.py +++ b/src/dialogue/conversation_history.py @@ -8,11 +8,11 @@ import json import logging from typing import Dict, List, Optional, Any, Tuple from datetime import datetime, timedelta +import redis from sqlalchemy.orm import Session from ..core.database import db_manager from ..core.models import Conversation -from ..core.redis_manager import redis_manager from ..config.config import Config logger = logging.getLogger(__name__) @@ -21,12 +21,29 @@ class ConversationHistoryManager: """对话历史管理器""" def __init__(self): + self.redis_client = None + self._init_redis() self.max_history_length = 20 # 最大历史记录数 self.cache_ttl = 3600 * 24 # 缓存24小时 - def _get_redis_client(self): - """获取Redis客户端""" - return redis_manager.get_connection() + def _init_redis(self): + """初始化Redis连接""" + try: + self.redis_client = redis.Redis( + host='43.134.68.207', + port=6379, + password='123456', + decode_responses=True, + socket_connect_timeout=5, + socket_timeout=5, + retry_on_timeout=True + ) + # 测试连接 + self.redis_client.ping() + logger.info("Redis连接成功") + except Exception as e: + logger.error(f"Redis连接失败: {e}") + self.redis_client = None def _get_cache_key(self, user_id: str, work_order_id: Optional[int] = None) -> str: """生成缓存键""" @@ -92,8 +109,7 @@ class ConversationHistoryManager: response_time: Optional[float] = None ): """保存对话到Redis缓存""" - redis_client = self._get_redis_client() - if not redis_client: + if not self.redis_client: return try: @@ -110,13 +126,13 @@ class ConversationHistoryManager: } # 添加到Redis列表 - redis_client.lpush(cache_key, json.dumps(conversation_record, ensure_ascii=False)) + self.redis_client.lpush(cache_key, json.dumps(conversation_record, ensure_ascii=False)) # 限制列表长度 - redis_client.ltrim(cache_key, 0, self.max_history_length - 1) + self.redis_client.ltrim(cache_key, 0, self.max_history_length - 1) # 设置过期时间 - redis_client.expire(cache_key, self.cache_ttl) + self.redis_client.expire(cache_key, self.cache_ttl) except Exception as e: logger.error(f"保存到Redis缓存失败: {e}") @@ -131,8 +147,7 @@ class ConversationHistoryManager: """获取对话历史(优先从Redis获取)""" try: # 先尝试从Redis获取 - redis_client = self._get_redis_client() - if redis_client: + if self.redis_client: cached_history = self._get_from_cache(user_id, work_order_id, limit, offset) if cached_history: return cached_history @@ -152,8 +167,7 @@ class ConversationHistoryManager: offset: int ) -> List[Dict[str, Any]]: """从Redis缓存获取对话历史""" - redis_client = self._get_redis_client() - if not redis_client: + if not self.redis_client: return [] try: @@ -163,7 +177,7 @@ class ConversationHistoryManager: start = offset end = offset + limit - 1 - cached_data = redis_client.lrange(cache_key, start, end) + cached_data = self.redis_client.lrange(cache_key, start, end) history = [] for data in cached_data: diff --git a/src/dialogue/dialogue_manager.py b/src/dialogue/dialogue_manager.py index 53c2076..8dbf5c5 100644 --- a/src/dialogue/dialogue_manager.py +++ b/src/dialogue/dialogue_manager.py @@ -20,63 +20,14 @@ class DialogueManager: def __init__(self): self.llm_client = QwenClient() - # 延迟初始化管理器,避免重复创建 - self._knowledge_manager = None - self._vehicle_manager = None - self._history_manager = None - self._token_monitor = None - self._ai_success_monitor = None - self._system_optimizer = None + self.knowledge_manager = KnowledgeManager() + self.vehicle_manager = VehicleDataManager() + self.history_manager = ConversationHistoryManager() + self.token_monitor = TokenMonitor() + self.ai_success_monitor = AISuccessMonitor() + self.system_optimizer = SystemOptimizer() self.conversation_history = {} # 存储对话历史 - @property - def knowledge_manager(self): - """获取知识库管理器(懒加载)""" - if self._knowledge_manager is None: - from ..knowledge_base.knowledge_manager_singleton import knowledge_manager_singleton - self._knowledge_manager = knowledge_manager_singleton.get_knowledge_manager() - return self._knowledge_manager - - @property - def vehicle_manager(self): - """获取车辆数据管理器(懒加载)""" - if self._vehicle_manager is None: - from ..vehicle.vehicle_data_manager import VehicleDataManager - self._vehicle_manager = VehicleDataManager() - return self._vehicle_manager - - @property - def history_manager(self): - """获取对话历史管理器(懒加载)""" - if self._history_manager is None: - from .conversation_history import ConversationHistoryManager - self._history_manager = ConversationHistoryManager() - return self._history_manager - - @property - def token_monitor(self): - """获取Token监控器(懒加载)""" - if self._token_monitor is None: - from ..analytics.token_monitor import TokenMonitor - self._token_monitor = TokenMonitor() - return self._token_monitor - - @property - def ai_success_monitor(self): - """获取AI成功监控器(懒加载)""" - if self._ai_success_monitor is None: - from ..analytics.ai_success_monitor import AISuccessMonitor - self._ai_success_monitor = AISuccessMonitor() - return self._ai_success_monitor - - @property - def system_optimizer(self): - """获取系统优化器(懒加载)""" - if self._system_optimizer is None: - from ..core.system_optimizer import SystemOptimizer - self._system_optimizer = SystemOptimizer() - return self._system_optimizer - def process_user_message( self, user_message: str, diff --git a/src/knowledge_base/knowledge_manager_singleton.py b/src/knowledge_base/knowledge_manager_singleton.py deleted file mode 100644 index 889ccc8..0000000 --- a/src/knowledge_base/knowledge_manager_singleton.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -""" -知识库管理器单例 -避免重复初始化向量化器 -""" - -import threading -from typing import Optional -from .knowledge_manager import KnowledgeManager - -class KnowledgeManagerSingleton: - """知识库管理器单例""" - - _instance = None - _lock = threading.Lock() - - def __new__(cls): - if cls._instance is None: - with cls._lock: - if cls._instance is None: - cls._instance = super().__new__(cls) - cls._instance._initialized = False - return cls._instance - - def __init__(self): - if self._initialized: - return - - self._knowledge_manager = None - self._initialized = True - - def get_knowledge_manager(self) -> KnowledgeManager: - """获取知识库管理器实例(懒加载)""" - if self._knowledge_manager is None: - with self._lock: - if self._knowledge_manager is None: - self._knowledge_manager = KnowledgeManager() - return self._knowledge_manager - -# 全局单例实例 -knowledge_manager_singleton = KnowledgeManagerSingleton() diff --git a/src/main.py b/src/main.py index ec66164..927c49e 100644 --- a/src/main.py +++ b/src/main.py @@ -31,9 +31,7 @@ class TSPAssistant: # 初始化各个管理器 self.llm_client = QwenClient() - # 使用单例避免重复初始化 - from src.knowledge_base.knowledge_manager_singleton import knowledge_manager_singleton - self.knowledge_manager = knowledge_manager_singleton.get_knowledge_manager() + self.knowledge_manager = KnowledgeManager() self.dialogue_manager = DialogueManager() self.analytics_manager = AnalyticsManager() self.alert_system = AlertSystem() diff --git a/src/web/blueprints/alerts.py b/src/web/blueprints/alerts.py index afa5d20..cc694b7 100644 --- a/src/web/blueprints/alerts.py +++ b/src/web/blueprints/alerts.py @@ -10,13 +10,10 @@ from src.analytics.alert_system import AlertRule, AlertLevel, AlertType alerts_bp = Blueprint('alerts', __name__, url_prefix='/api/alerts') -# 使用全局单例避免重复创建 -_assistant = None - def get_assistant(): """获取TSP助手实例(懒加载)""" global _assistant - if _assistant is None: + if '_assistant' not in globals(): _assistant = TSPAssistant() return _assistant diff --git a/src/web/blueprints/knowledge.py b/src/web/blueprints/knowledge.py index 50f65c6..14ec1f9 100644 --- a/src/web/blueprints/knowledge.py +++ b/src/web/blueprints/knowledge.py @@ -13,21 +13,17 @@ from src.agent_assistant import TSPAgentAssistant knowledge_bp = Blueprint('knowledge', __name__, url_prefix='/api/knowledge') -# 使用全局单例避免重复创建 -_assistant = None -_agent_assistant = None - def get_assistant(): """获取TSP助手实例(懒加载)""" global _assistant - if _assistant is None: + if '_assistant' not in globals(): _assistant = TSPAssistant() return _assistant def get_agent_assistant(): """获取Agent助手实例(懒加载)""" global _agent_assistant - if _agent_assistant is None: + if '_agent_assistant' not in globals(): _agent_assistant = TSPAgentAssistant() return _agent_assistant diff --git a/src/web/blueprints/monitoring.py b/src/web/blueprints/monitoring.py index 7b4cf86..4f1d62a 100644 --- a/src/web/blueprints/monitoring.py +++ b/src/web/blueprints/monitoring.py @@ -32,13 +32,10 @@ def calculate_conversation_tokens(conversations): total_tokens += estimate_tokens(user_message) + estimate_tokens(assistant_response) return total_tokens -# 使用全局单例避免重复创建 -_assistant = None - def get_assistant(): """获取TSP助手实例(懒加载)""" global _assistant - if _assistant is None: + if '_assistant' not in globals(): _assistant = TSPAssistant() return _assistant diff --git a/src/web/blueprints/workorders.py b/src/web/blueprints/workorders.py index 0435687..cdae3d1 100644 --- a/src/web/blueprints/workorders.py +++ b/src/web/blueprints/workorders.py @@ -18,13 +18,10 @@ from src.core.query_optimizer import query_optimizer workorders_bp = Blueprint('workorders', __name__, url_prefix='/api/workorders') -# 使用全局单例避免重复创建 -_assistant = None - def get_assistant(): """获取TSP助手实例(懒加载)""" global _assistant - if _assistant is None: + if '_assistant' not in globals(): _assistant = TSPAssistant() return _assistant diff --git a/更新说明_重复初始化修复.md b/更新说明_重复初始化修复.md deleted file mode 100644 index fa55899..0000000 --- a/更新说明_重复初始化修复.md +++ /dev/null @@ -1,176 +0,0 @@ -# TSP智能助手 - 重复初始化问题修复 - -## 🎯 问题描述 - -在系统启动过程中发现重复初始化问题,导致: -- 启动时间过长(15-20秒) -- 大量重复的Redis连接日志 -- 重复的TSP助手初始化日志 -- 系统响应卡顿 - -## 🔍 问题根源分析 - -### 1. Redis连接重复创建 -多个模块独立创建Redis连接: -- `TokenMonitor` → 创建Redis连接 -- `AISuccessMonitor` → 创建Redis连接 -- `SystemOptimizer` → 创建Redis连接 -- `ConversationHistoryManager` → 创建Redis连接 - -### 2. TSP助手重复初始化 -- `TSPAssistant` 初始化时创建多个管理器 -- `DialogueManager` 又重复创建这些管理器 -- 导致每个管理器被创建多次 - -## 🛠️ 解决方案 - -### 1. 创建统一Redis管理器 - -**新增文件:`src/core/redis_manager.py`** -```python -class RedisManager: - """Redis连接管理器(单例模式)""" - - _instance = None - _lock = threading.Lock() - - def get_connection(self) -> Optional[redis.Redis]: - """获取Redis连接(懒加载)""" - # 懒加载连接,避免重复初始化 -``` - -**特点:** -- 单例模式管理所有Redis连接 -- 懒加载连接,避免重复初始化 -- 线程安全的连接管理 - -### 2. 更新所有Redis使用模块 - -#### TokenMonitor (`src/analytics/token_monitor.py`) -- 移除独立Redis初始化 -- 使用统一的Redis管理器 -- 更新所有Redis调用 - -#### AISuccessMonitor (`src/analytics/ai_success_monitor.py`) -- 移除重复的Redis连接代码 -- 使用统一管理器 - -#### SystemOptimizer (`src/core/system_optimizer.py`) -- 统一Redis连接管理 -- 更新所有Redis调用 - -#### ConversationHistoryManager (`src/dialogue/conversation_history.py`) -- 使用统一Redis管理器 - -### 3. 修复DialogueManager重复初始化 - -**文件:`src/dialogue/dialogue_manager.py`** - -**修改前:** -```python -def __init__(self): - self.token_monitor = TokenMonitor() # 重复创建 - self.ai_success_monitor = AISuccessMonitor() # 重复创建 - self.system_optimizer = SystemOptimizer() # 重复创建 -``` - -**修改后:** -```python -def __init__(self): - # 延迟初始化监控器,避免重复创建 - self._token_monitor = None - self._ai_success_monitor = None - self._system_optimizer = None - -@property -def token_monitor(self): - """获取Token监控器(懒加载)""" - if self._token_monitor is None: - self._token_monitor = TokenMonitor() - return self._token_monitor -``` - -**特点:** -- 使用懒加载属性(`@property`) -- 避免在初始化时重复创建监控器 -- 只有在实际使用时才创建实例 - -## 📊 修复效果 - -### 启动时间优化 -- **修复前**:15-20秒(大量重复初始化) -- **修复后**:2-3秒(统一管理,无重复) - -### 日志输出优化 -- **修复前**:大量重复的"Redis连接成功"和"TSP助手初始化完成"日志 -- **修复后**:每个组件只初始化一次,日志清晰 - -### 重复初始化消除 -- **修复前**:`TokenMonitor`、`AISuccessMonitor`、`SystemOptimizer` 被创建多次 -- **修复后**:每个管理器只创建一次 - -## 🔧 技术实现 - -### 单例Redis管理器 -```python -class RedisManager: - _instance = None - _lock = threading.Lock() - - def get_connection(self) -> Optional[redis.Redis]: - # 懒加载连接 -``` - -### 懒加载属性 -```python -@property -def token_monitor(self): - if self._token_monitor is None: - self._token_monitor = TokenMonitor() - return self._token_monitor -``` - -## 🎯 预期效果 - -现在启动时您将看到: -- ✅ **无重复日志**:不再有重复的Redis连接成功信息 -- ✅ **无重复初始化**:TSP助手只初始化一次 -- ✅ **统一管理**:所有Redis连接统一管理 -- ✅ **按需加载**:组件按需创建,避免重复 -- ✅ **快速启动**:2-3秒内完成启动 - -## 🚀 测试建议 - -重新启动服务,您应该会看到: -1. **启动速度更快**:避免了重复初始化 -2. **日志更清晰**:没有重复的Redis连接日志 -3. **资源使用更少**:避免了重复的Redis连接 - -## 📝 修改文件清单 - -### 新增文件 -- `src/core/redis_manager.py` - 统一Redis连接管理器 - -### 修改文件 -- `src/analytics/token_monitor.py` - 使用统一Redis管理器 -- `src/analytics/ai_success_monitor.py` - 使用统一Redis管理器 -- `src/core/system_optimizer.py` - 使用统一Redis管理器 -- `src/dialogue/conversation_history.py` - 使用统一Redis管理器 -- `src/dialogue/dialogue_manager.py` - 修复重复初始化问题 - -### 删除文件 -- `fix_redis_calls.py` - 临时脚本 -- `start_fast.py` - 不需要的启动脚本 -- `快速启动_无重复初始化.bat` - 不需要的批处理文件 - -## 🔍 问题解决验证 - -重复初始化问题已从根本上解决!这是代码逻辑问题,不是启动脚本问题。 - -**核心改进:** -1. 统一Redis连接管理 -2. 消除重复组件初始化 -3. 懒加载机制 -4. 单例模式设计 - -现在系统将快速启动,不再有重复的Redis连接和TSP助手初始化日志。