feat: 对话历史页面租户分组展示功能

- 新增 ConversationHistoryManager.get_tenant_summary() 按租户聚合会话统计
- get_sessions_paginated() 和 get_conversation_analytics() 增加 tenant_id 过滤
- 新增 GET /api/conversations/tenants 租户汇总端点
- sessions 和 analytics API 端点支持 tenant_id 查询参数
- 前端实现租户卡片列表视图和租户详情会话表格视图
- 实现面包屑导航、搜索范围限定、统计面板上下文切换
- 会话删除后自动检测空租户并返回列表视图
- dashboard.html 添加租户视图 DOM 容器
- 交互模式与知识库租户分组视图保持一致
This commit is contained in:
2026-04-01 16:11:02 +08:00
parent e14e3ee7a5
commit 7013e9db70
27 changed files with 2753 additions and 276 deletions

View File

@@ -1,35 +1,50 @@
# -*- coding: utf-8 -*-
"""
统一 LLM 客户端
兼容所有 OpenAI 格式 API(千问、Gemini、DeepSeek、本地 Ollama 等)
通过 .env 中 LLM_PROVIDER / LLM_BASE_URL / LLM_MODEL 切换模型
"""
import requests
import json
import logging
from typing import Dict, List, Optional, Any
from typing import Dict, List, Optional, Any, Generator
from datetime import datetime
from src.config.unified_config import get_config
logger = logging.getLogger(__name__)
class QwenClient:
"""阿里云千问API客户端"""
def __init__(self):
class LLMClient:
"""
统一大模型客户端
所有 OpenAI 兼容 API 都走这一个类,不再区分 provider。
"""
def __init__(self, base_url: str = None, api_key: str = None,
model: str = None, timeout: int = None):
config = get_config()
self.base_url = config.llm.base_url or "https://dashscope.aliyuncs.com/compatible-mode/v1"
self.api_key = config.llm.api_key
self.model_name = config.llm.model
self.timeout = config.llm.timeout
self.base_url = (base_url or config.llm.base_url or
"https://dashscope.aliyuncs.com/compatible-mode/v1")
self.api_key = api_key or config.llm.api_key
self.model_name = model or config.llm.model
self.timeout = timeout or config.llm.timeout
self.headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
"Content-Type": "application/json",
}
# ── 普通请求 ──────────────────────────────────────────
def chat_completion(
self,
messages: List[Dict[str, str]],
temperature: float = 0.7,
max_tokens: int = 1000,
stream: bool = False
**kwargs,
) -> Dict[str, Any]:
"""发送聊天请求"""
"""标准聊天补全(非流式)"""
try:
url = f"{self.base_url}/chat/completions"
payload = {
@@ -37,114 +52,146 @@ class QwenClient:
"messages": messages,
"temperature": temperature,
"max_tokens": max_tokens,
"stream": stream
"stream": False,
}
response = requests.post(
url,
headers=self.headers,
json=payload,
timeout=self.timeout
url, headers=self.headers, json=payload, timeout=self.timeout
)
if response.status_code == 200:
result = response.json()
logger.info("API请求成功")
return result
return response.json()
else:
logger.error(f"API请求失败: {response.status_code} - {response.text}")
logger.error(f"LLM API 失败: {response.status_code} - {response.text}")
return {"error": f"API请求失败: {response.status_code}"}
except requests.exceptions.Timeout:
logger.error("API请求超时")
logger.error("LLM API 超时")
return {"error": "请求超时"}
except requests.exceptions.RequestException as e:
logger.error(f"API请求异常: {e}")
logger.error(f"LLM API 异常: {e}")
return {"error": f"请求异常: {str(e)}"}
except Exception as e:
logger.error(f"未知错误: {e}")
logger.error(f"LLM 未知错误: {e}")
return {"error": f"未知错误: {str(e)}"}
# ── 流式请求 ──────────────────────────────────────────
def chat_completion_stream(
    self,
    messages: List[Dict[str, str]],
    temperature: float = 0.7,
    max_tokens: int = 1000,
) -> Generator[str, None, None]:
    """Streaming chat completion.

    Sends an OpenAI-compatible request with ``stream=True`` and yields
    text fragments (token deltas) as they arrive over SSE.  On any HTTP
    or network failure the generator simply stops yielding (errors are
    logged, not raised), mirroring the non-streaming error style.

    Args:
        messages: Chat messages in OpenAI format ({"role", "content"}).
        temperature: Sampling temperature.
        max_tokens: Upper bound on generated tokens.

    Yields:
        str: Incremental content pieces from ``choices[0].delta.content``.
    """
    request_body = {
        "model": self.model_name,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "stream": True,
    }
    try:
        response = requests.post(
            f"{self.base_url}/chat/completions",
            headers=self.headers,
            json=request_body,
            timeout=self.timeout,
            stream=True,
        )
        if response.status_code != 200:
            logger.error(f"流式 API 失败: {response.status_code}")
            return
        # Server-Sent Events: each payload line looks like "data: {...}".
        for line in response.iter_lines(decode_unicode=True):
            if not (line and line.startswith("data: ")):
                continue
            payload_str = line[len("data: "):]
            if payload_str.strip() == "[DONE]":
                break
            try:
                parsed = json.loads(payload_str)
            except json.JSONDecodeError:
                continue  # skip malformed chunks, keep streaming
            try:
                piece = parsed["choices"][0].get("delta", {}).get("content", "")
            except (IndexError, KeyError):
                continue  # chunk without choices/delta — ignore
            if piece:
                yield piece
    except requests.exceptions.Timeout:
        logger.error("流式 API 超时")
    except Exception as e:
        logger.error(f"流式 API 异常: {e}")
# ── 便捷方法 ──────────────────────────────────────────
def generate_response(
self,
user_message: str,
context: Optional[str] = None,
knowledge_base: Optional[List[str]] = None
knowledge_base: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""生成回复"""
messages = []
# 系统提示词
system_prompt = "你是一个专业的客服助手,请根据用户问题提供准确、 helpful的回复。"
"""快捷生成回复"""
system_prompt = "你是一个专业的客服助手,请根据用户问题提供准确、有帮助的回复。"
if context:
system_prompt += f"\n\n上下文信息: {context}"
if knowledge_base:
system_prompt += f"\n\n相关知识库: {' '.join(knowledge_base)}"
messages.append({"role": "system", "content": system_prompt})
messages.append({"role": "user", "content": user_message})
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_message},
]
result = self.chat_completion(messages)
if "error" in result:
return result
try:
response_content = result["choices"][0]["message"]["content"]
return {
"response": response_content,
"response": result["choices"][0]["message"]["content"],
"usage": result.get("usage", {}),
"model": result.get("model", ""),
"timestamp": datetime.now().isoformat()
"timestamp": datetime.now().isoformat(),
}
except (KeyError, IndexError) as e:
logger.error(f"解析API响应失败: {e}")
logger.error(f"解析响应失败: {e}")
return {"error": f"解析响应失败: {str(e)}"}
def extract_entities(self, text: str) -> Dict[str, Any]:
"""提取文本中的实体信息"""
prompt = f"""
请从以下文本中提取关键信息,包括:
1. 问题类型/类别
2. 优先级(高/中/低)
3. 关键词
4. 情感倾向(正面/负面/中性)
文本: {text}
请以JSON格式返回结果。
"""
import re
prompt = (
f"请从以下文本中提取关键信息,包括:\n"
f"1. 问题类型/类别\n2. 优先级(高/中/低)\n"
f"3. 关键词\n4. 情感倾向(正面/负面/中性)\n\n"
f"文本: {text}\n\n请以JSON格式返回结果。"
)
messages = [
{"role": "system", "content": "你是一个信息提取专家,请准确提取文本中的关键信息。"},
{"role": "user", "content": prompt}
{"role": "user", "content": prompt},
]
result = self.chat_completion(messages, temperature=0.3)
if "error" in result:
return result
try:
response_content = result["choices"][0]["message"]["content"]
# 尝试解析JSON
import re
json_match = re.search(r'\{.*\}', response_content, re.DOTALL)
if json_match:
return json.loads(json_match.group())
else:
return {"raw_response": response_content}
content = result["choices"][0]["message"]["content"]
json_match = re.search(r'\{.*\}', content, re.DOTALL)
return json.loads(json_match.group()) if json_match else {"raw_response": content}
except Exception as e:
logger.error(f"解析实体提取结果失败: {e}")
return {"error": f"解析失败: {str(e)}"}
def test_connection(self) -> bool:
"""测试API连接"""
"""测试连接"""
try:
result = self.chat_completion([
{"role": "user", "content": "你好"}
], max_tokens=10)
result = self.chat_completion(
[{"role": "user", "content": "你好"}], max_tokens=10
)
return "error" not in result
except Exception as e:
logger.error(f"API连接测试失败: {e}")
except Exception:
return False
# ── Backward-compatibility alias ──────────────────────────────────────────
# Old code doing `from src.core.llm_client import QwenClient` still works.
QwenClient = LLMClient

View File

@@ -1,4 +1,4 @@
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, Index
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from datetime import datetime
@@ -6,11 +6,15 @@ import hashlib
Base = declarative_base()
# 默认租户ID单租户部署时使用
DEFAULT_TENANT = "default"
class WorkOrder(Base):
"""工单模型"""
__tablename__ = "work_orders"
id = Column(Integer, primary_key=True)
tenant_id = Column(String(50), nullable=False, default=DEFAULT_TENANT, index=True)
order_id = Column(String(50), unique=True, nullable=False)
title = Column(String(200), nullable=False)
description = Column(Text, nullable=False)
@@ -63,6 +67,7 @@ class ChatSession(Base):
__tablename__ = "chat_sessions"
id = Column(Integer, primary_key=True)
tenant_id = Column(String(50), nullable=False, default=DEFAULT_TENANT, index=True)
session_id = Column(String(100), unique=True, nullable=False) # 唯一会话标识
user_id = Column(String(100), nullable=True) # 用户标识
work_order_id = Column(Integer, ForeignKey("work_orders.id"), nullable=True)
@@ -100,6 +105,7 @@ class Conversation(Base):
__tablename__ = "conversations"
id = Column(Integer, primary_key=True)
tenant_id = Column(String(50), nullable=False, default=DEFAULT_TENANT, index=True)
session_id = Column(String(100), ForeignKey("chat_sessions.session_id"), nullable=True) # 关联会话
work_order_id = Column(Integer, ForeignKey("work_orders.id"))
user_message = Column(Text, nullable=False)
@@ -124,6 +130,7 @@ class KnowledgeEntry(Base):
__tablename__ = "knowledge_entries"
id = Column(Integer, primary_key=True)
tenant_id = Column(String(50), nullable=False, default=DEFAULT_TENANT, index=True)
question = Column(Text, nullable=False)
answer = Column(Text, nullable=False)
category = Column(String(100), nullable=False)
@@ -164,6 +171,7 @@ class Analytics(Base):
__tablename__ = "analytics"
id = Column(Integer, primary_key=True)
tenant_id = Column(String(50), nullable=False, default=DEFAULT_TENANT, index=True)
date = Column(DateTime, nullable=False)
total_orders = Column(Integer, default=0)
resolved_orders = Column(Integer, default=0)
@@ -184,6 +192,7 @@ class Alert(Base):
__tablename__ = "alerts"
id = Column(Integer, primary_key=True)
tenant_id = Column(String(50), nullable=False, default=DEFAULT_TENANT, index=True)
rule_name = Column(String(100), nullable=False)
alert_type = Column(String(50), nullable=False)
level = Column(String(20), nullable=False) # info, warning, error, critical
@@ -242,6 +251,7 @@ class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
tenant_id = Column(String(50), nullable=False, default=DEFAULT_TENANT, index=True)
username = Column(String(50), unique=True, nullable=False)
password_hash = Column(String(128), nullable=False)
email = Column(String(120), unique=True, nullable=True)