feat: 配置默认使用千问模型
- 修改默认LLM配置为千问模型(qwen-turbo) - 创建LLM配置文件,支持千问、OpenAI、Anthropic等多种模型 - 添加千问模型的特殊支持和模拟响应 - 创建配置说明文档,指导用户如何配置千问API密钥 - 优化智能Agent的模拟响应,体现千问模型的特色 - 支持通过配置文件灵活切换不同的LLM提供商
This commit is contained in:
config/README.md · 53 lines · new file
@@ -0,0 +1,53 @@
|
||||
# LLM配置说明
|
||||
|
||||
## 千问模型配置
|
||||
|
||||
本项目默认使用阿里云千问模型。要使用千问模型,请按以下步骤配置:
|
||||
|
||||
### 1. 获取API密钥
|
||||
|
||||
1. 访问 [阿里云百炼平台](https://bailian.console.aliyun.com/)
|
||||
2. 注册并登录账号
|
||||
3. 创建应用并获取API密钥
|
||||
|
||||
### 2. 配置API密钥
|
||||
|
||||
编辑 `config/llm_config.py` 文件,将 `api_key` 替换为您的实际API密钥:
|
||||
|
||||
```python
|
||||
QWEN_CONFIG = LLMConfig(
|
||||
provider="openai",
|
||||
api_key="sk-your-actual-qwen-api-key", # 替换为您的实际密钥
|
||||
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
|
||||
model="qwen-turbo",
|
||||
temperature=0.7,
|
||||
max_tokens=2000
|
||||
)
|
||||
```
|
||||
|
||||
### 3. 可用的千问模型
|
||||
|
||||
- `qwen-turbo`: 快速响应,适合一般对话
|
||||
- `qwen-plus`: 平衡性能和成本
|
||||
- `qwen-max`: 最强性能,适合复杂任务
|
||||
|
||||
### 4. 环境变量配置(可选)
|
||||
|
||||
您也可以使用环境变量来配置:
|
||||
|
||||
```bash
|
||||
export QWEN_API_KEY="sk-your-actual-qwen-api-key"
|
||||
export QWEN_MODEL="qwen-turbo"
|
||||
```
|
||||
|
||||
### 5. 其他模型支持
|
||||
|
||||
项目也支持其他LLM提供商:
|
||||
|
||||
- **OpenAI**: GPT-3.5/GPT-4
|
||||
- **Anthropic**: Claude系列
|
||||
- **本地模型**: Ollama等
|
||||
|
||||
### 6. 配置验证
|
||||
|
||||
启动系统后,可以在Agent管理页面查看LLM使用统计,确认配置是否正确。
|
||||
config/llm_config.py · 37 lines · new file
@@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""LLM configuration — Qwen (Tongyi Qianwen) is the project default.

Environment variable overrides (documented in config/README.md, section 4):
    QWEN_API_KEY       Qwen API key (falls back to the placeholder below)
    QWEN_MODEL         Qwen model name: qwen-turbo / qwen-plus / qwen-max
    OPENAI_API_KEY     key for the plain-OpenAI example config
    ANTHROPIC_API_KEY  key for the Anthropic example config
"""

import os

from src.agent.llm_client import LLMConfig

# Qwen is served through DashScope's OpenAI-compatible endpoint, so the
# provider stays "openai" and only base_url / model differ from stock OpenAI.
QWEN_CONFIG = LLMConfig(
    provider="openai",
    # Replace the placeholder with your Qwen API key, or export QWEN_API_KEY.
    api_key=os.environ.get("QWEN_API_KEY", "sk-your-qwen-api-key-here"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # Options: qwen-turbo (fast), qwen-plus (balanced), qwen-max (strongest).
    model=os.environ.get("QWEN_MODEL", "qwen-turbo"),
    temperature=0.7,
    max_tokens=2000,
)

# Example configs for the other supported providers; unused unless a caller
# selects them explicitly instead of DEFAULT_CONFIG.
OPENAI_CONFIG = LLMConfig(
    provider="openai",
    api_key=os.environ.get("OPENAI_API_KEY", "sk-your-openai-api-key-here"),
    model="gpt-3.5-turbo",
    temperature=0.7,
    max_tokens=2000,
)

ANTHROPIC_CONFIG = LLMConfig(
    provider="anthropic",
    api_key=os.environ.get("ANTHROPIC_API_KEY", "sk-ant-your-anthropic-api-key-here"),
    model="claude-3-sonnet-20240229",
    temperature=0.7,
    max_tokens=2000,
)

# The project defaults to the Qwen configuration.
DEFAULT_CONFIG = QWEN_CONFIG
|
||||
@@ -222,16 +222,16 @@ class IntelligentAgent:
|
||||
return self._simulate_llm_response(prompt)
|
||||
|
||||
def _simulate_llm_response(self, prompt: str) -> Dict[str, Any]:
|
||||
"""模拟大模型响应"""
|
||||
"""模拟大模型响应 - 千问模型风格"""
|
||||
if "预警信息" in prompt:
|
||||
return {
|
||||
"analysis": "系统性能下降,需要立即处理",
|
||||
"analysis": "【千问分析】系统性能下降,需要立即处理。根据历史数据分析,这可能是由于资源不足或配置问题导致的。",
|
||||
"immediate_actions": [
|
||||
{
|
||||
"action": "重启相关服务",
|
||||
"priority": 5,
|
||||
"confidence": 0.9,
|
||||
"parameters": {"service": "main_service"}
|
||||
"parameters": {"service": "main_service", "reason": "服务响应超时"}
|
||||
}
|
||||
],
|
||||
"follow_up_actions": [
|
||||
@@ -239,32 +239,35 @@ class IntelligentAgent:
|
||||
"action": "检查系统日志",
|
||||
"priority": 3,
|
||||
"confidence": 0.7,
|
||||
"parameters": {"log_level": "error"}
|
||||
"parameters": {"log_level": "error", "time_range": "last_hour"}
|
||||
}
|
||||
],
|
||||
"prevention_measures": [
|
||||
"增加监控频率",
|
||||
"优化系统配置"
|
||||
"增加监控频率,提前发现问题",
|
||||
"优化系统配置,提升性能",
|
||||
"建立预警机制,减少故障影响"
|
||||
]
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"confidence_analysis": "当前答案置信度较低,需要更多上下文信息",
|
||||
"confidence_analysis": "【千问分析】当前答案置信度较低,需要更多上下文信息。建议结合用户反馈和历史工单数据来提升答案质量。",
|
||||
"enhancement_suggestions": [
|
||||
"添加更多示例",
|
||||
"提供详细步骤"
|
||||
"添加更多实际案例和操作步骤",
|
||||
"提供详细的故障排除指南",
|
||||
"结合系统架构图进行说明"
|
||||
],
|
||||
"actions": [
|
||||
{
|
||||
"action": "更新知识库条目",
|
||||
"priority": 4,
|
||||
"confidence": 0.8,
|
||||
"parameters": {"enhanced_answer": "增强后的答案"}
|
||||
"parameters": {"enhanced_answer": "基于千问模型分析的增强答案"}
|
||||
}
|
||||
],
|
||||
"learning_opportunities": [
|
||||
"收集用户反馈",
|
||||
"分析相似问题"
|
||||
"收集用户反馈,持续优化答案",
|
||||
"分析相似问题,建立知识关联",
|
||||
"利用千问模型的学习能力,提升知识质量"
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ class BaseLLMClient(ABC):
|
||||
pass
|
||||
|
||||
class OpenAIClient(BaseLLMClient):
|
||||
"""OpenAI客户端"""
|
||||
"""OpenAI客户端 - 支持OpenAI和兼容OpenAI API的模型(如千问)"""
|
||||
|
||||
def __init__(self, config: LLMConfig):
|
||||
self.config = config
|
||||
@@ -93,11 +93,15 @@ class OpenAIClient(BaseLLMClient):
|
||||
|
||||
def _simulate_response(self, prompt: str) -> str:
|
||||
"""模拟响应"""
|
||||
if "千问" in self.config.model or "qwen" in self.config.model.lower():
|
||||
return f"【千问模型模拟响应】根据您的问题,我建议采取以下措施:{prompt[:50]}... 这是一个智能化的解决方案。"
|
||||
return f"模拟LLM响应: {prompt[:100]}..."
|
||||
|
||||
def _simulate_chat(self, messages: List[Dict[str, str]]) -> str:
|
||||
"""模拟对话响应"""
|
||||
last_message = messages[-1]["content"] if messages else ""
|
||||
if "千问" in self.config.model or "qwen" in self.config.model.lower():
|
||||
return f"【千问模型模拟对话】我理解您的问题:{last_message[:50]}... 让我为您提供专业的建议。"
|
||||
return f"模拟对话响应: {last_message[:100]}..."
|
||||
|
||||
class AnthropicClient(BaseLLMClient):
|
||||
|
||||
@@ -37,11 +37,19 @@ class TSPAgentAssistant(TSPAssistant):
|
||||
if llm_config:
|
||||
self.llm_manager = LLMManager(llm_config)
|
||||
else:
|
||||
# 使用默认配置
|
||||
# 使用默认配置 - 千问模型
|
||||
try:
|
||||
from config.llm_config import DEFAULT_CONFIG
|
||||
self.llm_manager = LLMManager(DEFAULT_CONFIG)
|
||||
except ImportError:
|
||||
# 如果配置文件不存在,使用内置配置
|
||||
default_config = LLMConfig(
|
||||
provider="openai",
|
||||
api_key="your-api-key-here",
|
||||
model="gpt-3.5-turbo"
|
||||
api_key="sk-your-qwen-api-key-here",
|
||||
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
|
||||
model="qwen-turbo",
|
||||
temperature=0.7,
|
||||
max_tokens=2000
|
||||
)
|
||||
self.llm_manager = LLMManager(default_config)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user