Mirror of https://github.com/fish2018/pansou.git, synced 2025-11-25 03:14:59 +08:00

Commit: 优化磁盘写入策略 (Optimize the disk write strategy)

16  README.md

@@ -2,17 +2,8 @@
PanSou is a high-performance netdisk resource search API service that supports Telegram (TG) search and custom plugin search. The system is designed around performance and extensibility, with concurrent search, intelligent result ranking, and netdisk type classification.

## 🚀 Performance

Measured performance of PanSou on an 8GB MacBook Pro:

- ✅ **500-user burst concurrency**: 100% success rate, 167ms average response
- ✅ **200-user sustained concurrency**: 4,725 requests handled in 30 seconds, QPS = 148
- ✅ **Cache hits**: 99.8% of requests answered in <100ms
- ✅ **High availability**: no failures during long-running operation

-## Features ([details in the system design document](docs/%E7%B3%BB%E7%BB%9F%E5%BC%80%E5%8F%91%E8%AE%BE%E8%AE%A1%E6%96%87%E6%A1%A3.md))
+## Features ([see the system design document](docs/%E7%B3%BB%E7%BB%9F%E5%BC%80%E5%8F%91%E8%AE%BE%E8%AE%A1%E6%96%87%E6%A1%A3.md))

- **High-performance search**: searches multiple Telegram channels concurrently for much faster results; a worker-pool design manages concurrent tasks efficiently
- **Netdisk type classification**: automatically recognizes links for many netdisk services and groups results by type
@@ -24,7 +15,9 @@ PanSou是一个高性能的网盘资源搜索API服务,支持TG搜索和自定

- **Asynchronous plugin system**: search sources can be extended via plugins, and several netdisk search plugins are built in (see [插件开发指南.md](docs/插件开发指南.md)); a "respond fast, keep processing" asynchronous search mode copes with slow search sources
- **Two-level timeout control**: a short timeout (4s) guarantees fast responses, while a long timeout (30s) allows complete processing
-- **Persistent cache**: the cache is saved to disk automatically and restored after a restart
-- **Graceful shutdown**: the cache is saved before the program exits, so no data is lost
+- **Data safety guarantees**: on termination, all pending writes are flushed to disk automatically to prevent data loss
+- **Smart cache writes**: the in-memory cache is updated immediately for responsiveness, while disk writes are batched and delayed to improve performance and SSD lifespan (see the sketch after this list)
+- **Graceful shutdown**: a complete graceful-shutdown mechanism ensures all data is safely persisted before the program terminates
- **Incremental updates**: new and old results are merged intelligently, preserving valuable data
- **Active updates**: after an asynchronous cache update, async plugins proactively update the main cache (memory + disk), so users get fresh data without forcing a refresh
- **Cache optimization**: cache updates for empty or duplicate results are skipped, significantly reducing wasted work
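
The smart cache-write behavior above boils down to "update memory now, write disk later in batches". The following minimal Go sketch illustrates that pattern; the `hybridCache` type and its methods are hypothetical and are not PanSou's actual API:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct {
	key  string
	data []byte
	ttl  time.Duration
}

type hybridCache struct {
	mu     sync.Mutex
	memory map[string]entry // always updated immediately
	queue  []entry          // pending disk writes, flushed in batches
}

func (c *hybridCache) Set(key string, data []byte, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.memory[key] = entry{key, data, ttl}        // visible to readers at once
	c.queue = append(c.queue, entry{key, data, ttl}) // disk write deferred
}

func (c *hybridCache) flushLoop(interval time.Duration, writeDisk func([]entry)) {
	for range time.Tick(interval) {
		c.mu.Lock()
		batch := c.queue
		c.queue = nil
		c.mu.Unlock()
		if len(batch) > 0 {
			writeDisk(batch) // one batched disk write instead of many small ones
		}
	}
}

func main() {
	c := &hybridCache{memory: make(map[string]entry)}
	go c.flushLoop(2*time.Second, func(b []entry) { fmt.Println("flushed", len(b)) })
	c.Set("k", []byte("v"), time.Minute)
	time.Sleep(3 * time.Second)
}
```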
@@ -123,6 +116,7 @@ cd pansou

| CACHE_TTL | Cache TTL (minutes) | 60 |
| SHARD_COUNT | Number of cache shards | 8 |
| SERIALIZER_TYPE | Serializer type (gob/json) | gob |
+| CACHE_WRITE_STRATEGY | Cache write strategy (immediate/hybrid) | hybrid |
| ENABLE_COMPRESSION | Enable compression | false |
| MIN_SIZE_TO_COMPRESS | Minimum size to compress (bytes) | 1024 |
| GC_PERCENT | GC trigger percentage | 100 |
@@ -145,10 +145,12 @@ package myplugin

import (
	"context"
+	"io"
	"net/http"
	"time"
	"pansou/model"
	"pansou/plugin"
	"pansou/util/json" // use the project's unified high-performance JSON helpers
)

type MyPlugin struct {
@@ -356,9 +358,14 @@ func (p *MyPlugin) searchImpl(client *http.Client, keyword string, ext map[strin

		return nil, fmt.Errorf("[%s] HTTP错误: %d", p.Name(), resp.StatusCode)
	}

-	// JSON parse errors
+	// JSON parse errors - prefer the project's unified JSON helpers
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("[%s] 读取响应失败: %w", p.Name(), err)
+	}

	var apiResp APIResponse
-	if err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {
+	if err := json.Unmarshal(body, &apiResp); err != nil {
		return nil, fmt.Errorf("[%s] JSON解析失败: %w", p.Name(), err)
	}
@@ -21,14 +21,7 @@

PanSou is a high-performance netdisk resource search API service supporting TG search and custom plugin search. It uses an asynchronous plugin architecture with a two-level cache and concurrency control, and sustains 500 concurrent users on an 8GB MacBook Pro.

-### 1.2 Performance (measured)
-
-- ✅ **500-user burst concurrency**: 100% success rate, 167ms average response
-- ✅ **200-user sustained concurrency**: 4,725 requests handled in 30 seconds, QPS = 148
-- ✅ **Cache hits**: 99.8% of requests answered in <100ms
-- ✅ **High availability**: no failures during long-running operation
-
-### 1.3 Core features
+### 1.2 Core features

- **Asynchronous plugin system**: two-level timeout control (4s/30s), progressive result delivery
- **Two-level cache system**: sharded memory cache + sharded disk cache, GOB serialization
@@ -305,9 +298,11 @@ type ShardedDiskCache struct {

3. **Asynchronous loading**: after a disk hit, data is loaded into memory asynchronously

#### 4.3.2 Write flow
-1. **Dual-write mode**: write to memory and disk simultaneously
-2. **Atomic operations**: the memory cache uses atomic operations
-3. **GOB serialization**: disk storage uses the GOB format
+1. **Smart write strategy**: update the memory cache immediately; batch and delay disk writes
+2. **DelayedBatchWriteManager**: a smart cache-write manager supporting both the immediate and hybrid strategies (a configuration sketch follows this list)
+3. **Atomic operations**: the memory cache uses atomic operations
+4. **GOB serialization**: disk storage uses the GOB format
+5. **Data safety guarantees**: all pending writes are saved automatically on termination to prevent data loss
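
For illustration, the strategy choice could be driven by the CACHE_WRITE_STRATEGY variable from the configuration table above. The `CacheWriteStrategy` type and the immediate/hybrid values appear elsewhere in this commit, but this helper is a hypothetical sketch, not the project's actual code:

```go
package cache

import "os"

// CacheWriteStrategy names a cache write strategy; the immediate and hybrid
// values match the README's configuration table.
type CacheWriteStrategy string

const (
	StrategyImmediate CacheWriteStrategy = "immediate" // write memory and disk synchronously
	StrategyHybrid    CacheWriteStrategy = "hybrid"    // update memory now, batch disk writes later
)

// strategyFromEnv is an illustrative helper: it reads CACHE_WRITE_STRATEGY
// and falls back to hybrid, the documented default.
func strategyFromEnv() CacheWriteStrategy {
	if os.Getenv("CACHE_WRITE_STRATEGY") == "immediate" {
		return StrategyImmediate
	}
	return StrategyHybrid
}
```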
### 4.4 Cache key strategy

@@ -671,7 +666,13 @@ export ASYNC_LOG_ENABLED=false # 控制异步插件详细日志

- **Go-native**: no third-party dependency required
- **Type-safe**: preserves Go type information

-### 10.4 Database-free architecture
+### 10.4 Choice of the Sonic JSON library
- **High performance**: 3-5× faster than the standard library's encoding/json
- **Unified handling**: one global path for JSON serialization/deserialization
- **Good compatibility**: fully compatible with the standard JSON format
- **Memory-efficient**: more efficient memory use (a wrapper sketch follows)
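
The project routes all JSON handling through pansou/util/json, as the plugin diffs below show. Assuming Sonic here means bytedance/sonic, a minimal wrapper with encoding/json-compatible signatures might look like this sketch (not necessarily the project's actual implementation):

```go
// Package json sketches a thin wrapper over bytedance/sonic with
// encoding/json-compatible signatures, so call sites such as
// json.Unmarshal(body, &apiResp) work unchanged.
package json

import "github.com/bytedance/sonic"

func Marshal(v interface{}) ([]byte, error) {
	return sonic.Marshal(v)
}

func Unmarshal(data []byte, v interface{}) error {
	return sonic.Unmarshal(data, v)
}
```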
+### 10.5 Database-free architecture
- **Simplified deployment**: no database to install or configure
- **Lower complexity**: fewer component dependencies
- **Better performance**: avoids database I/O bottlenecks
151  main.go
@@ -18,36 +18,40 @@ import (

	"pansou/api"
	"pansou/config"
	"pansou/plugin"
	"pansou/service"
	"pansou/util"
	"pansou/util/cache"

	// The blank imports below trigger each plugin's init function, which registers the plugin automatically.
	// To add a new plugin, just add the corresponding import here.
	_ "pansou/plugin/duoduo"
	_ "pansou/plugin/fox4k"
	_ "pansou/plugin/hdr4k"
	_ "pansou/plugin/huban"
	_ "pansou/plugin/hunhepan"
	_ "pansou/plugin/jikepan"
	_ "pansou/plugin/labi"
	_ "pansou/plugin/muou"
	_ "pansou/plugin/ouge"
	_ "pansou/plugin/pan666"
	_ "pansou/plugin/pansearch"
	_ "pansou/plugin/panta"
	_ "pansou/plugin/qupansou"
	_ "pansou/plugin/susu"
	_ "pansou/plugin/panyq"
	_ "pansou/plugin/xuexizhinan"
	_ "pansou/plugin/hdr4k"
	_ "pansou/plugin/qupansou"
	_ "pansou/plugin/shandian"
	_ "pansou/plugin/muou"
	_ "pansou/plugin/duoduo"
	_ "pansou/plugin/labi"
	_ "pansou/plugin/susu"
	_ "pansou/plugin/wanou"
	_ "pansou/plugin/ouge"
	_ "pansou/plugin/xuexizhinan"
	_ "pansou/plugin/zhizhen"
	_ "pansou/plugin/huban"
	_ "pansou/plugin/fox4k"

	"pansou/service"
	"pansou/util"
)

// Global cache-write manager
var globalCacheWriteManager *cache.DelayedBatchWriteManager

func main() {
	// Initialize the application
	initApp()

	// Start the server
	startServer()
}
@@ -56,10 +60,34 @@ func main() {

func initApp() {
	// Initialize configuration
	config.Init()

	// Initialize the HTTP client
	util.InitHTTPClient()

	// 🔥 Initialize the cache-write manager
	var err error
	globalCacheWriteManager, err = cache.NewDelayedBatchWriteManager()
	if err != nil {
		log.Fatalf("缓存写入管理器创建失败: %v", err)
	}
	if err := globalCacheWriteManager.Initialize(); err != nil {
		log.Fatalf("缓存写入管理器初始化失败: %v", err)
	}
	fmt.Println("✅ 缓存写入管理器已初始化")

	// 🔗 Inject the cache-write manager into the service package
	service.SetGlobalCacheWriteManager(globalCacheWriteManager)

	// 🔗 Wire up the manager's main-cache update function
	if mainCache := service.GetEnhancedTwoLevelCache(); mainCache != nil {
		globalCacheWriteManager.SetMainCacheUpdater(func(key string, data []byte, ttl time.Duration) error {
			return mainCache.SetBothLevels(key, data, ttl)
		})
		fmt.Println("✅ 主缓存更新函数已设置")
	} else {
		fmt.Println("⚠️ 主缓存实例不可用,稍后将重试设置")
	}

	// Make sure the async plugin system is initialized
	plugin.InitAsyncPluginSystem()
}
@@ -68,25 +96,25 @@ func initApp() {

func startServer() {
	// Initialize the plugin manager
	pluginManager := plugin.NewPluginManager()

	// Register all global plugins (auto-registered into the global registry via their init functions)
	pluginManager.RegisterAllGlobalPlugins()

	// Update the default concurrency (based on the actual plugin count)
	config.UpdateDefaultConcurrency(len(pluginManager.GetPlugins()))

	// Initialize the search service
	searchService := service.NewSearchService(pluginManager)

	// Set up the routes
	router := api.SetupRouter(searchService)

	// Read the configured port
	port := config.AppConfig.Port

	// Print service information
	printServiceInfo(port, pluginManager)

	// Create the HTTP server
	srv := &http.Server{
		Addr: ":" + port,
@@ -95,11 +123,11 @@ func startServer() {
		WriteTimeout: config.AppConfig.HTTPWriteTimeout,
		IdleTimeout:  config.AppConfig.HTTPIdleTimeout,
	}

	// Create a channel to receive OS signals
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)

	// Start the server in a separate goroutine
	go func() {
		// If a maximum connection count is configured, use a limiting listener
@@ -109,10 +137,10 @@ func startServer() {
		if err != nil {
			log.Fatalf("创建监听器失败: %v", err)
		}

		// Create a connection-limiting listener
		limitListener := netutil.LimitListener(listener, config.AppConfig.HTTPMaxConns)

		// Serve using the limiting listener
		if err := srv.Serve(limitListener); err != nil && err != http.ErrServerClosed {
			log.Fatalf("启动服务器失败: %v", err)
@@ -124,37 +152,46 @@ func startServer() {
		}
		}
	}()

	// Wait for the interrupt signal
	<-quit
	fmt.Println("正在关闭服务器...")

	// 🔥 Save cache data to disk first (data safety comes first)
	fmt.Println("正在保存缓存数据...")
	if globalCacheWriteManager != nil {
		shutdownTimeout := 3 * time.Second
		if err := globalCacheWriteManager.Shutdown(shutdownTimeout); err != nil {
			log.Printf("⚠️ 缓存数据保存失败: %v", err)
		} else {
			fmt.Println("✅ 缓存数据已安全保存")
		}
	}

	// Set the shutdown timeout
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// The async plugins' local cache system has been removed

	// Shut the server down gracefully
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatalf("服务器关闭异常: %v", err)
	}

-	fmt.Println("服务器已安全关闭")
+	fmt.Println("🎉 服务器已安全关闭")
}
// printServiceInfo prints service information
func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
	// Startup address
	fmt.Printf("服务器启动在 http://localhost:%s\n", port)

	// Proxy info
	if config.AppConfig.UseProxy {
		fmt.Printf("使用SOCKS5代理: %s\n", config.AppConfig.ProxyURL)
	} else {
		fmt.Println("未使用代理")
	}

	// Concurrency info
	if os.Getenv("CONCURRENCY") != "" {
		fmt.Printf("默认并发数: %d (由环境变量CONCURRENCY指定)\n", config.AppConfig.DefaultConcurrency)
@@ -164,33 +201,33 @@ func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
	if pluginManager != nil {
		pluginCount = len(pluginManager.GetPlugins())
	}
	fmt.Printf("默认并发数: %d (= 频道数%d + 插件数%d + 10)\n",
		config.AppConfig.DefaultConcurrency, channelCount, pluginCount)
	}

	// Cache info
	if config.AppConfig.CacheEnabled {
		fmt.Printf("缓存已启用: 路径=%s, 最大大小=%dMB, TTL=%d分钟\n",
			config.AppConfig.CachePath,
			config.AppConfig.CacheMaxSizeMB,
			config.AppConfig.CacheTTLMinutes)
	} else {
		fmt.Println("缓存已禁用")
	}

	// Compression info
	if config.AppConfig.EnableCompression {
		fmt.Printf("响应压缩已启用: 最小压缩大小=%d字节\n",
			config.AppConfig.MinSizeToCompress)
	} else {
		fmt.Println("响应压缩已禁用")
	}

	// GC configuration
	fmt.Printf("GC配置: 触发阈值=%d%%, 内存优化=%v\n",
		config.AppConfig.GCPercent,
		config.AppConfig.OptimizeMemory)

	// HTTP server configuration
	readTimeoutMsg := ""
	if os.Getenv("HTTP_READ_TIMEOUT") != "" {
@@ -198,14 +235,14 @@ func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
	} else {
		readTimeoutMsg = "(自动计算)"
	}

	writeTimeoutMsg := ""
	if os.Getenv("HTTP_WRITE_TIMEOUT") != "" {
		writeTimeoutMsg = "(由环境变量指定)"
	} else {
		writeTimeoutMsg = "(自动计算)"
	}

	maxConnsMsg := ""
	if os.Getenv("HTTP_MAX_CONNS") != "" {
		maxConnsMsg = "(由环境变量指定)"
@@ -213,13 +250,13 @@ func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
		cpuCount := runtime.NumCPU()
		maxConnsMsg = fmt.Sprintf("(自动计算: CPU核心数%d × 200)", cpuCount)
	}

	fmt.Printf("HTTP服务器配置: 读取超时=%v %s, 写入超时=%v %s, 空闲超时=%v, 最大连接数=%d %s\n",
		config.AppConfig.HTTPReadTimeout, readTimeoutMsg,
		config.AppConfig.HTTPWriteTimeout, writeTimeoutMsg,
		config.AppConfig.HTTPIdleTimeout,
		config.AppConfig.HTTPMaxConns, maxConnsMsg)

	// Async plugin configuration
	if config.AppConfig.AsyncPluginEnabled {
		// Was the worker count set via an environment variable?
@@ -230,7 +267,7 @@ func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
		cpuCount := runtime.NumCPU()
		workersMsg = fmt.Sprintf("(自动计算: CPU核心数%d × 5)", cpuCount)
	}

		// Was the task count set via an environment variable?
		tasksMsg := ""
		if os.Getenv("ASYNC_MAX_BACKGROUND_TASKS") != "" {
@@ -238,7 +275,7 @@ func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
	} else {
		tasksMsg = "(自动计算: 工作者数量 × 5)"
	}

	fmt.Printf("异步插件已启用: 响应超时=%d秒, 最大工作者=%d %s, 最大任务=%d %s, 缓存TTL=%d小时\n",
		config.AppConfig.AsyncResponseTimeout,
		config.AppConfig.AsyncMaxBackgroundWorkers, workersMsg,
@@ -247,11 +284,11 @@ func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
	} else {
		fmt.Println("异步插件已禁用")
	}

	// Loaded plugins (sorted by priority)
	fmt.Println("已加载插件:")
	plugins := pluginManager.GetPlugins()

	// Sort by priority (a smaller number sorts first)
	sort.Slice(plugins, func(i, j int) bool {
		// Tie-break by name when priorities are equal
@@ -260,8 +297,8 @@ func printServiceInfo(port string, pluginManager *plugin.PluginManager) {
	}
	return plugins[i].Priority() < plugins[j].Priority()
	})

	for _, p := range plugins {
		fmt.Printf(" - %s (优先级: %d)\n", p.Name(), p.Priority())
	}
}
}
@@ -1,8 +1,8 @@
package huban

import (
-	"encoding/json"
	"fmt"
+	"io"
	"net/http"
	"net/url"
	"regexp"
@@ -13,6 +13,7 @@ import (

	"pansou/model"
	"pansou/plugin"
+	"pansou/util/json"
)

const (
@@ -121,8 +122,8 @@ func (p *HubanAsyncPlugin) searchImpl(client *http.Client, keyword string, ext m

	// Define two domains - primary/backup mode
	urls := []string{
-		fmt.Sprintf("http://103.45.162.207:20720/api.php/provide/vod?ac=detail&wd=%s", url.QueryEscape(keyword)),
		fmt.Sprintf("http://xsayang.fun:12512/api.php/provide/vod?ac=detail&wd=%s", url.QueryEscape(keyword)),
+		// fmt.Sprintf("http://103.45.162.207:20720/api.php/provide/vod?ac=detail&wd=%s", url.QueryEscape(keyword)),
	}

	// Primary/backup mode: prefer the first domain, fall back to the second on failure
@@ -164,8 +165,13 @@ func (p *HubanAsyncPlugin) tryRequest(searchURL string, client *http.Client) ([]
	defer resp.Body.Close()

	// Parse the JSON response
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("读取响应失败: %w", err)
+	}

	var apiResponse HubanAPIResponse
-	if err := json.NewDecoder(resp.Body).Decode(&apiResponse); err != nil {
+	if err := json.Unmarshal(body, &apiResponse); err != nil {
		return nil, fmt.Errorf("解析JSON响应失败: %w", err)
	}
@@ -1,8 +1,8 @@
package ouge

import (
-	"encoding/json"
	"fmt"
+	"io"
	"net/http"
	"net/url"
	"regexp"
@@ -13,6 +13,7 @@ import (

	"pansou/model"
	"pansou/plugin"
+	"pansou/util/json"
)

const (
@@ -146,8 +147,13 @@ func (p *OugeAsyncPlugin) searchImpl(client *http.Client, keyword string, ext ma
	defer resp.Body.Close()

	// Parse the JSON response
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("[%s] 读取响应失败: %w", p.Name(), err)
+	}

	var apiResponse OugeAPIResponse
-	if err := json.NewDecoder(resp.Body).Decode(&apiResponse); err != nil {
+	if err := json.Unmarshal(body, &apiResponse); err != nil {
		return nil, fmt.Errorf("[%s] 解析JSON响应失败: %w", p.Name(), err)
	}
@@ -1,8 +1,8 @@
package wanou

import (
-	"encoding/json"
	"fmt"
+	"io"
	"net/http"
	"net/url"
	"regexp"
@@ -13,6 +13,7 @@ import (

	"pansou/model"
	"pansou/plugin"
+	"pansou/util/json"
)

const (
@@ -146,8 +147,13 @@ func (p *WanouAsyncPlugin) searchImpl(client *http.Client, keyword string, ext m
	defer resp.Body.Close()

	// Parse the JSON response
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("[%s] 读取响应失败: %w", p.Name(), err)
+	}

	var apiResponse WanouAPIResponse
-	if err := json.NewDecoder(resp.Body).Decode(&apiResponse); err != nil {
+	if err := json.Unmarshal(body, &apiResponse); err != nil {
		return nil, fmt.Errorf("[%s] 解析JSON响应失败: %w", p.Name(), err)
	}
@@ -1,8 +1,8 @@
package zhizhen

import (
-	"encoding/json"
	"fmt"
+	"io"
	"net/http"
	"net/url"
	"regexp"
@@ -13,6 +13,7 @@ import (

	"pansou/model"
	"pansou/plugin"
+	"pansou/util/json"
)

const (
@@ -146,8 +147,13 @@ func (p *ZhizhenAsyncPlugin) searchImpl(client *http.Client, keyword string, ext
	defer resp.Body.Close()

	// Parse the JSON response
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("[%s] 读取响应失败: %w", p.Name(), err)
+	}

	var apiResponse ZhizhenAPIResponse
-	if err := json.NewDecoder(resp.Body).Decode(&apiResponse); err != nil {
+	if err := json.Unmarshal(body, &apiResponse); err != nil {
		return nil, fmt.Errorf("[%s] 解析JSON响应失败: %w", p.Name(), err)
	}
131  service/cache_integration.go  Normal file
@@ -0,0 +1,131 @@
package service

import (
	"fmt"
	"time"

	"pansou/model"
	"pansou/util/cache"
)

// CacheWriteIntegration is the cache-write integration layer
type CacheWriteIntegration struct {
	batchManager *cache.DelayedBatchWriteManager
	mainCache    *cache.EnhancedTwoLevelCache
	strategy     cache.CacheWriteStrategy
	initialized  bool
}

// NewCacheWriteIntegration creates the cache-write integration
func NewCacheWriteIntegration(mainCache *cache.EnhancedTwoLevelCache) (*CacheWriteIntegration, error) {
	// Create the delayed batch write manager
	batchManager, err := cache.NewDelayedBatchWriteManager()
	if err != nil {
		return nil, fmt.Errorf("创建批量写入管理器失败: %v", err)
	}

	integration := &CacheWriteIntegration{
		batchManager: batchManager,
		mainCache:    mainCache,
	}

	// Wire up the main-cache update function
	batchManager.SetMainCacheUpdater(integration.createMainCacheUpdater())

	// Initialize the manager
	if err := batchManager.Initialize(); err != nil {
		return nil, fmt.Errorf("初始化批量写入管理器失败: %v", err)
	}

	integration.initialized = true

	fmt.Printf("✅ [缓存写入集成] 初始化完成\n")
	return integration, nil
}

// createMainCacheUpdater builds the main-cache update function
func (c *CacheWriteIntegration) createMainCacheUpdater() func(string, []byte, time.Duration) error {
	return func(key string, data []byte, ttl time.Duration) error {
		// Delegate the actual write to the existing cache system
		return c.mainCache.SetBothLevels(key, data, ttl)
	}
}

// HandleCacheWrite handles a cache-write request
func (c *CacheWriteIntegration) HandleCacheWrite(key string, results []model.SearchResult, ttl time.Duration, isFinal bool, keyword string, pluginName string) error {
	if !c.initialized {
		return fmt.Errorf("缓存写入集成未初始化")
	}

	// Determine the plugin priority
	priority := c.getPluginPriority(pluginName)

	// Estimate the data size
	dataSize := c.estimateDataSize(results)

	// Build the cache operation
	operation := &cache.CacheOperation{
		Key:        key,
		Data:       results,
		TTL:        ttl,
		PluginName: pluginName,
		Keyword:    keyword,
		Timestamp:  time.Now(),
		Priority:   priority,
		DataSize:   dataSize,
		IsFinal:    isFinal,
	}

	// Hand the operation to the batch write manager
	return c.batchManager.HandleCacheOperation(operation)
}

// getPluginPriority returns a plugin's priority
func (c *CacheWriteIntegration) getPluginPriority(pluginName string) int {
	// This should really come from the plugin manager;
	// for now use a simplified mapping
	switch pluginName {
	case "hdr4k", "susu", "panta", "xuexizhinan", "zhizhen", "labi", "wanou":
		return 1 // tier-1 plugins
	case "muou", "huban", "ouge", "duoduo", "shandian", "panyq":
		return 2 // tier-2 plugins
	case "fox4k", "qupansou", "pansearch", "hunhepan", "pan666", "jikepan":
		return 3 // tier-3 plugins
	default:
		return 4 // default: tier 4
	}
}

// estimateDataSize estimates the payload size
func (c *CacheWriteIntegration) estimateDataSize(results []model.SearchResult) int {
	// Rough estimate: ~500 bytes per result
	return len(results) * 500
}

// Shutdown shuts the integration down gracefully
func (c *CacheWriteIntegration) Shutdown(timeout time.Duration) error {
	if !c.initialized {
		return nil
	}

	return c.batchManager.Shutdown(timeout)
}

// GetStats returns statistics
func (c *CacheWriteIntegration) GetStats() interface{} {
	if !c.initialized {
		return nil
	}

	return c.batchManager.GetStats()
}

// SetStrategy sets the write strategy
func (c *CacheWriteIntegration) SetStrategy(strategy cache.CacheWriteStrategy) {
	c.strategy = strategy
}

// GetStrategy returns the current strategy
func (c *CacheWriteIntegration) GetStrategy() cache.CacheWriteStrategy {
	return c.strategy
}
@@ -19,6 +19,24 @@ import (
	"regexp"
)

// Global cache-write manager reference (avoids a circular dependency)
var globalCacheWriteManager *cache.DelayedBatchWriteManager

// SetGlobalCacheWriteManager sets the global cache-write manager
func SetGlobalCacheWriteManager(manager *cache.DelayedBatchWriteManager) {
	globalCacheWriteManager = manager
}

// GetGlobalCacheWriteManager returns the global cache-write manager
func GetGlobalCacheWriteManager() *cache.DelayedBatchWriteManager {
	return globalCacheWriteManager
}

// GetEnhancedTwoLevelCache returns the enhanced two-level cache instance
func GetEnhancedTwoLevelCache() *cache.EnhancedTwoLevelCache {
	return enhancedTwoLevelCache
}

// Priority keyword list
var priorityKeywords = []string{"合集", "系列", "全", "完", "最新", "附"}
@@ -186,6 +204,14 @@ func NewSearchService(pluginManager *plugin.PluginManager) *SearchService {

	// Inject the main cache into the async plugins
	injectMainCacheToAsyncPlugins(pluginManager, enhancedTwoLevelCache)

	// 🔗 Make sure the cache-write manager has its main-cache update function set
	if globalCacheWriteManager != nil && enhancedTwoLevelCache != nil {
		globalCacheWriteManager.SetMainCacheUpdater(func(key string, data []byte, ttl time.Duration) error {
			return enhancedTwoLevelCache.SetBothLevels(key, data, ttl)
		})
		fmt.Println("✅ 主缓存更新函数已设置 (在SearchService中)")
	}

	return &SearchService{
		pluginManager: pluginManager,
@@ -257,33 +283,43 @@ func injectMainCacheToAsyncPlugins(pluginManager *plugin.PluginManager, mainCach
		return err
	}

-	// 🔥 Choose the cache-update strategy based on the IsFinal parameter
+	// 🔥 Use the new cache-write manager
	// Note: referencing the manager directly would require importing the main package;
	// to avoid a circular dependency we go through a global variable for now.
	// TODO: improve the architecture and use dependency injection

	// Update the memory cache first (immediately visible)
	if err := mainCache.SetMemoryOnly(key, data, ttl); err != nil {
		return fmt.Errorf("内存缓存更新失败: %v", err)
	}

	// Let the cache-write manager handle the disk write (smart batching)
	if cacheWriteManager := globalCacheWriteManager; cacheWriteManager != nil {
		operation := &cache.CacheOperation{
			Key:        key,
			Data:       finalResults, // use the raw data, not the serialized form
			TTL:        ttl,
			IsFinal:    isFinal,
			PluginName: pluginName,
			Keyword:    keyword,
			Priority:   2, // medium priority
			Timestamp:  time.Now(),
			DataSize:   len(data), // size of the serialized data
		}

		// Raise the priority for final results
		if isFinal {
			operation.Priority = 1 // high priority
		}

		return cacheWriteManager.HandleCacheOperation(operation)
	}

	// Fallback: if the cache-write manager is unavailable, use the original logic
	if isFinal {
		// Final results: update the memory + disk caches
		// if config.AppConfig != nil && config.AppConfig.AsyncLogEnabled {
		// 	displayKey := key[:8] + "..."
		// 	if keyword != "" {
		// 		fmt.Printf("📝 [异步插件] 最终结果缓存更新: %s(关键词:%s) | 结果数: %d | 数据长度: %d\n",
		// 			displayKey, keyword, len(finalResults), len(data))
		// 	} else {
		// 		fmt.Printf("📝 [异步插件] 最终结果缓存更新: %s | 结果数: %d | 数据长度: %d\n",
		// 			key, len(finalResults), len(data))
		// 	}
		// }
		return mainCache.SetBothLevels(key, data, ttl)
	} else {
		// Partial results: update the memory cache only
		// if config.AppConfig != nil && config.AppConfig.AsyncLogEnabled {
		// 	displayKey := key[:8] + "..."
		// 	if keyword != "" {
		// 		fmt.Printf("📝 [异步插件] 部分结果缓存更新: %s(关键词:%s) | 结果数: %d | 数据长度: %d\n",
		// 			displayKey, keyword, len(finalResults), len(data))
		// 	} else {
		// 		fmt.Printf("📝 [异步插件] 部分结果缓存更新: %s | 结果数: %d | 数据长度: %d\n",
		// 			key, len(finalResults), len(data))
		// 	}
		// }
-		return mainCache.SetMemoryOnly(key, data, ttl)
+		return nil // memory already updated; the batch writer handles the disk later
	}
}
@@ -1128,17 +1164,11 @@ func (s *SearchService) searchPlugins(keyword string, plugins []string, forceRef
	// If the disk cache is newer than the memory cache, memory is refreshed automatically and the latest data is returned
	data, hit, err = enhancedTwoLevelCache.Get(cacheKey)

-	// 🔍 Cache-status debug logging
-	displayKey := cacheKey[:8] + "..."
-	fmt.Printf("🔍 [主服务] 缓存检查: %s(关键词:%s) | 命中: %v | 错误: %v \n",
-		displayKey, keyword, hit, err)

	if err == nil && hit {
		var results []model.SearchResult
		if err := enhancedTwoLevelCache.GetSerializer().Deserialize(data, &results); err == nil {
			// Return the cached data
-			displayKey := cacheKey[:8] + "..."
-			fmt.Printf("✅ [主服务] 缓存命中返回: %s(关键词:%s) | 结果数: %d\n", displayKey, keyword, len(results))
+			fmt.Printf("✅ [%s] 命中缓存 结果数: %d\n", keyword, len(results))
			return results, nil
		} else {
			displayKey := cacheKey[:8] + "..."
884  util/cache/adaptive_tuning_engine.go  vendored  Normal file
@@ -0,0 +1,884 @@
package cache

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"time"
)

// AdaptiveTuningEngine is the adaptive tuning engine
type AdaptiveTuningEngine struct {
	// Core components
	metricCollector     *MetricCollector
	performanceAnalyzer *PerformanceAnalyzer
	predictiveModel     *PredictiveModel
	tuningStrategy      *TuningStrategy

	// Configuration
	config *AdaptiveTuningConfig

	// Run state
	isRunning    int32
	shutdownChan chan struct{}

	// Tuning history
	tuningHistory  []*TuningRecord
	historyMutex   sync.RWMutex
	maxHistorySize int

	// Learning data
	learningData *LearningDataset

	// Statistics
	stats *TuningEngineStats

	mutex sync.RWMutex
}

// AdaptiveTuningConfig holds the adaptive tuning configuration
type AdaptiveTuningConfig struct {
	// Tuning intervals
	TuningInterval time.Duration
	MetricInterval time.Duration

	// Performance thresholds
	CPUUsageThreshold float64
	MemoryThreshold   int64
	LatencyThreshold  time.Duration

	// Learning parameters
	LearningRate    float64
	AdaptationSpeed float64
	StabilityFactor float64

	// Tuning ranges
	MinBatchInterval time.Duration
	MaxBatchInterval time.Duration
	MinBatchSize     int
	MaxBatchSize     int

	// Safety parameters
	MaxAdjustmentRatio float64 // maximum adjustment magnitude
	RollbackThreshold  float64 // rollback threshold

	// Prediction parameters
	PredictionWindow    time.Duration
	ConfidenceThreshold float64
}

// MetricCollector collects metrics
type MetricCollector struct {
	// System metrics
	systemMetrics *SystemMetrics

	// Application metrics
	applicationMetrics *ApplicationMetrics

	// Cache metrics
	cacheMetrics *CacheMetrics

	// Historical data
	metricsHistory []MetricSnapshot
	historyMutex   sync.RWMutex
	maxHistorySize int

	// Collection state
	isCollecting   int32
	collectionChan chan struct{}
}

// SystemMetrics holds system-level metrics
type SystemMetrics struct {
	Timestamp       time.Time
	CPUUsage        float64
	MemoryUsage     int64
	MemoryTotal     int64
	DiskIORate      float64
	NetworkIORate   float64
	GoroutineCount  int
	GCPauseDuration time.Duration
	HeapSize        int64
	AllocRate       float64
}

// ApplicationMetrics holds application-level metrics
type ApplicationMetrics struct {
	Timestamp       time.Time
	RequestRate     float64
	ResponseTime    time.Duration
	ErrorRate       float64
	ThroughputMBps  float64
	ConcurrentUsers int
	QueueDepth      int
	ProcessingRate  float64
}

// CacheMetrics holds cache-level metrics
type CacheMetrics struct {
	Timestamp         time.Time
	HitRate           float64
	WriteRate         float64
	ReadRate          float64
	EvictionRate      float64
	CompressionRatio  float64
	StorageUsage      int64
	BufferUtilization float64
	BatchEfficiency   float64
}

// MetricSnapshot is a point-in-time snapshot of all metrics
type MetricSnapshot struct {
	Timestamp   time.Time
	System      SystemMetrics
	Application ApplicationMetrics
	Cache       CacheMetrics

	// Composite metrics
	OverallPerformance float64
	Efficiency         float64
	Stability          float64
}

// PerformanceAnalyzer analyzes performance
type PerformanceAnalyzer struct {
	// Analysis algorithms
	trendAnalyzer       *TrendAnalyzer
	anomalyDetector     *AnomalyDetector
	correlationAnalyzer *CorrelationAnalyzer

	// Analysis results
	currentTrends     map[string]Trend
	detectedAnomalies []Anomaly
	correlations      map[string]float64

	mutex sync.RWMutex
}

// Trend describes a metric trend
type Trend struct {
	Metric     string
	Direction  string // increasing, decreasing, stable
	Slope      float64
	Confidence float64
	Duration   time.Duration
	Prediction float64
}

// Anomaly describes a detected anomaly
type Anomaly struct {
	Metric        string
	Timestamp     time.Time
	Severity      string // low, medium, high
	Value         float64
	ExpectedRange [2]float64
	Description   string
	Impact        float64
}

// PredictiveModel is the prediction model
type PredictiveModel struct {
	// Model type
	modelType string // linear_regression, exponential_smoothing, arima

	// Model parameters
	coefficients    []float64
	seasonalFactors []float64
	trendComponent  float64

	// Training data
	trainingData []DataPoint
	testData     []DataPoint

	// Model performance
	accuracy float64
	rmse     float64
	mae      float64

	// Prediction results
	predictions map[string]Prediction

	mutex sync.RWMutex
}

// DataPoint is a single training data point
type DataPoint struct {
	Timestamp time.Time
	Values    map[string]float64
	Label     string
}

// Prediction is a single prediction
type Prediction struct {
	Metric      string
	FutureValue float64
	Confidence  float64
	TimeHorizon time.Duration
	PredictedAt time.Time
	ActualValue *float64 // used to validate prediction accuracy
}

// TuningStrategy encapsulates the tuning strategy
type TuningStrategy struct {
	// Strategy type
	strategyType string // conservative, aggressive, balanced

	// Tuning rules
	rules []*TuningRule

	// Parameter adjustments
	parameterAdjustments map[string]ParameterAdjustment

	// Execution history
	executionHistory []*StrategyExecution

	mutex sync.RWMutex
}

// TuningRule is a single tuning rule
type TuningRule struct {
	Name          string
	Condition     func(*MetricSnapshot) bool
	Action        func(*AdaptiveTuningEngine) (*TuningDecision, error)
	Priority      int
	Enabled       bool
	LastTriggered time.Time
	TriggerCount  int64
}

// ParameterAdjustment describes one parameter change
type ParameterAdjustment struct {
	ParameterName   string
	CurrentValue    interface{}
	ProposedValue   interface{}
	AdjustmentRatio float64
	Reason          string
	ExpectedImpact  string
	Risk            string
}

// TuningDecision is a tuning decision
type TuningDecision struct {
	Timestamp           time.Time
	Trigger             string
	Adjustments         []ParameterAdjustment
	Confidence          float64
	ExpectedImprovement float64
	Risk                float64
	AutoExecute         bool
}

// StrategyExecution records one strategy execution
type StrategyExecution struct {
	Timestamp time.Time
	Decision  *TuningDecision
	Executed  bool
	Result    *ExecutionResult
}

// ExecutionResult is the outcome of an execution
type ExecutionResult struct {
	Success           bool
	Error             error
	PerformanceBefore float64
	PerformanceAfter  float64
	Improvement       float64
	SideEffects       []string
}

// TuningRecord is one entry in the tuning history
type TuningRecord struct {
	Timestamp  time.Time
	Type       string // automatic, manual, rollback
	Parameters map[string]interface{}
	Reason     string
	Result     *TuningResult
}

// TuningResult quantifies the effect of a tuning action
type TuningResult struct {
	Success              bool
	PerformanceGain      float64
	ResourceUsageChange  float64
	StabilityImpact      float64
	UserExperienceChange float64
	Duration             time.Duration
}

// LearningDataset is the learning dataset
type LearningDataset struct {
	Features [][]float64
	Labels   []float64
	Weights  []float64

	// Data statistics
	FeatureStats []FeatureStatistics
	LabelStats   LabelStatistics

	// Data splits
	TrainingSplit   float64
	ValidationSplit float64
	TestSplit       float64

	mutex sync.RWMutex
}

// FeatureStatistics summarizes one feature
type FeatureStatistics struct {
	Name        string
	Mean        float64
	Std         float64
	Min         float64
	Max         float64
	Correlation float64
}

// LabelStatistics summarizes the labels
type LabelStatistics struct {
	Mean         float64
	Std          float64
	Min          float64
	Max          float64
	Distribution map[string]int
}

// TuningEngineStats holds engine-level statistics
type TuningEngineStats struct {
	// Basic counters
	TotalAdjustments      int64
	SuccessfulAdjustments int64
	FailedAdjustments     int64
	RollbackCount         int64

	// Performance statistics
	AverageImprovement float64
	MaxImprovement     float64
	TotalImprovement   float64

	// Learning statistics
	ModelAccuracy      float64
	PredictionAccuracy float64
	LearningIterations int64

	// Timing statistics
	AverageDecisionTime time.Duration
	TotalTuningTime     time.Duration
	LastTuningTime      time.Time

	// System overhead
	CPUOverhead    float64
	MemoryOverhead int64

	mutex sync.RWMutex
}

// NewAdaptiveTuningEngine creates an adaptive tuning engine
func NewAdaptiveTuningEngine() *AdaptiveTuningEngine {
	config := &AdaptiveTuningConfig{
		TuningInterval:      5 * time.Minute,
		MetricInterval:      30 * time.Second,
		CPUUsageThreshold:   0.8,
		MemoryThreshold:     500 * 1024 * 1024, // 500MB
		LatencyThreshold:    10 * time.Second,
		LearningRate:        0.01,
		AdaptationSpeed:     0.1,
		StabilityFactor:     0.9,
		MinBatchInterval:    10 * time.Second,
		MaxBatchInterval:    10 * time.Minute,
		MinBatchSize:        10,
		MaxBatchSize:        1000,
		MaxAdjustmentRatio:  0.3, // at most a 30% adjustment
		RollbackThreshold:   0.1, // a 10% performance drop triggers a rollback
		PredictionWindow:    1 * time.Hour,
		ConfidenceThreshold: 0.7,
	}

	engine := &AdaptiveTuningEngine{
		config:         config,
		shutdownChan:   make(chan struct{}),
		maxHistorySize: 1000,
		tuningHistory:  make([]*TuningRecord, 0),
		stats: &TuningEngineStats{
			LastTuningTime: time.Now(),
		},
	}

	// Initialize components
	engine.metricCollector = NewMetricCollector()
	engine.performanceAnalyzer = NewPerformanceAnalyzer()
	engine.predictiveModel = NewPredictiveModel()
	engine.tuningStrategy = NewTuningStrategy()
	engine.learningData = NewLearningDataset()

	return engine
}

// Start starts the adaptive tuning engine
func (a *AdaptiveTuningEngine) Start() error {
	if !atomic.CompareAndSwapInt32(&a.isRunning, 0, 1) {
		return fmt.Errorf("调优引擎已在运行中")
	}

	// Start metric collection
	if err := a.metricCollector.Start(a.config.MetricInterval); err != nil {
		return fmt.Errorf("启动指标收集失败: %v", err)
	}

	// Start the main tuning loop
	go a.tuningLoop()

	// Start the performance-analysis loop
	go a.analysisLoop()

	// Start the model-training loop
	go a.learningLoop()

	fmt.Printf("🧠 [自适应调优引擎] 启动完成,调优间隔: %v\n", a.config.TuningInterval)
	return nil
}

// tuningLoop is the main tuning loop
func (a *AdaptiveTuningEngine) tuningLoop() {
	ticker := time.NewTicker(a.config.TuningInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			a.performTuning()

		case <-a.shutdownChan:
			return
		}
	}
}

// analysisLoop runs the periodic analysis
func (a *AdaptiveTuningEngine) analysisLoop() {
	ticker := time.NewTicker(a.config.MetricInterval * 2) // analyze less often than we collect
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			a.performAnalysis()

		case <-a.shutdownChan:
			return
		}
	}
}

// learningLoop runs the periodic model training
func (a *AdaptiveTuningEngine) learningLoop() {
	ticker := time.NewTicker(15 * time.Minute) // learn every 15 minutes
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			a.performLearning()

		case <-a.shutdownChan:
			return
		}
	}
}

// performTuning runs one tuning pass
func (a *AdaptiveTuningEngine) performTuning() {
	startTime := time.Now()

	// Fetch the latest metrics
	currentMetrics := a.metricCollector.GetLatestMetrics()
	if currentMetrics == nil {
		return
	}

	// Analyze the performance state
	performanceIssues := a.performanceAnalyzer.AnalyzeIssues(currentMetrics)

	// Generate a tuning decision
	decision := a.tuningStrategy.GenerateDecision(currentMetrics, performanceIssues)
	if decision == nil {
		return
	}

	// Sanity-check the decision
	if !a.validateDecision(decision) {
		fmt.Printf("⚠️ [调优引擎] 决策验证失败,跳过执行\n")
		return
	}

	// Execute the tuning
	result := a.executeDecision(decision)

	// Record the tuning history
	record := &TuningRecord{
		Timestamp:  time.Now(),
		Type:       "automatic",
		Parameters: a.extractParameters(decision),
		Reason:     decision.Trigger,
		Result:     result,
	}

	a.addTuningRecord(record)

	// Update statistics
	a.updateTuningStats(result, time.Since(startTime))

	if result.Success {
		fmt.Printf("✅ [调优引擎] 自动调优完成,性能提升: %.2f%%\n", result.PerformanceGain*100)
	} else {
		fmt.Printf("❌ [调优引擎] 调优失败,考虑回滚\n")
		a.considerRollback(decision, result)
	}
}

// performAnalysis runs the performance analysis
func (a *AdaptiveTuningEngine) performAnalysis() {
	// Trend analysis
	a.performanceAnalyzer.AnalyzeTrends(a.metricCollector.GetMetricsHistory(100))

	// Anomaly detection
	a.performanceAnalyzer.DetectAnomalies(a.metricCollector.GetLatestMetrics())

	// Correlation analysis
	a.performanceAnalyzer.AnalyzeCorrelations(a.metricCollector.GetMetricsHistory(50))
}

// performLearning runs one machine-learning iteration
func (a *AdaptiveTuningEngine) performLearning() {
	// Collect training data
	a.collectTrainingData()

	// Train the predictive model
	if err := a.predictiveModel.Train(a.learningData); err != nil {
		fmt.Printf("⚠️ [调优引擎] 模型训练失败: %v\n", err)
		return
	}

	// Validate the model's performance
	accuracy := a.predictiveModel.Validate()

	// Update statistics
	a.mutex.Lock()
	a.stats.ModelAccuracy = accuracy
	a.stats.LearningIterations++
	a.mutex.Unlock()

	fmt.Printf("🎓 [调优引擎] 模型训练完成,准确率: %.2f%%\n", accuracy*100)
}

// validateDecision sanity-checks a tuning decision
func (a *AdaptiveTuningEngine) validateDecision(decision *TuningDecision) bool {
	// Check the confidence
	if decision.Confidence < a.config.ConfidenceThreshold {
		return false
	}

	// Check the risk level
	if decision.Risk > 0.7 { // too risky
		return false
	}

	// Check the adjustment magnitude
	for _, adj := range decision.Adjustments {
		if math.Abs(adj.AdjustmentRatio) > a.config.MaxAdjustmentRatio {
			return false
		}
	}

	return true
}

// executeDecision applies a tuning decision and measures the effect
func (a *AdaptiveTuningEngine) executeDecision(decision *TuningDecision) *TuningResult {
	startTime := time.Now()

	// Capture a performance baseline before executing
	beforeMetrics := a.metricCollector.GetLatestMetrics()
	performanceBefore := a.calculateOverallPerformance(beforeMetrics)

	// Apply the parameter adjustments
	success := true

	for _, adjustment := range decision.Adjustments {
		if err := a.applyParameterAdjustment(adjustment); err != nil {
			success = false
			break
		}
	}

	if !success {
		return &TuningResult{
			Success:             false,
			PerformanceGain:     0,
			ResourceUsageChange: 0,
			StabilityImpact:     0,
			Duration:            time.Since(startTime),
		}
	}

	// Wait briefly to observe the effect
	time.Sleep(30 * time.Second)

	// Measure performance after the change
	afterMetrics := a.metricCollector.GetLatestMetrics()
	performanceAfter := a.calculateOverallPerformance(afterMetrics)

	performanceGain := (performanceAfter - performanceBefore) / performanceBefore

	// Compute the change in resource usage
	resourceBefore := float64(beforeMetrics.System.MemoryUsage + int64(beforeMetrics.System.CPUUsage*1000))
	resourceAfter := float64(afterMetrics.System.MemoryUsage + int64(afterMetrics.System.CPUUsage*1000))
	resourceChange := (resourceAfter - resourceBefore) / resourceBefore

	return &TuningResult{
		Success:              true,
		PerformanceGain:      performanceGain,
		ResourceUsageChange:  resourceChange,
		StabilityImpact:      a.calculateStabilityImpact(beforeMetrics, afterMetrics),
		UserExperienceChange: performanceGain, // simplifying assumption
		Duration:             time.Since(startTime),
	}
}

// calculateOverallPerformance computes an overall performance score
func (a *AdaptiveTuningEngine) calculateOverallPerformance(metrics *MetricSnapshot) float64 {
	if metrics == nil {
		return 0
	}

	// Performance score (0-100)
	cpuScore := math.Max(0, (1.0-metrics.System.CPUUsage)*40)                                                                  // lower CPU usage is better; up to 40 points
	memoryScore := math.Max(0, (1.0-float64(metrics.System.MemoryUsage)/float64(metrics.System.MemoryTotal))*30)               // lower memory usage is better; up to 30 points
	responseScore := math.Max(0, (1.0-math.Min(1.0, float64(metrics.Application.ResponseTime)/float64(time.Second)))*20)       // shorter response time is better; up to 20 points
	cacheScore := metrics.Cache.HitRate * 10                                                                                   // higher hit rate is better; up to 10 points

	return cpuScore + memoryScore + responseScore + cacheScore
}

// calculateStabilityImpact estimates the stability impact
func (a *AdaptiveTuningEngine) calculateStabilityImpact(before, after *MetricSnapshot) float64 {
	if before == nil || after == nil {
		return 0
	}

	// Simplified stability measure: compare the change in key metrics
	cpuVariation := math.Abs(after.System.CPUUsage - before.System.CPUUsage)
	memoryVariation := math.Abs(float64(after.System.MemoryUsage-before.System.MemoryUsage) / float64(before.System.MemoryUsage))

	// Smaller changes mean better stability
	stabilityScore := 1.0 - (cpuVariation*0.5 + memoryVariation*0.5)
	return math.Max(0, stabilityScore)
}

// applyParameterAdjustment applies one parameter adjustment
func (a *AdaptiveTuningEngine) applyParameterAdjustment(adjustment ParameterAdjustment) error {
	// This should call the concrete parameter setters;
	// for now it is a simulated implementation
	fmt.Printf("🔧 [调优引擎] 调整参数 %s: %v -> %v (%.1f%%)\n",
		adjustment.ParameterName,
		adjustment.CurrentValue,
		adjustment.ProposedValue,
		adjustment.AdjustmentRatio*100)

	return nil
}

// collectTrainingData builds the training dataset
func (a *AdaptiveTuningEngine) collectTrainingData() {
	history := a.metricCollector.GetMetricsHistory(200)
	_ = a.getTuningHistory(50) // the tuning history is not used yet

	// Build features and labels
	for i, metrics := range history {
		if i < len(history)-1 {
			// Features: the current metrics
			features := []float64{
				metrics.System.CPUUsage,
				float64(metrics.System.MemoryUsage) / 1024 / 1024, // MB
				float64(metrics.Application.ResponseTime) / float64(time.Millisecond),
				metrics.Cache.HitRate,
				metrics.Cache.CompressionRatio,
			}

			// Label: the overall performance at the next instant
			nextMetrics := history[i+1]
			label := a.calculateOverallPerformance(&nextMetrics)

			// Append to the learning dataset
			a.learningData.mutex.Lock()
			a.learningData.Features = append(a.learningData.Features, features)
			a.learningData.Labels = append(a.learningData.Labels, label)
			a.learningData.Weights = append(a.learningData.Weights, 1.0)
			a.learningData.mutex.Unlock()
		}
	}

	// Cap the dataset size
	a.learningData.mutex.Lock()
	maxSize := 1000
	if len(a.learningData.Features) > maxSize {
		excess := len(a.learningData.Features) - maxSize
		a.learningData.Features = a.learningData.Features[excess:]
		a.learningData.Labels = a.learningData.Labels[excess:]
		a.learningData.Weights = a.learningData.Weights[excess:]
	}
	a.learningData.mutex.Unlock()
}

// considerRollback decides whether to roll back
func (a *AdaptiveTuningEngine) considerRollback(decision *TuningDecision, result *TuningResult) {
	if result.PerformanceGain < -a.config.RollbackThreshold {
		fmt.Printf("🔄 [调优引擎] 触发自动回滚,性能下降: %.2f%%\n", result.PerformanceGain*100)
		a.performRollback(decision)
	}
}

// performRollback reverses a tuning decision
func (a *AdaptiveTuningEngine) performRollback(originalDecision *TuningDecision) {
	// Build a rollback decision
	rollbackDecision := &TuningDecision{
		Timestamp:   time.Now(),
		Trigger:     "automatic_rollback",
		Adjustments: make([]ParameterAdjustment, 0),
		Confidence:  1.0,
		AutoExecute: true,
	}

	// Reverse every adjustment
	for _, adjustment := range originalDecision.Adjustments {
		rollbackAdjustment := ParameterAdjustment{
			ParameterName:   adjustment.ParameterName,
			CurrentValue:    adjustment.ProposedValue,
			ProposedValue:   adjustment.CurrentValue,
			AdjustmentRatio: -adjustment.AdjustmentRatio,
			Reason:          "rollback",
			ExpectedImpact:  "restore_stability",
			Risk:            "low",
		}
		rollbackDecision.Adjustments = append(rollbackDecision.Adjustments, rollbackAdjustment)
	}

	// Execute the rollback
	result := a.executeDecision(rollbackDecision)

	// Record the rollback
	record := &TuningRecord{
		Timestamp:  time.Now(),
		Type:       "rollback",
		Parameters: a.extractParameters(rollbackDecision),
		Reason:     "performance_degradation",
		Result:     result,
	}

	a.addTuningRecord(record)

	// Update statistics
	atomic.AddInt64(&a.stats.RollbackCount, 1)
}

// addTuningRecord appends to the tuning history
func (a *AdaptiveTuningEngine) addTuningRecord(record *TuningRecord) {
	a.historyMutex.Lock()
	defer a.historyMutex.Unlock()

	a.tuningHistory = append(a.tuningHistory, record)

	// Cap the history size
	if len(a.tuningHistory) > a.maxHistorySize {
		a.tuningHistory = a.tuningHistory[1:]
	}
}

// updateTuningStats updates the engine statistics
func (a *AdaptiveTuningEngine) updateTuningStats(result *TuningResult, decisionTime time.Duration) {
	a.stats.mutex.Lock()
	defer a.stats.mutex.Unlock()

	a.stats.TotalAdjustments++
	if result.Success {
		a.stats.SuccessfulAdjustments++
		a.stats.TotalImprovement += result.PerformanceGain
		a.stats.AverageImprovement = a.stats.TotalImprovement / float64(a.stats.SuccessfulAdjustments)

		if result.PerformanceGain > a.stats.MaxImprovement {
			a.stats.MaxImprovement = result.PerformanceGain
		}
	} else {
		a.stats.FailedAdjustments++
	}

	// Update timing statistics
	a.stats.TotalTuningTime += decisionTime
	a.stats.AverageDecisionTime = time.Duration(int64(a.stats.TotalTuningTime) / a.stats.TotalAdjustments)
	a.stats.LastTuningTime = time.Now()
}

// extractParameters flattens a decision's parameters
func (a *AdaptiveTuningEngine) extractParameters(decision *TuningDecision) map[string]interface{} {
	params := make(map[string]interface{})
	for _, adj := range decision.Adjustments {
		params[adj.ParameterName] = adj.ProposedValue
	}
	return params
}

// getTuningHistory returns up to limit recent tuning records
func (a *AdaptiveTuningEngine) getTuningHistory(limit int) []*TuningRecord {
	a.historyMutex.RLock()
	defer a.historyMutex.RUnlock()

	if limit <= 0 || limit > len(a.tuningHistory) {
		limit = len(a.tuningHistory)
	}

	history := make([]*TuningRecord, limit)
	startIndex := len(a.tuningHistory) - limit
	copy(history, a.tuningHistory[startIndex:])

	return history
}

// Stop stops the adaptive tuning engine
func (a *AdaptiveTuningEngine) Stop() error {
	if !atomic.CompareAndSwapInt32(&a.isRunning, 1, 0) {
		return nil
	}

	// Stop metric collection
	a.metricCollector.Stop()

	// Stop all loops
	close(a.shutdownChan)

	fmt.Printf("🧠 [自适应调优引擎] 已停止\n")
	return nil
}

// GetStats returns a copy of the engine statistics
func (a *AdaptiveTuningEngine) GetStats() *TuningEngineStats {
	a.stats.mutex.RLock()
	defer a.stats.mutex.RUnlock()

	statsCopy := *a.stats
	return &statsCopy
}

// GetTuningReport assembles a tuning report
func (a *AdaptiveTuningEngine) GetTuningReport() map[string]interface{} {
	stats := a.GetStats()
	recentHistory := a.getTuningHistory(10)

	return map[string]interface{}{
		"engine_stats":   stats,
		"recent_history": recentHistory,
		"current_trends": a.performanceAnalyzer.GetCurrentTrends(),
		"anomalies":      a.performanceAnalyzer.GetDetectedAnomalies(),
		"predictions":    a.predictiveModel.GetPredictions(),
		"model_accuracy": a.predictiveModel.GetAccuracy(),
	}
}
620  util/cache/advanced_data_merger.go  vendored  Normal file
@@ -0,0 +1,620 @@
package cache

import (
	"fmt"
	"sync"
	"time"

	"pansou/model"
)

// AdvancedDataMerger is the advanced data merger
type AdvancedDataMerger struct {
	// Merge strategies
	mergeStrategies map[string]MergeStrategy

	// Merge rules
	mergeRules []*MergeRule

	// Statistics
	totalMerges      int64
	successfulMerges int64
	failedMerges     int64

	// Cache deduplication
	deduplicationMap map[string]*CacheOperation
	dedupMutex       sync.RWMutex

	// Performance monitoring
	mergeMetrics *MergeMetrics

	mutex sync.RWMutex
}

// MergeStrategy is the merge-strategy interface
type MergeStrategy interface {
	CanMerge(existing *CacheOperation, new *CacheOperation) bool
	Merge(existing *CacheOperation, new *CacheOperation) (*CacheOperation, error)
	GetPriority() int
}

// MergeRule is a single merge rule
type MergeRule struct {
	Name        string
	Description string
	Condition   func(*CacheOperation, *CacheOperation) bool
	MergeFunc   func(*CacheOperation, *CacheOperation) (*CacheOperation, error)
	Priority    int
	Enabled     bool
}

// MergeMetrics tracks merge metrics
type MergeMetrics struct {
	// Timing statistics
	AverageMergeTime time.Duration
	MaxMergeTime     time.Duration
	TotalMergeTime   time.Duration

	// Data statistics
	DataSizeBefore   int64
	DataSizeAfter    int64
	CompressionRatio float64

	// Breakdown statistics
	MergesByType    map[string]int64
	MergesByPlugin  map[string]int64
	MergesByKeyword map[string]int64

	// Efficiency statistics
	DuplicatesRemoved   int64
	ResultsConsolidated int64
	StorageSaved        int64
}

// NewAdvancedDataMerger creates an advanced data merger
func NewAdvancedDataMerger() *AdvancedDataMerger {
	merger := &AdvancedDataMerger{
		mergeStrategies:  make(map[string]MergeStrategy),
		deduplicationMap: make(map[string]*CacheOperation),
		mergeMetrics: &MergeMetrics{
			MergesByType:    make(map[string]int64),
			MergesByPlugin:  make(map[string]int64),
			MergesByKeyword: make(map[string]int64),
		},
	}

	// Initialize the merge strategies
	merger.initializeMergeStrategies()

	// Initialize the merge rules
	merger.initializeMergeRules()

	return merger
}

// initializeMergeStrategies registers the merge strategies
func (m *AdvancedDataMerger) initializeMergeStrategies() {
	// Register the same-key merge strategy
	m.mergeStrategies["same_key"] = &SameKeyMergeStrategy{}

	// Register the same-plugin/same-keyword strategy
	m.mergeStrategies["same_plugin_keyword"] = &SamePluginKeywordMergeStrategy{}

	// Register the result-deduplication strategy
	m.mergeStrategies["deduplication"] = &DeduplicationMergeStrategy{}

	// Register the content-similarity strategy
	m.mergeStrategies["content_similarity"] = &ContentSimilarityMergeStrategy{}
}

// initializeMergeRules defines the merge rules
func (m *AdvancedDataMerger) initializeMergeRules() {
	m.mergeRules = []*MergeRule{
		{
			Name:        "完全相同键合并",
			Description: "合并具有完全相同缓存键的操作",
			Condition: func(existing, new *CacheOperation) bool {
				return existing.Key == new.Key
			},
			MergeFunc: m.mergeSameKey,
			Priority:  1,
			Enabled:   true,
		},
		{
			Name:        "同插件同关键词合并",
			Description: "合并同一插件对同一关键词的搜索结果",
			Condition: func(existing, new *CacheOperation) bool {
				return existing.PluginName == new.PluginName &&
					existing.Keyword == new.Keyword &&
					existing.Key != new.Key
			},
			MergeFunc: m.mergeSamePluginKeyword,
			Priority:  2,
			Enabled:   true,
		},
		{
			Name:        "时间窗口内合并",
			Description: "合并时间窗口内的相似操作",
			Condition: func(existing, new *CacheOperation) bool {
				timeDiff := new.Timestamp.Sub(existing.Timestamp)
				return timeDiff >= 0 && timeDiff <= 5*time.Minute &&
					existing.PluginName == new.PluginName
			},
			MergeFunc: m.mergeTimeWindow,
			Priority:  3,
			Enabled:   true,
		},
		{
			Name:        "结果去重合并",
			Description: "去除重复的搜索结果",
			Condition: func(existing, new *CacheOperation) bool {
				return m.hasOverlapResults(existing, new)
			},
			MergeFunc: m.mergeDeduplication,
			Priority:  4,
			Enabled:   true,
		},
	}
}

// TryMergeOperation tries to merge a new operation into the buffer
func (m *AdvancedDataMerger) TryMergeOperation(buffer *GlobalBuffer, newOp *CacheOperation) bool {
	startTime := time.Now()
	defer func() {
		mergeTime := time.Since(startTime)
		m.updateMergeMetrics(mergeTime)
	}()

	m.totalMerges++

	// 🔍 Look for a mergeable operation in the buffer
	merged := false

	for i, existingOp := range buffer.Operations {
		if m.canMergeOperations(existingOp, newOp) {
			// 🚀 Perform the merge
			mergedOp, err := m.performMerge(existingOp, newOp)
			if err != nil {
				m.failedMerges++
				continue
			}

			// Replace the original operation
			buffer.Operations[i] = mergedOp

			// Update statistics
			m.successfulMerges++
			m.updateMergeStatistics(existingOp, newOp, mergedOp)

			merged = true
			break
		}
	}

	return merged
}

// canMergeOperations reports whether two operations can be merged
func (m *AdvancedDataMerger) canMergeOperations(existing, new *CacheOperation) bool {
	// Check the merge rules in priority order
	for _, rule := range m.mergeRules {
		if rule.Enabled && rule.Condition(existing, new) {
			return true
		}
	}

	return false
}

// performMerge executes the highest-priority applicable rule
func (m *AdvancedDataMerger) performMerge(existing, new *CacheOperation) (*CacheOperation, error) {
	// Find the highest-priority applicable rule
	var bestRule *MergeRule
	for _, rule := range m.mergeRules {
		if rule.Enabled && rule.Condition(existing, new) {
			if bestRule == nil || rule.Priority < bestRule.Priority {
				bestRule = rule
			}
		}
	}

	if bestRule == nil {
		return nil, fmt.Errorf("未找到适用的合并规则")
	}

	// Perform the merge
	return bestRule.MergeFunc(existing, new)
}

// mergeSameKey merges two operations with the same key
func (m *AdvancedDataMerger) mergeSameKey(existing, new *CacheOperation) (*CacheOperation, error) {
	// Merge the search results
	mergedResults := m.mergeSearchResults(existing.Data, new.Data)

	merged := &CacheOperation{
		Key:        existing.Key,
		Data:       mergedResults,
		TTL:        m.chooseLongerTTL(existing.TTL, new.TTL),
		PluginName: existing.PluginName, // keep the original plugin name
		Keyword:    existing.Keyword,    // keep the original keyword
		Timestamp:  new.Timestamp,       // use the newest timestamp
		Priority:   m.chooseBetterPriority(existing.Priority, new.Priority),
		DataSize:   existing.DataSize + new.DataSize, // accumulated data size
		IsFinal:    existing.IsFinal || new.IsFinal,  // final if either side is final
	}

	return merged, nil
}

// mergeSamePluginKeyword merges results from the same plugin and keyword
func (m *AdvancedDataMerger) mergeSamePluginKeyword(existing, new *CacheOperation) (*CacheOperation, error) {
	// Generate a new merged key
	mergedKey := fmt.Sprintf("merged_%s_%s_%d",
		existing.PluginName, existing.Keyword, time.Now().Unix())

	// Merge the search results
	mergedResults := m.mergeSearchResults(existing.Data, new.Data)

	merged := &CacheOperation{
		Key:        mergedKey,
		Data:       mergedResults,
		TTL:        m.chooseLongerTTL(existing.TTL, new.TTL),
		PluginName: existing.PluginName,
		Keyword:    existing.Keyword,
		Timestamp:  new.Timestamp,
		Priority:   m.chooseBetterPriority(existing.Priority, new.Priority),
		DataSize:   len(mergedResults) * 500, // re-estimate the data size
		IsFinal:    existing.IsFinal || new.IsFinal,
	}

	return merged, nil
}

// mergeTimeWindow merges operations inside a time window
func (m *AdvancedDataMerger) mergeTimeWindow(existing, new *CacheOperation) (*CacheOperation, error) {
	// Time-window strategy: keep the newest metadata, merge the data
	mergedResults := m.mergeSearchResults(existing.Data, new.Data)

	merged := &CacheOperation{
		Key:        new.Key, // use the new key
		Data:       mergedResults,
		TTL:        new.TTL, // use the new TTL
		PluginName: new.PluginName,
		Keyword:    new.Keyword,
		Timestamp:  new.Timestamp,
		Priority:   new.Priority,
		DataSize:   len(mergedResults) * 500,
		IsFinal:    new.IsFinal,
	}

	return merged, nil
}

// mergeDeduplication merges with deep deduplication
func (m *AdvancedDataMerger) mergeDeduplication(existing, new *CacheOperation) (*CacheOperation, error) {
	// Perform deep deduplication
	deduplicatedResults := m.deduplicateSearchResults(existing.Data, new.Data)

	merged := &CacheOperation{
		Key:        existing.Key,
		Data:       deduplicatedResults,
		TTL:        m.chooseLongerTTL(existing.TTL, new.TTL),
		PluginName: existing.PluginName,
		Keyword:    existing.Keyword,
		Timestamp:  new.Timestamp,
|
||||
Priority: m.chooseBetterPriority(existing.Priority, new.Priority),
|
||||
DataSize: len(deduplicatedResults) * 500,
|
||||
IsFinal: existing.IsFinal || new.IsFinal,
|
||||
}
|
||||
|
||||
return merged, nil
|
||||
}
|
||||
|
||||
// mergeSearchResults 合并搜索结果
|
||||
func (m *AdvancedDataMerger) mergeSearchResults(existing, new []model.SearchResult) []model.SearchResult {
|
||||
// 使用map去重
|
||||
resultMap := make(map[string]model.SearchResult)
|
||||
|
||||
// 添加现有结果
|
||||
for _, result := range existing {
|
||||
key := m.generateResultKey(result)
|
||||
resultMap[key] = result
|
||||
}
|
||||
|
||||
// 添加新结果,自动去重
|
||||
for _, result := range new {
|
||||
key := m.generateResultKey(result)
|
||||
if existingResult, exists := resultMap[key]; exists {
|
||||
// 合并相同结果的信息
|
||||
mergedResult := m.mergeIndividualResults(existingResult, result)
|
||||
resultMap[key] = mergedResult
|
||||
} else {
|
||||
resultMap[key] = result
|
||||
}
|
||||
}
|
||||
|
||||
// 转换回切片
|
||||
merged := make([]model.SearchResult, 0, len(resultMap))
|
||||
for _, result := range resultMap {
|
||||
merged = append(merged, result)
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
|
||||
// deduplicateSearchResults 深度去重搜索结果
|
||||
func (m *AdvancedDataMerger) deduplicateSearchResults(existing, new []model.SearchResult) []model.SearchResult {
|
||||
// 更严格的去重逻辑
|
||||
resultMap := make(map[string]model.SearchResult)
|
||||
duplicateCount := 0
|
||||
|
||||
// 处理现有结果
|
||||
for _, result := range existing {
|
||||
key := m.generateResultKey(result)
|
||||
resultMap[key] = result
|
||||
}
|
||||
|
||||
// 处理新结果
|
||||
for _, result := range new {
|
||||
key := m.generateResultKey(result)
|
||||
if _, exists := resultMap[key]; !exists {
|
||||
resultMap[key] = result
|
||||
} else {
|
||||
duplicateCount++
|
||||
}
|
||||
}
|
||||
|
||||
// 更新去重统计
|
||||
m.mergeMetrics.DuplicatesRemoved += int64(duplicateCount)
|
||||
|
||||
// 转换回切片
|
||||
deduplicated := make([]model.SearchResult, 0, len(resultMap))
|
||||
for _, result := range resultMap {
|
||||
deduplicated = append(deduplicated, result)
|
||||
}
|
||||
|
||||
return deduplicated
|
||||
}
|
||||
|
||||
// generateResultKey 生成结果键用于去重
|
||||
func (m *AdvancedDataMerger) generateResultKey(result model.SearchResult) string {
|
||||
// 使用标题和主要链接生成唯一键
|
||||
key := result.Title
|
||||
if len(result.Links) > 0 {
|
||||
key += "_" + result.Links[0].URL
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// mergeIndividualResults 合并单个结果
|
||||
func (m *AdvancedDataMerger) mergeIndividualResults(existing, new model.SearchResult) model.SearchResult {
|
||||
merged := existing
|
||||
|
||||
// 选择更完整的内容
|
||||
if len(new.Content) > len(existing.Content) {
|
||||
merged.Content = new.Content
|
||||
}
|
||||
|
||||
// 合并链接
|
||||
linkMap := make(map[string]model.Link)
|
||||
for _, link := range existing.Links {
|
||||
linkMap[link.URL] = link
|
||||
}
|
||||
for _, link := range new.Links {
|
||||
linkMap[link.URL] = link
|
||||
}
|
||||
|
||||
links := make([]model.Link, 0, len(linkMap))
|
||||
for _, link := range linkMap {
|
||||
links = append(links, link)
|
||||
}
|
||||
merged.Links = links
|
||||
|
||||
// 合并标签
|
||||
tagMap := make(map[string]bool)
|
||||
for _, tag := range existing.Tags {
|
||||
tagMap[tag] = true
|
||||
}
|
||||
for _, tag := range new.Tags {
|
||||
tagMap[tag] = true
|
||||
}
|
||||
|
||||
tags := make([]string, 0, len(tagMap))
|
||||
for tag := range tagMap {
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
merged.Tags = tags
|
||||
|
||||
// 使用更新的时间
|
||||
if new.Datetime.After(existing.Datetime) {
|
||||
merged.Datetime = new.Datetime
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
|
||||
// hasOverlapResults 检查是否有重叠结果
|
||||
func (m *AdvancedDataMerger) hasOverlapResults(existing, new *CacheOperation) bool {
|
||||
if len(existing.Data) == 0 || len(new.Data) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// 简单重叠检测:检查前几个结果的标题
|
||||
checkCount := 3
|
||||
if len(existing.Data) < checkCount {
|
||||
checkCount = len(existing.Data)
|
||||
}
|
||||
if len(new.Data) < checkCount {
|
||||
checkCount = len(new.Data)
|
||||
}
|
||||
|
||||
for i := 0; i < checkCount; i++ {
|
||||
for j := 0; j < checkCount; j++ {
|
||||
if existing.Data[i].Title == new.Data[j].Title {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// chooseLongerTTL 选择更长的TTL
|
||||
func (m *AdvancedDataMerger) chooseLongerTTL(ttl1, ttl2 time.Duration) time.Duration {
|
||||
if ttl1 > ttl2 {
|
||||
return ttl1
|
||||
}
|
||||
return ttl2
|
||||
}
|
||||
|
||||
// chooseBetterPriority 选择更好的优先级
|
||||
func (m *AdvancedDataMerger) chooseBetterPriority(priority1, priority2 int) int {
|
||||
if priority1 < priority2 { // 数字越小优先级越高
|
||||
return priority1
|
||||
}
|
||||
return priority2
|
||||
}
|
||||
|
||||
// updateMergeMetrics 更新合并指标
|
||||
func (m *AdvancedDataMerger) updateMergeMetrics(mergeTime time.Duration) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
m.mergeMetrics.TotalMergeTime += mergeTime
|
||||
|
||||
// 更新平均时间
|
||||
if m.successfulMerges > 0 {
|
||||
m.mergeMetrics.AverageMergeTime = time.Duration(
|
||||
int64(m.mergeMetrics.TotalMergeTime) / m.successfulMerges)
|
||||
}
|
||||
|
||||
// 更新最大时间
|
||||
if mergeTime > m.mergeMetrics.MaxMergeTime {
|
||||
m.mergeMetrics.MaxMergeTime = mergeTime
|
||||
}
|
||||
}
|
||||
|
||||
// updateMergeStatistics 更新合并统计
|
||||
func (m *AdvancedDataMerger) updateMergeStatistics(existing, new, merged *CacheOperation) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
// 数据大小统计
|
||||
beforeSize := int64(existing.DataSize + new.DataSize)
|
||||
afterSize := int64(merged.DataSize)
|
||||
|
||||
m.mergeMetrics.DataSizeBefore += beforeSize
|
||||
m.mergeMetrics.DataSizeAfter += afterSize
|
||||
|
||||
// 计算压缩比例
|
||||
if m.mergeMetrics.DataSizeBefore > 0 {
|
||||
m.mergeMetrics.CompressionRatio = float64(m.mergeMetrics.DataSizeAfter) /
|
||||
float64(m.mergeMetrics.DataSizeBefore)
|
||||
}
|
||||
|
||||
// 按类型统计
|
||||
m.mergeMetrics.MergesByPlugin[merged.PluginName]++
|
||||
m.mergeMetrics.MergesByKeyword[merged.Keyword]++
|
||||
|
||||
// 结果整合统计
|
||||
originalCount := int64(len(existing.Data) + len(new.Data))
|
||||
mergedCount := int64(len(merged.Data))
|
||||
consolidated := originalCount - mergedCount
|
||||
|
||||
if consolidated > 0 {
|
||||
m.mergeMetrics.ResultsConsolidated += consolidated
|
||||
m.mergeMetrics.StorageSaved += beforeSize - afterSize
|
||||
}
|
||||
}
|
||||
|
||||
// GetMergeStats 获取合并统计
|
||||
func (m *AdvancedDataMerger) GetMergeStats() map[string]interface{} {
|
||||
m.mutex.RLock()
|
||||
defer m.mutex.RUnlock()
|
||||
|
||||
successRate := float64(0)
|
||||
if m.totalMerges > 0 {
|
||||
successRate = float64(m.successfulMerges) / float64(m.totalMerges)
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"total_merges": m.totalMerges,
|
||||
"successful_merges": m.successfulMerges,
|
||||
"failed_merges": m.failedMerges,
|
||||
"success_rate": successRate,
|
||||
"merge_metrics": m.mergeMetrics,
|
||||
"average_merge_time": m.mergeMetrics.AverageMergeTime,
|
||||
"max_merge_time": m.mergeMetrics.MaxMergeTime,
|
||||
"compression_ratio": m.mergeMetrics.CompressionRatio,
|
||||
"duplicates_removed": m.mergeMetrics.DuplicatesRemoved,
|
||||
"results_consolidated": m.mergeMetrics.ResultsConsolidated,
|
||||
"storage_saved": m.mergeMetrics.StorageSaved,
|
||||
}
|
||||
}
|
||||
|
||||
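
// logMergeStats is an illustrative helper, not part of the original commit:
// it shows how the map returned by GetMergeStats above can be consumed.
// The map keys and value types mirror the literal built in GetMergeStats.
func logMergeStats(m *AdvancedDataMerger) {
	stats := m.GetMergeStats()
	fmt.Printf("merges=%v success_rate=%.2f duplicates_removed=%v storage_saved=%v bytes\n",
		stats["total_merges"], stats["success_rate"].(float64),
		stats["duplicates_removed"], stats["storage_saved"])
}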
// Implementations of the individual merge strategies

// SameKeyMergeStrategy merges operations with identical cache keys
type SameKeyMergeStrategy struct{}

func (s *SameKeyMergeStrategy) CanMerge(existing, new *CacheOperation) bool {
	return existing.Key == new.Key
}

func (s *SameKeyMergeStrategy) Merge(existing, new *CacheOperation) (*CacheOperation, error) {
	// Delegated to the merger's own method
	return nil, fmt.Errorf("use the merger's merge method instead")
}

func (s *SameKeyMergeStrategy) GetPriority() int {
	return 1
}

// SamePluginKeywordMergeStrategy merges operations from the same plugin for the same keyword
type SamePluginKeywordMergeStrategy struct{}

func (s *SamePluginKeywordMergeStrategy) CanMerge(existing, new *CacheOperation) bool {
	return existing.PluginName == new.PluginName && existing.Keyword == new.Keyword
}

func (s *SamePluginKeywordMergeStrategy) Merge(existing, new *CacheOperation) (*CacheOperation, error) {
	return nil, fmt.Errorf("use the merger's merge method instead")
}

func (s *SamePluginKeywordMergeStrategy) GetPriority() int {
	return 2
}

// DeduplicationMergeStrategy removes duplicated results across operations
type DeduplicationMergeStrategy struct{}

func (s *DeduplicationMergeStrategy) CanMerge(existing, new *CacheOperation) bool {
	// Check whether there are results to deduplicate
	return len(existing.Data) > 0 && len(new.Data) > 0
}

func (s *DeduplicationMergeStrategy) Merge(existing, new *CacheOperation) (*CacheOperation, error) {
	return nil, fmt.Errorf("use the merger's merge method instead")
}

func (s *DeduplicationMergeStrategy) GetPriority() int {
	return 4
}

// ContentSimilarityMergeStrategy merges operations with similar keywords
type ContentSimilarityMergeStrategy struct{}

func (s *ContentSimilarityMergeStrategy) CanMerge(existing, new *CacheOperation) bool {
	// Simple similarity check: identical keywords, or a shared 3-byte prefix
	// (note: byte-based slicing is only an approximation for multi-byte keywords)
	return existing.Keyword == new.Keyword ||
		(len(existing.Keyword) > 3 && len(new.Keyword) > 3 &&
			existing.Keyword[:3] == new.Keyword[:3])
}

func (s *ContentSimilarityMergeStrategy) Merge(existing, new *CacheOperation) (*CacheOperation, error) {
	return nil, fmt.Errorf("use the merger's merge method instead")
}

func (s *ContentSimilarityMergeStrategy) GetPriority() int {
	return 5
}
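
A minimal usage sketch for the merger (illustrative; it assumes the constructor shown above and a GlobalBuffer obtained from the GlobalBufferManager elsewhere in this commit):

    if !merger.TryMergeOperation(buffer, newOp) {
        // No rule matched: the caller keeps the operation as a separate entry.
        buffer.Operations = append(buffer.Operations, newOp)
    }

Merging is best-effort: when no rule applies, the operation is simply buffered unchanged.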
905
util/cache/buffer_status_monitor.go
vendored
Normal file
@@ -0,0 +1,905 @@
package cache

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"pansou/util/json"
)

// BufferStatusMonitor monitors the state of the write buffers
type BufferStatusMonitor struct {
	// Monitoring configuration
	monitorInterval time.Duration
	alertThresholds *AlertThresholds

	// Monitoring state
	isMonitoring int32
	shutdownChan chan struct{}

	// Health checking
	healthChecker *HealthChecker

	// Alerting
	alertManager *AlertManager

	// Performance metrics
	performanceMetrics *PerformanceMetrics

	// Monitoring data
	monitoringData *MonitoringData
	dataMutex      sync.RWMutex

	// History
	historyBuffer  []MonitorSnapshot
	historyMutex   sync.Mutex
	maxHistorySize int
}

// AlertThresholds defines the alerting thresholds
type AlertThresholds struct {
	// Memory thresholds
	MemoryUsageWarning  int64 // memory usage warning threshold (bytes)
	MemoryUsageCritical int64 // memory usage critical threshold (bytes)

	// Buffer thresholds
	BufferCountWarning  int // buffer count warning threshold
	BufferCountCritical int // buffer count critical threshold

	// Operation thresholds
	OperationQueueWarning  int // operation queue warning threshold
	OperationQueueCritical int // operation queue critical threshold

	// Time thresholds
	ProcessTimeWarning  time.Duration // processing time warning threshold
	ProcessTimeCritical time.Duration // processing time critical threshold

	// Success-rate thresholds
	SuccessRateWarning  float64 // success rate warning threshold
	SuccessRateCritical float64 // success rate critical threshold
}

// HealthChecker performs periodic health checks
type HealthChecker struct {
	lastHealthCheck     time.Time
	healthCheckInterval time.Duration
	healthStatus        HealthStatus
	healthHistory       []HealthCheckResult
	mutex               sync.RWMutex
}

// HealthStatus describes the overall system health
type HealthStatus struct {
	Overall    string                     `json:"overall"` // healthy, warning, critical
	LastCheck  time.Time                  `json:"last_check"`
	Components map[string]ComponentHealth `json:"components"`
	Issues     []HealthIssue              `json:"issues,omitempty"`
}

// ComponentHealth describes the health of a single component
type ComponentHealth struct {
	Status    string                 `json:"status"`
	LastCheck time.Time              `json:"last_check"`
	Metrics   map[string]interface{} `json:"metrics"`
	Message   string                 `json:"message,omitempty"`
}

// HealthIssue describes a detected health problem
type HealthIssue struct {
	Component string    `json:"component"`
	Severity  string    `json:"severity"` // warning, critical
	Message   string    `json:"message"`
	FirstSeen time.Time `json:"first_seen"`
	LastSeen  time.Time `json:"last_seen"`
	Count     int       `json:"count"`
}

// HealthCheckResult records the outcome of one health check
type HealthCheckResult struct {
	Timestamp time.Time     `json:"timestamp"`
	Status    string        `json:"status"`
	CheckTime time.Duration `json:"check_time"`
	Issues    []HealthIssue `json:"issues"`
}

// AlertManager manages active alerts and the alert history
type AlertManager struct {
	alerts          []Alert
	alertHistory    []Alert
	mutex           sync.RWMutex
	maxAlertHistory int

	// Alerting configuration
	alertCooldown  map[string]time.Time // per-alert cooldown timestamps
	cooldownPeriod time.Duration        // cooldown duration
}

// Alert represents a single alert
type Alert struct {
	ID         string                 `json:"id"`
	Level      string                 `json:"level"` // info, warning, critical
	Component  string                 `json:"component"`
	Message    string                 `json:"message"`
	Timestamp  time.Time              `json:"timestamp"`
	Metadata   map[string]interface{} `json:"metadata,omitempty"`
	Resolved   bool                   `json:"resolved"`
	ResolvedAt *time.Time             `json:"resolved_at,omitempty"`
}

// PerformanceMetrics holds runtime performance metrics
type PerformanceMetrics struct {
	// CPU metrics
	CPUUsage   float64   `json:"cpu_usage"`
	CPUHistory []float64 `json:"cpu_history"`

	// Memory metrics
	MemoryUsage   int64            `json:"memory_usage"`
	MemoryHistory []int64          `json:"memory_history"`
	GCStats       runtime.MemStats `json:"gc_stats"`

	// Throughput metrics
	OperationsPerSecond float64   `json:"operations_per_second"`
	ThroughputHistory   []float64 `json:"throughput_history"`

	// Latency metrics
	AverageLatency time.Duration   `json:"average_latency"`
	P95Latency     time.Duration   `json:"p95_latency"`
	P99Latency     time.Duration   `json:"p99_latency"`
	LatencyHistory []time.Duration `json:"latency_history"`

	// Error-rate metrics
	ErrorRate    float64   `json:"error_rate"`
	ErrorHistory []float64 `json:"error_history"`

	// Resource utilization
	DiskIORate    float64 `json:"disk_io_rate"`
	NetworkIORate float64 `json:"network_io_rate"`

	// Last update time
	LastUpdated time.Time `json:"last_updated"`
}

// MonitoringData aggregates all monitoring state
type MonitoringData struct {
	// System status
	SystemHealth       HealthStatus       `json:"system_health"`
	PerformanceMetrics PerformanceMetrics `json:"performance_metrics"`

	// Buffer status
	BufferStates      map[string]BufferState `json:"buffer_states"`
	GlobalBufferStats *GlobalBufferStats     `json:"global_buffer_stats"`

	// Real-time statistics
	RealTimeStats RealTimeStats `json:"real_time_stats"`

	// Trend analysis
	TrendAnalysis TrendAnalysis `json:"trend_analysis"`

	// Predictions
	Predictions PredictionData `json:"predictions"`
}

// BufferState describes the state of a single buffer
type BufferState struct {
	ID               string    `json:"id"`
	Size             int       `json:"size"`
	Capacity         int       `json:"capacity"`
	UtilizationRate  float64   `json:"utilization_rate"`
	LastActivity     time.Time `json:"last_activity"`
	OperationsPerMin float64   `json:"operations_per_min"`
	AverageDataSize  int64     `json:"average_data_size"`
	CompressionRatio float64   `json:"compression_ratio"`
	Health           string    `json:"health"`
}

// RealTimeStats holds real-time statistics
type RealTimeStats struct {
	ActiveOperations int       `json:"active_operations"`
	QueuedOperations int       `json:"queued_operations"`
	ProcessingRate   float64   `json:"processing_rate"`
	ThroughputMBps   float64   `json:"throughput_mbps"`
	CacheHitRate     float64   `json:"cache_hit_rate"`
	CompressionRatio float64   `json:"compression_ratio"`
	ErrorRate        float64   `json:"error_rate"`
	LastUpdated      time.Time `json:"last_updated"`
}

// TrendAnalysis holds trend analysis results
type TrendAnalysis struct {
	MemoryTrend      string    `json:"memory_trend"` // increasing, decreasing, stable
	ThroughputTrend  string    `json:"throughput_trend"`
	ErrorRateTrend   string    `json:"error_rate_trend"`
	BufferUsageTrend string    `json:"buffer_usage_trend"`
	AnalysisTime     time.Time `json:"analysis_time"`
	Confidence       float64   `json:"confidence"`
}

// PredictionData holds forward-looking estimates
type PredictionData struct {
	MemoryUsageIn1Hour   int64     `json:"memory_usage_in_1hour"`
	MemoryUsageIn24Hours int64     `json:"memory_usage_in_24hours"`
	BufferOverflowRisk   float64   `json:"buffer_overflow_risk"`
	SystemLoadPrediction float64   `json:"system_load_prediction"`
	RecommendedActions   []string  `json:"recommended_actions"`
	ConfidenceLevel      float64   `json:"confidence_level"`
	PredictionTime       time.Time `json:"prediction_time"`
}

// MonitorSnapshot is a point-in-time monitoring snapshot
type MonitorSnapshot struct {
	Timestamp           time.Time    `json:"timestamp"`
	SystemHealth        HealthStatus `json:"system_health"`
	BufferCount         int          `json:"buffer_count"`
	TotalMemoryUsage    int64        `json:"total_memory_usage"`
	OperationsPerSecond float64      `json:"operations_per_second"`
	ErrorRate           float64      `json:"error_rate"`
	CacheHitRate        float64      `json:"cache_hit_rate"`
}

// NewBufferStatusMonitor creates a buffer status monitor
func NewBufferStatusMonitor() *BufferStatusMonitor {
	monitor := &BufferStatusMonitor{
		monitorInterval: 30 * time.Second, // 30-second monitoring interval
		shutdownChan:    make(chan struct{}),
		maxHistorySize:  288, // at one snapshot per 30s this keeps ~2.4h (a full 24h would need 2880; simplified to 288)
		alertThresholds: &AlertThresholds{
			MemoryUsageWarning:     50 * 1024 * 1024,  // 50MB
			MemoryUsageCritical:    100 * 1024 * 1024, // 100MB
			BufferCountWarning:     30,
			BufferCountCritical:    50,
			OperationQueueWarning:  500,
			OperationQueueCritical: 1000,
			ProcessTimeWarning:     5 * time.Second,
			ProcessTimeCritical:    15 * time.Second,
			SuccessRateWarning:     0.95, // 95%
			SuccessRateCritical:    0.90, // 90%
		},
		monitoringData: &MonitoringData{
			BufferStates:  make(map[string]BufferState),
			RealTimeStats: RealTimeStats{},
			TrendAnalysis: TrendAnalysis{},
			Predictions:   PredictionData{},
		},
	}

	// Initialize the components
	monitor.healthChecker = &HealthChecker{
		healthCheckInterval: 1 * time.Minute,
		healthStatus: HealthStatus{
			Overall:    "healthy",
			Components: make(map[string]ComponentHealth),
			Issues:     make([]HealthIssue, 0),
		},
		healthHistory: make([]HealthCheckResult, 0),
	}

	monitor.alertManager = &AlertManager{
		alerts:          make([]Alert, 0),
		alertHistory:    make([]Alert, 0),
		maxAlertHistory: 1000,
		alertCooldown:   make(map[string]time.Time),
		cooldownPeriod:  5 * time.Minute, // 5-minute cooldown
	}

	monitor.performanceMetrics = &PerformanceMetrics{
		CPUHistory:        make([]float64, 0),
		MemoryHistory:     make([]int64, 0),
		ThroughputHistory: make([]float64, 0),
		LatencyHistory:    make([]time.Duration, 0),
		ErrorHistory:      make([]float64, 0),
	}

	return monitor
}
// Start begins monitoring
func (b *BufferStatusMonitor) Start(globalManager *GlobalBufferManager) {
	if !atomic.CompareAndSwapInt32(&b.isMonitoring, 0, 1) {
		return // already monitoring
	}

	fmt.Printf("🔍 [BufferStatusMonitor] monitoring started, interval: %v\n", b.monitorInterval)

	go b.monitoringLoop(globalManager)
	go b.healthCheckLoop()
	go b.alertProcessingLoop()
}
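
// Lifecycle sketch (illustrative, not part of the original commit): Start is
// idempotent thanks to the CompareAndSwap above, and Stop (defined further
// below) flips the flag back and closes shutdownChan so all three loops exit:
//
//	monitor := NewBufferStatusMonitor()
//	monitor.Start(globalManager) // globalManager is the *GlobalBufferManager in this package
//	defer monitor.Stop()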
// monitoringLoop is the main monitoring loop
func (b *BufferStatusMonitor) monitoringLoop(globalManager *GlobalBufferManager) {
	ticker := time.NewTicker(b.monitorInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			b.collectMetrics(globalManager)
			b.analyzeData()
			b.checkAlerts()
			b.updatePredictions()
			b.saveSnapshot()

		case <-b.shutdownChan:
			return
		}
	}
}

// healthCheckLoop runs periodic health checks
func (b *BufferStatusMonitor) healthCheckLoop() {
	ticker := time.NewTicker(b.healthChecker.healthCheckInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			b.performHealthCheck()

		case <-b.shutdownChan:
			return
		}
	}
}

// alertProcessingLoop processes alerts periodically
func (b *BufferStatusMonitor) alertProcessingLoop() {
	ticker := time.NewTicker(1 * time.Minute) // check the alerts once per minute
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			b.processAlerts()

		case <-b.shutdownChan:
			return
		}
	}
}

// collectMetrics collects metrics from the global buffer manager
func (b *BufferStatusMonitor) collectMetrics(globalManager *GlobalBufferManager) {
	b.dataMutex.Lock()
	defer b.dataMutex.Unlock()

	// Collect the global buffer statistics
	b.monitoringData.GlobalBufferStats = globalManager.GetStats()

	// Collect the per-buffer states
	bufferInfo := globalManager.GetBufferInfo()
	for id, info := range bufferInfo {
		if infoMap, ok := info.(map[string]interface{}); ok {
			bufferState := BufferState{
				ID:           id,
				LastActivity: time.Now(),
				Health:       "healthy",
			}

			// Extract the buffer details
			if size, ok := infoMap["total_operations"].(int64); ok {
				bufferState.Size = int(size)
			}
			if dataSize, ok := infoMap["total_data_size"].(int64); ok {
				bufferState.AverageDataSize = dataSize
			}
			if ratio, ok := infoMap["compress_ratio"].(float64); ok {
				bufferState.CompressionRatio = ratio
			}

			b.monitoringData.BufferStates[id] = bufferState
		}
	}

	// Collect the performance metrics
	b.collectPerformanceMetrics()

	// Update the real-time statistics
	b.updateRealTimeStats()
}

// collectPerformanceMetrics collects runtime performance metrics
func (b *BufferStatusMonitor) collectPerformanceMetrics() {
	// Collect memory statistics
	runtime.ReadMemStats(&b.performanceMetrics.GCStats)

	currentMemory := int64(b.performanceMetrics.GCStats.Alloc)
	b.performanceMetrics.MemoryUsage = currentMemory

	// Update the memory history
	b.performanceMetrics.MemoryHistory = append(b.performanceMetrics.MemoryHistory, currentMemory)
	if len(b.performanceMetrics.MemoryHistory) > 100 { // keep the most recent 100 data points
		b.performanceMetrics.MemoryHistory = b.performanceMetrics.MemoryHistory[1:]
	}

	// Simplified CPU usage estimate (based on GC statistics)
	gcCPUPercent := float64(b.performanceMetrics.GCStats.GCCPUFraction) * 100
	b.performanceMetrics.CPUUsage = gcCPUPercent

	// Update the CPU history
	b.performanceMetrics.CPUHistory = append(b.performanceMetrics.CPUHistory, gcCPUPercent)
	if len(b.performanceMetrics.CPUHistory) > 100 {
		b.performanceMetrics.CPUHistory = b.performanceMetrics.CPUHistory[1:]
	}

	b.performanceMetrics.LastUpdated = time.Now()
}

// updateRealTimeStats refreshes the real-time statistics
func (b *BufferStatusMonitor) updateRealTimeStats() {
	stats := &b.monitoringData.RealTimeStats

	if b.monitoringData.GlobalBufferStats != nil {
		globalStats := b.monitoringData.GlobalBufferStats

		// Active operations
		stats.ActiveOperations = int(globalStats.ActiveBuffers)

		// Processing rate (operations/second)
		if globalStats.TotalOperationsBuffered > 0 {
			stats.ProcessingRate = float64(globalStats.TotalOperationsBuffered) /
				time.Since(globalStats.LastCleanupTime).Seconds()
		}

		// Compression ratio
		stats.CompressionRatio = globalStats.AverageCompressionRatio

		// Cache hit rate
		stats.CacheHitRate = globalStats.HitRate
	}

	// Approximation: report the current memory usage in MB as the throughput figure
	// (actual MB/s throughput is not measured here)
	if b.performanceMetrics.MemoryUsage > 0 {
		stats.ThroughputMBps = float64(b.performanceMetrics.MemoryUsage) / 1024 / 1024
	}

	stats.LastUpdated = time.Now()
}

// analyzeData runs trend analysis and anomaly detection
func (b *BufferStatusMonitor) analyzeData() {
	b.analyzeTrends()
	b.detectAnomalies()
}

// analyzeTrends analyzes metric trends
func (b *BufferStatusMonitor) analyzeTrends() {
	trends := &b.monitoringData.TrendAnalysis

	// Memory trend analysis
	if len(b.performanceMetrics.MemoryHistory) >= 3 {
		recent := b.performanceMetrics.MemoryHistory[len(b.performanceMetrics.MemoryHistory)-3:]
		if recent[2] > recent[1] && recent[1] > recent[0] {
			trends.MemoryTrend = "increasing"
		} else if recent[2] < recent[1] && recent[1] < recent[0] {
			trends.MemoryTrend = "decreasing"
		} else {
			trends.MemoryTrend = "stable"
		}
	}

	// Buffer usage trend
	bufferCount := len(b.monitoringData.BufferStates)
	if bufferCount > b.alertThresholds.BufferCountWarning {
		trends.BufferUsageTrend = "increasing"
	} else {
		trends.BufferUsageTrend = "stable"
	}

	trends.AnalysisTime = time.Now()
	trends.Confidence = 0.8 // simplified confidence value
}

// detectAnomalies checks the metrics against the alert thresholds
func (b *BufferStatusMonitor) detectAnomalies() {
	// Memory anomaly detection
	if b.performanceMetrics.MemoryUsage > b.alertThresholds.MemoryUsageCritical {
		b.triggerAlert("memory", "critical",
			fmt.Sprintf("memory usage critically high: %d bytes", b.performanceMetrics.MemoryUsage))
	} else if b.performanceMetrics.MemoryUsage > b.alertThresholds.MemoryUsageWarning {
		b.triggerAlert("memory", "warning",
			fmt.Sprintf("memory usage warning: %d bytes", b.performanceMetrics.MemoryUsage))
	}

	// Buffer count anomaly detection
	bufferCount := len(b.monitoringData.BufferStates)
	if bufferCount > b.alertThresholds.BufferCountCritical {
		b.triggerAlert("buffer_count", "critical",
			fmt.Sprintf("too many buffers: %d", bufferCount))
	} else if bufferCount > b.alertThresholds.BufferCountWarning {
		b.triggerAlert("buffer_count", "warning",
			fmt.Sprintf("buffer count warning: %d", bufferCount))
	}
}

// checkAlerts raises alerts based on the overall health
func (b *BufferStatusMonitor) checkAlerts() {
	// Check the overall system health
	if b.healthChecker.healthStatus.Overall == "critical" {
		b.triggerAlert("system_health", "critical", "system health is critical")
	} else if b.healthChecker.healthStatus.Overall == "warning" {
		b.triggerAlert("system_health", "warning", "system health warning")
	}
}

// triggerAlert raises an alert, subject to the cooldown period
func (b *BufferStatusMonitor) triggerAlert(component, level, message string) {
	alertKey := fmt.Sprintf("%s_%s", component, level)

	// Check the cooldown
	b.alertManager.mutex.Lock()
	if lastAlert, exists := b.alertManager.alertCooldown[alertKey]; exists {
		if time.Since(lastAlert) < b.alertManager.cooldownPeriod {
			b.alertManager.mutex.Unlock()
			return // still within the cooldown period
		}
	}

	// Create a new alert
	alert := Alert{
		ID:        fmt.Sprintf("%s_%d", alertKey, time.Now().Unix()),
		Level:     level,
		Component: component,
		Message:   message,
		Timestamp: time.Now(),
		Metadata:  make(map[string]interface{}),
		Resolved:  false,
	}

	// Attach related metrics as metadata
	alert.Metadata["memory_usage"] = b.performanceMetrics.MemoryUsage
	alert.Metadata["buffer_count"] = len(b.monitoringData.BufferStates)
	alert.Metadata["cpu_usage"] = b.performanceMetrics.CPUUsage

	b.alertManager.alerts = append(b.alertManager.alerts, alert)
	b.alertManager.alertCooldown[alertKey] = time.Now()

	b.alertManager.mutex.Unlock()

	// Log the alert
	fmt.Printf("🚨 [Alert] %s - %s: %s\n", level, component, message)
}

// updatePredictions refreshes the prediction data
func (b *BufferStatusMonitor) updatePredictions() {
	predictions := &b.monitoringData.Predictions

	// Simplified memory usage prediction
	if len(b.performanceMetrics.MemoryHistory) >= 5 {
		history := b.performanceMetrics.MemoryHistory
		recent := history[len(history)-5:]

		// Simple linear extrapolation over the 30-second samples
		growth := float64(recent[4]-recent[0]) / 4
		predictions.MemoryUsageIn1Hour = recent[4] + int64(growth*120)    // 120 samples × 30s ≈ 1 hour
		predictions.MemoryUsageIn24Hours = recent[4] + int64(growth*2880) // 2880 samples × 30s ≈ 24 hours
	}

	// Buffer overflow risk estimate
	bufferCount := len(b.monitoringData.BufferStates)
	if bufferCount > b.alertThresholds.BufferCountWarning {
		predictions.BufferOverflowRisk = float64(bufferCount) / float64(b.alertThresholds.BufferCountCritical)
	} else {
		predictions.BufferOverflowRisk = 0.1
	}

	// Recommended actions
	predictions.RecommendedActions = b.generateRecommendations()
	predictions.ConfidenceLevel = 0.7
	predictions.PredictionTime = time.Now()
}

// generateRecommendations produces the recommended actions
func (b *BufferStatusMonitor) generateRecommendations() []string {
	recommendations := make([]string, 0)

	// Memory-based recommendation
	if b.performanceMetrics.MemoryUsage > b.alertThresholds.MemoryUsageWarning {
		recommendations = append(recommendations, "consider adding memory or reducing buffer sizes")
	}

	// Buffer-count-based recommendation
	bufferCount := len(b.monitoringData.BufferStates)
	if bufferCount > b.alertThresholds.BufferCountWarning {
		recommendations = append(recommendations, "consider tuning the buffer cleanup frequency")
	}

	// Trend-based recommendation
	if b.monitoringData.TrendAnalysis.MemoryTrend == "increasing" {
		recommendations = append(recommendations, "memory usage is trending upward; monitor and optimize")
	}

	if len(recommendations) == 0 {
		recommendations = append(recommendations, "system is healthy; keep monitoring")
	}

	return recommendations
}

// performHealthCheck runs a full health check
func (b *BufferStatusMonitor) performHealthCheck() {
	startTime := time.Now()

	b.healthChecker.mutex.Lock()
	defer b.healthChecker.mutex.Unlock()

	health := &b.healthChecker.healthStatus
	health.LastCheck = time.Now()
	health.Issues = make([]HealthIssue, 0)

	// Check memory health
	memoryHealth := b.checkMemoryHealth()
	health.Components["memory"] = memoryHealth

	// Check buffer health
	bufferHealth := b.checkBufferHealth()
	health.Components["buffers"] = bufferHealth

	// Check performance health
	performanceHealth := b.checkPerformanceHealth()
	health.Components["performance"] = performanceHealth

	// Determine the overall health status
	health.Overall = b.determineOverallHealth()

	// Record the health check result
	checkResult := HealthCheckResult{
		Timestamp: time.Now(),
		Status:    health.Overall,
		CheckTime: time.Since(startTime),
		Issues:    health.Issues,
	}

	b.healthChecker.healthHistory = append(b.healthChecker.healthHistory, checkResult)
	if len(b.healthChecker.healthHistory) > 100 { // keep the most recent 100 checks
		b.healthChecker.healthHistory = b.healthChecker.healthHistory[1:]
	}

	b.healthChecker.lastHealthCheck = time.Now()
}

// checkMemoryHealth checks the memory health
func (b *BufferStatusMonitor) checkMemoryHealth() ComponentHealth {
	health := ComponentHealth{
		Status:    "healthy",
		LastCheck: time.Now(),
		Metrics:   make(map[string]interface{}),
	}

	memUsage := b.performanceMetrics.MemoryUsage
	health.Metrics["usage_bytes"] = memUsage
	health.Metrics["usage_mb"] = memUsage / 1024 / 1024

	if memUsage > b.alertThresholds.MemoryUsageCritical {
		health.Status = "critical"
		health.Message = "memory usage critically high"
	} else if memUsage > b.alertThresholds.MemoryUsageWarning {
		health.Status = "warning"
		health.Message = "memory usage elevated"
	} else {
		health.Message = "memory usage normal"
	}

	return health
}

// checkBufferHealth checks the buffer health
func (b *BufferStatusMonitor) checkBufferHealth() ComponentHealth {
	health := ComponentHealth{
		Status:    "healthy",
		LastCheck: time.Now(),
		Metrics:   make(map[string]interface{}),
	}

	bufferCount := len(b.monitoringData.BufferStates)
	health.Metrics["buffer_count"] = bufferCount
	health.Metrics["max_buffers"] = b.alertThresholds.BufferCountCritical

	if bufferCount > b.alertThresholds.BufferCountCritical {
		health.Status = "critical"
		health.Message = "too many buffers"
	} else if bufferCount > b.alertThresholds.BufferCountWarning {
		health.Status = "warning"
		health.Message = "buffer count elevated"
	} else {
		health.Message = "buffers healthy"
	}

	return health
}

// checkPerformanceHealth checks the performance health
func (b *BufferStatusMonitor) checkPerformanceHealth() ComponentHealth {
	health := ComponentHealth{
		Status:    "healthy",
		LastCheck: time.Now(),
		Metrics:   make(map[string]interface{}),
	}

	cpuUsage := b.performanceMetrics.CPUUsage
	health.Metrics["cpu_usage"] = cpuUsage
	health.Metrics["gc_cpu_fraction"] = b.performanceMetrics.GCStats.GCCPUFraction

	if cpuUsage > 80 {
		health.Status = "warning"
		health.Message = "CPU usage elevated"
	} else {
		health.Message = "performance normal"
	}

	return health
}

// determineOverallHealth derives the overall status from the component statuses
func (b *BufferStatusMonitor) determineOverallHealth() string {
	hasCritical := false
	hasWarning := false

	for _, component := range b.healthChecker.healthStatus.Components {
		switch component.Status {
		case "critical":
			hasCritical = true
		case "warning":
			hasWarning = true
		}
	}

	if hasCritical {
		return "critical"
	} else if hasWarning {
		return "warning"
	}

	return "healthy"
}

// processAlerts resolves alerts whose conditions have cleared
func (b *BufferStatusMonitor) processAlerts() {
	b.alertManager.mutex.Lock()
	defer b.alertManager.mutex.Unlock()

	// Check whether any alerts can be auto-resolved
	for i := range b.alertManager.alerts {
		alert := &b.alertManager.alerts[i]
		if !alert.Resolved {
			if b.shouldResolveAlert(alert) {
				now := time.Now()
				alert.Resolved = true
				alert.ResolvedAt = &now

				fmt.Printf("✅ [Alert resolved] %s - %s: %s\n",
					alert.Level, alert.Component, alert.Message)
			}
		}
	}

	// Move the resolved alerts into the history
	activeAlerts := make([]Alert, 0)
	for _, alert := range b.alertManager.alerts {
		if !alert.Resolved {
			activeAlerts = append(activeAlerts, alert)
		} else {
			b.alertManager.alertHistory = append(b.alertManager.alertHistory, alert)
		}
	}

	b.alertManager.alerts = activeAlerts

	// Bound the history size
	if len(b.alertManager.alertHistory) > b.alertManager.maxAlertHistory {
		excess := len(b.alertManager.alertHistory) - b.alertManager.maxAlertHistory
		b.alertManager.alertHistory = b.alertManager.alertHistory[excess:]
	}
}

// shouldResolveAlert reports whether an alert's condition has cleared
func (b *BufferStatusMonitor) shouldResolveAlert(alert *Alert) bool {
	switch alert.Component {
	case "memory":
		return b.performanceMetrics.MemoryUsage < b.alertThresholds.MemoryUsageWarning
	case "buffer_count":
		return len(b.monitoringData.BufferStates) < b.alertThresholds.BufferCountWarning
	case "system_health":
		return b.healthChecker.healthStatus.Overall == "healthy"
	}

	return false
}

// saveSnapshot appends a monitoring snapshot to the history buffer
func (b *BufferStatusMonitor) saveSnapshot() {
	b.historyMutex.Lock()
	defer b.historyMutex.Unlock()

	snapshot := MonitorSnapshot{
		Timestamp:           time.Now(),
		SystemHealth:        b.healthChecker.healthStatus,
		BufferCount:         len(b.monitoringData.BufferStates),
		TotalMemoryUsage:    b.performanceMetrics.MemoryUsage,
		OperationsPerSecond: b.monitoringData.RealTimeStats.ProcessingRate,
		ErrorRate:           b.monitoringData.RealTimeStats.ErrorRate,
		CacheHitRate:        b.monitoringData.RealTimeStats.CacheHitRate,
	}

	b.historyBuffer = append(b.historyBuffer, snapshot)

	// Bound the history size
	if len(b.historyBuffer) > b.maxHistorySize {
		b.historyBuffer = b.historyBuffer[1:]
	}
}

// Stop stops the monitor
func (b *BufferStatusMonitor) Stop() {
	if !atomic.CompareAndSwapInt32(&b.isMonitoring, 1, 0) {
		return
	}

	close(b.shutdownChan)
	fmt.Printf("🔍 [BufferStatusMonitor] monitoring stopped\n")
}

// GetMonitoringData returns a copy of the monitoring data
func (b *BufferStatusMonitor) GetMonitoringData() *MonitoringData {
	b.dataMutex.RLock()
	defer b.dataMutex.RUnlock()

	// Shallow copy of the monitoring data (note: nested maps are still shared)
	dataCopy := *b.monitoringData
	return &dataCopy
}

// GetHealthStatus returns the current health status
func (b *BufferStatusMonitor) GetHealthStatus() HealthStatus {
	b.healthChecker.mutex.RLock()
	defer b.healthChecker.mutex.RUnlock()

	return b.healthChecker.healthStatus
}

// GetActiveAlerts returns a copy of the active alerts
func (b *BufferStatusMonitor) GetActiveAlerts() []Alert {
	b.alertManager.mutex.RLock()
	defer b.alertManager.mutex.RUnlock()

	alerts := make([]Alert, len(b.alertManager.alerts))
	copy(alerts, b.alertManager.alerts)
	return alerts
}

// GetMonitorHistory returns up to limit recent snapshots
func (b *BufferStatusMonitor) GetMonitorHistory(limit int) []MonitorSnapshot {
	b.historyMutex.Lock()
	defer b.historyMutex.Unlock()

	if limit <= 0 || limit > len(b.historyBuffer) {
		limit = len(b.historyBuffer)
	}

	history := make([]MonitorSnapshot, limit)
	startIndex := len(b.historyBuffer) - limit
	copy(history, b.historyBuffer[startIndex:])

	return history
}

// ExportMonitoringReport serializes a full monitoring report to JSON
func (b *BufferStatusMonitor) ExportMonitoringReport() (string, error) {
	report := map[string]interface{}{
		"timestamp":           time.Now(),
		"monitoring_data":     b.GetMonitoringData(),
		"health_status":       b.GetHealthStatus(),
		"active_alerts":       b.GetActiveAlerts(),
		"performance_metrics": b.performanceMetrics,
		"recent_history":      b.GetMonitorHistory(50), // the 50 most recent snapshots
	}

	jsonData, err := json.MarshalIndent(report, "", "  ")
	if err != nil {
		return "", fmt.Errorf("failed to export monitoring report: %v", err)
	}

	return string(jsonData), nil
}
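
A short read-side sketch (illustrative; how callers poll the monitor is an assumption, not part of this commit):

    if report, err := monitor.ExportMonitoringReport(); err == nil {
        fmt.Println(report) // indented JSON via pansou/util/json
    }
    for _, alert := range monitor.GetActiveAlerts() {
        fmt.Printf("active alert [%s] %s: %s\n", alert.Level, alert.Component, alert.Message)
    }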
965
util/cache/delayed_batch_write_manager.go
vendored
Normal file
@@ -0,0 +1,965 @@
package cache

import (
	"context"
	"fmt"
	"os"
	"runtime"
	"sort"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"pansou/model"
)

// CacheWriteStrategy selects how cache writes reach the disk
type CacheWriteStrategy string

const (
	// CacheStrategyImmediate writes to disk immediately (the previous behavior)
	CacheStrategyImmediate CacheWriteStrategy = "immediate"

	// CacheStrategyHybrid is the hybrid smart strategy (recommended)
	CacheStrategyHybrid CacheWriteStrategy = "hybrid"
)

// CacheOperation describes one pending cache write
type CacheOperation struct {
	Key        string
	Data       []model.SearchResult
	TTL        time.Duration
	PluginName string
	Keyword    string
	Timestamp  time.Time
	Priority   int  // priority (1=highest, 4=lowest)
	DataSize   int  // data size in bytes
	IsFinal    bool // whether this is the final result
}

// CacheWriteConfig configures the cache write path
type CacheWriteConfig struct {
	// 🎯 Core strategy
	Strategy CacheWriteStrategy `env:"CACHE_WRITE_STRATEGY" default:"hybrid"`

	// ⏱️ Batch-write parameters (auto-computed, but can be overridden manually)
	MaxBatchInterval time.Duration `env:"BATCH_MAX_INTERVAL"`  // 0 means auto-compute
	MaxBatchSize     int           `env:"BATCH_MAX_SIZE"`      // 0 means auto-compute
	MaxBatchDataSize int           `env:"BATCH_MAX_DATA_SIZE"` // 0 means auto-compute

	// 🎛️ Behavior parameters
	HighPriorityRatio float64 `env:"HIGH_PRIORITY_RATIO" default:"0.3"`
	EnableCompression bool    // operation merging, enabled by default

	// 📊 Internal parameters (tuned dynamically at runtime)
	idleThresholdCPU   float64       // CPU idle threshold
	idleThresholdDisk  float64       // disk idle threshold
	forceFlushInterval time.Duration // forced flush interval
	autoTuneInterval   time.Duration // auto-tuning check interval

	// 🔧 Hard-coded constraint bounds
	minBatchInterval time.Duration // minimum 30 seconds
	maxBatchInterval time.Duration // maximum 10 minutes
	minBatchSize     int           // minimum 10 operations
	maxBatchSize     int           // maximum 1000 operations
}

// Initialize initializes the configuration
func (c *CacheWriteConfig) Initialize() error {
	// 🔧 Set the hard-coded constraint bounds
	c.minBatchInterval = 30 * time.Second
	c.maxBatchInterval = 600 * time.Second // 10 minutes
	c.minBatchSize = 10
	c.maxBatchSize = 1000

	// 🎯 Load the environment variables
	c.loadFromEnvironment()

	// 🤖 Auto-compute the optimal parameters (unless set manually)
	if c.MaxBatchInterval == 0 {
		c.MaxBatchInterval = c.calculateOptimalBatchInterval()
	}
	if c.MaxBatchSize == 0 {
		c.MaxBatchSize = c.calculateOptimalBatchSize()
	}
	if c.MaxBatchDataSize == 0 {
		c.MaxBatchDataSize = c.calculateOptimalDataSize()
	}

	// 🔧 Derive the internal parameters
	c.forceFlushInterval = c.MaxBatchInterval * 5 // 5x the batch interval
	c.autoTuneInterval = 300 * time.Second        // tune every 5 minutes
	c.idleThresholdCPU = 0.3                      // CPU idle threshold
	c.idleThresholdDisk = 0.5                     // disk idle threshold

	// ✅ Validate and constrain the parameters
	return c.validateAndConstraint()
}

// loadFromEnvironment loads the configuration from environment variables
func (c *CacheWriteConfig) loadFromEnvironment() {
	// Strategy
	if strategy := os.Getenv("CACHE_WRITE_STRATEGY"); strategy != "" {
		c.Strategy = CacheWriteStrategy(strategy)
	}

	// Batch-write parameters
	if interval := os.Getenv("BATCH_MAX_INTERVAL"); interval != "" {
		if d, err := time.ParseDuration(interval); err == nil {
			c.MaxBatchInterval = d
		}
	}

	if size := os.Getenv("BATCH_MAX_SIZE"); size != "" {
		if s, err := strconv.Atoi(size); err == nil {
			c.MaxBatchSize = s
		}
	}

	if dataSize := os.Getenv("BATCH_MAX_DATA_SIZE"); dataSize != "" {
		if ds, err := strconv.Atoi(dataSize); err == nil {
			c.MaxBatchDataSize = ds
		}
	}

	// Behavior parameters
	if ratio := os.Getenv("HIGH_PRIORITY_RATIO"); ratio != "" {
		if r, err := strconv.ParseFloat(ratio, 64); err == nil {
			c.HighPriorityRatio = r
		}
	}
}

// calculateOptimalBatchInterval computes the optimal batch interval
func (c *CacheWriteConfig) calculateOptimalBatchInterval() time.Duration {
	// 🎯 Derived dynamically from system capacity
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)

	// Simplified: scale with the amount of memory obtained from the OS
	availableMemoryGB := float64(memStats.Sys) / 1024 / 1024 / 1024

	var interval time.Duration
	switch {
	case availableMemoryGB > 8: // large-memory system
		interval = 45 * time.Second
	case availableMemoryGB > 4: // mid-range system
		interval = 60 * time.Second
	default: // small-memory system
		interval = 90 * time.Second
	}

	// Apply the constraints
	if interval < c.minBatchInterval {
		interval = c.minBatchInterval
	}
	if interval > c.maxBatchInterval {
		interval = c.maxBatchInterval
	}

	return interval
}

// calculateOptimalBatchSize computes the optimal batch size
func (c *CacheWriteConfig) calculateOptimalBatchSize() int {
	// 🎯 Derived from the CPU core count and memory
	numCPU := runtime.NumCPU()

	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	availableMemoryGB := float64(memStats.Sys) / 1024 / 1024 / 1024

	var size int
	switch {
	case numCPU >= 8 && availableMemoryGB > 8: // high-end system
		size = 200
	case numCPU >= 4 && availableMemoryGB > 4: // mid-range system
		size = 100
	default: // low-end system
		size = 50
	}

	// Apply the constraints
	if size < c.minBatchSize {
		size = c.minBatchSize
	}
	if size > c.maxBatchSize {
		size = c.maxBatchSize
	}

	return size
}

// calculateOptimalDataSize computes the optimal batch data size
func (c *CacheWriteConfig) calculateOptimalDataSize() int {
	// 🎯 Derived from the available memory
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	availableMemoryGB := float64(memStats.Sys) / 1024 / 1024 / 1024

	var sizeMB int
	switch {
	case availableMemoryGB > 16: // large-memory system
		sizeMB = 20
	case availableMemoryGB > 8: // mid-range system
		sizeMB = 10
	default: // small-memory system
		sizeMB = 5
	}

	return sizeMB * 1024 * 1024 // convert to bytes
}

// validateAndConstraint validates and constrains the configuration
func (c *CacheWriteConfig) validateAndConstraint() error {
	// 🔧 Sanity checks
	if c.MaxBatchInterval < c.minBatchInterval {
		return fmt.Errorf("invalid batch interval: MaxBatchInterval(%v) < MinBatchInterval(%v)",
			c.MaxBatchInterval, c.minBatchInterval)
	}

	if c.MaxBatchSize < c.minBatchSize {
		return fmt.Errorf("invalid batch size: MaxBatchSize(%d) < MinBatchSize(%d)",
			c.MaxBatchSize, c.minBatchSize)
	}

	if c.HighPriorityRatio < 0 || c.HighPriorityRatio > 1 {
		return fmt.Errorf("invalid high-priority ratio: HighPriorityRatio(%f) must be within [0,1]",
			c.HighPriorityRatio)
	}

	// 🎯 Apply the final constraints
	if c.MaxBatchInterval > c.maxBatchInterval {
		c.MaxBatchInterval = c.maxBatchInterval
	}
	if c.MaxBatchSize > c.maxBatchSize {
		c.MaxBatchSize = c.maxBatchSize
	}

	// Fall back to the default strategy
	if c.Strategy != CacheStrategyImmediate && c.Strategy != CacheStrategyHybrid {
		c.Strategy = CacheStrategyHybrid
	}

	return nil
}
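
// exampleConfigFromEnv is an illustrative sketch, not part of the original
// commit: it shows how the environment overrides read by loadFromEnvironment
// above feed Initialize. The values are arbitrary examples.
func exampleConfigFromEnv() (*CacheWriteConfig, error) {
	os.Setenv("CACHE_WRITE_STRATEGY", "hybrid")
	os.Setenv("BATCH_MAX_INTERVAL", "60s") // parsed with time.ParseDuration
	os.Setenv("BATCH_MAX_SIZE", "100")

	cfg := &CacheWriteConfig{EnableCompression: true}
	// Initialize loads the env vars, auto-computes anything still zero,
	// and clamps everything to the hard-coded bounds.
	if err := cfg.Initialize(); err != nil {
		return nil, err
	}
	return cfg, nil
}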
// DelayedBatchWriteManager is the delayed batch write manager
type DelayedBatchWriteManager struct {
	strategy CacheWriteStrategy
	config   *CacheWriteConfig

	// Delayed write queue
	writeQueue  chan *CacheOperation
	queueBuffer []*CacheOperation
	queueMutex  sync.Mutex

	// 🚀 Global buffer manager
	globalBufferManager *GlobalBufferManager

	// Statistics
	stats *WriteManagerStats

	// Control channels
	shutdownChan chan struct{}
	flushTicker  *time.Ticker

	// Data compression (operation merging)
	operationMap map[string]*CacheOperation // key -> latest operation (merge/dedup)
	mapMutex     sync.RWMutex

	// Main cache update function
	mainCacheUpdater func(string, []byte, time.Duration) error

	// Serializer
	serializer *GobSerializer

	// Initialization flag
	initialized int32
	initMutex   sync.Mutex
}

// WriteManagerStats tracks the write manager statistics
type WriteManagerStats struct {
	// Basic counters
	TotalWrites      int64 // total writes
	TotalOperations  int64 // total operations
	BatchWrites      int64 // batch writes
	ImmediateWrites  int64 // immediate writes
	MergedOperations int64 // merged operations
	FailedWrites     int64 // failed writes
	SuccessfulWrites int64 // successful writes

	// Performance statistics
	LastFlushTime          time.Time // time of the last flush
	LastFlushTrigger       string    // what triggered the last flush
	LastBatchSize          int       // size of the last batch
	TotalOperationsWritten int       // total operations written

	// Time window
	WindowStart time.Time // statistics window start
	WindowEnd   time.Time // statistics window end

	// Runtime state
	CurrentQueueSize   int32   // current queue size
	CurrentMemoryUsage int64   // current memory usage
	SystemLoadAverage  float64 // system load average
}

// NewDelayedBatchWriteManager creates a new delayed batch write manager
func NewDelayedBatchWriteManager() (*DelayedBatchWriteManager, error) {
	config := &CacheWriteConfig{
		Strategy:          CacheStrategyHybrid,
		EnableCompression: true,
	}

	// Initialize the configuration
	if err := config.Initialize(); err != nil {
		return nil, fmt.Errorf("configuration initialization failed: %v", err)
	}

	// 🚀 Create the global buffer manager
	globalBufferManager := NewGlobalBufferManager(BufferHybrid)

	manager := &DelayedBatchWriteManager{
		strategy:            config.Strategy,
		config:              config,
		writeQueue:          make(chan *CacheOperation, 1000), // queue capacity 1000
		queueBuffer:         make([]*CacheOperation, 0, config.MaxBatchSize),
		globalBufferManager: globalBufferManager,
		operationMap:        make(map[string]*CacheOperation),
		shutdownChan:        make(chan struct{}),
		stats: &WriteManagerStats{
			WindowStart: time.Now(),
		},
		serializer: NewGobSerializer(),
	}

	return manager, nil
}

// Initialize initializes the manager
func (m *DelayedBatchWriteManager) Initialize() error {
	if !atomic.CompareAndSwapInt32(&m.initialized, 0, 1) {
		return nil // already initialized
	}

	m.initMutex.Lock()
	defer m.initMutex.Unlock()

	// 🚀 Initialize the global buffer manager
	if err := m.globalBufferManager.Initialize(); err != nil {
		return fmt.Errorf("global buffer manager initialization failed: %v", err)
	}

	// Start the background processing goroutine
	go m.backgroundProcessor()

	// Start the periodic flush goroutine
	m.flushTicker = time.NewTicker(m.config.MaxBatchInterval)
	go m.timerFlushProcessor()

	// Start the auto-tuning goroutine
	go m.autoTuningProcessor()

	// 🔍 Start the global buffer monitor
	go m.globalBufferMonitor()

	fmt.Printf("🚀 [CacheWriteManager] initialized, strategy: %s\n", m.strategy)
	return nil
}

// SetMainCacheUpdater sets the main-cache update function
func (m *DelayedBatchWriteManager) SetMainCacheUpdater(updater func(string, []byte, time.Duration) error) {
	m.mainCacheUpdater = updater
}

// HandleCacheOperation handles one cache operation
func (m *DelayedBatchWriteManager) HandleCacheOperation(op *CacheOperation) error {
	// Make sure the manager is initialized
	if err := m.Initialize(); err != nil {
		return err
	}

	// 🔥 Key invariant: regardless of strategy, update the in-memory cache immediately
	if err := m.updateMemoryCache(op); err != nil {
		return fmt.Errorf("memory cache update failed: %v", err)
	}

	// Dispatch the disk write according to the strategy
	if m.strategy == CacheStrategyImmediate {
		return m.immediateWriteToDisk(op)
	}

	// 🚀 Use the global buffer manager for smart buffering
	return m.handleWithGlobalBuffer(op)
}
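
// exampleManagerWiring is an illustrative sketch, not part of the original
// commit: it wires the manager to a disk write function and submits one
// operation. diskSet stands in for the real sharded-disk-cache setter.
func exampleManagerWiring(diskSet func(string, []byte, time.Duration) error) error {
	manager, err := NewDelayedBatchWriteManager()
	if err != nil {
		return err
	}
	manager.SetMainCacheUpdater(diskSet) // flushed batches land here

	// The in-memory cache is updated immediately; under the hybrid strategy
	// the disk write is buffered and batched.
	return manager.HandleCacheOperation(&CacheOperation{
		Key:        "search:example:hash", // illustrative key
		Data:       []model.SearchResult{},
		TTL:        time.Hour,
		PluginName: "example",
		Keyword:    "example",
		Timestamp:  time.Now(),
		Priority:   2,
	})
}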
// handleWithGlobalBuffer routes an operation through the global buffer
func (m *DelayedBatchWriteManager) handleWithGlobalBuffer(op *CacheOperation) error {
	// 🎯 Try to add the operation to the global buffer
	buffer, shouldFlush, err := m.globalBufferManager.AddOperation(op)
	if err != nil {
		// Global buffer failed; degrade to the local queue
		return m.enqueueForBatchWrite(op)
	}

	// 🚀 Flush the buffer if required
	if shouldFlush {
		return m.flushGlobalBuffer(buffer.ID)
	}

	return nil
}

// flushGlobalBuffer flushes one global buffer to disk
func (m *DelayedBatchWriteManager) flushGlobalBuffer(bufferID string) error {
	operations, err := m.globalBufferManager.FlushBuffer(bufferID)
	if err != nil {
		return fmt.Errorf("failed to flush global buffer: %v", err)
	}

	if len(operations) == 0 {
		return nil
	}

	// 🎯 Sort the operations by priority, then by timestamp
	sort.Slice(operations, func(i, j int) bool {
		if operations[i].Priority != operations[j].Priority {
			return operations[i].Priority < operations[j].Priority
		}
		return operations[i].Timestamp.Before(operations[j].Timestamp)
	})

	// 📊 Update the statistics
	atomic.AddInt64(&m.stats.BatchWrites, 1)
	atomic.AddInt64(&m.stats.TotalWrites, 1)
	m.stats.LastFlushTime = time.Now()
	m.stats.LastFlushTrigger = "global buffer threshold"
	m.stats.LastBatchSize = len(operations)

	// 🚀 Batch-write to disk
	err = m.batchWriteToDisk(operations)
	if err != nil {
		atomic.AddInt64(&m.stats.FailedWrites, 1)
		return fmt.Errorf("global buffer batch write failed: %v", err)
	}

	// 📈 Success statistics
	atomic.AddInt64(&m.stats.SuccessfulWrites, 1)
	m.stats.TotalOperationsWritten += len(operations)

	return nil
}

// globalBufferMonitor watches for stale global buffers
func (m *DelayedBatchWriteManager) globalBufferMonitor() {
	ticker := time.NewTicker(2 * time.Minute) // check every 2 minutes
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// 🔍 Flush any buffers that have gone stale
			m.checkAndFlushExpiredBuffers()

		case <-m.shutdownChan:
			return
		}
	}
}

// checkAndFlushExpiredBuffers flushes buffers that have not been updated recently
func (m *DelayedBatchWriteManager) checkAndFlushExpiredBuffers() {
	bufferInfo := m.globalBufferManager.GetBufferInfo()

	for bufferID, info := range bufferInfo {
		if infoMap, ok := info.(map[string]interface{}); ok {
			if lastUpdated, ok := infoMap["last_updated_at"].(time.Time); ok {
				// Flush any buffer that has been idle for more than 5 minutes
				if time.Since(lastUpdated) > 5*time.Minute {
					if err := m.flushGlobalBuffer(bufferID); err != nil {
						fmt.Printf("⚠️ [GlobalBuffer] failed to flush expired buffer %s: %v\n", bufferID, err)
					}
				}
			}
		}
	}
}

// updateMemoryCache updates the in-memory cache (always immediate)
func (m *DelayedBatchWriteManager) updateMemoryCache(op *CacheOperation) error {
	// This should call into the existing in-memory cache update logic.
	// Returns nil for now; the real implementation integrates the existing memory cache.
	return nil
}

// immediateWriteToDisk writes one operation to disk immediately
func (m *DelayedBatchWriteManager) immediateWriteToDisk(op *CacheOperation) error {
	if m.mainCacheUpdater == nil {
		return fmt.Errorf("main cache updater is not set")
	}

	// Serialize the data
	data, err := m.serializer.Serialize(op.Data)
	if err != nil {
		return fmt.Errorf("data serialization failed: %v", err)
	}

	// Update the statistics
	atomic.AddInt64(&m.stats.TotalWrites, 1)
	atomic.AddInt64(&m.stats.TotalOperations, 1)
	atomic.AddInt64(&m.stats.ImmediateWrites, 1)

	return m.mainCacheUpdater(op.Key, data, op.TTL)
}

// enqueueForBatchWrite adds an operation to the batch-write queue
func (m *DelayedBatchWriteManager) enqueueForBatchWrite(op *CacheOperation) error {
	// 🚀 Operation merging: keep only the latest operation per key
	if m.config.EnableCompression {
		m.mapMutex.Lock()
		existing, exists := m.operationMap[op.Key]
		if exists {
			// Merge: keep the newest data, accumulate the size statistics
			op.DataSize += existing.DataSize
			atomic.AddInt64(&m.stats.MergedOperations, 1)
		}
		m.operationMap[op.Key] = op
		m.mapMutex.Unlock()
	}

	// Enqueue for delayed writing
	select {
	case m.writeQueue <- op:
		atomic.AddInt64(&m.stats.TotalOperations, 1)
		atomic.AddInt32(&m.stats.CurrentQueueSize, 1)
		return nil
	default:
		// Queue is full: trigger an emergency flush
		return m.emergencyFlush()
	}
}

// backgroundProcessor drains the write queue into the batch buffer
func (m *DelayedBatchWriteManager) backgroundProcessor() {
	for {
		select {
		case op := <-m.writeQueue:
			m.queueMutex.Lock()
			m.queueBuffer = append(m.queueBuffer, op)
			atomic.AddInt32(&m.stats.CurrentQueueSize, -1)

			// Check whether a batch write should be triggered
			if shouldFlush, trigger := m.shouldTriggerBatchWrite(); shouldFlush {
				m.executeBatchWrite(trigger)
			}
			m.queueMutex.Unlock()

		case <-m.shutdownChan:
			// Graceful shutdown: process the remaining operations
			m.flushAllPendingData()
			return
		}
	}
}

// timerFlushProcessor flushes the batch buffer on a timer
func (m *DelayedBatchWriteManager) timerFlushProcessor() {
	for {
		select {
		case <-m.flushTicker.C:
			m.queueMutex.Lock()
			if len(m.queueBuffer) > 0 {
				m.executeBatchWrite("timer")
			}
			m.queueMutex.Unlock()

		case <-m.shutdownChan:
			m.flushTicker.Stop()
			return
		}
	}
}

// autoTuningProcessor periodically re-tunes the batch parameters
func (m *DelayedBatchWriteManager) autoTuningProcessor() {
	ticker := time.NewTicker(m.config.autoTuneInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			m.autoTuneParameters()

		case <-m.shutdownChan:
			return
		}
	}
}

// Shutdown performs a graceful shutdown
func (m *DelayedBatchWriteManager) Shutdown(timeout time.Duration) error {
|
||||
if !atomic.CompareAndSwapInt32(&m.initialized, 1, 0) {
|
||||
return nil // 已经关闭
|
||||
}
|
||||
|
||||
fmt.Println("🔄 [缓存写入管理器] 正在保存缓存数据...")
|
||||
|
||||
// 关闭后台处理器
|
||||
close(m.shutdownChan)
|
||||
|
||||
// 等待所有数据保存完成,但有超时保护
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
var lastErr error
|
||||
|
||||
// 🚀 首先刷新全局缓冲区
|
||||
if err := m.flushAllGlobalBuffers(); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
|
||||
// 🔧 然后刷新本地队列
|
||||
if err := m.flushAllPendingData(); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
|
||||
// 🔄 关闭全局缓冲区管理器
|
||||
if err := m.globalBufferManager.Shutdown(); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
|
||||
done <- lastErr
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-done:
|
||||
if err != nil {
|
||||
return fmt.Errorf("数据保存失败: %v", err)
|
||||
}
|
||||
fmt.Println("✅ [缓存写入管理器] 缓存数据已安全保存")
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("数据保存超时")
|
||||
}
|
||||
}
|
||||
|
||||
// flushAllGlobalBuffers 刷新所有全局缓冲区
|
||||
func (m *DelayedBatchWriteManager) flushAllGlobalBuffers() error {
|
||||
allBuffers := m.globalBufferManager.FlushAllBuffers()
|
||||
|
||||
var lastErr error
|
||||
totalOperations := 0
|
||||
|
||||
for bufferID, operations := range allBuffers {
|
||||
if len(operations) > 0 {
|
||||
if err := m.batchWriteToDisk(operations); err != nil {
|
||||
lastErr = fmt.Errorf("刷新全局缓冲区 %s 失败: %v", bufferID, err)
|
||||
continue
|
||||
}
|
||||
totalOperations += len(operations)
|
||||
}
|
||||
}
|
||||
|
||||
if totalOperations > 0 {
|
||||
fmt.Printf("🚀 [全局缓冲区] 刷新完成,写入%d个操作\n", totalOperations)
|
||||
}
|
||||
|
||||
return lastErr
|
||||
}
|
||||
|
||||
// flushAllPendingData 刷新所有待处理数据
|
||||
func (m *DelayedBatchWriteManager) flushAllPendingData() error {
|
||||
m.queueMutex.Lock()
|
||||
defer m.queueMutex.Unlock()
|
||||
|
||||
// 处理队列缓冲区中的数据
|
||||
if len(m.queueBuffer) > 0 {
|
||||
if err := m.executeBatchWrite("程序关闭"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// 处理操作映射中的数据(如果启用了压缩)
|
||||
if m.config.EnableCompression && len(m.operationMap) > 0 {
|
||||
operations := m.getCompressedOperations()
|
||||
if len(operations) > 0 {
|
||||
return m.batchWriteToDisk(operations)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldTriggerBatchWrite 检查是否应该触发批量写入
|
||||
func (m *DelayedBatchWriteManager) shouldTriggerBatchWrite() (bool, string) {
|
||||
now := time.Now()
|
||||
|
||||
// 条件1:时间间隔达到阈值
|
||||
if now.Sub(m.stats.LastFlushTime) >= m.config.MaxBatchInterval {
|
||||
return true, "时间间隔触发"
|
||||
}
|
||||
|
||||
// 条件2:操作数量达到阈值
|
||||
if len(m.queueBuffer) >= m.config.MaxBatchSize {
|
||||
return true, "数量阈值触发"
|
||||
}
|
||||
|
||||
// 条件3:数据大小达到阈值
|
||||
totalSize := m.calculateBufferSize()
|
||||
if totalSize >= m.config.MaxBatchDataSize {
|
||||
return true, "大小阈值触发"
|
||||
}
|
||||
|
||||
// 条件4:高优先级数据比例达到阈值
|
||||
highPriorityRatio := m.calculateHighPriorityRatio()
|
||||
if highPriorityRatio >= m.config.HighPriorityRatio {
|
||||
return true, "高优先级触发"
|
||||
}
|
||||
|
||||
// 条件5:系统空闲(CPU和磁盘使用率都较低)
|
||||
if m.isSystemIdle() {
|
||||
return true, "系统空闲触发"
|
||||
}
|
||||
|
||||
// 条件6:强制刷新间隔(兜底机制)
|
||||
if now.Sub(m.stats.LastFlushTime) >= m.config.forceFlushInterval {
|
||||
return true, "强制刷新触发"
|
||||
}
|
||||
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// calculateBufferSize 计算缓冲区数据大小
|
||||
func (m *DelayedBatchWriteManager) calculateBufferSize() int {
|
||||
totalSize := 0
|
||||
for _, op := range m.queueBuffer {
|
||||
totalSize += op.DataSize
|
||||
}
|
||||
return totalSize
|
||||
}
|
||||
|
||||
// calculateHighPriorityRatio 计算高优先级数据比例
|
||||
func (m *DelayedBatchWriteManager) calculateHighPriorityRatio() float64 {
|
||||
if len(m.queueBuffer) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
highPriorityCount := 0
|
||||
for _, op := range m.queueBuffer {
|
||||
if op.Priority <= 2 { // 等级1和等级2插件
|
||||
highPriorityCount++
|
||||
}
|
||||
}
|
||||
|
||||
return float64(highPriorityCount) / float64(len(m.queueBuffer))
|
||||
}
|
||||
|
||||
// isSystemIdle 检查系统是否空闲
|
||||
func (m *DelayedBatchWriteManager) isSystemIdle() bool {
|
||||
// 简化实现:基于CPU使用率
|
||||
var memStats runtime.MemStats
|
||||
runtime.ReadMemStats(&memStats)
|
||||
|
||||
// 如果GC频率较低,认为系统相对空闲
|
||||
return memStats.NumGC%10 == 0
|
||||
}
|
||||
|
||||
// executeBatchWrite 执行批量写入
|
||||
func (m *DelayedBatchWriteManager) executeBatchWrite(trigger string) error {
|
||||
if len(m.queueBuffer) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 🔧 操作合并:如果启用压缩,使用合并后的操作
|
||||
var operations []*CacheOperation
|
||||
if m.config.EnableCompression {
|
||||
operations = m.getCompressedOperations()
|
||||
} else {
|
||||
operations = make([]*CacheOperation, len(m.queueBuffer))
|
||||
copy(operations, m.queueBuffer)
|
||||
}
|
||||
|
||||
if len(operations) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 🎯 按优先级排序:确保重要数据优先写入
|
||||
sort.Slice(operations, func(i, j int) bool {
|
||||
if operations[i].Priority != operations[j].Priority {
|
||||
return operations[i].Priority < operations[j].Priority // 数字越小优先级越高
|
||||
}
|
||||
return operations[i].Timestamp.Before(operations[j].Timestamp)
|
||||
})
|
||||
|
||||
// 📊 统计信息更新
|
||||
atomic.AddInt64(&m.stats.BatchWrites, 1)
|
||||
m.stats.LastFlushTime = time.Now()
|
||||
m.stats.LastFlushTrigger = trigger
|
||||
m.stats.LastBatchSize = len(operations)
|
||||
|
||||
// 🚀 批量写入磁盘
|
||||
err := m.batchWriteToDisk(operations)
|
||||
if err != nil {
|
||||
atomic.AddInt64(&m.stats.FailedWrites, 1)
|
||||
return fmt.Errorf("批量写入失败: %v", err)
|
||||
}
|
||||
|
||||
// 清空缓冲区
|
||||
m.queueBuffer = m.queueBuffer[:0]
|
||||
if m.config.EnableCompression {
|
||||
m.mapMutex.Lock()
|
||||
m.operationMap = make(map[string]*CacheOperation)
|
||||
m.mapMutex.Unlock()
|
||||
}
|
||||
|
||||
// 📈 成功统计
|
||||
atomic.AddInt64(&m.stats.SuccessfulWrites, 1)
|
||||
atomic.AddInt64(&m.stats.TotalWrites, 1)
|
||||
m.stats.TotalOperationsWritten += len(operations)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getCompressedOperations 获取压缩后的操作列表
|
||||
func (m *DelayedBatchWriteManager) getCompressedOperations() []*CacheOperation {
|
||||
m.mapMutex.RLock()
|
||||
defer m.mapMutex.RUnlock()
|
||||
|
||||
operations := make([]*CacheOperation, 0, len(m.operationMap))
|
||||
for _, op := range m.operationMap {
|
||||
operations = append(operations, op)
|
||||
}
|
||||
|
||||
return operations
|
||||
}
|
||||
|
||||
// batchWriteToDisk 批量写入磁盘
|
||||
func (m *DelayedBatchWriteManager) batchWriteToDisk(operations []*CacheOperation) error {
|
||||
if m.mainCacheUpdater == nil {
|
||||
return fmt.Errorf("主缓存更新函数未设置")
|
||||
}
|
||||
|
||||
// 批量处理所有操作
|
||||
for _, op := range operations {
|
||||
// 序列化数据
|
||||
data, err := m.serializer.Serialize(op.Data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("数据序列化失败: %v", err)
|
||||
}
|
||||
|
||||
// 写入磁盘
|
||||
if err := m.mainCacheUpdater(op.Key, data, op.TTL); err != nil {
|
||||
return fmt.Errorf("磁盘写入失败: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// emergencyFlush 紧急刷新
|
||||
func (m *DelayedBatchWriteManager) emergencyFlush() error {
|
||||
m.queueMutex.Lock()
|
||||
defer m.queueMutex.Unlock()
|
||||
|
||||
return m.executeBatchWrite("紧急刷新")
|
||||
}
|
||||
|
||||
// autoTuneParameters 自适应参数调优
|
||||
func (m *DelayedBatchWriteManager) autoTuneParameters() {
|
||||
// 🤖 完全自动调优,无需配置开关
|
||||
stats := m.collectRecentStats()
|
||||
|
||||
// 🎯 调优批量间隔:基于系统负载动态调整
|
||||
avgSystemLoad := stats.SystemLoadAverage
|
||||
switch {
|
||||
case avgSystemLoad > 0.8: // 高负载:延长间隔,减少干扰
|
||||
m.config.MaxBatchInterval = m.minDuration(m.config.MaxBatchInterval*12/10, m.config.maxBatchInterval)
|
||||
case avgSystemLoad < 0.3: // 低负载:缩短间隔,及时持久化
|
||||
m.config.MaxBatchInterval = m.maxDuration(m.config.MaxBatchInterval*8/10, m.config.minBatchInterval)
|
||||
}
|
||||
|
||||
// 🎯 调优批量大小:基于写入频率动态调整
|
||||
queueSize := int(atomic.LoadInt32(&m.stats.CurrentQueueSize))
|
||||
switch {
|
||||
case queueSize > 200: // 高频:增大批量,提高效率
|
||||
m.config.MaxBatchSize = m.minInt(m.config.MaxBatchSize*12/10, m.config.maxBatchSize)
|
||||
case queueSize < 50: // 低频:减小批量,降低延迟
|
||||
m.config.MaxBatchSize = m.maxInt(m.config.MaxBatchSize*8/10, m.config.minBatchSize)
|
||||
}
|
||||
}
|
||||
|
||||
// collectRecentStats 收集最近的统计数据
|
||||
func (m *DelayedBatchWriteManager) collectRecentStats() *WriteManagerStats {
|
||||
return m.GetWriteManagerStats()
|
||||
}
|
||||
|
||||
// 辅助函数
|
||||
func (m *DelayedBatchWriteManager) minDuration(a, b time.Duration) time.Duration {
|
||||
if a < b { return a }
|
||||
return b
|
||||
}
|
||||
|
||||
func (m *DelayedBatchWriteManager) maxDuration(a, b time.Duration) time.Duration {
|
||||
if a > b { return a }
|
||||
return b
|
||||
}
|
||||
|
||||
func (m *DelayedBatchWriteManager) minInt(a, b int) int {
|
||||
if a < b { return a }
|
||||
return b
|
||||
}
|
||||
|
||||
func (m *DelayedBatchWriteManager) maxInt(a, b int) int {
|
||||
if a > b { return a }
|
||||
return b
|
||||
}
|
||||
|
||||
// GetStats 获取统计信息
|
||||
func (m *DelayedBatchWriteManager) GetStats() map[string]interface{} {
|
||||
stats := *m.stats
|
||||
stats.CurrentQueueSize = atomic.LoadInt32(&m.stats.CurrentQueueSize)
|
||||
stats.WindowEnd = time.Now()
|
||||
|
||||
// 计算压缩比例
|
||||
if stats.TotalOperations > 0 {
|
||||
stats.SystemLoadAverage = float64(stats.TotalWrites) / float64(stats.TotalOperations)
|
||||
}
|
||||
|
||||
// 🚀 获取全局缓冲区统计
|
||||
globalBufferStats := m.globalBufferManager.GetStats()
|
||||
|
||||
// 🔍 获取监控数据(如果监控器存在)
|
||||
var monitoringData *MonitoringData
|
||||
if m.globalBufferManager.statusMonitor != nil {
|
||||
monitoringData = m.globalBufferManager.statusMonitor.GetMonitoringData()
|
||||
}
|
||||
|
||||
// 🎯 合并所有统计信息
|
||||
combinedStats := map[string]interface{}{
|
||||
"write_manager": &stats,
|
||||
"global_buffer": globalBufferStats,
|
||||
"monitoring": monitoringData,
|
||||
"buffer_info": m.globalBufferManager.GetBufferInfo(),
|
||||
}
|
||||
|
||||
return combinedStats
|
||||
}
|
||||
|
||||
// GetWriteManagerStats 获取写入管理器统计(兼容性方法)
|
||||
func (m *DelayedBatchWriteManager) GetWriteManagerStats() *WriteManagerStats {
|
||||
stats := *m.stats
|
||||
stats.CurrentQueueSize = atomic.LoadInt32(&m.stats.CurrentQueueSize)
|
||||
stats.WindowEnd = time.Now()
|
||||
|
||||
// 计算压缩比例
|
||||
if stats.TotalOperations > 0 {
|
||||
stats.SystemLoadAverage = float64(stats.TotalWrites) / float64(stats.TotalOperations)
|
||||
}
|
||||
|
||||
return &stats
|
||||
}
|
||||
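For orientation, here is a minimal sketch of how a caller might wire the manager together. The constructor name `NewDelayedBatchWriteManager` and the `pansou/util/cache` import path are assumptions (only `Initialize`, `SetMainCacheUpdater`, `HandleCacheOperation` and `Shutdown` appear in this diff), so treat this as an illustration rather than the project's actual bootstrap code.

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"

	"pansou/util/cache" // assumed import path for the package in this diff
)

func main() {
	// NewDelayedBatchWriteManager is an assumed constructor.
	manager := cache.NewDelayedBatchWriteManager()

	// The updater receives already-serialized bytes and persists them,
	// e.g. by handing them to the sharded disk cache.
	manager.SetMainCacheUpdater(func(key string, data []byte, ttl time.Duration) error {
		return nil
	})

	if err := manager.Initialize(); err != nil {
		log.Fatal(err)
	}

	// Hot path: the in-memory cache is updated synchronously, and the disk
	// write is buffered according to the hybrid strategy.
	_ = manager.HandleCacheOperation(&cache.CacheOperation{
		Key:       "search:keyword",
		Keyword:   "keyword",
		Timestamp: time.Now(),
		TTL:       time.Hour,
		Priority:  1,
	})

	// On SIGINT/SIGTERM, flush all pending data with a bounded wait.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit
	if err := manager.Shutdown(30 * time.Second); err != nil {
		log.Printf("shutdown: %v", err)
	}
}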
498
util/cache/global_buffer_manager.go
vendored
Normal file
@@ -0,0 +1,498 @@
package cache

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// GlobalBufferStrategy is the global buffering strategy
type GlobalBufferStrategy string

const (
	// BufferByKeyword buffers operations by keyword
	BufferByKeyword GlobalBufferStrategy = "keyword"

	// BufferByPlugin buffers operations by plugin
	BufferByPlugin GlobalBufferStrategy = "plugin"

	// BufferByPattern buffers operations by search pattern
	BufferByPattern GlobalBufferStrategy = "pattern"

	// BufferHybrid is the hybrid buffering strategy
	BufferHybrid GlobalBufferStrategy = "hybrid"
)

// SearchPattern describes a search pattern
type SearchPattern struct {
	KeywordPattern string                 // keyword pattern
	PluginSet      []string               // plugin set
	TimeWindow     time.Duration          // time window
	Frequency      int                    // frequency
	LastAccessTime time.Time              // last access time
	Metadata       map[string]interface{} // metadata
}

// GlobalBuffer is a single global buffer
type GlobalBuffer struct {
	// Basic information
	ID            string               // buffer ID
	Strategy      GlobalBufferStrategy // buffering strategy
	CreatedAt     time.Time            // creation time
	LastUpdatedAt time.Time            // last update time

	// Data storage
	Operations    []*CacheOperation            // operation list
	KeywordGroups map[string][]*CacheOperation // grouped by keyword
	PluginGroups  map[string][]*CacheOperation // grouped by plugin

	// Statistics
	TotalOperations int64   // total number of operations
	TotalDataSize   int64   // total data size
	CompressRatio   float64 // compression ratio

	// Control parameters
	MaxOperations int           // maximum number of operations
	MaxDataSize   int64         // maximum data size
	MaxAge        time.Duration // maximum lifetime

	mutex sync.RWMutex // read-write lock
}

// GlobalBufferManager manages the global buffers
type GlobalBufferManager struct {
	// Configuration
	strategy          GlobalBufferStrategy
	maxBuffers        int // maximum number of buffers
	defaultBufferSize int // default buffer size

	// Buffer management
	buffers      map[string]*GlobalBuffer // buffer map
	buffersMutex sync.RWMutex             // buffer lock

	// Search-pattern analysis
	patternAnalyzer *SearchPatternAnalyzer

	// Data merger
	dataMerger *AdvancedDataMerger

	// Status monitoring
	statusMonitor *BufferStatusMonitor

	// Statistics
	stats *GlobalBufferStats

	// Control channels
	cleanupTicker *time.Ticker
	shutdownChan  chan struct{}

	// Initialization state
	initialized int32
}

// GlobalBufferStats holds the global buffer statistics
type GlobalBufferStats struct {
	// Buffer statistics
	ActiveBuffers         int64 // number of active buffers
	TotalBuffersCreated   int64 // total buffers created
	TotalBuffersDestroyed int64 // total buffers destroyed

	// Operation statistics
	TotalOperationsBuffered int64 // total buffered operations
	TotalOperationsMerged   int64 // total merged operations
	TotalDataMerged         int64 // total merged data size

	// Efficiency statistics
	AverageCompressionRatio float64       // average compression ratio
	AverageBufferLifetime   time.Duration // average buffer lifetime
	HitRate                 float64       // hit rate

	// Performance statistics
	LastCleanupTime  time.Time     // last cleanup time
	CleanupFrequency time.Duration // cleanup frequency
	MemoryUsage      int64         // memory usage
}

// NewGlobalBufferManager creates a global buffer manager
func NewGlobalBufferManager(strategy GlobalBufferStrategy) *GlobalBufferManager {
	manager := &GlobalBufferManager{
		strategy:          strategy,
		maxBuffers:        50,  // at most 50 buffers
		defaultBufferSize: 100, // 100 operations by default
		buffers:           make(map[string]*GlobalBuffer),
		shutdownChan:      make(chan struct{}),
		stats: &GlobalBufferStats{
			LastCleanupTime: time.Now(),
		},
	}

	// Initialize the components
	manager.patternAnalyzer = NewSearchPatternAnalyzer()
	manager.dataMerger = NewAdvancedDataMerger()
	manager.statusMonitor = NewBufferStatusMonitor()

	return manager
}

// Initialize initializes the manager
func (g *GlobalBufferManager) Initialize() error {
	if !atomic.CompareAndSwapInt32(&g.initialized, 0, 1) {
		return nil // already initialized
	}

	// Start the periodic cleanup
	g.cleanupTicker = time.NewTicker(5 * time.Minute) // clean up every 5 minutes
	go g.cleanupRoutine()

	// Start the status monitor
	go g.statusMonitor.Start(g)

	fmt.Printf("🚀 [global buffer manager] initialized, strategy: %s\n", g.strategy)
	return nil
}

// AddOperation adds an operation to the global buffer
func (g *GlobalBufferManager) AddOperation(op *CacheOperation) (*GlobalBuffer, bool, error) {
	if err := g.Initialize(); err != nil {
		return nil, false, err
	}

	// 🎯 Determine the buffer ID according to the strategy
	bufferID := g.determineBufferID(op)

	g.buffersMutex.Lock()
	defer g.buffersMutex.Unlock()

	// 🔧 Get or create the buffer
	buffer, exists := g.buffers[bufferID]
	if !exists {
		buffer = g.createNewBuffer(bufferID, op)
		g.buffers[bufferID] = buffer
		atomic.AddInt64(&g.stats.TotalBuffersCreated, 1)
		atomic.AddInt64(&g.stats.ActiveBuffers, 1)
	}

	// 🚀 Add the operation to the buffer
	shouldFlush := g.addOperationToBuffer(buffer, op)

	// 📊 Update statistics
	atomic.AddInt64(&g.stats.TotalOperationsBuffered, 1)

	return buffer, shouldFlush, nil
}

// determineBufferID determines the buffer ID
func (g *GlobalBufferManager) determineBufferID(op *CacheOperation) string {
	switch g.strategy {
	case BufferByKeyword:
		return fmt.Sprintf("keyword_%s", op.Keyword)

	case BufferByPlugin:
		return fmt.Sprintf("plugin_%s", op.PluginName)

	case BufferByPattern:
		pattern := g.patternAnalyzer.AnalyzePattern(op)
		return fmt.Sprintf("pattern_%s", pattern.KeywordPattern)

	case BufferHybrid:
		// Hybrid strategy: keyword + plugin + time window
		timeWindow := op.Timestamp.Truncate(time.Minute) // 1-minute time window
		return fmt.Sprintf("hybrid_%s_%s_%d",
			op.Keyword, op.PluginName, timeWindow.Unix())

	default:
		return fmt.Sprintf("default_%s", op.Key)
	}
}
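To make the strategies concrete, here is a hedged illustration of the IDs this function yields for a single operation; the field values and the timestamp are invented for the example.

// Illustration only: for op = &CacheOperation{Key: "k1", Keyword: "三体",
// PluginName: "pansearch", Timestamp: t} the strategies map to IDs like:
//
//	BufferByKeyword -> "keyword_三体"
//	BufferByPlugin  -> "plugin_pansearch"
//	BufferHybrid    -> "hybrid_三体_pansearch_1722499200" (t truncated to the minute)
//
// So the hybrid strategy only co-buffers operations that share a keyword,
// a plugin and a one-minute window, which keeps merges cheap and relevant.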
// createNewBuffer creates a new buffer
func (g *GlobalBufferManager) createNewBuffer(bufferID string, firstOp *CacheOperation) *GlobalBuffer {
	now := time.Now()

	buffer := &GlobalBuffer{
		ID:            bufferID,
		Strategy:      g.strategy,
		CreatedAt:     now,
		LastUpdatedAt: now,
		Operations:    make([]*CacheOperation, 0, g.defaultBufferSize),
		KeywordGroups: make(map[string][]*CacheOperation),
		PluginGroups:  make(map[string][]*CacheOperation),
		MaxOperations: g.defaultBufferSize,
		MaxDataSize:   int64(g.defaultBufferSize * 1000), // roughly 100KB
		MaxAge:        10 * time.Minute,                  // 10-minute maximum lifetime
	}

	return buffer
}

// addOperationToBuffer adds an operation to a buffer
func (g *GlobalBufferManager) addOperationToBuffer(buffer *GlobalBuffer, op *CacheOperation) bool {
	buffer.mutex.Lock()
	defer buffer.mutex.Unlock()

	// 🔧 Data-merge optimization
	merged := g.dataMerger.TryMergeOperation(buffer, op)
	if merged {
		atomic.AddInt64(&g.stats.TotalOperationsMerged, 1)
		atomic.AddInt64(&g.stats.TotalDataMerged, int64(op.DataSize))
	} else {
		// Add a new operation
		buffer.Operations = append(buffer.Operations, op)
		buffer.TotalOperations++
		buffer.TotalDataSize += int64(op.DataSize)

		// Group by keyword
		if buffer.KeywordGroups[op.Keyword] == nil {
			buffer.KeywordGroups[op.Keyword] = make([]*CacheOperation, 0)
		}
		buffer.KeywordGroups[op.Keyword] = append(buffer.KeywordGroups[op.Keyword], op)

		// Group by plugin
		if buffer.PluginGroups[op.PluginName] == nil {
			buffer.PluginGroups[op.PluginName] = make([]*CacheOperation, 0)
		}
		buffer.PluginGroups[op.PluginName] = append(buffer.PluginGroups[op.PluginName], op)
	}

	buffer.LastUpdatedAt = time.Now()

	// 🎯 Check whether the buffer should be flushed
	return g.shouldFlushBuffer(buffer)
}

// shouldFlushBuffer decides whether a buffer should be flushed
func (g *GlobalBufferManager) shouldFlushBuffer(buffer *GlobalBuffer) bool {
	now := time.Now()

	// Condition 1: the number of operations has reached the threshold
	if len(buffer.Operations) >= buffer.MaxOperations {
		return true
	}

	// Condition 2: the data size has reached the threshold
	if buffer.TotalDataSize >= buffer.MaxDataSize {
		return true
	}

	// Condition 3: the buffer has lived too long
	if now.Sub(buffer.CreatedAt) >= buffer.MaxAge {
		return true
	}

	// Condition 4: memory pressure (based on global statistics)
	totalMemory := atomic.LoadInt64(&g.stats.MemoryUsage)
	if totalMemory > 50*1024*1024 { // 50MB memory threshold
		return true
	}

	// Condition 5: the ratio of high-priority operations has reached the threshold
	highPriorityRatio := g.calculateHighPriorityRatio(buffer)
	if highPriorityRatio > 0.6 { // 60% high-priority threshold
		return true
	}

	return false
}

// calculateHighPriorityRatio computes the ratio of high-priority operations
func (g *GlobalBufferManager) calculateHighPriorityRatio(buffer *GlobalBuffer) float64 {
	if len(buffer.Operations) == 0 {
		return 0
	}

	highPriorityCount := 0
	for _, op := range buffer.Operations {
		if op.Priority <= 2 { // level-1 and level-2 plugins
			highPriorityCount++
		}
	}

	return float64(highPriorityCount) / float64(len(buffer.Operations))
}

// FlushBuffer flushes the specified buffer
func (g *GlobalBufferManager) FlushBuffer(bufferID string) ([]*CacheOperation, error) {
	g.buffersMutex.Lock()
	defer g.buffersMutex.Unlock()

	buffer, exists := g.buffers[bufferID]
	if !exists {
		return nil, fmt.Errorf("buffer does not exist: %s", bufferID)
	}

	buffer.mutex.Lock()
	defer buffer.mutex.Unlock()

	// Take all operations
	operations := make([]*CacheOperation, len(buffer.Operations))
	copy(operations, buffer.Operations)

	// Remember the pre-flush total before resetting it, so the compression
	// ratio below is not divided by zero
	flushedTotal := buffer.TotalOperations

	// Clear the buffer
	buffer.Operations = buffer.Operations[:0]
	buffer.KeywordGroups = make(map[string][]*CacheOperation)
	buffer.PluginGroups = make(map[string][]*CacheOperation)
	buffer.TotalOperations = 0
	buffer.TotalDataSize = 0

	// Update the compression ratio
	if flushedTotal > 0 {
		buffer.CompressRatio = float64(len(operations)) / float64(flushedTotal)
	}

	return operations, nil
}

// FlushAllBuffers flushes every buffer
func (g *GlobalBufferManager) FlushAllBuffers() map[string][]*CacheOperation {
	g.buffersMutex.RLock()
	bufferIDs := make([]string, 0, len(g.buffers))
	for id := range g.buffers {
		bufferIDs = append(bufferIDs, id)
	}
	g.buffersMutex.RUnlock()

	result := make(map[string][]*CacheOperation)
	for _, id := range bufferIDs {
		if ops, err := g.FlushBuffer(id); err == nil && len(ops) > 0 {
			result[id] = ops
		}
	}

	return result
}

// cleanupRoutine is the cleanup loop
func (g *GlobalBufferManager) cleanupRoutine() {
	for {
		select {
		case <-g.cleanupTicker.C:
			g.performCleanup()

		case <-g.shutdownChan:
			g.cleanupTicker.Stop()
			return
		}
	}
}

// performCleanup performs one cleanup pass
func (g *GlobalBufferManager) performCleanup() {
	now := time.Now()

	g.buffersMutex.Lock()
	defer g.buffersMutex.Unlock()

	toDelete := make([]string, 0)

	for id, buffer := range g.buffers {
		buffer.mutex.RLock()

		// Cleanup condition: the buffer is empty and has been inactive for more than 5 minutes
		if len(buffer.Operations) == 0 && now.Sub(buffer.LastUpdatedAt) > 5*time.Minute {
			toDelete = append(toDelete, id)
		}

		buffer.mutex.RUnlock()
	}

	// Delete the expired buffers
	for _, id := range toDelete {
		delete(g.buffers, id)
		atomic.AddInt64(&g.stats.TotalBuffersDestroyed, 1)
		atomic.AddInt64(&g.stats.ActiveBuffers, -1)
	}

	// Update the cleanup statistics (compute the interval before
	// overwriting the timestamp, otherwise it is always zero)
	g.stats.CleanupFrequency = now.Sub(g.stats.LastCleanupTime)
	g.stats.LastCleanupTime = now

	// Recompute the memory usage
	g.updateMemoryUsage()

	if len(toDelete) > 0 {
		fmt.Printf("🧹 [global buffer] cleanup complete, removed %d expired buffers\n", len(toDelete))
	}
}

// updateMemoryUsage updates the estimated memory usage
func (g *GlobalBufferManager) updateMemoryUsage() {
	totalMemory := int64(0)

	for _, buffer := range g.buffers {
		buffer.mutex.RLock()
		totalMemory += buffer.TotalDataSize
		buffer.mutex.RUnlock()
	}

	atomic.StoreInt64(&g.stats.MemoryUsage, totalMemory)
}

// Shutdown shuts the manager down gracefully
func (g *GlobalBufferManager) Shutdown() error {
	if !atomic.CompareAndSwapInt32(&g.initialized, 1, 0) {
		return nil // already shut down
	}

	// Stop the background tasks
	close(g.shutdownChan)

	// Flush all buffers
	flushedBuffers := g.FlushAllBuffers()
	totalOperations := 0
	for _, ops := range flushedBuffers {
		totalOperations += len(ops)
	}

	fmt.Printf("🔄 [global buffer manager] shutdown complete, flushed %d buffers with %d operations\n",
		len(flushedBuffers), totalOperations)

	return nil
}

// GetStats returns the statistics
func (g *GlobalBufferManager) GetStats() *GlobalBufferStats {
	stats := *g.stats
	stats.ActiveBuffers = atomic.LoadInt64(&g.stats.ActiveBuffers)
	stats.MemoryUsage = atomic.LoadInt64(&g.stats.MemoryUsage)

	// Compute the average compression ratio
	if stats.TotalOperationsBuffered > 0 {
		stats.AverageCompressionRatio = float64(stats.TotalOperationsMerged) / float64(stats.TotalOperationsBuffered)
	}

	// Compute the hit rate
	if stats.TotalOperationsBuffered > 0 {
		stats.HitRate = float64(stats.TotalOperationsMerged) / float64(stats.TotalOperationsBuffered)
	}

	return &stats
}

// GetBufferInfo returns per-buffer information
func (g *GlobalBufferManager) GetBufferInfo() map[string]interface{} {
	g.buffersMutex.RLock()
	defer g.buffersMutex.RUnlock()

	info := make(map[string]interface{})

	for id, buffer := range g.buffers {
		buffer.mutex.RLock()
		bufferInfo := map[string]interface{}{
			"id":               id,
			"strategy":         buffer.Strategy,
			"created_at":       buffer.CreatedAt,
			"last_updated_at":  buffer.LastUpdatedAt,
			"total_operations": buffer.TotalOperations,
			"total_data_size":  buffer.TotalDataSize,
			"compress_ratio":   buffer.CompressRatio,
			"keyword_groups":   len(buffer.KeywordGroups),
			"plugin_groups":    len(buffer.PluginGroups),
		}
		buffer.mutex.RUnlock()

		info[id] = bufferInfo
	}

	return info
}
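A hedged sketch of the call pattern the write manager uses internally, shown standalone. It only uses the exported methods above; the batch writer is abstracted as a callback because `batchWriteToDisk` belongs to `DelayedBatchWriteManager`, not to this type.

// Illustration only: buffer an operation and drain the buffer once it
// crosses a size/age/priority threshold.
func bufferAndMaybeFlush(gbm *GlobalBufferManager, op *CacheOperation,
	write func([]*CacheOperation) error) error {
	buffer, shouldFlush, err := gbm.AddOperation(op)
	if err != nil {
		return err // callers can fall back to a local queue, as HandleCacheOperation does
	}
	if !shouldFlush {
		return nil
	}
	ops, err := gbm.FlushBuffer(buffer.ID)
	if err != nil || len(ops) == 0 {
		return err
	}
	return write(ops) // e.g. the write manager's batchWriteToDisk
}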
208
util/cache/metric_collector.go
vendored
Normal file
@@ -0,0 +1,208 @@
package cache

import (
	"runtime"
	"sync/atomic"
	"time"
)

// NewMetricCollector creates a metric collector
func NewMetricCollector() *MetricCollector {
	return &MetricCollector{
		systemMetrics:      &SystemMetrics{},
		applicationMetrics: &ApplicationMetrics{},
		cacheMetrics:       &CacheMetrics{},
		metricsHistory:     make([]MetricSnapshot, 0),
		maxHistorySize:     1000, // keep 1000 historical snapshots
		collectionChan:     make(chan struct{}),
	}
}

// Start starts metric collection
func (m *MetricCollector) Start(interval time.Duration) error {
	if !atomic.CompareAndSwapInt32(&m.isCollecting, 0, 1) {
		return nil // already collecting
	}

	go m.collectionLoop(interval)
	return nil
}

// Stop stops metric collection
func (m *MetricCollector) Stop() {
	if atomic.CompareAndSwapInt32(&m.isCollecting, 1, 0) {
		close(m.collectionChan)
	}
}

// collectionLoop is the collection loop
func (m *MetricCollector) collectionLoop(interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			m.collectMetrics()

		case <-m.collectionChan:
			return
		}
	}
}

// collectMetrics collects all metrics
func (m *MetricCollector) collectMetrics() {
	now := time.Now()

	// Collect the system metrics
	systemMetrics := m.collectSystemMetrics(now)

	// Collect the application metrics
	applicationMetrics := m.collectApplicationMetrics(now)

	// Collect the cache metrics
	cacheMetrics := m.collectCacheMetrics(now)

	// Build the snapshot
	snapshot := MetricSnapshot{
		Timestamp:   now,
		System:      *systemMetrics,
		Application: *applicationMetrics,
		Cache:       *cacheMetrics,
	}

	// Compute the composite scores
	snapshot.OverallPerformance = m.calculateOverallPerformance(&snapshot)
	snapshot.Efficiency = m.calculateEfficiency(&snapshot)
	snapshot.Stability = m.calculateStability(&snapshot)

	// Save the snapshot
	m.saveSnapshot(snapshot)
}

// collectSystemMetrics collects system metrics
func (m *MetricCollector) collectSystemMetrics(timestamp time.Time) *SystemMetrics {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)

	return &SystemMetrics{
		Timestamp:       timestamp,
		CPUUsage:        float64(memStats.GCCPUFraction),
		MemoryUsage:     int64(memStats.Alloc),
		MemoryTotal:     int64(memStats.Sys),
		DiskIORate:      0, // simplified implementation
		NetworkIORate:   0, // simplified implementation
		GoroutineCount:  runtime.NumGoroutine(),
		GCPauseDuration: time.Duration(memStats.PauseTotalNs),
		HeapSize:        int64(memStats.HeapSys),
		AllocRate:       float64(memStats.Mallocs - memStats.Frees),
	}
}

// collectApplicationMetrics collects application metrics
func (m *MetricCollector) collectApplicationMetrics(timestamp time.Time) *ApplicationMetrics {
	// Simplified implementation; in production these should come from the application monitoring system
	return &ApplicationMetrics{
		Timestamp:       timestamp,
		RequestRate:     100.0, // placeholder value
		ResponseTime:    50 * time.Millisecond,
		ErrorRate:       0.01,
		ThroughputMBps:  10.5,
		ConcurrentUsers: 50,
		QueueDepth:      5,
		ProcessingRate:  95.5,
	}
}

// collectCacheMetrics collects cache metrics
func (m *MetricCollector) collectCacheMetrics(timestamp time.Time) *CacheMetrics {
	// Simplified implementation; in production these should come from the cache system
	return &CacheMetrics{
		Timestamp:         timestamp,
		HitRate:           0.85,
		WriteRate:         20.0,
		ReadRate:          80.0,
		EvictionRate:      2.0,
		CompressionRatio:  0.6,
		StorageUsage:      1024 * 1024 * 100, // 100MB
		BufferUtilization: 0.75,
		BatchEfficiency:   0.9,
	}
}

// calculateOverallPerformance computes the overall performance score
func (m *MetricCollector) calculateOverallPerformance(snapshot *MetricSnapshot) float64 {
	// Performance scoring
	cpuScore := (1.0 - snapshot.System.CPUUsage) * 30
	memoryScore := (1.0 - float64(snapshot.System.MemoryUsage)/float64(snapshot.System.MemoryTotal)) * 25
	responseScore := (1.0 - float64(snapshot.Application.ResponseTime)/float64(time.Second)) * 25
	cacheScore := snapshot.Cache.HitRate * 20

	return cpuScore + memoryScore + responseScore + cacheScore
}

// calculateEfficiency computes the efficiency score
func (m *MetricCollector) calculateEfficiency(snapshot *MetricSnapshot) float64 {
	// Efficiency scoring
	cacheEfficiency := snapshot.Cache.HitRate * 0.4
	batchEfficiency := snapshot.Cache.BatchEfficiency * 0.3
	compressionEfficiency := snapshot.Cache.CompressionRatio * 0.3

	return cacheEfficiency + batchEfficiency + compressionEfficiency
}

// calculateStability computes the stability score
func (m *MetricCollector) calculateStability(snapshot *MetricSnapshot) float64 {
	// Stability scoring (based on rates of change)
	errorRateStability := (1.0 - snapshot.Application.ErrorRate) * 0.5
	responseTimeStability := 1.0 - (float64(snapshot.Application.ResponseTime) / float64(time.Second))
	if responseTimeStability < 0 {
		responseTimeStability = 0
	}
	responseTimeStability *= 0.5

	return errorRateStability + responseTimeStability
}

// saveSnapshot saves a snapshot
func (m *MetricCollector) saveSnapshot(snapshot MetricSnapshot) {
	m.historyMutex.Lock()
	defer m.historyMutex.Unlock()

	m.metricsHistory = append(m.metricsHistory, snapshot)

	// Cap the history size
	if len(m.metricsHistory) > m.maxHistorySize {
		m.metricsHistory = m.metricsHistory[1:]
	}
}

// GetLatestMetrics returns the latest snapshot
func (m *MetricCollector) GetLatestMetrics() *MetricSnapshot {
	m.historyMutex.RLock()
	defer m.historyMutex.RUnlock()

	if len(m.metricsHistory) == 0 {
		return nil
	}

	latest := m.metricsHistory[len(m.metricsHistory)-1]
	return &latest
}

// GetMetricsHistory returns the metric history
func (m *MetricCollector) GetMetricsHistory(limit int) []MetricSnapshot {
	m.historyMutex.RLock()
	defer m.historyMutex.RUnlock()

	if limit <= 0 || limit > len(m.metricsHistory) {
		limit = len(m.metricsHistory)
	}

	history := make([]MetricSnapshot, limit)
	startIndex := len(m.metricsHistory) - limit
	copy(history, m.metricsHistory[startIndex:])

	return history
}
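A minimal usage sketch, assuming the `MetricCollector` struct fields referenced above (`isCollecting`, `metricsHistory`, `historyMutex` and friends) and the snapshot types are declared elsewhere in this package. The handler name is invented for illustration.

// Illustration only: start the collector at boot, then poll it from,
// say, a health endpoint.
//
//	collector := NewMetricCollector()
//	_ = collector.Start(30 * time.Second) // one snapshot every 30 seconds
//	defer collector.Stop()
func reportHealth(collector *MetricCollector) string {
	if snap := collector.GetLatestMetrics(); snap != nil {
		return fmt.Sprintf("performance=%.1f efficiency=%.2f stability=%.2f",
			snap.OverallPerformance, snap.Efficiency, snap.Stability)
	}
	return "no metrics collected yet"
}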
479
util/cache/performance_analyzer.go
vendored
Normal file
@@ -0,0 +1,479 @@
package cache

import (
	"fmt"
	"math"
	"sort"
	"sync"
	"time"
)

// TrendAnalyzer analyzes metric trends
type TrendAnalyzer struct {
	trends map[string]*TrendData
	mutex  sync.RWMutex
}

// TrendData holds trend data
type TrendData struct {
	Values     []float64
	Timestamps []time.Time
	Slope      float64
	RSquared   float64
	LastUpdate time.Time
}

// AnomalyDetector detects metric anomalies
type AnomalyDetector struct {
	baselines map[string]*Baseline
	mutex     sync.RWMutex
}

// Baseline holds baseline data
type Baseline struct {
	Mean       float64
	StdDev     float64
	Min        float64
	Max        float64
	SampleSize int
	LastUpdate time.Time
}

// CorrelationAnalyzer analyzes metric correlations
type CorrelationAnalyzer struct {
	correlationMatrix map[string]map[string]float64
	mutex             sync.RWMutex
}

// NewPerformanceAnalyzer creates a performance analyzer
func NewPerformanceAnalyzer() *PerformanceAnalyzer {
	return &PerformanceAnalyzer{
		trendAnalyzer: &TrendAnalyzer{
			trends: make(map[string]*TrendData),
		},
		anomalyDetector: &AnomalyDetector{
			baselines: make(map[string]*Baseline),
		},
		correlationAnalyzer: &CorrelationAnalyzer{
			correlationMatrix: make(map[string]map[string]float64),
		},
		currentTrends:     make(map[string]Trend),
		detectedAnomalies: make([]Anomaly, 0),
		correlations:      make(map[string]float64),
	}
}

// AnalyzeTrends analyzes trends
func (p *PerformanceAnalyzer) AnalyzeTrends(history []MetricSnapshot) {
	if len(history) < 3 {
		return // too few data points to analyze a trend
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Analyze the trend of each metric
	metrics := map[string][]float64{
		"cpu_usage":           make([]float64, len(history)),
		"memory_usage":        make([]float64, len(history)),
		"response_time":       make([]float64, len(history)),
		"cache_hit_rate":      make([]float64, len(history)),
		"overall_performance": make([]float64, len(history)),
	}

	timestamps := make([]time.Time, len(history))

	// Extract the time series
	for i, snapshot := range history {
		metrics["cpu_usage"][i] = snapshot.System.CPUUsage
		metrics["memory_usage"][i] = float64(snapshot.System.MemoryUsage) / 1024 / 1024 // MB
		metrics["response_time"][i] = float64(snapshot.Application.ResponseTime) / float64(time.Millisecond)
		metrics["cache_hit_rate"][i] = snapshot.Cache.HitRate
		metrics["overall_performance"][i] = snapshot.OverallPerformance
		timestamps[i] = snapshot.Timestamp
	}

	// Compute each metric's trend
	for metricName, values := range metrics {
		trend := p.calculateTrend(metricName, values, timestamps)
		p.currentTrends[metricName] = trend
	}
}

// calculateTrend computes a trend
func (p *PerformanceAnalyzer) calculateTrend(metricName string, values []float64, timestamps []time.Time) Trend {
	if len(values) < 2 {
		return Trend{
			Metric:     metricName,
			Direction:  "stable",
			Slope:      0,
			Confidence: 0,
			Duration:   0,
			Prediction: values[len(values)-1],
		}
	}

	// Fit a linear regression to the series
	slope, intercept, rSquared := p.linearRegression(values, timestamps)

	// Determine the trend direction
	direction := "stable"
	if math.Abs(slope) > 0.01 { // threshold
		if slope > 0 {
			direction = "increasing"
		} else {
			direction = "decreasing"
		}
	}

	// Confidence
	confidence := math.Min(rSquared, 1.0)

	// Predict a future value
	futureTime := timestamps[len(timestamps)-1].Add(5 * time.Minute)
	prediction := intercept + slope*float64(futureTime.Unix())

	return Trend{
		Metric:     metricName,
		Direction:  direction,
		Slope:      slope,
		Confidence: confidence,
		Duration:   timestamps[len(timestamps)-1].Sub(timestamps[0]),
		Prediction: prediction,
	}
}

// linearRegression performs ordinary least-squares linear regression
func (p *PerformanceAnalyzer) linearRegression(y []float64, timestamps []time.Time) (slope, intercept, rSquared float64) {
	n := float64(len(y))
	if n < 2 {
		return 0, y[0], 0
	}

	// Convert the timestamps to numbers
	x := make([]float64, len(timestamps))
	for i, t := range timestamps {
		x[i] = float64(t.Unix())
	}

	// Compute the means
	var sumX, sumY float64
	for i := 0; i < len(x); i++ {
		sumX += x[i]
		sumY += y[i]
	}
	meanX := sumX / n
	meanY := sumY / n

	// Compute the slope and intercept
	var numerator, denominator float64
	for i := 0; i < len(x); i++ {
		numerator += (x[i] - meanX) * (y[i] - meanY)
		denominator += (x[i] - meanX) * (x[i] - meanX)
	}

	if denominator == 0 {
		return 0, meanY, 0
	}

	slope = numerator / denominator
	intercept = meanY - slope*meanX

	// Compute R²
	var ssRes, ssTot float64
	for i := 0; i < len(y); i++ {
		predicted := intercept + slope*x[i]
		ssRes += (y[i] - predicted) * (y[i] - predicted)
		ssTot += (y[i] - meanY) * (y[i] - meanY)
	}

	if ssTot == 0 {
		rSquared = 1.0
	} else {
		rSquared = 1.0 - ssRes/ssTot
	}

	return slope, intercept, math.Max(0, rSquared)
}

// DetectAnomalies detects anomalies
func (p *PerformanceAnalyzer) DetectAnomalies(currentMetrics *MetricSnapshot) {
	if currentMetrics == nil {
		return
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Clear the previous anomalies
	p.detectedAnomalies = make([]Anomaly, 0)

	// Check each metric for anomalies
	metrics := map[string]float64{
		"cpu_usage":           currentMetrics.System.CPUUsage,
		"memory_usage":        float64(currentMetrics.System.MemoryUsage) / 1024 / 1024,
		"response_time":       float64(currentMetrics.Application.ResponseTime) / float64(time.Millisecond),
		"error_rate":          currentMetrics.Application.ErrorRate,
		"cache_hit_rate":      currentMetrics.Cache.HitRate,
		"overall_performance": currentMetrics.OverallPerformance,
	}

	for metricName, value := range metrics {
		anomaly := p.detectMetricAnomaly(metricName, value, currentMetrics.Timestamp)
		if anomaly != nil {
			p.detectedAnomalies = append(p.detectedAnomalies, *anomaly)
		}
	}
}

// detectMetricAnomaly checks a single metric for anomalies
func (p *PerformanceAnalyzer) detectMetricAnomaly(metricName string, value float64, timestamp time.Time) *Anomaly {
	baseline := p.anomalyDetector.baselines[metricName]
	if baseline == nil {
		// Create a new baseline
		p.anomalyDetector.baselines[metricName] = &Baseline{
			Mean:       value,
			StdDev:     0,
			Min:        value,
			Max:        value,
			SampleSize: 1,
			LastUpdate: timestamp,
		}
		return nil
	}

	// Update the baseline
	p.updateBaseline(baseline, value, timestamp)

	// Detect anomalies with the 3-sigma rule
	if baseline.StdDev > 0 {
		zScore := math.Abs(value-baseline.Mean) / baseline.StdDev

		var severity string
		var impact float64

		if zScore > 3.0 {
			severity = "high"
			impact = 0.8
		} else if zScore > 2.0 {
			severity = "medium"
			impact = 0.5
		} else if zScore > 1.5 {
			severity = "low"
			impact = 0.2
		} else {
			return nil // no anomaly
		}

		// Build the anomaly description
		description := fmt.Sprintf("%s anomaly: current value %.2f, expected range [%.2f, %.2f]",
			metricName, value,
			baseline.Mean-2*baseline.StdDev,
			baseline.Mean+2*baseline.StdDev)

		return &Anomaly{
			Metric:        metricName,
			Timestamp:     timestamp,
			Severity:      severity,
			Value:         value,
			ExpectedRange: [2]float64{baseline.Mean - 2*baseline.StdDev, baseline.Mean + 2*baseline.StdDev},
			Description:   description,
			Impact:        impact,
		}
	}

	return nil
}

// updateBaseline updates a baseline
func (p *PerformanceAnalyzer) updateBaseline(baseline *Baseline, newValue float64, timestamp time.Time) {
	// Incrementally update the mean and standard deviation
	oldMean := baseline.Mean
	baseline.SampleSize++
	baseline.Mean += (newValue - baseline.Mean) / float64(baseline.SampleSize)

	// Update the variance (Welford's algorithm)
	if baseline.SampleSize > 1 {
		variance := (float64(baseline.SampleSize-2)*baseline.StdDev*baseline.StdDev +
			(newValue-oldMean)*(newValue-baseline.Mean)) / float64(baseline.SampleSize-1)
		baseline.StdDev = math.Sqrt(math.Max(0, variance))
	}

	// Update the extrema
	if newValue < baseline.Min {
		baseline.Min = newValue
	}
	if newValue > baseline.Max {
		baseline.Max = newValue
	}

	baseline.LastUpdate = timestamp
}
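For reference, the incremental update above follows Welford's one-pass recurrences. With $x_n$ the new sample and $n$ the new sample count,

$$\mu_n = \mu_{n-1} + \frac{x_n - \mu_{n-1}}{n}, \qquad s_n^2 = \frac{(n-2)\,s_{n-1}^2 + (x_n - \mu_{n-1})(x_n - \mu_n)}{n-1},$$

so the running mean and sample variance stay current in O(1) per observation without storing the series. A quick check with the samples 1, 2, 3: the recurrences give $\mu_3 = 2$ and $s_3^2 = 1$, matching the direct two-pass computation.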
// AnalyzeCorrelations analyzes correlations between metrics
func (p *PerformanceAnalyzer) AnalyzeCorrelations(history []MetricSnapshot) {
	if len(history) < 3 {
		return
	}

	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Extract the metric series
	metrics := map[string][]float64{
		"cpu_usage":           make([]float64, len(history)),
		"memory_usage":        make([]float64, len(history)),
		"response_time":       make([]float64, len(history)),
		"cache_hit_rate":      make([]float64, len(history)),
		"overall_performance": make([]float64, len(history)),
	}

	for i, snapshot := range history {
		metrics["cpu_usage"][i] = snapshot.System.CPUUsage
		metrics["memory_usage"][i] = float64(snapshot.System.MemoryUsage) / 1024 / 1024
		metrics["response_time"][i] = float64(snapshot.Application.ResponseTime) / float64(time.Millisecond)
		metrics["cache_hit_rate"][i] = snapshot.Cache.HitRate
		metrics["overall_performance"][i] = snapshot.OverallPerformance
	}

	// Compute the correlation matrix
	metricNames := make([]string, 0, len(metrics))
	for name := range metrics {
		metricNames = append(metricNames, name)
	}
	sort.Strings(metricNames)

	for i, metric1 := range metricNames {
		if p.correlationAnalyzer.correlationMatrix[metric1] == nil {
			p.correlationAnalyzer.correlationMatrix[metric1] = make(map[string]float64)
		}

		for j, metric2 := range metricNames {
			if i <= j {
				correlation := p.calculateCorrelation(metrics[metric1], metrics[metric2])
				p.correlationAnalyzer.correlationMatrix[metric1][metric2] = correlation
				p.correlationAnalyzer.correlationMatrix[metric2][metric1] = correlation

				// Keep the significant correlations
				if math.Abs(correlation) > 0.5 && metric1 != metric2 {
					p.correlations[fmt.Sprintf("%s_%s", metric1, metric2)] = correlation
				}
			}
		}
	}
}

// calculateCorrelation computes the Pearson correlation coefficient
func (p *PerformanceAnalyzer) calculateCorrelation(x, y []float64) float64 {
	if len(x) != len(y) || len(x) < 2 {
		return 0
	}

	n := float64(len(x))

	// Compute the means
	var sumX, sumY float64
	for i := 0; i < len(x); i++ {
		sumX += x[i]
		sumY += y[i]
	}
	meanX := sumX / n
	meanY := sumY / n

	// Compute the covariance and variances
	var covariance, varianceX, varianceY float64
	for i := 0; i < len(x); i++ {
		dx := x[i] - meanX
		dy := y[i] - meanY
		covariance += dx * dy
		varianceX += dx * dx
		varianceY += dy * dy
	}

	// Compute the correlation coefficient
	if varianceX == 0 || varianceY == 0 {
		return 0
	}

	correlation := covariance / math.Sqrt(varianceX*varianceY)
	return correlation
}

// AnalyzeIssues analyzes performance issues
func (p *PerformanceAnalyzer) AnalyzeIssues(currentMetrics *MetricSnapshot) []string {
	if currentMetrics == nil {
		return nil
	}

	issues := make([]string, 0)

	// CPU usage too high
	if currentMetrics.System.CPUUsage > 0.8 {
		issues = append(issues, "high_cpu_usage")
	}

	// Memory usage too high
	memoryUsageRatio := float64(currentMetrics.System.MemoryUsage) / float64(currentMetrics.System.MemoryTotal)
	if memoryUsageRatio > 0.85 {
		issues = append(issues, "high_memory_usage")
	}

	// Response time too long
	if currentMetrics.Application.ResponseTime > 1*time.Second {
		issues = append(issues, "high_response_time")
	}

	// Error rate too high
	if currentMetrics.Application.ErrorRate > 0.05 {
		issues = append(issues, "high_error_rate")
	}

	// Cache hit rate too low
	if currentMetrics.Cache.HitRate < 0.7 {
		issues = append(issues, "low_cache_hit_rate")
	}

	// Overall performance too low
	if currentMetrics.OverallPerformance < 60 {
		issues = append(issues, "low_overall_performance")
	}

	return issues
}

// GetCurrentTrends returns the current trends
func (p *PerformanceAnalyzer) GetCurrentTrends() map[string]Trend {
	p.mutex.RLock()
	defer p.mutex.RUnlock()

	trends := make(map[string]Trend)
	for k, v := range p.currentTrends {
		trends[k] = v
	}

	return trends
}

// GetDetectedAnomalies returns the detected anomalies
func (p *PerformanceAnalyzer) GetDetectedAnomalies() []Anomaly {
	p.mutex.RLock()
	defer p.mutex.RUnlock()

	anomalies := make([]Anomaly, len(p.detectedAnomalies))
	copy(anomalies, p.detectedAnomalies)

	return anomalies
}

// GetCorrelations returns the significant correlations
func (p *PerformanceAnalyzer) GetCorrelations() map[string]float64 {
	p.mutex.RLock()
	defer p.mutex.RUnlock()

	correlations := make(map[string]float64)
	for k, v := range p.correlations {
		correlations[k] = v
	}

	return correlations
}
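Putting the analyzer together, a plausible polling pass (not prescribed by this commit) would string the exported methods above together with the MetricCollector from the previous file; note that with the thresholds in detectMetricAnomaly, a metric is flagged once it drifts more than 1.5 standard deviations from its running baseline.

// Illustration only: one analysis pass over the collector's history.
func analyzeOnce(collector *MetricCollector, analyzer *PerformanceAnalyzer) {
	history := collector.GetMetricsHistory(100)
	latest := collector.GetLatestMetrics()

	analyzer.AnalyzeTrends(history)
	analyzer.AnalyzeCorrelations(history)
	analyzer.DetectAnomalies(latest)

	for _, a := range analyzer.GetDetectedAnomalies() {
		fmt.Printf("[%s] %s (impact %.1f)\n", a.Severity, a.Description, a.Impact)
	}
	for _, issue := range analyzer.AnalyzeIssues(latest) {
		fmt.Println("issue:", issue)
	}
}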
396
util/cache/predictive_model.go
vendored
Normal file
@@ -0,0 +1,396 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewPredictiveModel 创建预测模型
|
||||
func NewPredictiveModel() *PredictiveModel {
|
||||
return &PredictiveModel{
|
||||
modelType: "linear_regression",
|
||||
coefficients: make([]float64, 0),
|
||||
seasonalFactors: make([]float64, 0),
|
||||
trainingData: make([]DataPoint, 0),
|
||||
testData: make([]DataPoint, 0),
|
||||
predictions: make(map[string]Prediction),
|
||||
}
|
||||
}
|
||||
|
||||
// Train 训练模型
|
||||
func (p *PredictiveModel) Train(dataset *LearningDataset) error {
|
||||
p.mutex.Lock()
|
||||
defer p.mutex.Unlock()
|
||||
|
||||
if dataset == nil || len(dataset.Features) == 0 {
|
||||
return fmt.Errorf("训练数据集为空")
|
||||
}
|
||||
|
||||
// 准备训练数据
|
||||
p.prepareTrainingData(dataset)
|
||||
|
||||
// 根据模型类型进行训练
|
||||
switch p.modelType {
|
||||
case "linear_regression":
|
||||
return p.trainLinearRegression(dataset)
|
||||
case "exponential_smoothing":
|
||||
return p.trainExponentialSmoothing(dataset)
|
||||
default:
|
||||
return fmt.Errorf("不支持的模型类型: %s", p.modelType)
|
||||
}
|
||||
}
|
||||
|
||||
// prepareTrainingData 准备训练数据
|
||||
func (p *PredictiveModel) prepareTrainingData(dataset *LearningDataset) {
|
||||
dataset.mutex.RLock()
|
||||
defer dataset.mutex.RUnlock()
|
||||
|
||||
totalSamples := len(dataset.Features)
|
||||
if totalSamples == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// 数据分割
|
||||
trainSize := int(float64(totalSamples) * 0.8) // 80%用于训练
|
||||
|
||||
p.trainingData = make([]DataPoint, trainSize)
|
||||
p.testData = make([]DataPoint, totalSamples-trainSize)
|
||||
|
||||
// 填充训练数据
|
||||
for i := 0; i < trainSize; i++ {
|
||||
p.trainingData[i] = DataPoint{
|
||||
Timestamp: time.Now().Add(-time.Duration(totalSamples-i) * time.Minute),
|
||||
Values: make(map[string]float64),
|
||||
}
|
||||
|
||||
// 转换特征为命名值
|
||||
if len(dataset.Features[i]) >= 5 {
|
||||
p.trainingData[i].Values["cpu_usage"] = dataset.Features[i][0]
|
||||
p.trainingData[i].Values["memory_usage"] = dataset.Features[i][1]
|
||||
p.trainingData[i].Values["response_time"] = dataset.Features[i][2]
|
||||
p.trainingData[i].Values["cache_hit_rate"] = dataset.Features[i][3]
|
||||
p.trainingData[i].Values["compression_ratio"] = dataset.Features[i][4]
|
||||
}
|
||||
}
|
||||
|
||||
// 填充测试数据
|
||||
for i := 0; i < len(p.testData); i++ {
|
||||
testIndex := trainSize + i
|
||||
p.testData[i] = DataPoint{
|
||||
Timestamp: time.Now().Add(-time.Duration(totalSamples-testIndex) * time.Minute),
|
||||
Values: make(map[string]float64),
|
||||
}
|
||||
|
||||
if len(dataset.Features[testIndex]) >= 5 {
|
||||
p.testData[i].Values["cpu_usage"] = dataset.Features[testIndex][0]
|
||||
p.testData[i].Values["memory_usage"] = dataset.Features[testIndex][1]
|
||||
p.testData[i].Values["response_time"] = dataset.Features[testIndex][2]
|
||||
p.testData[i].Values["cache_hit_rate"] = dataset.Features[testIndex][3]
|
||||
p.testData[i].Values["compression_ratio"] = dataset.Features[testIndex][4]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// trainLinearRegression 训练线性回归模型
|
||||
func (p *PredictiveModel) trainLinearRegression(dataset *LearningDataset) error {
|
||||
dataset.mutex.RLock()
|
||||
defer dataset.mutex.RUnlock()
|
||||
|
||||
if len(dataset.Features) == 0 || len(dataset.Labels) != len(dataset.Features) {
|
||||
return fmt.Errorf("训练数据不匹配")
|
||||
}
|
||||
|
||||
featuresCount := len(dataset.Features[0])
|
||||
samplesCount := len(dataset.Features)
|
||||
|
||||
// 初始化系数(包括偏置项)
|
||||
p.coefficients = make([]float64, featuresCount+1)
|
||||
|
||||
// 使用梯度下降训练
|
||||
learningRate := 0.01
|
||||
iterations := 1000
|
||||
|
||||
for iter := 0; iter < iterations; iter++ {
|
||||
// 计算预测值和误差
|
||||
totalLoss := 0.0
|
||||
gradients := make([]float64, len(p.coefficients))
|
||||
|
||||
for i := 0; i < samplesCount; i++ {
|
||||
// 计算预测值
|
||||
predicted := p.coefficients[0] // 偏置项
|
||||
for j := 0; j < featuresCount; j++ {
|
||||
predicted += p.coefficients[j+1] * dataset.Features[i][j]
|
||||
}
|
||||
|
||||
// 计算误差
|
||||
error := predicted - dataset.Labels[i]
|
||||
totalLoss += error * error
|
||||
|
||||
// 计算梯度
|
||||
gradients[0] += error // 偏置项梯度
|
||||
for j := 0; j < featuresCount; j++ {
|
||||
gradients[j+1] += error * dataset.Features[i][j]
|
||||
}
|
||||
}
|
||||
|
||||
// 更新参数
|
||||
for j := 0; j < len(p.coefficients); j++ {
|
||||
p.coefficients[j] -= learningRate * gradients[j] / float64(samplesCount)
|
||||
}
|
||||
|
||||
// 计算平均损失
|
||||
avgLoss := totalLoss / float64(samplesCount)
|
||||
|
||||
// 早停条件
|
||||
if avgLoss < 0.001 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// trainExponentialSmoothing 训练指数平滑模型
|
||||
func (p *PredictiveModel) trainExponentialSmoothing(dataset *LearningDataset) error {
|
||||
dataset.mutex.RLock()
|
||||
defer dataset.mutex.RUnlock()
|
||||
|
||||
if len(dataset.Labels) < 2 {
|
||||
return fmt.Errorf("指数平滑需要至少2个数据点")
|
||||
}
|
||||
|
||||
// 简单指数平滑参数
|
||||
alpha := 0.3 // 平滑参数
|
||||
|
||||
// 初始化
|
||||
p.coefficients = make([]float64, 2)
|
||||
p.coefficients[0] = dataset.Labels[0] // 初始水平
|
||||
p.coefficients[1] = alpha // 平滑参数
|
||||
|
||||
// 计算趋势组件
|
||||
if len(dataset.Labels) > 1 {
|
||||
p.trendComponent = dataset.Labels[1] - dataset.Labels[0]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Predict 进行预测
|
||||
func (p *PredictiveModel) Predict(features []float64, horizon time.Duration) (*Prediction, error) {
|
||||
p.mutex.RLock()
|
||||
defer p.mutex.RUnlock()
|
||||
|
||||
if len(p.coefficients) == 0 {
|
||||
return nil, fmt.Errorf("模型尚未训练")
|
||||
}
|
||||
|
||||
var predictedValue float64
|
||||
var confidence float64
|
||||
|
||||
switch p.modelType {
|
||||
case "linear_regression":
|
||||
if len(features) != len(p.coefficients)-1 {
|
||||
return nil, fmt.Errorf("特征维度不匹配")
|
||||
}
|
||||
|
||||
// 线性回归预测
|
||||
predictedValue = p.coefficients[0] // 偏置项
|
||||
for i, feature := range features {
|
||||
predictedValue += p.coefficients[i+1] * feature
|
||||
}
|
||||
|
||||
// 置信度基于训练数据的拟合程度
|
||||
confidence = math.Max(0.5, p.rmse) // 简化的置信度计算
|
||||
|
||||
case "exponential_smoothing":
|
||||
// 指数平滑预测
|
||||
predictedValue = p.coefficients[0] + p.trendComponent*float64(horizon/time.Minute)
|
||||
confidence = 0.7 // 固定置信度
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("不支持的模型类型: %s", p.modelType)
|
||||
}
|
||||
|
||||
return &Prediction{
|
||||
Metric: "overall_performance",
|
||||
FutureValue: predictedValue,
|
||||
Confidence: confidence,
|
||||
TimeHorizon: horizon,
|
||||
PredictedAt: time.Now(),
|
||||
}, nil
|
||||
}
// Validate evaluates the model on the held-out test data and returns accuracy
func (p *PredictiveModel) Validate() float64 {
    // Snapshot the test data first: Predict takes a read lock on the same
    // mutex, and sync.RWMutex is not reentrant, so holding the write lock
    // across the prediction loop would deadlock.
    p.mutex.RLock()
    testData := p.testData
    p.mutex.RUnlock()

    if len(testData) == 0 {
        return 0
    }

    correctPredictions := 0
    totalPredictions := len(testData)

    for _, testPoint := range testData {
        // Extract features
        features := []float64{
            testPoint.Values["cpu_usage"],
            testPoint.Values["memory_usage"],
            testPoint.Values["response_time"],
            testPoint.Values["cache_hit_rate"],
            testPoint.Values["compression_ratio"],
        }

        // Make a prediction
        prediction, err := p.Predict(features, 5*time.Minute)
        if err != nil {
            continue
        }

        // Compute the actual performance score (simplified)
        actualPerformance := p.calculatePerformanceScore(testPoint.Values)
        if actualPerformance == 0 {
            continue // avoid division by zero below
        }

        // Count the prediction as correct within a 10% error tolerance
        errorRate := math.Abs(prediction.FutureValue-actualPerformance) / actualPerformance
        if errorRate < 0.1 {
            correctPredictions++
        }
    }

    accuracy := float64(correctPredictions) / float64(totalPredictions)

    p.mutex.Lock()
    p.accuracy = accuracy
    p.mutex.Unlock()

    // Update the RMSE as well
    p.calculateRMSE()

    return accuracy
}

// calculatePerformanceScore computes a composite performance score
func (p *PredictiveModel) calculatePerformanceScore(values map[string]float64) float64 {
    // Simplified weighted score
    cpuScore := (1.0 - values["cpu_usage"]) * 30
    memoryScore := (1.0 - values["memory_usage"]/1000) * 25   // memory assumed in MB
    responseScore := (1.0 - values["response_time"]/1000) * 25 // response time assumed in ms
    cacheScore := values["cache_hit_rate"] * 20

    return math.Max(0, cpuScore+memoryScore+responseScore+cacheScore)
}

// calculateRMSE computes the root-mean-square error over the test data
func (p *PredictiveModel) calculateRMSE() {
    // Snapshot under a read lock, for the same reentrancy reason as Validate
    p.mutex.RLock()
    testData := p.testData
    p.mutex.RUnlock()

    if len(testData) == 0 {
        return
    }

    var sumSquaredErrors float64
    validPredictions := 0

    for _, testPoint := range testData {
        features := []float64{
            testPoint.Values["cpu_usage"],
            testPoint.Values["memory_usage"],
            testPoint.Values["response_time"],
            testPoint.Values["cache_hit_rate"],
            testPoint.Values["compression_ratio"],
        }

        prediction, err := p.Predict(features, 5*time.Minute)
        if err != nil {
            continue
        }

        actualPerformance := p.calculatePerformanceScore(testPoint.Values)
        diff := prediction.FutureValue - actualPerformance
        sumSquaredErrors += diff * diff
        validPredictions++
    }

    if validPredictions > 0 {
        p.mutex.Lock()
        p.rmse = math.Sqrt(sumSquaredErrors / float64(validPredictions))
        p.mutex.Unlock()
    }
}

// PredictMultiple predicts performance across several time horizons
func (p *PredictiveModel) PredictMultiple(currentMetrics *MetricSnapshot, horizons []time.Duration) map[string]Prediction {
    if currentMetrics == nil {
        return nil
    }

    features := []float64{
        currentMetrics.System.CPUUsage,
        float64(currentMetrics.System.MemoryUsage) / 1024 / 1024,
        float64(currentMetrics.Application.ResponseTime) / float64(time.Millisecond),
        currentMetrics.Cache.HitRate,
        currentMetrics.Cache.CompressionRatio,
    }

    predictions := make(map[string]Prediction)

    for _, horizon := range horizons {
        predictionKey := fmt.Sprintf("performance_%s", horizon.String())

        prediction, err := p.Predict(features, horizon)
        if err == nil {
            predictions[predictionKey] = *prediction
        }
    }

    return predictions
}

// GetAccuracy returns the model's accuracy
func (p *PredictiveModel) GetAccuracy() float64 {
    p.mutex.RLock()
    defer p.mutex.RUnlock()

    return p.accuracy
}

// GetPredictions returns a copy of all cached predictions
func (p *PredictiveModel) GetPredictions() map[string]Prediction {
    p.mutex.RLock()
    defer p.mutex.RUnlock()

    predictions := make(map[string]Prediction, len(p.predictions))
    for k, v := range p.predictions {
        predictions[k] = v
    }

    return predictions
}

// UpdatePredictions refreshes the cached predictions from current metrics
func (p *PredictiveModel) UpdatePredictions(currentMetrics *MetricSnapshot) {
    if currentMetrics == nil {
        return
    }

    // Predict performance at several horizons, up to one hour ahead
    horizons := []time.Duration{
        5 * time.Minute,
        15 * time.Minute,
        30 * time.Minute,
        1 * time.Hour,
    }

    newPredictions := p.PredictMultiple(currentMetrics, horizons)

    p.mutex.Lock()
    defer p.mutex.Unlock()

    // Merge in the new predictions
    for k, v := range newPredictions {
        p.predictions[k] = v
    }

    // Evict stale predictions
    now := time.Now()
    for k, v := range p.predictions {
        if now.Sub(v.PredictedAt) > 2*time.Hour {
            delete(p.predictions, k)
        }
    }
}
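
Since cached predictions expire after two hours, UpdatePredictions is presumably driven by a periodic loop. A minimal sketch of such a loop; the function and the metricsSource callback are illustrative assumptions:

// exampleRefreshLoop keeps the prediction cache warm; metricsSource is a
// stand-in for whatever component produces *MetricSnapshot values.
func exampleRefreshLoop(model *PredictiveModel, metricsSource func() *MetricSnapshot) {
    ticker := time.NewTicker(1 * time.Minute)
    defer ticker.Stop()
    for range ticker.C {
        model.UpdatePredictions(metricsSource())
        for key, pred := range model.GetPredictions() {
            log.Printf("%s -> %.2f (confidence %.2f)", key, pred.FutureValue, pred.Confidence)
        }
    }
}
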
431
util/cache/search_pattern_analyzer.go
vendored
Normal file
@@ -0,0 +1,431 @@
package cache

import (
    "crypto/md5"
    "fmt"
    "regexp"
    "sort"
    "strings"
    "sync"
    "sync/atomic"
    "time"
    "unicode/utf8"
)

// Pre-compiled regular expressions, hoisted so they are not recompiled on
// every call. Note that Go's regexp (RE2) spells Unicode code points as
// \x{4e00}, not \u4e00.
var (
    nonWordCharsRegex = regexp.MustCompile(`[^\w\s\x{4e00}-\x{9fff}]`)
    whitespaceRegex   = regexp.MustCompile(`\s+`)
    digitRegex        = regexp.MustCompile(`\d`)
    chineseRegex      = regexp.MustCompile(`[\x{4e00}-\x{9fff}]`)
    englishRegex      = regexp.MustCompile(`[a-zA-Z]`)
)

// SearchPatternAnalyzer analyzes search patterns
type SearchPatternAnalyzer struct {
    // Pattern cache
    patternCache map[string]*SearchPattern
    cacheMutex   sync.RWMutex

    // Analysis rules
    keywordRules []*KeywordRule

    // Statistics (accessed atomically)
    analysisCount int64
    cacheHitCount int64

    // Configuration
    maxCacheSize int
    cacheExpiry  time.Duration
}

// KeywordRule classifies keywords by regular expression
type KeywordRule struct {
    Name        string
    Pattern     *regexp.Regexp
    Priority    int
    Description string
}

// NewSearchPatternAnalyzer creates a search pattern analyzer
func NewSearchPatternAnalyzer() *SearchPatternAnalyzer {
    analyzer := &SearchPatternAnalyzer{
        patternCache: make(map[string]*SearchPattern),
        maxCacheSize: 1000,          // cache at most 1000 patterns
        cacheExpiry:  1 * time.Hour, // entries expire after 1 hour
    }

    // Initialize the keyword rules
    analyzer.initializeKeywordRules()

    return analyzer
}

// initializeKeywordRules initializes the keyword classification rules
func (s *SearchPatternAnalyzer) initializeKeywordRules() {
    s.keywordRules = []*KeywordRule{
        {
            Name:        "电影资源",
            Pattern:     regexp.MustCompile(`(?i)(电影|movie|film|影片|HD|4K|蓝光|BluRay)`),
            Priority:    1,
            Description: "电影相关搜索",
        },
        {
            Name:        "电视剧资源",
            Pattern:     regexp.MustCompile(`(?i)(电视剧|TV|series|连续剧|美剧|韩剧|日剧)`),
            Priority:    1,
            Description: "电视剧相关搜索",
        },
        {
            Name:        "动漫资源",
            Pattern:     regexp.MustCompile(`(?i)(动漫|anime|动画|漫画|manga)`),
            Priority:    1,
            Description: "动漫相关搜索",
        },
        {
            Name:        "音乐资源",
            Pattern:     regexp.MustCompile(`(?i)(音乐|music|歌曲|专辑|album|MP3|FLAC)`),
            Priority:    2,
            Description: "音乐相关搜索",
        },
        {
            Name:        "游戏资源",
            Pattern:     regexp.MustCompile(`(?i)(游戏|game|单机|网游|手游|steam)`),
            Priority:    2,
            Description: "游戏相关搜索",
        },
        {
            Name:        "软件资源",
            Pattern:     regexp.MustCompile(`(?i)(软件|software|app|应用|工具|破解)`),
            Priority:    2,
            Description: "软件相关搜索",
        },
        {
            Name:        "学习资源",
            Pattern:     regexp.MustCompile(`(?i)(教程|tutorial|课程|学习|教学|资料)`),
            Priority:    3,
            Description: "学习资源搜索",
        },
        {
            Name:        "文档资源",
            Pattern:     regexp.MustCompile(`(?i)(文档|doc|pdf|txt|电子书|ebook)`),
            Priority:    3,
            Description: "文档资源搜索",
        },
        {
            Name:        "通用搜索",
            Pattern:     regexp.MustCompile(`.*`), // catch-all
            Priority:    4,
            Description: "通用搜索模式",
        },
    }
}

// AnalyzePattern analyzes the search pattern of a cache operation
func (s *SearchPatternAnalyzer) AnalyzePattern(op *CacheOperation) *SearchPattern {
    atomic.AddInt64(&s.analysisCount, 1) // atomic: may be called concurrently

    // 🔧 Generate the cache key
    cacheKey := s.generateCacheKey(op)

    // 🚀 Check the cache. A write lock is required here because a hit
    // mutates the cached entry (LastAccessTime, Frequency).
    s.cacheMutex.Lock()
    if cached, exists := s.patternCache[cacheKey]; exists {
        // Not expired yet?
        if time.Since(cached.LastAccessTime) < s.cacheExpiry {
            cached.LastAccessTime = time.Now()
            cached.Frequency++
            s.cacheMutex.Unlock()
            atomic.AddInt64(&s.cacheHitCount, 1)
            return cached
        }
    }
    s.cacheMutex.Unlock()

    // 🎯 Analyze a new pattern
    pattern := s.analyzeNewPattern(op)

    // 🗄️ Cache the result
    s.cachePattern(cacheKey, pattern)

    return pattern
}
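
A minimal sketch of the intended call pattern; the example function, the plugin name, and the keyword are illustrative, and the CacheOperation literal sets only the fields this analyzer actually reads:

func exampleAnalyze() {
    analyzer := NewSearchPatternAnalyzer()
    op := &CacheOperation{ // hypothetical operation
        Keyword:    "复仇者联盟 4K",
        PluginName: "pansearch",
        Priority:   1,
        Timestamp:  time.Now(),
    }
    pattern := analyzer.AnalyzePattern(op)
    fmt.Printf("%s (window %s, language %v)\n",
        pattern.KeywordPattern, pattern.TimeWindow, pattern.Metadata["language"])
}
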
// generateCacheKey builds a cache key for a cache operation
func (s *SearchPatternAnalyzer) generateCacheKey(op *CacheOperation) string {
    // Key on the normalized keyword plus the plugin name
    source := fmt.Sprintf("%s_%s",
        s.normalizeKeyword(op.Keyword),
        op.PluginName)

    // MD5 is used purely as a compact, non-cryptographic fingerprint
    hash := md5.Sum([]byte(source))
    return fmt.Sprintf("%x", hash)
}

// normalizeKeyword normalizes a keyword for cache-key purposes
func (s *SearchPatternAnalyzer) normalizeKeyword(keyword string) string {
    // Lowercase
    normalized := strings.ToLower(keyword)

    // Strip special characters and collapse whitespace
    normalized = nonWordCharsRegex.ReplaceAllString(normalized, " ")
    normalized = whitespaceRegex.ReplaceAllString(normalized, " ")
    normalized = strings.TrimSpace(normalized)

    return normalized
}

// analyzeNewPattern analyzes a previously unseen pattern
func (s *SearchPatternAnalyzer) analyzeNewPattern(op *CacheOperation) *SearchPattern {
    pattern := &SearchPattern{
        KeywordPattern: s.classifyKeyword(op.Keyword),
        PluginSet:      []string{op.PluginName},
        TimeWindow:     s.determineTimeWindow(op),
        Frequency:      1,
        LastAccessTime: time.Now(),
        Metadata:       make(map[string]interface{}),
    }

    // 🔍 Keyword analysis
    s.analyzeKeywordCharacteristics(pattern, op.Keyword)

    // 🔍 Plugin analysis
    s.analyzePluginCharacteristics(pattern, op.PluginName)

    // 🔍 Time-pattern analysis (may override the priority-based TimeWindow)
    s.analyzeTimePattern(pattern, op.Timestamp)

    return pattern
}

// classifyKeyword classifies a keyword against the rule list
func (s *SearchPatternAnalyzer) classifyKeyword(keyword string) string {
    // Rules are ordered by priority; the catch-all rule guarantees a match
    for _, rule := range s.keywordRules {
        if rule.Pattern.MatchString(keyword) {
            return rule.Name
        }
    }

    return "通用搜索"
}

// analyzeKeywordCharacteristics extracts keyword features into the metadata
func (s *SearchPatternAnalyzer) analyzeKeywordCharacteristics(pattern *SearchPattern, keyword string) {
    metadata := pattern.Metadata

    // Keyword length in runes, so CJK keywords are measured correctly
    metadata["keyword_length"] = utf8.RuneCountInString(keyword)

    // Keyword complexity (number of whitespace-separated words)
    words := strings.Fields(keyword)
    metadata["word_count"] = len(words)

    // Contains special characters?
    hasSpecialChars := nonWordCharsRegex.MatchString(keyword)
    metadata["has_special_chars"] = hasSpecialChars

    // Contains digits?
    hasNumbers := digitRegex.MatchString(keyword)
    metadata["has_numbers"] = hasNumbers

    // Language detection
    hasChinese := chineseRegex.MatchString(keyword)
    hasEnglish := englishRegex.MatchString(keyword)

    if hasChinese && hasEnglish {
        metadata["language"] = "mixed"
    } else if hasChinese {
        metadata["language"] = "chinese"
    } else if hasEnglish {
        metadata["language"] = "english"
    } else {
        metadata["language"] = "other"
    }

    // Predict search frequency from keyword characteristics
    complexity := len(words)
    if hasSpecialChars {
        complexity++
    }
    if hasNumbers {
        complexity++
    }

    // The lower the complexity, the more often the keyword is likely searched
    predictedFrequency := "medium"
    if complexity <= 2 {
        predictedFrequency = "high"
    } else if complexity >= 5 {
        predictedFrequency = "low"
    }

    metadata["predicted_frequency"] = predictedFrequency
}

// analyzePluginCharacteristics records plugin features into the metadata
func (s *SearchPatternAnalyzer) analyzePluginCharacteristics(pattern *SearchPattern, pluginName string) {
    metadata := pattern.Metadata

    // Infer the plugin type from its name
    lowerName := strings.ToLower(pluginName)
    pluginType := "general"
    if strings.Contains(lowerName, "4k") {
        pluginType = "high_quality"
    } else if strings.Contains(lowerName, "pan") {
        pluginType = "cloud_storage"
    } else if strings.Contains(lowerName, "search") {
        pluginType = "search_engine"
    }

    metadata["plugin_type"] = pluginType
    metadata["plugin_name"] = pluginName
}

// analyzeTimePattern records time-of-day features and tunes the cache window
func (s *SearchPatternAnalyzer) analyzeTimePattern(pattern *SearchPattern, timestamp time.Time) {
    metadata := pattern.Metadata

    // Time-of-day bucket
    hour := timestamp.Hour()
    var timePeriod string
    switch {
    case hour >= 6 && hour < 12:
        timePeriod = "morning"
    case hour >= 12 && hour < 18:
        timePeriod = "afternoon"
    case hour >= 18 && hour < 22:
        timePeriod = "evening"
    default:
        timePeriod = "night"
    }

    metadata["time_period"] = timePeriod

    // Weekday/weekend
    weekday := timestamp.Weekday()
    isWeekend := weekday == time.Saturday || weekday == time.Sunday
    metadata["is_weekend"] = isWeekend

    // Pick the cache window from the time pattern
    if isWeekend || timePeriod == "evening" {
        pattern.TimeWindow = 30 * time.Minute // peak hours: cache longer
    } else {
        pattern.TimeWindow = 15 * time.Minute // off-peak: cache shorter
    }
}

// determineTimeWindow derives a cache window from the operation priority
func (s *SearchPatternAnalyzer) determineTimeWindow(op *CacheOperation) time.Duration {
    // Base window: 15 minutes
    baseWindow := 15 * time.Minute

    // Scale by plugin priority
    switch op.Priority {
    case 1: // high-priority plugins
        return baseWindow * 2 // 30 minutes
    case 2: // medium-high priority
        return baseWindow * 3 / 2 // 22.5 minutes
    case 3: // medium priority
        return baseWindow // 15 minutes
    case 4: // low priority
        return baseWindow / 2 // 7.5 minutes
    default:
        return baseWindow
    }
}

// cachePattern stores a pattern in the cache
func (s *SearchPatternAnalyzer) cachePattern(cacheKey string, pattern *SearchPattern) {
    s.cacheMutex.Lock()
    defer s.cacheMutex.Unlock()

    // Evict if the cache is full
    if len(s.patternCache) >= s.maxCacheSize {
        s.cleanupCache()
    }

    s.patternCache[cacheKey] = pattern
}

// cleanupCache evicts cache entries; the caller must hold cacheMutex
func (s *SearchPatternAnalyzer) cleanupCache() {
    now := time.Now()

    // Collect expired keys first
    toDelete := make([]string, 0)
    for key, pattern := range s.patternCache {
        if now.Sub(pattern.LastAccessTime) > s.cacheExpiry {
            toDelete = append(toDelete, key)
        }
    }

    // If expiry alone frees less than 25%, evict by usage frequency
    if len(toDelete) < len(s.patternCache)/4 {
        // Sort by frequency and evict the least-used entries
        type patternFreq struct {
            key        string
            frequency  int
            lastAccess time.Time
        }

        patterns := make([]patternFreq, 0, len(s.patternCache))
        for key, pattern := range s.patternCache {
            patterns = append(patterns, patternFreq{
                key:        key,
                frequency:  pattern.Frequency,
                lastAccess: pattern.LastAccessTime,
            })
        }

        // Lowest frequency first; break ties by oldest access
        sort.Slice(patterns, func(i, j int) bool {
            if patterns[i].frequency == patterns[j].frequency {
                return patterns[i].lastAccess.Before(patterns[j].lastAccess)
            }
            return patterns[i].frequency < patterns[j].frequency
        })

        // Evict the bottom 25%
        deleteCount := len(patterns) / 4
        for i := 0; i < deleteCount; i++ {
            toDelete = append(toDelete, patterns[i].key)
        }
    }

    // Perform the deletions
    for _, key := range toDelete {
        delete(s.patternCache, key)
    }
}

// GetCacheStats returns cache statistics
func (s *SearchPatternAnalyzer) GetCacheStats() map[string]interface{} {
    s.cacheMutex.RLock()
    defer s.cacheMutex.RUnlock()

    analysisCount := atomic.LoadInt64(&s.analysisCount)
    cacheHitCount := atomic.LoadInt64(&s.cacheHitCount)

    hitRate := float64(0)
    if analysisCount > 0 {
        hitRate = float64(cacheHitCount) / float64(analysisCount)
    }

    return map[string]interface{}{
        "cache_size":      len(s.patternCache),
        "max_cache_size":  s.maxCacheSize,
        "analysis_count":  analysisCount,
        "cache_hit_count": cacheHitCount,
        "hit_rate":        hitRate,
        "cache_expiry":    s.cacheExpiry,
    }
}

// GetPopularPatterns returns the most frequently seen patterns
func (s *SearchPatternAnalyzer) GetPopularPatterns(limit int) []*SearchPattern {
    s.cacheMutex.RLock()
    defer s.cacheMutex.RUnlock()

    patterns := make([]*SearchPattern, 0, len(s.patternCache))
    for _, pattern := range s.patternCache {
        patterns = append(patterns, pattern)
    }

    // Sort by frequency, descending
    sort.Slice(patterns, func(i, j int) bool {
        return patterns[i].Frequency > patterns[j].Frequency
    })

    if limit > 0 && limit < len(patterns) {
        patterns = patterns[:limit]
    }

    return patterns
}
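
The two accessors above compose naturally into a monitoring hook. A brief, illustrative sketch (the example function is an assumption):

func exampleStats(analyzer *SearchPatternAnalyzer) {
    stats := analyzer.GetCacheStats()
    fmt.Printf("hit rate %.1f%% over %v analyses\n",
        stats["hit_rate"].(float64)*100, stats["analysis_count"])
    for _, p := range analyzer.GetPopularPatterns(5) {
        fmt.Printf("%s seen %d times\n", p.KeywordPattern, p.Frequency)
    }
}
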
463
util/cache/tuning_strategy.go
vendored
Normal file
@@ -0,0 +1,463 @@
package cache

import (
    "sort"
    "time"
)

// NewTuningStrategy creates a tuning strategy
func NewTuningStrategy() *TuningStrategy {
    strategy := &TuningStrategy{
        strategyType:         "balanced",
        rules:                make([]*TuningRule, 0),
        parameterAdjustments: make(map[string]ParameterAdjustment),
        executionHistory:     make([]*StrategyExecution, 0),
    }

    // Initialize the tuning rules
    strategy.initializeRules()

    return strategy
}

// NewLearningDataset creates a learning dataset
func NewLearningDataset() *LearningDataset {
    return &LearningDataset{
        Features:        make([][]float64, 0),
        Labels:          make([]float64, 0),
        Weights:         make([]float64, 0),
        FeatureStats:    make([]FeatureStatistics, 0),
        TrainingSplit:   0.8,
        ValidationSplit: 0.1,
        TestSplit:       0.1,
    }
}

// initializeRules initializes the tuning rules
func (t *TuningStrategy) initializeRules() {
    t.rules = []*TuningRule{
        {
            Name:     "高CPU使用率调优",
            Priority: 1,
            Enabled:  true,
            Condition: func(metrics *MetricSnapshot) bool {
                return metrics.System.CPUUsage > 0.8
            },
            Action: func(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
                return t.createCPUOptimizationDecision(engine)
            },
        },
        {
            Name:     "高内存使用调优",
            Priority: 2,
            Enabled:  true,
            Condition: func(metrics *MetricSnapshot) bool {
                memoryRatio := float64(metrics.System.MemoryUsage) / float64(metrics.System.MemoryTotal)
                return memoryRatio > 0.85
            },
            Action: func(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
                return t.createMemoryOptimizationDecision(engine)
            },
        },
        {
            Name:     "响应时间过长调优",
            Priority: 3,
            Enabled:  true,
            Condition: func(metrics *MetricSnapshot) bool {
                return metrics.Application.ResponseTime > 500*time.Millisecond
            },
            Action: func(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
                return t.createResponseTimeOptimizationDecision(engine)
            },
        },
        {
            Name:     "缓存命中率低调优",
            Priority: 4,
            Enabled:  true,
            Condition: func(metrics *MetricSnapshot) bool {
                return metrics.Cache.HitRate < 0.7
            },
            Action: func(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
                return t.createCacheOptimizationDecision(engine)
            },
        },
        {
            Name:     "整体性能低调优",
            Priority: 5,
            Enabled:  true,
            Condition: func(metrics *MetricSnapshot) bool {
                return metrics.OverallPerformance < 60
            },
            Action: func(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
                return t.createOverallPerformanceDecision(engine)
            },
        },
        {
            Name:     "预防性调优",
            Priority: 10,
            Enabled:  true,
            Condition: func(metrics *MetricSnapshot) bool {
                // Trend-based preventive tuning; disabled for now,
                // pending trend data
                return false
            },
            Action: func(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
                return t.createPreventiveDecision(engine)
            },
        },
    }
}

// GenerateDecision produces a tuning decision from the current metrics.
// The issues parameter is currently unused and reserved for diagnostics.
func (t *TuningStrategy) GenerateDecision(metrics *MetricSnapshot, issues []string) *TuningDecision {
    if metrics == nil {
        return nil
    }

    t.mutex.Lock()
    defer t.mutex.Unlock()

    // Sort rules by priority (lowest value first)
    sort.Slice(t.rules, func(i, j int) bool {
        return t.rules[i].Priority < t.rules[j].Priority
    })

    // Walk the rules and return the first decision produced
    for _, rule := range t.rules {
        if !rule.Enabled {
            continue
        }

        // Respect the cooldown window (prevents tuning thrash)
        if time.Since(rule.LastTriggered) < 5*time.Minute {
            continue
        }

        // Check the condition
        if rule.Condition(metrics) {
            decision, err := rule.Action(nil) // simplified: no engine is passed
            if err != nil {
                continue
            }

            // Update rule bookkeeping
            rule.LastTriggered = time.Now()
            rule.TriggerCount++

            // Stamp the decision
            decision.Timestamp = time.Now()
            decision.Trigger = rule.Name

            return decision
        }
    }

    return nil
}
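
GenerateDecision pairs with ExecuteDecision, defined later in this file. A brief illustrative sketch of one tuning pass; the example function and the snapshot parameter are assumptions:

func exampleTuningPass(strategy *TuningStrategy, snapshot *MetricSnapshot) {
    decision := strategy.GenerateDecision(snapshot, nil)
    if decision == nil {
        return // nothing to tune, or all matching rules are cooling down
    }
    if decision.AutoExecute {
        exec := strategy.ExecuteDecision(decision)
        fmt.Printf("%s: improvement %.0f%%\n", decision.Trigger, exec.Result.Improvement*100)
    } else {
        fmt.Printf("%s requires manual confirmation (risk %.2f)\n", decision.Trigger, decision.Risk)
    }
}
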
// createCPUOptimizationDecision builds a CPU optimization decision
func (t *TuningStrategy) createCPUOptimizationDecision(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
    adjustments := []ParameterAdjustment{
        {
            ParameterName:   "batch_interval",
            CurrentValue:    "60s",
            ProposedValue:   "90s",
            AdjustmentRatio: 0.5, // +50%
            Reason:          "减少CPU负载",
            ExpectedImpact:  "降低CPU使用率",
            Risk:            "medium",
        },
        {
            ParameterName:   "batch_size",
            CurrentValue:    100,
            ProposedValue:   150,
            AdjustmentRatio: 0.5,
            Reason:          "减少处理频率",
            ExpectedImpact:  "降低CPU负载",
            Risk:            "low",
        },
    }

    return &TuningDecision{
        Adjustments:         adjustments,
        Confidence:          0.8,
        ExpectedImprovement: 0.15,
        Risk:                0.3,
        AutoExecute:         true,
    }, nil
}

// createMemoryOptimizationDecision builds a memory optimization decision
func (t *TuningStrategy) createMemoryOptimizationDecision(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
    adjustments := []ParameterAdjustment{
        {
            ParameterName:   "max_buffer_size",
            CurrentValue:    1000,
            ProposedValue:   700,
            AdjustmentRatio: -0.3, // -30%
            Reason:          "减少内存占用",
            ExpectedImpact:  "降低内存使用率",
            Risk:            "medium",
        },
        {
            ParameterName:   "cache_cleanup_frequency",
            CurrentValue:    "5m",
            ProposedValue:   "3m",
            AdjustmentRatio: -0.4,
            Reason:          "更频繁清理缓存",
            ExpectedImpact:  "释放内存空间",
            Risk:            "low",
        },
    }

    return &TuningDecision{
        Adjustments:         adjustments,
        Confidence:          0.85,
        ExpectedImprovement: 0.2,
        Risk:                0.25,
        AutoExecute:         true,
    }, nil
}

// createResponseTimeOptimizationDecision builds a response-time optimization decision
func (t *TuningStrategy) createResponseTimeOptimizationDecision(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
    adjustments := []ParameterAdjustment{
        {
            ParameterName:   "batch_interval",
            CurrentValue:    "60s",
            ProposedValue:   "30s",
            AdjustmentRatio: -0.5, // -50%
            Reason:          "更快的数据写入",
            ExpectedImpact:  "降低响应时间",
            Risk:            "medium",
        },
        {
            ParameterName:   "concurrent_workers",
            CurrentValue:    4,
            ProposedValue:   6,
            AdjustmentRatio: 0.5,
            Reason:          "增加并发处理",
            ExpectedImpact:  "提高处理速度",
            Risk:            "high",
        },
    }

    return &TuningDecision{
        Adjustments:         adjustments,
        Confidence:          0.75,
        ExpectedImprovement: 0.25,
        Risk:                0.4,
        AutoExecute:         false, // high risk; requires manual confirmation
    }, nil
}

// createCacheOptimizationDecision builds a cache optimization decision
func (t *TuningStrategy) createCacheOptimizationDecision(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
    adjustments := []ParameterAdjustment{
        {
            ParameterName:   "cache_ttl",
            CurrentValue:    "1h",
            ProposedValue:   "2h",
            AdjustmentRatio: 1.0, // +100%
            Reason:          "延长缓存生存时间",
            ExpectedImpact:  "提高缓存命中率",
            Risk:            "low",
        },
        {
            ParameterName:   "cache_size_limit",
            CurrentValue:    1000,
            ProposedValue:   1500,
            AdjustmentRatio: 0.5,
            Reason:          "增加缓存容量",
            ExpectedImpact:  "减少缓存驱逐",
            Risk:            "medium",
        },
    }

    return &TuningDecision{
        Adjustments:         adjustments,
        Confidence:          0.9,
        ExpectedImprovement: 0.3,
        Risk:                0.2,
        AutoExecute:         true,
    }, nil
}

// createOverallPerformanceDecision builds an overall performance optimization decision
func (t *TuningStrategy) createOverallPerformanceDecision(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
    adjustments := []ParameterAdjustment{
        {
            ParameterName:   "global_optimization",
            CurrentValue:    false,
            ProposedValue:   true,
            AdjustmentRatio: 1.0,
            Reason:          "启用全局优化",
            ExpectedImpact:  "整体性能提升",
            Risk:            "medium",
        },
        {
            ParameterName:   "compression_level",
            CurrentValue:    "standard",
            ProposedValue:   "high",
            AdjustmentRatio: 0.3,
            Reason:          "提高压缩效率",
            ExpectedImpact:  "减少存储开销",
            Risk:            "low",
        },
    }

    return &TuningDecision{
        Adjustments:         adjustments,
        Confidence:          0.7,
        ExpectedImprovement: 0.2,
        Risk:                0.35,
        AutoExecute:         true,
    }, nil
}

// createPreventiveDecision builds a preventive tuning decision
func (t *TuningStrategy) createPreventiveDecision(engine *AdaptiveTuningEngine) (*TuningDecision, error) {
    // Preventive tuning based on trend prediction
    adjustments := []ParameterAdjustment{
        {
            ParameterName:   "preventive_scaling",
            CurrentValue:    1.0,
            ProposedValue:   1.1,
            AdjustmentRatio: 0.1,
            Reason:          "预防性资源扩展",
            ExpectedImpact:  "避免性能下降",
            Risk:            "low",
        },
    }

    return &TuningDecision{
        Adjustments:         adjustments,
        Confidence:          0.6,
        ExpectedImprovement: 0.1,
        Risk:                0.15,
        AutoExecute:         true,
    }, nil
}

// ExecuteDecision executes a decision and records the outcome
func (t *TuningStrategy) ExecuteDecision(decision *TuningDecision) *StrategyExecution {
    execution := &StrategyExecution{
        Timestamp: time.Now(),
        Decision:  decision,
        Executed:  false,
    }

    // Simplified execution logic; the result values below are simulated
    execution.Executed = true
    execution.Result = &ExecutionResult{
        Success:           true,
        PerformanceBefore: 70.0, // simulated value
        PerformanceAfter:  85.0, // simulated value
        Improvement:       0.15,
        SideEffects:       []string{},
    }

    // Record the execution history
    t.mutex.Lock()
    t.executionHistory = append(t.executionHistory, execution)

    // Cap the history size
    if len(t.executionHistory) > 100 {
        t.executionHistory = t.executionHistory[1:]
    }
    t.mutex.Unlock()

    return execution
}

// GetExecutionHistory returns the most recent executions, newest last
func (t *TuningStrategy) GetExecutionHistory(limit int) []*StrategyExecution {
    t.mutex.RLock()
    defer t.mutex.RUnlock()

    if limit <= 0 || limit > len(t.executionHistory) {
        limit = len(t.executionHistory)
    }

    history := make([]*StrategyExecution, limit)
    startIndex := len(t.executionHistory) - limit
    copy(history, t.executionHistory[startIndex:])

    return history
}

// UpdateStrategy switches the strategy type and adjusts rule enablement
func (t *TuningStrategy) UpdateStrategy(strategyType string) {
    t.mutex.Lock()
    defer t.mutex.Unlock()

    t.strategyType = strategyType

    // Adjust rule priority and enablement according to the strategy type
    switch strategyType {
    case "conservative":
        // Conservative: enable only the core reactive rules (priority <= 5)
        for _, rule := range t.rules {
            rule.Enabled = rule.Priority <= 5
        }

    case "aggressive":
        // Aggressive: enable every rule
        for _, rule := range t.rules {
            rule.Enabled = true
        }

    case "balanced":
        // Balanced: the default — all rules enabled
        for _, rule := range t.rules {
            rule.Enabled = true
        }
    }
}

// GetStrategyStats returns strategy statistics
func (t *TuningStrategy) GetStrategyStats() map[string]interface{} {
    t.mutex.RLock()
    defer t.mutex.RUnlock()

    stats := map[string]interface{}{
        "strategy_type":    t.strategyType,
        "total_executions": len(t.executionHistory),
        "enabled_rules":    0,
        "rule_statistics":  make(map[string]interface{}),
    }

    enabledRules := 0
    ruleStats := make(map[string]interface{})

    for _, rule := range t.rules {
        if rule.Enabled {
            enabledRules++
        }

        ruleStats[rule.Name] = map[string]interface{}{
            "enabled":        rule.Enabled,
            "priority":       rule.Priority,
            "trigger_count":  rule.TriggerCount,
            "last_triggered": rule.LastTriggered,
        }
    }

    stats["enabled_rules"] = enabledRules
    stats["rule_statistics"] = ruleStats

    // Success rate across recorded executions
    successfulExecutions := 0
    for _, execution := range t.executionHistory {
        if execution.Result != nil && execution.Result.Success {
            successfulExecutions++
        }
    }

    if len(t.executionHistory) > 0 {
        stats["success_rate"] = float64(successfulExecutions) / float64(len(t.executionHistory))
    } else {
        stats["success_rate"] = 0.0
    }

    return stats
}
@@ -39,4 +39,10 @@ func MarshalString(v interface{}) (string, error) {

// UnmarshalString deserializes a JSON string into the given object
func UnmarshalString(str string, v interface{}) error {
    return API.Unmarshal([]byte(str), v)
}

// MarshalIndent serializes an object to indented, human-readable JSON
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
    // Delegate to sonic's indentation support
    return API.MarshalIndent(v, prefix, indent)
}
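
A small, illustrative usage sketch of these helpers from a package that imports pansou/util/json; the Payload type and demo function are assumptions:

import (
    "fmt"

    "pansou/util/json"
)

func demo() {
    type Payload struct {
        Keyword string `json:"kw"`
        Page    int    `json:"page"`
    }

    pretty, err := json.MarshalIndent(Payload{Keyword: "测试", Page: 1}, "", "  ")
    if err == nil {
        fmt.Println(string(pretty))
    }

    var p Payload
    _ = json.UnmarshalString(`{"kw":"测试","page":2}`, &p)
    fmt.Println(p.Page) // 2
}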