feat(cache): improve cache management (#1339)

* feat(cache): improve cache management

* feat(disk-usage): add cache

* feat(disk-usage): add refresh

* fix(disk-usage): cache with ttl

* feat(cache): implement KeyedCache and TypedCache for improved caching mechanism

* fix(copy): update object retrieval to use Get instead of GetUnwrap

* refactor(cache): simplify DirectoryCache structure and improve object management

* fix(cache): correct cache entry initialization and key deletion logic in TypedCache

* refactor(driver): remove GetObjInfo interface and simplify Link function logic
https://github.com/OpenListTeam/OpenList/pull/888/files#r2430925783

* fix(link): optimize link retrieval and caching logic

* refactor(cache): consolidate cache management and improve directory cache handling

* fix(cache): add cache control based on storage configuration in List function

* .

* refactor: replace fmt.Sprintf with strconv for integer conversions

* refactor(cache): enhance cache entry management with Expirable interface

* fix(cache): improve link reference acquisition logic to handle expiration

* refactor: replace OnlyLinkMFile with NoLinkSF in driver configurations and logic

* refactor(link): enhance link caching logic with dynamic type keys based on IP and User-Agent

* feat(drivers): add LinkCacheType to driver configurations for enhanced caching

* refactor(cache): streamline directory object management in cache operations

* refactor(cache): remove unnecessary 'dirty' field from CacheEntry structure

* refactor(cache): replace 'dirty' field with bitwise flags

* refactor(io): raise the priority of SyncClosers.AcquireReference

* refactor(link): optimize link retrieval logic and add retry

* refactor(link): add RequireReference field to strengthen link management

* refactor(link): remove the MFile field and use RangeReader instead

* refactor: remove the unnecessary NoLinkSF field

* refactor(cache): revise the directory cache dirty-flag definition and update logic

* feat(cache): add expiration gc

---------

Co-authored-by: KirCute <951206789@qq.com>
Co-authored-by: KirCute <kircute@foxmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Author: ILoveScratch
Date: 2025-10-18 21:47:18 +08:00
Committed by: GitHub
Parent: 549e60136b
Commit: febbcd6027
56 changed files with 880 additions and 509 deletions

internal/cache/keyed_cache.go

@@ -0,0 +1,101 @@
package cache
import (
"sync"
"time"
)
type KeyedCache[T any] struct {
entries map[string]*CacheEntry[T]
mu sync.RWMutex
ttl time.Duration
}
func NewKeyedCache[T any](ttl time.Duration) *KeyedCache[T] {
c := &KeyedCache[T]{
entries: make(map[string]*CacheEntry[T]),
ttl: ttl,
}
gcFuncs = append(gcFuncs, c.GC)
return c
}
func (c *KeyedCache[T]) Set(key string, value T) {
c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(c.ttl)))
}
func (c *KeyedCache[T]) SetWithTTL(key string, value T, ttl time.Duration) {
c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(ttl)))
}
func (c *KeyedCache[T]) SetWithExpirable(key string, value T, exp Expirable) {
c.mu.Lock()
defer c.mu.Unlock()
c.entries[key] = &CacheEntry[T]{
data: value,
Expirable: exp,
}
}
func (c *KeyedCache[T]) Get(key string) (T, bool) {
c.mu.RLock()
entry, exists := c.entries[key]
if !exists {
c.mu.RUnlock()
return *new(T), false
}
expired := entry.Expired()
c.mu.RUnlock()
if !expired {
return entry.data, true
}
c.mu.Lock()
if c.entries[key] == entry {
delete(c.entries, key)
c.mu.Unlock()
return *new(T), false
}
c.mu.Unlock()
return *new(T), false
}
func (c *KeyedCache[T]) Delete(key string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.entries, key)
}
func (c *KeyedCache[T]) Take(key string) (T, bool) {
c.mu.Lock()
defer c.mu.Unlock()
if entry, exists := c.entries[key]; exists {
delete(c.entries, key)
return entry.data, true
}
return *new(T), false
}
func (c *KeyedCache[T]) Clear() {
c.mu.Lock()
defer c.mu.Unlock()
c.entries = make(map[string]*CacheEntry[T])
}
func (c *KeyedCache[T]) GC() {
c.mu.Lock()
defer c.mu.Unlock()
expiredKeys := make([]string, 0, len(c.entries))
for key, entry := range c.entries {
if entry.Expired() {
expiredKeys = append(expiredKeys, key)
}
}
for _, key := range expiredKeys {
delete(c.entries, key)
}
}
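
A minimal usage sketch of the KeyedCache API above (illustrative, not part of this commit; it assumes code living inside the OpenList module, since internal/cache cannot be imported from outside, and the key names, value type, and TTLs are made up):

package main

import (
    "fmt"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/cache"
)

func main() {
    // Cache string values with a 5-minute default TTL.
    c := cache.NewKeyedCache[string](5 * time.Minute)

    c.Set("session:abc", "token-123")                     // default TTL
    c.SetWithTTL("session:tmp", "token-456", time.Second) // per-entry TTL

    if v, ok := c.Get("session:abc"); ok {
        fmt.Println("hit:", v)
    }
    if v, ok := c.Take("session:tmp"); ok { // Take removes the entry on a hit
        fmt.Println("taken:", v)
    }
    c.Delete("session:abc")
}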

internal/cache/type.go

@@ -0,0 +1,18 @@
package cache
import "time"
type Expirable interface {
Expired() bool
}
type ExpirationTime time.Time
func (e ExpirationTime) Expired() bool {
return time.Now().After(time.Time(e))
}
type CacheEntry[T any] struct {
Expirable
data T
}
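
The Expirable embedded in CacheEntry is what lets callers supply policies other than a wall-clock deadline (the archive extract and link caches below pass a link's SyncClosers here). A hypothetical policy that never expires and is only evicted explicitly could look like:

// neverExpire is a hypothetical Expirable: its entries pass every TTL and GC check.
type neverExpire struct{}

func (neverExpire) Expired() bool { return false }

// Illustrative use with the KeyedCache above:
//   c.SetWithExpirable("pinned", value, neverExpire{})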

internal/cache/typed_cache.go

@@ -0,0 +1,122 @@
package cache
import (
"sync"
"time"
)
type TypedCache[T any] struct {
entries map[string]map[string]*CacheEntry[T]
mu sync.RWMutex
ttl time.Duration
}
func NewTypedCache[T any](ttl time.Duration) *TypedCache[T] {
c := &TypedCache[T]{
entries: make(map[string]map[string]*CacheEntry[T]),
ttl: ttl,
}
gcFuncs = append(gcFuncs, c.GC)
return c
}
func (c *TypedCache[T]) SetType(key, typeKey string, value T) {
c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(time.Now().Add(c.ttl)))
}
func (c *TypedCache[T]) SetTypeWithTTL(key, typeKey string, value T, ttl time.Duration) {
c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(time.Now().Add(ttl)))
}
func (c *TypedCache[T]) SetTypeWithExpirable(key, typeKey string, value T, exp Expirable) {
c.mu.Lock()
defer c.mu.Unlock()
cache, exists := c.entries[key]
if !exists {
cache = make(map[string]*CacheEntry[T])
c.entries[key] = cache
}
cache[typeKey] = &CacheEntry[T]{
data: value,
Expirable: exp,
}
}
// Prefer to use typeKeys for lookup; if none match, use fallbackTypeKey for lookup
func (c *TypedCache[T]) GetType(key, fallbackTypeKey string, typeKeys ...string) (T, bool) {
c.mu.RLock()
cache, exists := c.entries[key]
if !exists {
c.mu.RUnlock()
return *new(T), false
}
entry, exists := cache[fallbackTypeKey]
if len(typeKeys) > 0 {
for _, tk := range typeKeys {
if entry, exists = cache[tk]; exists {
fallbackTypeKey = tk
break
}
}
}
if !exists {
c.mu.RUnlock()
return *new(T), false
}
expired := entry.Expired()
c.mu.RUnlock()
if !expired {
return entry.data, true
}
c.mu.Lock()
if cache[fallbackTypeKey] == entry {
delete(cache, fallbackTypeKey)
if len(cache) == 0 {
delete(c.entries, key)
}
c.mu.Unlock()
return *new(T), false
}
c.mu.Unlock()
return *new(T), false
}
func (c *TypedCache[T]) DeleteKey(key string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.entries, key)
}
func (c *TypedCache[T]) Clear() {
c.mu.Lock()
defer c.mu.Unlock()
c.entries = make(map[string]map[string]*CacheEntry[T])
}
func (c *TypedCache[T]) GC() {
c.mu.Lock()
defer c.mu.Unlock()
expiredKeys := make(map[string][]string)
for tk, entries := range c.entries {
for key, entry := range entries {
if !entry.Expired() {
continue
}
if _, ok := expiredKeys[tk]; !ok {
expiredKeys[tk] = make([]string, 0, len(entries))
}
expiredKeys[tk] = append(expiredKeys[tk], key)
}
}
for tk, keys := range expiredKeys {
for _, key := range keys {
delete(c.entries[tk], key)
}
if len(c.entries[tk]) == 0 {
delete(c.entries, tk)
}
}
}
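
An illustrative sketch (not part of the diff) of the type-key variants, mirroring how op.Link uses this cache further down; the keys and values are made up:

package main

import (
    "fmt"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/cache"
)

func main() {
    lc := cache.NewTypedCache[string](30 * time.Minute)
    lc.SetType("/mnt/a/file.mp4", "", "plain-url")            // base variant, type key ""
    lc.SetType("/mnt/a/file.mp4", "/1.2.3.4", "ip-bound-url") // variant keyed by type + "/" + client IP

    // The listed variant keys are checked first (see the comment on GetType above).
    if v, ok := lc.GetType("/mnt/a/file.mp4", "", "/1.2.3.4"); ok {
        fmt.Println(v) // ip-bound-url
    }
    lc.DeleteKey("/mnt/a/file.mp4") // drops every variant cached under the path
}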

internal/cache/utils.go

@@ -0,0 +1,24 @@
package cache
import (
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
log "github.com/sirupsen/logrus"
)
var (
cacheGcCron *cron.Cron
gcFuncs []func()
)
func init() {
// TODO Move to bootstrap
cacheGcCron = cron.NewCron(time.Hour)
cacheGcCron.Do(func() {
log.Infof("Start cache GC")
for _, f := range gcFuncs {
f()
}
})
}
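
Because NewKeyedCache and NewTypedCache append their GC methods to gcFuncs, any cache built through those constructors is swept by this hourly cron with no extra wiring. A hypothetical consumer only has to construct its cache:

package thumbs // hypothetical consumer package

import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/cache"
)

// Constructing the cache is enough; NewKeyedCache registers its GC with the hourly cron.
var thumbCache = cache.NewKeyedCache[[]byte](10 * time.Minute)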

@@ -3,11 +3,9 @@ package driver
type Config struct {
Name string `json:"name"`
LocalSort bool `json:"local_sort"`
// if the driver returns Link with MFile, this should be set to true
OnlyLinkMFile bool `json:"only_local"`
OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"`
OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"`
// if need get message from user, such as validate code
NeedMs bool `json:"need_ms"`
DefaultRoot string `json:"default_root"`
@@ -19,8 +17,12 @@ type Config struct {
ProxyRangeOption bool `json:"-"`
// if the driver returns Link without URL, this should be set to true
NoLinkURL bool `json:"-"`
// LinkCacheType=1 add IP to cache key
//
// LinkCacheType=2 add UserAgent to cache key
LinkCacheType uint8 `json:"-"`
}
func (c Config) MustProxy() bool {
return c.OnlyProxy || c.OnlyLinkMFile || c.NoLinkURL
return c.OnlyProxy || c.NoLinkURL
}
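
A hedged sketch of a driver opting into the new field; the driver name and other values are hypothetical, and only the LinkCacheType semantics come from the comment above:

package exampledriver // hypothetical driver package

import "github.com/OpenListTeam/OpenList/v4/internal/driver"

var config = driver.Config{
    Name:          "ExampleDriver", // hypothetical
    LocalSort:     true,
    LinkCacheType: 1, // 1: add the client IP to the link cache key; 2: add the User-Agent instead
}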

@@ -47,11 +47,6 @@ type Getter interface {
Get(ctx context.Context, path string) (model.Obj, error)
}
type GetObjInfo interface {
// GetObjInfo get file info by path
GetObjInfo(ctx context.Context, path string) (model.Obj, error)
}
//type Writer interface {
// Mkdir
// Move

@@ -1,12 +1,11 @@
package errs
import "errors"
func UnwrapOrSelf(err error) error {
// errors.Unwrap has no fallback mechanism
unwrapped := errors.Unwrap(err)
if unwrapped == nil {
u, ok := err.(interface {
Unwrap() error
})
if !ok {
return err
}
return unwrapped
return u.Unwrap()
}
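
A small example (illustrative, not part of the diff) of what UnwrapOrSelf adds over errors.Unwrap; the downloader change below relies on it to type-assert HttpStatusCodeError whether or not the error was wrapped:

package main

import (
    "errors"
    "fmt"

    "github.com/OpenListTeam/OpenList/v4/internal/errs"
)

func main() {
    base := errors.New("boom")
    wrapped := fmt.Errorf("request failed: %w", base)

    fmt.Println(errs.UnwrapOrSelf(wrapped) == base) // true: one layer unwrapped
    fmt.Println(errs.UnwrapOrSelf(base) == base)    // true: no Unwrap method, so the error itself is returned
}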

@@ -152,7 +152,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
if taskType == move {
task_group.RefreshAndRemove(dstDirPath, task_group.SrcPathToRemove(srcObjPath))
} else {
op.DeleteCache(t.DstStorage, dstDirActualPath)
op.Cache.DeleteDirectory(t.DstStorage, dstDirActualPath)
}
}
return nil, err
@@ -186,7 +186,7 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
if t.TaskType == copy {
if t.Ctx().Value(conf.NoTaskKey) != nil {
defer op.DeleteCache(t.DstStorage, dstActualPath)
defer op.Cache.DeleteDirectory(t.DstStorage, dstActualPath)
} else {
task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath))
}

@@ -15,7 +15,7 @@ func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
path = utils.FixAndCleanPath(path)
// maybe a virtual file
if path != "/" {
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails, false)
for _, f := range virtualFiles {
if f.GetName() == stdpath.Base(path) {
return f, nil

@@ -15,7 +15,7 @@ import (
func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
user, _ := ctx.Value(conf.UserKey).(*model.User)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails, args.Refresh)
storage, actualPath, err := op.GetStorageAndActualPath(path)
if err != nil && len(virtualFiles) == 0 {
return nil, errors.WithMessage(err, "failed get storage")

@@ -28,7 +28,6 @@ type Link struct {
URL string `json:"url"` // most common way
Header http.Header `json:"header"` // needed header (for url)
RangeReader RangeReaderIF `json:"-"` // recommended way if can't use URL
MFile File `json:"-"` // best for local,smb... file system, which exposes MFile
Expiration *time.Duration // local cache expire Duration
@@ -38,6 +37,8 @@ type Link struct {
ContentLength int64 `json:"-"` // transcoded video, thumbnail
utils.SyncClosers `json:"-"`
// Should be true if the Link becomes unusable once the resources in SyncClosers are closed
RequireReference bool `json:"-"`
}
type OtherArgs struct {

@@ -12,6 +12,7 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/rclone/rclone/lib/mmap"
@@ -403,7 +404,7 @@ var errInfiniteRetry = errors.New("infinite retry")
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
resp, err := d.cfg.HttpClient(d.ctx, params)
if err != nil {
statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError)
statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError)
if !ok {
return 0, err
}

@@ -10,6 +10,7 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/cache"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
@@ -17,12 +18,12 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
gocache "github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
@@ -37,14 +38,14 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err)
}
if m.Expiration != nil {
archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
archiveMetaCache.Set(key, m, gocache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
}
return m, nil
}
if storage.Config().OnlyLinkMFile {
meta, err := fn()
return meta, err
}
// if storage.Config().NoLinkSingleflight {
// meta, err := fn()
// return meta, err
// }
if !args.Refresh {
if meta, ok := archiveMetaCache.Get(key); ok {
log.Debugf("use cache when get %s archive meta", path)
@@ -158,7 +159,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return obj, archiveMetaProvider, err
}
var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
var archiveListG singleflight.Group[[]model.Obj]
func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
@@ -199,7 +200,7 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m
if !storage.Config().NoCache {
if len(files) > 0 {
log.Debugf("set cache: %s => %+v", key, files)
archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
archiveListCache.Set(key, files, gocache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
} else {
log.Debugf("del cache: %s", key)
archiveListCache.Del(key)
@@ -354,75 +355,50 @@ func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args mo
return nil, nil, errors.WithStack(errs.ObjectNotFound)
}
type extractLink struct {
*model.Link
Obj model.Obj
type objWithLink struct {
link *model.Link
obj model.Obj
}
var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16))
var extractG = singleflight.Group[*extractLink]{Remember: true}
var extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
var extractG = singleflight.Group[*objWithLink]{}
func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
key := stdpath.Join(Key(storage, path), args.InnerPath)
if link, ok := extractCache.Get(key); ok {
return link.Link, link.Obj, nil
if ol, ok := extractCache.Get(key); ok {
if ol.link.Expiration != nil || ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
}
}
var forget any
var linkM *extractLink
fn := func() (*extractLink, error) {
link, err := driverExtract(ctx, storage, path, args)
fn := func() (*objWithLink, error) {
ol, err := driverExtract(ctx, storage, path, args)
if err != nil {
return nil, errors.Wrapf(err, "failed extract archive")
}
if link.MFile != nil && forget != nil {
linkM = link
return nil, errLinkMFileCache
if ol.link.Expiration != nil {
extractCache.SetWithTTL(key, ol, *ol.link.Expiration)
} else {
extractCache.SetWithExpirable(key, ol, &ol.link.SyncClosers)
}
if link.Link.Expiration != nil {
extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
}
link.AddIfCloser(forget)
return link, nil
return ol, nil
}
if storage.Config().OnlyLinkMFile {
link, err := fn()
for {
ol, err, _ := extractG.Do(key, fn)
if err != nil {
return nil, nil, err
}
return link.Link, link.Obj, nil
}
forget = utils.CloseFunc(func() error {
if forget != nil {
forget = nil
linkG.Forget(key)
if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
}
return nil
})
link, err, _ := extractG.Do(key, fn)
for err == nil && !link.AcquireReference() {
link, err, _ = extractG.Do(key, fn)
}
if err == errLinkMFileCache {
if linkM != nil {
return linkM.Link, linkM.Obj, nil
}
forget = nil
link, err = fn()
}
if err != nil {
return nil, nil, err
}
return link.Link, link.Obj, nil
}
func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) {
func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*objWithLink, error) {
storageAr, ok := storage.(driver.ArchiveReader)
if !ok {
return nil, errs.DriverExtractNotSupported
@@ -438,7 +414,7 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args
return nil, errors.WithStack(errs.NotFile)
}
link, err := storageAr.Extract(ctx, archiveFile, args)
return &extractLink{Link: link, Obj: extracted}, err
return &objWithLink{link: link, obj: extracted}, err
}
type streamWithParent struct {
@@ -500,16 +476,16 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
if err == nil {
if len(newObjs) > 0 {
for _, newObj := range newObjs {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
}
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
Cache.DeleteDirectory(storage, dstDirPath)
}
}
case driver.ArchiveDecompress:
err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
Cache.DeleteDirectory(storage, dstDirPath)
}
default:
return errs.NotImplement

internal/op/cache.go

@@ -0,0 +1,257 @@
package op
import (
stdpath "path"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/cache"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
type CacheManager struct {
dirCache *cache.KeyedCache[*directoryCache] // Cache for directory listings
linkCache *cache.TypedCache[*objWithLink] // Cache for file links
userCache *cache.KeyedCache[*model.User] // Cache for user data
settingCache *cache.KeyedCache[any] // Cache for settings
detailCache *cache.KeyedCache[*model.StorageDetails] // Cache for storage details
}
func NewCacheManager() *CacheManager {
return &CacheManager{
dirCache: cache.NewKeyedCache[*directoryCache](time.Minute * 5),
linkCache: cache.NewTypedCache[*objWithLink](time.Minute * 30),
userCache: cache.NewKeyedCache[*model.User](time.Hour),
settingCache: cache.NewKeyedCache[any](time.Hour),
detailCache: cache.NewKeyedCache[*model.StorageDetails](time.Minute * 30),
}
}
// global instance
var Cache = NewCacheManager()
func Key(storage driver.Driver, path string) string {
return stdpath.Join(storage.GetStorage().MountPath, path)
}
// update object in dirCache.
// if it's a directory, remove all its children from dirCache too.
// if it's a file, remove its link from linkCache.
func (cm *CacheManager) updateDirectoryObject(storage driver.Driver, dirPath string, oldObj model.Obj, newObj model.Obj) {
key := Key(storage, dirPath)
if !oldObj.IsDir() {
cm.linkCache.DeleteKey(stdpath.Join(key, oldObj.GetName()))
cm.linkCache.DeleteKey(stdpath.Join(key, newObj.GetName()))
}
if storage.Config().NoCache {
return
}
if cache, exist := cm.dirCache.Get(key); exist {
if oldObj.IsDir() {
cm.deleteDirectoryTree(stdpath.Join(key, oldObj.GetName()))
}
cache.UpdateObject(oldObj.GetName(), newObj)
}
}
// add new object to dirCache
func (cm *CacheManager) addDirectoryObject(storage driver.Driver, dirPath string, newObj model.Obj) {
if storage.Config().NoCache {
return
}
cache, exist := cm.dirCache.Get(Key(storage, dirPath))
if exist {
cache.UpdateObject(newObj.GetName(), newObj)
}
}
// recursively delete directory and its children from dirCache
func (cm *CacheManager) DeleteDirectoryTree(storage driver.Driver, dirPath string) {
if storage.Config().NoCache {
return
}
cm.deleteDirectoryTree(Key(storage, dirPath))
}
func (cm *CacheManager) deleteDirectoryTree(key string) {
if dirCache, exists := cm.dirCache.Take(key); exists {
for _, obj := range dirCache.objs {
if obj.IsDir() {
cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName()))
}
}
}
}
// remove directory from dirCache
func (cm *CacheManager) DeleteDirectory(storage driver.Driver, dirPath string) {
if storage.Config().NoCache {
return
}
cm.dirCache.Delete(Key(storage, dirPath))
}
// remove object from dirCache.
// if it's a directory, remove all its children from dirCache too.
// if it's a file, remove its link from linkCache.
func (cm *CacheManager) removeDirectoryObject(storage driver.Driver, dirPath string, obj model.Obj) {
key := Key(storage, dirPath)
if !obj.IsDir() {
cm.linkCache.DeleteKey(stdpath.Join(key, obj.GetName()))
}
if storage.Config().NoCache {
return
}
if cache, exist := cm.dirCache.Get(key); exist {
if obj.IsDir() {
cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName()))
}
cache.RemoveObject(obj.GetName())
}
}
// cache user data
func (cm *CacheManager) SetUser(username string, user *model.User) {
cm.userCache.Set(username, user)
}
// cached user data
func (cm *CacheManager) GetUser(username string) (*model.User, bool) {
return cm.userCache.Get(username)
}
// remove user data from cache
func (cm *CacheManager) DeleteUser(username string) {
cm.userCache.Delete(username)
}
// caches setting
func (cm *CacheManager) SetSetting(key string, setting *model.SettingItem) {
cm.settingCache.Set(key, setting)
}
// cached setting
func (cm *CacheManager) GetSetting(key string) (*model.SettingItem, bool) {
if data, exists := cm.settingCache.Get(key); exists {
if setting, ok := data.(*model.SettingItem); ok {
return setting, true
}
}
return nil, false
}
// cache setting groups
func (cm *CacheManager) SetSettingGroup(key string, settings []model.SettingItem) {
cm.settingCache.Set(key, settings)
}
// cached setting group
func (cm *CacheManager) GetSettingGroup(key string) ([]model.SettingItem, bool) {
if data, exists := cm.settingCache.Get(key); exists {
if settings, ok := data.([]model.SettingItem); ok {
return settings, true
}
}
return nil, false
}
func (cm *CacheManager) SetStorageDetails(storage driver.Driver, details *model.StorageDetails) {
if storage.Config().NoCache {
return
}
expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
cm.detailCache.SetWithTTL(storage.GetStorage().MountPath, details, expiration)
}
func (cm *CacheManager) GetStorageDetails(storage driver.Driver) (*model.StorageDetails, bool) {
return cm.detailCache.Get(storage.GetStorage().MountPath)
}
func (cm *CacheManager) InvalidateStorageDetails(storage driver.Driver) {
cm.detailCache.Delete(storage.GetStorage().MountPath)
}
// clears all caches
func (cm *CacheManager) ClearAll() {
cm.dirCache.Clear()
cm.linkCache.Clear()
cm.userCache.Clear()
cm.settingCache.Clear()
cm.detailCache.Clear()
}
type directoryCache struct {
objs []model.Obj
sorted []model.Obj
mu sync.RWMutex
dirtyFlags uint8
}
const (
dirtyRemove uint8 = 1 << iota // object removed: refresh the sorted copy, but no full sort/extract is needed
dirtyUpdate // object updated: a full sort + extract is required
)
func newDirectoryCache(objs []model.Obj) *directoryCache {
sorted := make([]model.Obj, len(objs))
copy(sorted, objs)
return &directoryCache{
objs: objs,
sorted: sorted,
}
}
func (dc *directoryCache) RemoveObject(name string) {
dc.mu.Lock()
defer dc.mu.Unlock()
for i, obj := range dc.objs {
if obj.GetName() == name {
dc.objs = append(dc.objs[:i], dc.objs[i+1:]...)
dc.dirtyFlags |= dirtyRemove
break
}
}
}
func (dc *directoryCache) UpdateObject(oldName string, newObj model.Obj) {
dc.mu.Lock()
defer dc.mu.Unlock()
if oldName != "" {
for i, obj := range dc.objs {
if obj.GetName() == oldName {
dc.objs[i] = newObj
dc.dirtyFlags |= dirtyUpdate
return
}
}
}
dc.objs = append(dc.objs, newObj)
dc.dirtyFlags |= dirtyUpdate
}
func (dc *directoryCache) GetSortedObjects(meta driver.Meta) []model.Obj {
dc.mu.RLock()
if dc.dirtyFlags == 0 {
dc.mu.RUnlock()
return dc.sorted
}
dc.mu.RUnlock()
dc.mu.Lock()
defer dc.mu.Unlock()
sorted := make([]model.Obj, len(dc.objs))
copy(sorted, dc.objs)
dc.sorted = sorted
if dc.dirtyFlags&dirtyUpdate != 0 {
storage := meta.GetStorage()
if meta.Config().LocalSort {
model.SortFiles(sorted, storage.OrderBy, storage.OrderDirection)
}
model.ExtractFolder(sorted, storage.ExtractFolder)
}
dc.dirtyFlags = 0
return sorted
}
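
For callers, the consolidation mostly means swapping the removed package-level helpers for methods on op.Cache. A hedged sketch of the mapping (the helper function, package name, and paths are illustrative):

package example // hypothetical call site

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
)

// Rough mapping from the removed helpers to CacheManager methods:
//   op.DeleteCache(storage, path) -> op.Cache.DeleteDirectory(storage, path)
//   op.ClearCache(storage, path)  -> op.Cache.DeleteDirectoryTree(storage, path)
//   userCache.Del(username)       -> op.Cache.DeleteUser(username)
func invalidateAfterExternalChange(storage driver.Driver, dirPath string) {
    // Recursively drops the cached listing for dirPath and its cached subdirectories.
    op.Cache.DeleteDirectoryTree(storage, dirPath)
}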

@@ -4,115 +4,20 @@ import (
"context"
stderrors "errors"
stdpath "path"
"slices"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// In order to facilitate adding some other things before and after file op
var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var listG singleflight.Group[[]model.Obj]
func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, obj := range objs {
if obj.GetName() == newObj.GetName() {
objs = slices.Delete(objs, i, i+1)
break
}
}
for i, obj := range objs {
if obj.GetName() == oldObj.GetName() {
objs[i] = newObj
break
}
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
func delCacheObj(storage driver.Driver, path string, obj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, oldObj := range objs {
if oldObj.GetName() == obj.GetName() {
objs = append(objs[:i], objs[i+1:]...)
break
}
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
var addSortDebounceMap generic_sync.MapOf[string, func(func())]
func addCacheObj(storage driver.Driver, path string, newObj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, obj := range objs {
if obj.GetName() == newObj.GetName() {
objs[i] = newObj
return
}
}
// Simple separation of files and folders
if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() {
objs = append(objs, newObj)
} else {
objs = append([]model.Obj{newObj}, objs...)
}
if storage.Config().LocalSort {
debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute))
log.Debug("addCacheObj: wait start sort")
debounce(func() {
log.Debug("addCacheObj: start sort")
model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
addSortDebounceMap.Delete(key)
})
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
func ClearCache(storage driver.Driver, path string) {
objs, ok := listCache.Get(Key(storage, path))
if ok {
for _, obj := range objs {
if obj.IsDir() {
ClearCache(storage, stdpath.Join(path, obj.GetName()))
}
}
}
listCache.Del(Key(storage, path))
}
func DeleteCache(storage driver.Driver, path string) {
listCache.Del(Key(storage, path))
}
func Key(storage driver.Driver, path string) string {
return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
}
// List files in storage, not contains virtual file
func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs) ([]model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -122,11 +27,12 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
log.Debugf("op.List %s", path)
key := Key(storage, path)
if !args.Refresh {
if files, ok := listCache.Get(key); ok {
if dirCache, exists := Cache.dirCache.Get(key); exists {
log.Debugf("use cache when list %s", path)
return files, nil
return dirCache.GetSortedObjects(storage), nil
}
}
dir, err := GetUnwrap(ctx, storage, path)
if err != nil {
return nil, errors.WithMessage(err, "failed get dir")
@@ -135,6 +41,7 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
if !dir.IsDir() {
return nil, errors.WithStack(errs.NotFolder)
}
objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
files, err := storage.List(ctx, dir, args)
if err != nil {
@@ -162,10 +69,11 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
if !storage.Config().NoCache {
if len(files) > 0 {
log.Debugf("set cache: %s => %+v", key, files)
listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
ttl := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
Cache.dirCache.SetWithTTL(key, newDirectoryCache(files), ttl)
} else {
log.Debugf("del cache: %s", key)
listCache.Del(key)
Cache.deleteDirectoryTree(key)
}
}
return files, nil
@@ -252,100 +160,72 @@ func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.O
return model.UnwrapObj(obj), err
}
var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))
var linkG = singleflight.Group[*model.Link]{Remember: true}
var errLinkMFileCache = stderrors.New("ErrLinkMFileCache")
var linkG = singleflight.Group[*objWithLink]{}
// Link get link, if is an url. should have an expiry time
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
var (
file model.Obj
err error
)
// use cache directly
dir, name := stdpath.Split(stdpath.Join(storage.GetStorage().MountPath, path))
if cacheFiles, ok := listCache.Get(strings.TrimSuffix(dir, "/")); ok {
for _, f := range cacheFiles {
if f.GetName() == name {
file = model.UnwrapObj(f)
break
}
typeKey := args.Type
var typeKeys []string
switch storage.Config().LinkCacheType {
case 1:
if args.IP != "" {
typeKey += "/" + args.IP
typeKeys = []string{typeKey}
}
} else {
if g, ok := storage.(driver.GetObjInfo); ok {
file, err = g.GetObjInfo(ctx, path)
} else {
file, err = GetUnwrap(ctx, storage, path)
case 2:
if ua := args.Header.Get("User-Agent"); ua != "" {
typeKey += "/" + ua
typeKeys = []string{typeKey}
}
}
if file == nil {
key := Key(storage, path)
if ol, exists := Cache.linkCache.GetType(key, args.Type, typeKeys...); exists {
if ol.link.Expiration != nil ||
ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
}
}
fn := func() (*objWithLink, error) {
file, err := GetUnwrap(ctx, storage, path)
if err != nil {
return nil, nil, errors.WithMessage(err, "failed to get file")
return nil, errors.WithMessage(err, "failed to get file")
}
if file.IsDir() {
return nil, errors.WithStack(errs.NotFile)
}
return nil, nil, errors.WithStack(errs.ObjectNotFound)
}
if file.IsDir() {
return nil, nil, errors.WithStack(errs.NotFile)
}
key := stdpath.Join(Key(storage, path), args.Type)
if link, ok := linkCache.Get(key); ok {
return link, file, nil
}
var forget any
var linkM *model.Link
fn := func() (*model.Link, error) {
link, err := storage.Link(ctx, file, args)
if err != nil {
return nil, errors.Wrapf(err, "failed get link")
}
if link.MFile != nil && forget != nil {
linkM = link
return nil, errLinkMFileCache
}
ol := &objWithLink{link: link, obj: file}
if link.Expiration != nil {
linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
Cache.linkCache.SetTypeWithTTL(key, typeKey, ol, *link.Expiration)
} else {
Cache.linkCache.SetTypeWithExpirable(key, typeKey, ol, &link.SyncClosers)
}
link.AddIfCloser(forget)
return link, nil
return ol, nil
}
if storage.Config().OnlyLinkMFile {
link, err := fn()
retry := 0
for {
ol, err, _ := linkG.Do(key+"/"+typeKey, fn)
if err != nil {
return nil, nil, err
}
return link, file, err
}
forget = utils.CloseFunc(func() error {
if forget != nil {
forget = nil
linkG.Forget(key)
if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
if retry > 1 {
log.Warnf("Link retry successed after %d times: %s %s", retry, key, typeKey)
}
return ol.link, ol.obj, nil
}
return nil
})
link, err, _ := linkG.Do(key, fn)
for err == nil && !link.AcquireReference() {
link, err, _ = linkG.Do(key, fn)
retry++
}
if err == errLinkMFileCache {
if linkM != nil {
return linkM, file, nil
}
forget = nil
link, err = fn()
}
if err != nil {
return nil, nil, err
}
return link, file, nil
}
// Other api
@@ -365,7 +245,7 @@ func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) (
}
}
var mkdirG singleflight.Group[interface{}]
var mkdirG singleflight.Group[any]
func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -373,7 +253,7 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
}
path = utils.FixAndCleanPath(path)
key := Key(storage, path)
_, err, _ := mkdirG.Do(key, func() (interface{}, error) {
_, err, _ := mkdirG.Do(key, func() (any, error) {
// check if dir exists
f, err := GetUnwrap(ctx, storage, path)
if err != nil {
@@ -395,15 +275,19 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
newObj, err = s.MakeDir(ctx, parentDir, dirName)
if err == nil {
if newObj != nil {
addCacheObj(storage, parentPath, model.WrapObjName(newObj))
if !storage.Config().NoCache {
if dirCache, exist := Cache.dirCache.Get(Key(storage, parentPath)); exist {
dirCache.UpdateObject("", newObj)
}
}
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, parentPath)
Cache.DeleteDirectory(storage, parentPath)
}
}
case driver.Mkdir:
err = s.MakeDir(ctx, parentDir, dirName)
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, parentPath)
Cache.DeleteDirectory(storage, parentPath)
}
default:
return nil, errs.NotImplement
@@ -427,7 +311,11 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
srcPath = utils.FixAndCleanPath(srcPath)
srcDirPath := stdpath.Dir(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath)
if dstDirPath == srcDirPath {
return stderrors.New("move in place")
}
srcRawObj, err := Get(ctx, storage, srcPath)
if err != nil {
return errors.WithMessage(err, "failed to get src object")
@@ -437,26 +325,25 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
if err != nil {
return errors.WithMessage(err, "failed to get dst dir")
}
srcDirPath := stdpath.Dir(srcPath)
switch s := storage.(type) {
case driver.MoveResult:
var newObj model.Obj
newObj, err = s.Move(ctx, srcObj, dstDir)
if err == nil {
delCacheObj(storage, srcDirPath, srcRawObj)
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
Cache.DeleteDirectory(storage, dstDirPath)
}
}
case driver.Move:
err = s.Move(ctx, srcObj, dstDir)
if err == nil {
delCacheObj(storage, srcDirPath, srcRawObj)
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
Cache.DeleteDirectory(storage, dstDirPath)
}
}
default:
@@ -475,28 +362,29 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string,
return errors.WithMessage(err, "failed to get src object")
}
srcObj := model.UnwrapObj(srcRawObj)
srcDirPath := stdpath.Dir(srcPath)
switch s := storage.(type) {
case driver.RenameResult:
var newObj model.Obj
newObj, err = s.Rename(ctx, srcObj, dstName)
if err == nil {
srcDirPath := stdpath.Dir(srcPath)
if newObj != nil {
updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, srcDirPath)
if srcRawObj.IsDir() {
ClearCache(storage, srcPath)
Cache.updateDirectoryObject(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
} else {
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, srcDirPath)
}
}
}
case driver.Rename:
err = s.Rename(ctx, srcObj, dstName)
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, srcDirPath)
if srcRawObj.IsDir() {
ClearCache(storage, srcPath)
if err == nil {
srcDirPath := stdpath.Dir(srcPath)
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, srcDirPath)
}
}
default:
@@ -512,10 +400,14 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
}
srcPath = utils.FixAndCleanPath(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath)
srcObj, err := GetUnwrap(ctx, storage, srcPath)
if dstDirPath == stdpath.Dir(srcPath) {
return stderrors.New("copy in place")
}
srcRawObj, err := Get(ctx, storage, srcPath)
if err != nil {
return errors.WithMessage(err, "failed to get src object")
}
srcObj := model.UnwrapObj(srcRawObj)
dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
if err != nil {
return errors.WithMessage(err, "failed to get dst dir")
@@ -527,15 +419,17 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
newObj, err = s.Copy(ctx, srcObj, dstDir)
if err == nil {
if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
Cache.DeleteDirectory(storage, dstDirPath)
}
}
case driver.Copy:
err = s.Copy(ctx, srcObj, dstDir)
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
if err == nil {
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
}
default:
return errs.NotImplement
@@ -566,11 +460,7 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
case driver.Remove:
err = s.Remove(ctx, model.UnwrapObj(rawObj))
if err == nil {
delCacheObj(storage, dirPath, rawObj)
// clear folder cache recursively
if rawObj.IsDir() {
ClearCache(storage, path)
}
Cache.removeDirectoryObject(storage, dirPath, rawObj)
}
default:
return errs.NotImplement
@@ -640,16 +530,20 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
var newObj model.Obj
newObj, err = s.Put(ctx, parentDir, file, up)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
Cache.DeleteDirectory(storage, dstDirPath)
}
}
case driver.Put:
err = s.Put(ctx, parentDir, file, up)
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
}
default:
return errs.NotImplement
@@ -664,13 +558,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
}
} else {
// upload success, remove old obj
err := Remove(ctx, storage, tempPath)
if err != nil {
return err
} else {
key := Key(storage, stdpath.Join(dstDirPath, file.GetName()))
linkCache.Del(key)
}
err = Remove(ctx, storage, tempPath)
}
}
return errors.WithStack(err)
@@ -681,7 +569,8 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
dstDirPath = utils.FixAndCleanPath(dstDirPath)
_, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName))
dstPath := stdpath.Join(dstDirPath, dstName)
_, err := GetUnwrap(ctx, storage, dstPath)
if err == nil {
return errors.New("obj already exists")
}
@@ -698,16 +587,20 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
var newObj model.Obj
newObj, err = s.PutURL(ctx, dstDir, dstName, url)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
Cache.DeleteDirectory(storage, dstDirPath)
}
}
case driver.PutURL:
err = s.PutURL(ctx, dstDir, dstName, url)
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
}
default:
return errs.NotImplement

@@ -5,26 +5,21 @@ import (
"sort"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
)
var settingCache = cache.NewMemCache(cache.WithShards[*model.SettingItem](4))
var settingG singleflight.Group[*model.SettingItem]
var settingCacheF = func(item *model.SettingItem) {
settingCache.Set(item.Key, item, cache.WithEx[*model.SettingItem](time.Hour))
Cache.SetSetting(item.Key, item)
}
var settingGroupCache = cache.NewMemCache(cache.WithShards[[]model.SettingItem](4))
var settingGroupG singleflight.Group[[]model.SettingItem]
var settingGroupCacheF = func(key string, item []model.SettingItem) {
settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour))
var settingGroupCacheF = func(key string, items []model.SettingItem) {
Cache.SetSettingGroup(key, items)
}
var settingChangingCallbacks = make([]func(), 0)
@@ -34,8 +29,7 @@ func RegisterSettingChangingCallback(f func()) {
}
func SettingCacheUpdate() {
settingCache.Clear()
settingGroupCache.Clear()
Cache.ClearAll()
for _, cb := range settingChangingCallbacks {
cb()
}
@@ -60,7 +54,7 @@ func GetSettingsMap() map[string]string {
}
func GetSettingItems() ([]model.SettingItem, error) {
if items, ok := settingGroupCache.Get("ALL_SETTING_ITEMS"); ok {
if items, exists := Cache.GetSettingGroup("ALL_SETTING_ITEMS"); exists {
return items, nil
}
items, err, _ := settingGroupG.Do("ALL_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -75,7 +69,7 @@ func GetSettingItems() ([]model.SettingItem, error) {
}
func GetPublicSettingItems() ([]model.SettingItem, error) {
if items, ok := settingGroupCache.Get("ALL_PUBLIC_SETTING_ITEMS"); ok {
if items, exists := Cache.GetSettingGroup("ALL_PUBLIC_SETTING_ITEMS"); exists {
return items, nil
}
items, err, _ := settingGroupG.Do("ALL_PUBLIC_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -90,7 +84,7 @@ func GetPublicSettingItems() ([]model.SettingItem, error) {
}
func GetSettingItemByKey(key string) (*model.SettingItem, error) {
if item, ok := settingCache.Get(key); ok {
if item, exists := Cache.GetSetting(key); exists {
return item, nil
}
@@ -118,8 +112,8 @@ func GetSettingItemInKeys(keys []string) ([]model.SettingItem, error) {
}
func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {
key := strconv.Itoa(group)
if items, ok := settingGroupCache.Get(key); ok {
key := fmt.Sprintf("GROUP_%d", group)
if items, exists := Cache.GetSettingGroup(key); exists {
return items, nil
}
items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {
@@ -135,11 +129,14 @@ func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {
func GetSettingItemsInGroups(groups []int) ([]model.SettingItem, error) {
sort.Ints(groups)
key := strings.Join(utils.MustSliceConvert(groups, func(i int) string {
return strconv.Itoa(i)
}), ",")
if items, ok := settingGroupCache.Get(key); ok {
keyParts := make([]string, 0, len(groups))
for _, g := range groups {
keyParts = append(keyParts, strconv.Itoa(g))
}
key := "GROUPS_" + strings.Join(keyParts, "_")
if items, exists := Cache.GetSettingGroup(key); exists {
return items, nil
}
items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {
@@ -165,10 +162,10 @@ func SaveSettingItems(items []model.SettingItem) error {
}
}
err := db.SaveSettingItems(items)
if err != nil {
if err != nil {
return fmt.Errorf("failed save setting: %+v", err)
}
SettingCacheUpdate()
SettingCacheUpdate()
return nil
}

@@ -15,6 +15,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
@@ -239,6 +240,8 @@ func UpdateStorage(ctx context.Context, storage model.Storage) error {
if oldStorage.MountPath != storage.MountPath {
// mount path renamed, need to drop the storage
storagesMap.Delete(oldStorage.MountPath)
Cache.DeleteDirectoryTree(storageDriver, "/")
Cache.InvalidateStorageDetails(storageDriver)
}
if err != nil {
return errors.WithMessage(err, "failed get storage driver")
@@ -259,6 +262,7 @@ func DeleteStorageById(ctx context.Context, id uint) error {
if err != nil {
return errors.WithMessage(err, "failed get storage")
}
var dropErr error = nil
if !storage.Disabled {
storageDriver, err := GetStorageByMountPath(storage.MountPath)
if err != nil {
@@ -266,17 +270,19 @@ func DeleteStorageById(ctx context.Context, id uint) error {
}
// drop the storage in the driver
if err := storageDriver.Drop(ctx); err != nil {
return errors.Wrapf(err, "failed drop storage")
dropErr = errors.Wrapf(err, "failed drop storage")
}
// delete the storage in the memory
storagesMap.Delete(storage.MountPath)
Cache.DeleteDirectoryTree(storageDriver, "/")
Cache.InvalidateStorageDetails(storageDriver)
go callStorageHooks("del", storageDriver)
}
// delete the storage in the database
if err := db.DeleteStorageById(id); err != nil {
return errors.WithMessage(err, "failed delete storage in database")
}
return nil
return dropErr
}
// MustSaveDriverStorage call from specific driver
@@ -340,8 +346,8 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
})
}
func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj {
if utils.IsBool(hideDetails...) {
func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails, refresh bool) []model.Obj {
if hideDetails {
return GetStorageVirtualFilesByPath(prefix)
}
return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
@@ -354,7 +360,7 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
}
timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
details, err := GetStorageDetails(timeoutCtx, d)
details, err := GetStorageDetails(timeoutCtx, d, refresh)
if err != nil {
if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
@@ -439,7 +445,9 @@ func GetBalancedStorage(path string) driver.Driver {
}
}
func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) {
var detailsG singleflight.Group[*model.StorageDetails]
func GetStorageDetails(ctx context.Context, storage driver.Driver, refresh ...bool) (*model.StorageDetails, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
@@ -447,5 +455,18 @@ func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.Stora
if !ok {
return nil, errs.NotImplement
}
return wd.GetDetails(ctx)
if !utils.IsBool(refresh...) {
if ret, ok := Cache.GetStorageDetails(storage); ok {
return ret, nil
}
}
details, err, _ := detailsG.Do(storage.GetStorage().MountPath, func() (*model.StorageDetails, error) {
ret, err := wd.GetDetails(ctx)
if err != nil {
return nil, err
}
Cache.SetStorageDetails(storage, ret)
return ret, nil
})
return details, err
}
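
A short usage sketch of the new optional refresh flag (illustrative; ctx and storage are assumed to come from the caller):

package example // hypothetical call site

import (
    "context"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
)

func refreshedUsage(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) {
    // Passing refresh=true bypasses detailCache; without it a fresh cached entry is
    // returned when present. Concurrent driver queries for the same mount path are
    // deduplicated by the detailsG singleflight group.
    return op.GetStorageDetails(ctx, storage, true)
}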

@@ -1,17 +1,13 @@
package op
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
)
var userCache = cache.NewMemCache(cache.WithShards[*model.User](2))
var userG singleflight.Group[*model.User]
var guestUser *model.User
var adminUser *model.User
@@ -46,7 +42,7 @@ func GetUserByName(username string) (*model.User, error) {
if username == "" {
return nil, errs.EmptyUsername
}
if user, ok := userCache.Get(username); ok {
if user, exists := Cache.GetUser(username); exists {
return user, nil
}
user, err, _ := userG.Do(username, func() (*model.User, error) {
@@ -54,7 +50,7 @@ func GetUserByName(username string) (*model.User, error) {
if err != nil {
return nil, err
}
userCache.Set(username, _user, cache.WithEx[*model.User](time.Hour))
Cache.SetUser(username, _user)
return _user, nil
})
return user, err
@@ -81,7 +77,7 @@ func DeleteUserById(id uint) error {
if old.IsAdmin() || old.IsGuest() {
return errs.DeleteAdminOrGuest
}
userCache.Del(old.Username)
Cache.DeleteUser(old.Username)
return db.DeleteUserById(id)
}
@@ -96,7 +92,7 @@ func UpdateUser(u *model.User) error {
if u.IsGuest() {
guestUser = nil
}
userCache.Del(old.Username)
Cache.DeleteUser(old.Username)
u.BasePath = utils.FixAndCleanPath(u.BasePath)
return db.UpdateUser(u)
}
@@ -125,6 +121,6 @@ func DelUserCache(username string) error {
if user.IsGuest() {
guestUser = nil
}
userCache.Del(username)
Cache.DeleteUser(username)
return nil
}

@@ -11,6 +11,7 @@ import (
"os"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/net"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -27,9 +28,6 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran
}
func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
if link.MFile != nil {
return GetRangeReaderFromMFile(size, link.MFile), nil
}
if link.Concurrency > 0 || link.PartSize > 0 {
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
@@ -66,7 +64,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
}
if len(link.URL) == 0 {
return nil, errors.New("invalid link: must have at least one of MFile, URL, or RangeReader")
return nil, errors.New("invalid link: must have at least one of URL or RangeReader")
}
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
@@ -78,7 +76,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
response, err := net.RequestHttp(ctx, "GET", header, link.URL)
if err != nil {
if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
if _, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
return nil, err
}
return nil, fmt.Errorf("http request failure, err:%w", err)

@@ -24,16 +24,15 @@ func RefreshAndRemove(dstPath string, payloads ...any) {
return
}
_, dstNeedRefresh := dstStorage.(driver.Put)
dstNeedRefresh = dstNeedRefresh && !dstStorage.Config().NoCache
if dstNeedRefresh {
op.DeleteCache(dstStorage, dstActualPath)
op.Cache.DeleteDirectory(dstStorage, dstActualPath)
}
var ctx context.Context
for _, payload := range payloads {
switch p := payload.(type) {
case DstPathToRefresh:
if dstNeedRefresh {
op.DeleteCache(dstStorage, string(p))
op.Cache.DeleteDirectory(dstStorage, string(p))
}
case SrcPathToRemove:
if ctx == nil {
@@ -79,7 +78,7 @@ func verifyAndRemove(ctx context.Context, srcStorage, dstStorage driver.Driver,
}
if refresh {
op.DeleteCache(dstStorage, dstObjPath)
op.Cache.DeleteDirectory(dstStorage, dstObjPath)
}
hasErr := false
for _, obj := range srcObjs {