From febbcd6027082a891cf83ecbf3a3f2315aa000f3 Mon Sep 17 00:00:00 2001 From: ILoveScratch Date: Sat, 18 Oct 2025 21:47:18 +0800 Subject: [PATCH] feat(cache): improve cache management (#1339) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(cache): improve cache management * feat(disk-usage): add cache * feat(disk-usage): add refresh * fix(disk-usage): cache with ttl * feat(cache): implement KeyedCache and TypedCache for improved caching mechanism * fix(copy): update object retrieval to use Get instead of GetUnwrap * refactor(cache): simplify DirectoryCache structure and improve object management * fix(cache): correct cache entry initialization and key deletion logic in TypedCache * refactor(driver): remove GetObjInfo interface and simplify Link function logic https://github.com/OpenListTeam/OpenList/pull/888/files#r2430925783 * fix(link): optimize link retrieval and caching logic * refactor(cache): consolidate cache management and improve directory cache handling * fix(cache): add cache control based on storage configuration in List function * . 
* refactor: replace fmt.Sprintf with strconv for integer conversions * refactor(cache): enhance cache entry management with Expirable interface * fix(cache): improve link reference acquisition logic to handle expiration * refactor: replace OnlyLinkMFile with NoLinkSF in driver configurations and logic * refactor(link): enhance link caching logic with dynamic type keys based on IP and User-Agent * feat(drivers): add LinkCacheType to driver configurations for enhanced caching * refactor(cache): streamline directory object management in cache operations * refactor(cache): remove unnecessary 'dirty' field from CacheEntry structure * refactor(cache): replace 'dirty' field with bitwise flags * refactor(io): 调高SyncClosers.AcquireReference的优先级 * refactor(link): 优化链接获取逻辑,增加重 * refactor(link): 添加RequireReference字段以增强链接管理 * refactor(link): 移除MFile字段,改用RangeReader * refactor: 移除不必要的NoLinkSF字段 * refactor(cache): 修改目录缓存的脏标志定义和更新逻辑 * feat(cache): add expiration gc --------- Co-authored-by: KirCute <951206789@qq.com> Co-authored-by: KirCute Co-authored-by: j2rong4cn --- drivers/115/meta.go | 7 +- drivers/115_open/driver.go | 17 - drivers/115_open/meta.go | 5 +- drivers/alias/driver.go | 5 +- drivers/alias/util.go | 4 +- drivers/baidu_photo/meta.go | 5 +- drivers/chaoxing/driver.go | 5 +- drivers/chaoxing/types.go | 2 +- drivers/chaoxing/util.go | 3 +- drivers/crypt/driver.go | 3 +- drivers/doubao/util.go | 4 +- drivers/febbox/meta.go | 7 +- drivers/ftp/meta.go | 10 +- drivers/halalcloud_open/halalcloud_upload.go | 4 +- drivers/local/driver.go | 15 +- drivers/local/meta.go | 12 +- drivers/mediafire/meta.go | 1 - drivers/openlist/driver.go | 5 + drivers/sftp/driver.go | 11 +- drivers/sftp/meta.go | 12 +- drivers/smb/driver.go | 11 +- drivers/smb/meta.go | 12 +- drivers/strm/driver.go | 3 +- drivers/strm/meta.go | 15 +- drivers/teldrive/upload.go | 4 +- drivers/template/meta.go | 1 - drivers/thunder_browser/driver.go | 3 +- drivers/virtual/driver.go | 12 +- drivers/virtual/meta.go | 
10 +- internal/cache/keyed_cache.go | 101 ++++++ internal/cache/type.go | 18 + internal/cache/typed_cache.go | 122 +++++++ internal/cache/utils.go | 24 ++ internal/driver/config.go | 14 +- internal/driver/driver.go | 5 - internal/errs/unwrap.go | 11 +- internal/fs/copy_move.go | 4 +- internal/fs/get.go | 2 +- internal/fs/list.go | 2 +- internal/model/args.go | 3 +- internal/net/request.go | 3 +- internal/op/archive.go | 94 ++---- internal/op/cache.go | 257 +++++++++++++++ internal/op/fs.go | 325 +++++++------------ internal/op/setting.go | 39 +-- internal/op/storage.go | 35 +- internal/op/user.go | 14 +- internal/stream/util.go | 8 +- internal/task_group/transfer.go | 7 +- pkg/gowebdav/errors.go | 4 +- pkg/singleflight/singleflight.go | 5 +- pkg/utils/io.go | 65 ++-- server/common/proxy.go | 12 +- server/handles/down.go | 3 +- server/handles/fsmanage.go | 2 +- server/webdav/webdav.go | 2 +- 56 files changed, 880 insertions(+), 509 deletions(-) create mode 100644 internal/cache/keyed_cache.go create mode 100644 internal/cache/type.go create mode 100644 internal/cache/typed_cache.go create mode 100644 internal/cache/utils.go create mode 100644 internal/op/cache.go diff --git a/drivers/115/meta.go b/drivers/115/meta.go index 6a32715e..5dc9ef41 100644 --- a/drivers/115/meta.go +++ b/drivers/115/meta.go @@ -15,10 +15,9 @@ type Addition struct { } var config = driver.Config{ - Name: "115 Cloud", - DefaultRoot: "0", - // OnlyProxy: true, - // NoOverwriteUpload: true, + Name: "115 Cloud", + DefaultRoot: "0", + LinkCacheType: 2, } func init() { diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go index edab65ab..afccb2a7 100644 --- a/drivers/115_open/driver.go +++ b/drivers/115_open/driver.go @@ -131,23 +131,6 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) }, nil } -func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) { - if err := d.WaitLimit(ctx); err != nil { - return nil, err - } - resp, 
err := d.client.GetFolderInfoByPath(ctx, path) - if err != nil { - return nil, err - } - return &Obj{ - Fid: resp.FileID, - Fn: resp.FileName, - Fc: resp.FileCategory, - Sha1: resp.Sha1, - Pc: resp.PickCode, - }, nil -} - func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { if err := d.WaitLimit(ctx); err != nil { return nil, err diff --git a/drivers/115_open/meta.go b/drivers/115_open/meta.go index c24b9993..d9d7d598 100644 --- a/drivers/115_open/meta.go +++ b/drivers/115_open/meta.go @@ -17,8 +17,9 @@ type Addition struct { } var config = driver.Config{ - Name: "115 Open", - DefaultRoot: "0", + Name: "115 Open", + DefaultRoot: "0", + LinkCacheType: 2, } func init() { diff --git a/drivers/alias/driver.go b/drivers/alias/driver.go index bf83640d..ec4b0844 100644 --- a/drivers/alias/driver.go +++ b/drivers/alias/driver.go @@ -130,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) { func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { path := dir.GetPath() if utils.PathEqual(path, "/") && !d.autoFlatten { - return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough), nil + return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil } root, sub := d.getRootAndPath(path) dsts, ok := d.pathMap[root] @@ -211,9 +211,6 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( if resultLink.ContentLength == 0 { resultLink.ContentLength = fi.GetSize() } - if resultLink.MFile != nil { - return &resultLink, nil - } if d.DownloadConcurrency > 0 { resultLink.Concurrency = d.DownloadConcurrency } diff --git a/drivers/alias/util.go b/drivers/alias/util.go index ac6f9185..80391fda 100644 --- a/drivers/alias/util.go +++ b/drivers/alias/util.go @@ -17,7 +17,7 @@ import ( log "github.com/sirupsen/logrus" ) -func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj { 
+func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj { var objs []model.Obj var wg sync.WaitGroup for _, k := range d.rootOrder { @@ -52,7 +52,7 @@ func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj { defer wg.Done() c, cancel := context.WithTimeout(ctx, time.Second) defer cancel() - details, e := op.GetStorageDetails(c, remoteDriver) + details, e := op.GetStorageDetails(c, remoteDriver, refresh) if e != nil { if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) { log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e) diff --git a/drivers/baidu_photo/meta.go b/drivers/baidu_photo/meta.go index 91997c9d..d144b0c7 100644 --- a/drivers/baidu_photo/meta.go +++ b/drivers/baidu_photo/meta.go @@ -18,8 +18,9 @@ type Addition struct { } var config = driver.Config{ - Name: "BaiduPhoto", - LocalSort: true, + Name: "BaiduPhoto", + LocalSort: true, + LinkCacheType: 2, } func init() { diff --git a/drivers/chaoxing/driver.go b/drivers/chaoxing/driver.go index cb12b29f..ac9ff115 100644 --- a/drivers/chaoxing/driver.go +++ b/drivers/chaoxing/driver.go @@ -10,6 +10,7 @@ import ( "mime/multipart" "net/http" "net/url" + "strconv" "strings" "time" @@ -239,7 +240,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr if err != nil { return err } - err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid)) + err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid)) if err != nil { fmt.Println("Error writing param2 to request body:", err) return err @@ -260,7 +261,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr return err } req.Header.Set("Content-Type", writer.FormDataContentType()) - req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len())) + req.Header.Set("Content-Length", strconv.Itoa(body.Len())) resps, err := http.DefaultClient.Do(req) if err != nil { return err diff --git 
a/drivers/chaoxing/types.go b/drivers/chaoxing/types.go index 9e6e270d..ca171c4d 100644 --- a/drivers/chaoxing/types.go +++ b/drivers/chaoxing/types.go @@ -258,7 +258,7 @@ type UploadDoneParam struct { func fileToObj(f File) *model.Object { if len(f.Content.FolderName) > 0 { return &model.Object{ - ID: fmt.Sprintf("%d", f.ID), + ID: strconv.Itoa(f.ID), Name: f.Content.FolderName, Size: 0, Modified: time.UnixMilli(f.Inserttime), diff --git a/drivers/chaoxing/util.go b/drivers/chaoxing/util.go index 715c248a..7e3067ce 100644 --- a/drivers/chaoxing/util.go +++ b/drivers/chaoxing/util.go @@ -9,6 +9,7 @@ import ( "fmt" "mime/multipart" "net/http" + "strconv" "strings" "github.com/OpenListTeam/OpenList/v4/drivers/base" @@ -172,7 +173,7 @@ func (d *ChaoXing) Login() (string, error) { return "", err } req.Header.Set("Content-Type", writer.FormDataContentType()) - req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len())) + req.Header.Set("Content-Length", strconv.Itoa(body.Len())) resp, err := http.DefaultClient.Do(req) if err != nil { return "", err diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index b00a2ea0..1398ff1c 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -317,7 +317,8 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( } return readSeeker, nil }), - SyncClosers: utils.NewSyncClosers(remoteLink), + SyncClosers: utils.NewSyncClosers(remoteLink), + RequireReference: remoteLink.RequireReference, }, nil } diff --git a/drivers/doubao/util.go b/drivers/doubao/util.go index f45c276f..68660897 100644 --- a/drivers/doubao/util.go +++ b/drivers/doubao/util.go @@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model. 
"Authorization": {storeInfo.Auth}, "Content-Type": {"application/octet-stream"}, "Content-Crc32": {crc32Value}, - "Content-Length": {fmt.Sprintf("%d", file.GetSize())}, + "Content-Length": {strconv.FormatInt(file.GetSize(), 10)}, "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))}, } res, err := base.HttpClient.Do(req) @@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi "Authorization": {storeInfo.Auth}, "Content-Type": {"application/octet-stream"}, "Content-Crc32": {crc32Value}, - "Content-Length": {fmt.Sprintf("%d", size)}, + "Content-Length": {strconv.FormatInt(size, 10)}, "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))}, } res, err := base.HttpClient.Do(req) diff --git a/drivers/febbox/meta.go b/drivers/febbox/meta.go index e449ad00..fdc5931a 100644 --- a/drivers/febbox/meta.go +++ b/drivers/febbox/meta.go @@ -16,9 +16,10 @@ type Addition struct { } var config = driver.Config{ - Name: "FebBox", - NoUpload: true, - DefaultRoot: "0", + Name: "FebBox", + NoUpload: true, + DefaultRoot: "0", + LinkCacheType: 1, } func init() { diff --git a/drivers/ftp/meta.go b/drivers/ftp/meta.go index 8f30776c..0ec0e735 100644 --- a/drivers/ftp/meta.go +++ b/drivers/ftp/meta.go @@ -31,11 +31,11 @@ type Addition struct { } var config = driver.Config{ - Name: "FTP", - LocalSort: true, - OnlyLinkMFile: false, - DefaultRoot: "/", - NoLinkURL: true, + Name: "FTP", + LocalSort: true, + OnlyProxy: true, + DefaultRoot: "/", + NoLinkURL: true, } func init() { diff --git a/drivers/halalcloud_open/halalcloud_upload.go b/drivers/halalcloud_open/halalcloud_upload.go index 5c1b7b99..f5d173f1 100644 --- a/drivers/halalcloud_open/halalcloud_upload.go +++ b/drivers/halalcloud_open/halalcloud_upload.go @@ -141,7 +141,7 @@ func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUs Header: map[string][]string{ "Accept": 
{"application/json"}, "Content-Type": {"application/json"}, - //"Content-Length": {fmt.Sprintf("%d", len(n))}, + //"Content-Length": {strconv.Itoa(len(n))}, }, Body: io.NopCloser(bytes.NewReader(n)), } @@ -238,7 +238,7 @@ func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, prei Header: map[string][]string{ "Accept": {"application/json"}, "Content-Type": {"application/octet-stream"}, - // "Content-Length": {fmt.Sprintf("%d", len(fileSlice))}, + // "Content-Length": {strconv.Itoa(len(fileSlice))}, }, Body: io.NopCloser(bytes.NewReader(fileSlice)), } diff --git a/drivers/local/driver.go b/drivers/local/driver.go index 4fd61676..45badb2e 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -235,6 +235,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) { func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { fullPath := file.GetPath() link := &model.Link{} + var MFile model.File if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" { var buf *bytes.Buffer var thumbPath *string @@ -261,9 +262,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( return nil, err } link.ContentLength = int64(stat.Size()) - link.MFile = open + MFile = open } else { - link.MFile = bytes.NewReader(buf.Bytes()) + MFile = bytes.NewReader(buf.Bytes()) link.ContentLength = int64(buf.Len()) } } else { @@ -272,13 +273,11 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( return nil, err } link.ContentLength = file.GetSize() - link.MFile = open - } - link.AddIfCloser(link.MFile) - if !d.Config().OnlyLinkMFile { - link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile) - link.MFile = nil + MFile = open } + link.SyncClosers.AddIfCloser(MFile) + link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, MFile) + link.RequireReference = link.SyncClosers.Length() > 0 return link, nil 
} diff --git a/drivers/local/meta.go b/drivers/local/meta.go index b16f3ca5..a27e9eec 100644 --- a/drivers/local/meta.go +++ b/drivers/local/meta.go @@ -18,12 +18,12 @@ type Addition struct { } var config = driver.Config{ - Name: "Local", - OnlyLinkMFile: false, - LocalSort: true, - NoCache: true, - DefaultRoot: "/", - NoLinkURL: true, + Name: "Local", + LocalSort: true, + OnlyProxy: true, + NoCache: true, + DefaultRoot: "/", + NoLinkURL: true, } func init() { diff --git a/drivers/mediafire/meta.go b/drivers/mediafire/meta.go index e80b11f1..78a5b9b1 100644 --- a/drivers/mediafire/meta.go +++ b/drivers/mediafire/meta.go @@ -36,7 +36,6 @@ type Addition struct { var config = driver.Config{ Name: "MediaFire", LocalSort: false, - OnlyLinkMFile: false, OnlyProxy: false, NoCache: false, NoUpload: false, diff --git a/drivers/openlist/driver.go b/drivers/openlist/driver.go index b539bb5a..2c064369 100644 --- a/drivers/openlist/driver.go +++ b/drivers/openlist/driver.go @@ -26,6 +26,11 @@ type OpenList struct { } func (d *OpenList) Config() driver.Config { + if d.PassUAToUpsteam { + c := config + c.LinkCacheType = 2 // add User-Agent to cache key + return c + } return config } diff --git a/drivers/sftp/driver.go b/drivers/sftp/driver.go index 17db4038..a3b3a96d 100644 --- a/drivers/sftp/driver.go +++ b/drivers/sftp/driver.go @@ -69,15 +69,10 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* Limiter: stream.ServerDownloadLimit, Ctx: ctx, } - if !d.Config().OnlyLinkMFile { - return &model.Link{ - RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile), - SyncClosers: utils.NewSyncClosers(remoteFile), - }, nil - } return &model.Link{ - MFile: mFile, - SyncClosers: utils.NewSyncClosers(remoteFile), + RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile), + SyncClosers: utils.NewSyncClosers(remoteFile), + RequireReference: true, }, nil } diff --git a/drivers/sftp/meta.go b/drivers/sftp/meta.go index 9dada9ef..1c9bd3e3 
100644 --- a/drivers/sftp/meta.go +++ b/drivers/sftp/meta.go @@ -16,12 +16,12 @@ type Addition struct { } var config = driver.Config{ - Name: "SFTP", - LocalSort: true, - OnlyLinkMFile: false, - DefaultRoot: "/", - CheckStatus: true, - NoLinkURL: true, + Name: "SFTP", + LocalSort: true, + OnlyProxy: true, + DefaultRoot: "/", + CheckStatus: true, + NoLinkURL: true, } func init() { diff --git a/drivers/smb/driver.go b/drivers/smb/driver.go index 3aeffbeb..7d4b0cb9 100644 --- a/drivers/smb/driver.go +++ b/drivers/smb/driver.go @@ -86,15 +86,10 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m Limiter: stream.ServerDownloadLimit, Ctx: ctx, } - if !d.Config().OnlyLinkMFile { - return &model.Link{ - RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile), - SyncClosers: utils.NewSyncClosers(remoteFile), - }, nil - } return &model.Link{ - MFile: mFile, - SyncClosers: utils.NewSyncClosers(remoteFile), + RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile), + SyncClosers: utils.NewSyncClosers(remoteFile), + RequireReference: true, }, nil } diff --git a/drivers/smb/meta.go b/drivers/smb/meta.go index 87a98277..ce0e5d3b 100644 --- a/drivers/smb/meta.go +++ b/drivers/smb/meta.go @@ -14,12 +14,12 @@ type Addition struct { } var config = driver.Config{ - Name: "SMB", - LocalSort: true, - OnlyLinkMFile: false, - DefaultRoot: ".", - NoCache: true, - NoLinkURL: true, + Name: "SMB", + LocalSort: true, + OnlyProxy: true, + DefaultRoot: ".", + NoCache: true, + NoLinkURL: true, } func init() { diff --git a/drivers/strm/driver.go b/drivers/strm/driver.go index 010defa9..e4482cf9 100644 --- a/drivers/strm/driver.go +++ b/drivers/strm/driver.go @@ -12,6 +12,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/fs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/sign" + "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/pkg/utils" 
"github.com/OpenListTeam/OpenList/v4/server/common" ) @@ -156,7 +157,7 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (* if file.GetID() == "strm" { link := d.getLink(ctx, file.GetPath()) return &model.Link{ - MFile: strings.NewReader(link), + RangeReader: stream.GetRangeReaderFromMFile(int64(len(link)), strings.NewReader(link)), }, nil } // ftp,s3 diff --git a/drivers/strm/meta.go b/drivers/strm/meta.go index 0f3e0a99..d3c33164 100644 --- a/drivers/strm/meta.go +++ b/drivers/strm/meta.go @@ -15,14 +15,13 @@ type Addition struct { } var config = driver.Config{ - Name: "Strm", - LocalSort: true, - NoCache: true, - NoUpload: true, - DefaultRoot: "/", - OnlyLinkMFile: true, - OnlyProxy: true, - NoLinkURL: true, + Name: "Strm", + LocalSort: true, + OnlyProxy: true, + NoCache: true, + NoUpload: true, + DefaultRoot: "/", + NoLinkURL: true, } func init() { diff --git a/drivers/teldrive/upload.go b/drivers/teldrive/upload.go index 168d9bef..4f717dc8 100644 --- a/drivers/teldrive/upload.go +++ b/drivers/teldrive/upload.go @@ -164,7 +164,7 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo if err := d.singleUploadRequest(fileId, func(req *resty.Request) { uploadParams := map[string]string{ "partName": func() string { - digits := len(fmt.Sprintf("%d", totalParts)) + digits := len(strconv.Itoa(totalParts)) return file.GetName() + fmt.Sprintf(".%0*d", digits, 1) }(), "partNo": strconv.Itoa(1), @@ -333,7 +333,7 @@ func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task ch err := d.singleUploadRequest(fileId, func(req *resty.Request) { uploadParams := map[string]string{ "partName": func() string { - digits := len(fmt.Sprintf("%d", totalParts)) + digits := len(strconv.Itoa(totalParts)) return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx) }(), "partNo": strconv.Itoa(task.chunkIdx), diff --git a/drivers/template/meta.go b/drivers/template/meta.go index a546e676..f525b5a9 100644 --- 
a/drivers/template/meta.go +++ b/drivers/template/meta.go @@ -16,7 +16,6 @@ type Addition struct { var config = driver.Config{ Name: "Template", LocalSort: false, - OnlyLinkMFile: false, OnlyProxy: false, NoCache: false, NoUpload: false, diff --git a/drivers/thunder_browser/driver.go b/drivers/thunder_browser/driver.go index bf1843a4..8e0c6e1b 100644 --- a/drivers/thunder_browser/driver.go +++ b/drivers/thunder_browser/driver.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "strconv" "strings" "time" @@ -840,7 +841,7 @@ func (xc *XunLeiBrowserCommon) OfflineList(ctx context.Context, nextPageToken st func (xc *XunLeiBrowserCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string) error { queryParams := map[string]string{ "task_ids": strings.Join(taskIDs, ","), - "_t": fmt.Sprintf("%d", time.Now().UnixMilli()), + "_t": strconv.FormatInt(time.Now().UnixMilli(), 10), } if xc.UseFluentPlay { queryParams["space"] = ThunderBrowserDriveFluentPlayFolderType diff --git a/drivers/virtual/driver.go b/drivers/virtual/driver.go index 1d14427c..6e6bd087 100644 --- a/drivers/virtual/driver.go +++ b/drivers/virtual/driver.go @@ -2,11 +2,11 @@ package virtual import ( "context" - "io" "time" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/pkg/utils/random" ) @@ -42,16 +42,14 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs) return res, nil } -type DummyMFile struct { - io.Reader -} +type DummyMFile struct{} func (f DummyMFile) Read(p []byte) (n int, err error) { - return f.Reader.Read(p) + return random.Rand.Read(p) } func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) { - return f.Reader.Read(p) + return random.Rand.Read(p) } func (DummyMFile) Seek(offset int64, whence int) (int64, error) { @@ -60,7 +58,7 @@ func (DummyMFile) Seek(offset int64, whence int) (int64, error) { 
func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { return &model.Link{ - MFile: DummyMFile{Reader: random.Rand}, + RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), DummyMFile{}), }, nil } diff --git a/drivers/virtual/meta.go b/drivers/virtual/meta.go index f567830a..c270ec12 100644 --- a/drivers/virtual/meta.go +++ b/drivers/virtual/meta.go @@ -14,11 +14,11 @@ type Addition struct { } var config = driver.Config{ - Name: "Virtual", - OnlyLinkMFile: true, - LocalSort: true, - NeedMs: true, - NoLinkURL: true, + Name: "Virtual", + LocalSort: true, + OnlyProxy: true, + NeedMs: true, + NoLinkURL: true, } func init() { diff --git a/internal/cache/keyed_cache.go b/internal/cache/keyed_cache.go new file mode 100644 index 00000000..87e71f18 --- /dev/null +++ b/internal/cache/keyed_cache.go @@ -0,0 +1,101 @@ +package cache + +import ( + "sync" + "time" +) + +type KeyedCache[T any] struct { + entries map[string]*CacheEntry[T] + mu sync.RWMutex + ttl time.Duration +} + +func NewKeyedCache[T any](ttl time.Duration) *KeyedCache[T] { + c := &KeyedCache[T]{ + entries: make(map[string]*CacheEntry[T]), + ttl: ttl, + } + gcFuncs = append(gcFuncs, c.GC) + return c +} + +func (c *KeyedCache[T]) Set(key string, value T) { + c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(c.ttl))) +} + +func (c *KeyedCache[T]) SetWithTTL(key string, value T, ttl time.Duration) { + c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(ttl))) +} + +func (c *KeyedCache[T]) SetWithExpirable(key string, value T, exp Expirable) { + c.mu.Lock() + defer c.mu.Unlock() + + c.entries[key] = &CacheEntry[T]{ + data: value, + Expirable: exp, + } +} + +func (c *KeyedCache[T]) Get(key string) (T, bool) { + c.mu.RLock() + entry, exists := c.entries[key] + if !exists { + c.mu.RUnlock() + return *new(T), false + } + + expired := entry.Expired() + c.mu.RUnlock() + + if !expired { + return entry.data, true + } + + c.mu.Lock() + if 
c.entries[key] == entry { + delete(c.entries, key) + c.mu.Unlock() + return *new(T), false + } + c.mu.Unlock() + return *new(T), false +} + +func (c *KeyedCache[T]) Delete(key string) { + c.mu.Lock() + defer c.mu.Unlock() + + delete(c.entries, key) +} + +func (c *KeyedCache[T]) Take(key string) (T, bool) { + c.mu.Lock() + defer c.mu.Unlock() + if entry, exists := c.entries[key]; exists { + delete(c.entries, key) + return entry.data, true + } + return *new(T), false +} + +func (c *KeyedCache[T]) Clear() { + c.mu.Lock() + defer c.mu.Unlock() + c.entries = make(map[string]*CacheEntry[T]) +} + +func (c *KeyedCache[T]) GC() { + c.mu.Lock() + defer c.mu.Unlock() + expiredKeys := make([]string, 0, len(c.entries)) + for key, entry := range c.entries { + if entry.Expired() { + expiredKeys = append(expiredKeys, key) + } + } + for _, key := range expiredKeys { + delete(c.entries, key) + } +} diff --git a/internal/cache/type.go b/internal/cache/type.go new file mode 100644 index 00000000..4eccba52 --- /dev/null +++ b/internal/cache/type.go @@ -0,0 +1,18 @@ +package cache + +import "time" + +type Expirable interface { + Expired() bool +} + +type ExpirationTime time.Time + +func (e ExpirationTime) Expired() bool { + return time.Now().After(time.Time(e)) +} + +type CacheEntry[T any] struct { + Expirable + data T +} diff --git a/internal/cache/typed_cache.go b/internal/cache/typed_cache.go new file mode 100644 index 00000000..3277a782 --- /dev/null +++ b/internal/cache/typed_cache.go @@ -0,0 +1,122 @@ +package cache + +import ( + "sync" + "time" +) + +type TypedCache[T any] struct { + entries map[string]map[string]*CacheEntry[T] + mu sync.RWMutex + ttl time.Duration +} + +func NewTypedCache[T any](ttl time.Duration) *TypedCache[T] { + c := &TypedCache[T]{ + entries: make(map[string]map[string]*CacheEntry[T]), + ttl: ttl, + } + gcFuncs = append(gcFuncs, c.GC) + return c +} + +func (c *TypedCache[T]) SetType(key, typeKey string, value T) { + c.SetTypeWithExpirable(key, typeKey, 
value, ExpirationTime(time.Now().Add(c.ttl))) +} + +func (c *TypedCache[T]) SetTypeWithTTL(key, typeKey string, value T, ttl time.Duration) { + c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(time.Now().Add(ttl))) +} + +func (c *TypedCache[T]) SetTypeWithExpirable(key, typeKey string, value T, exp Expirable) { + c.mu.Lock() + defer c.mu.Unlock() + cache, exists := c.entries[key] + if !exists { + cache = make(map[string]*CacheEntry[T]) + c.entries[key] = cache + } + + cache[typeKey] = &CacheEntry[T]{ + data: value, + Expirable: exp, + } +} + +// Prefer to use typeKeys for lookup; if none match, use fallbackTypeKey for lookup +func (c *TypedCache[T]) GetType(key, fallbackTypeKey string, typeKeys ...string) (T, bool) { + c.mu.RLock() + cache, exists := c.entries[key] + if !exists { + c.mu.RUnlock() + return *new(T), false + } + entry, exists := cache[fallbackTypeKey] + if len(typeKeys) > 0 { + for _, tk := range typeKeys { + if entry, exists = cache[tk]; exists { + fallbackTypeKey = tk + break + } + } + } + if !exists { + c.mu.RUnlock() + return *new(T), false + } + expired := entry.Expired() + c.mu.RUnlock() + + if !expired { + return entry.data, true + } + + c.mu.Lock() + if cache[fallbackTypeKey] == entry { + delete(cache, fallbackTypeKey) + if len(cache) == 0 { + delete(c.entries, key) + } + c.mu.Unlock() + return *new(T), false + } + c.mu.Unlock() + return *new(T), false +} + +func (c *TypedCache[T]) DeleteKey(key string) { + c.mu.Lock() + defer c.mu.Unlock() + delete(c.entries, key) +} + +func (c *TypedCache[T]) Clear() { + c.mu.Lock() + defer c.mu.Unlock() + c.entries = make(map[string]map[string]*CacheEntry[T]) +} + +func (c *TypedCache[T]) GC() { + c.mu.Lock() + defer c.mu.Unlock() + expiredKeys := make(map[string][]string) + for tk, entries := range c.entries { + for key, entry := range entries { + if !entry.Expired() { + continue + } + if _, ok := expiredKeys[tk]; !ok { + expiredKeys[tk] = make([]string, 0, len(entries)) + } + expiredKeys[tk] = 
append(expiredKeys[tk], key) + } + } + for tk, keys := range expiredKeys { + for _, key := range keys { + delete(c.entries[tk], key) + } + if len(c.entries[tk]) == 0 { + delete(c.entries, tk) + } + } +} diff --git a/internal/cache/utils.go b/internal/cache/utils.go new file mode 100644 index 00000000..82b7cf85 --- /dev/null +++ b/internal/cache/utils.go @@ -0,0 +1,24 @@ +package cache + +import ( + "time" + + "github.com/OpenListTeam/OpenList/v4/pkg/cron" + log "github.com/sirupsen/logrus" +) + +var ( + cacheGcCron *cron.Cron + gcFuncs []func() +) + +func init() { + // TODO Move to bootstrap + cacheGcCron = cron.NewCron(time.Hour) + cacheGcCron.Do(func() { + log.Infof("Start cache GC") + for _, f := range gcFuncs { + f() + } + }) +} diff --git a/internal/driver/config.go b/internal/driver/config.go index bec6d47b..7db34512 100644 --- a/internal/driver/config.go +++ b/internal/driver/config.go @@ -3,11 +3,9 @@ package driver type Config struct { Name string `json:"name"` LocalSort bool `json:"local_sort"` - // if the driver returns Link with MFile, this should be set to true - OnlyLinkMFile bool `json:"only_local"` - OnlyProxy bool `json:"only_proxy"` - NoCache bool `json:"no_cache"` - NoUpload bool `json:"no_upload"` + OnlyProxy bool `json:"only_proxy"` + NoCache bool `json:"no_cache"` + NoUpload bool `json:"no_upload"` // if need get message from user, such as validate code NeedMs bool `json:"need_ms"` DefaultRoot string `json:"default_root"` @@ -19,8 +17,12 @@ type Config struct { ProxyRangeOption bool `json:"-"` // if the driver returns Link without URL, this should be set to true NoLinkURL bool `json:"-"` + // LinkCacheType=1 add IP to cache key + // + // LinkCacheType=2 add UserAgent to cache key + LinkCacheType uint8 `json:"-"` } func (c Config) MustProxy() bool { - return c.OnlyProxy || c.OnlyLinkMFile || c.NoLinkURL + return c.OnlyProxy || c.NoLinkURL } diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 4a8e8825..7521e8d7 100644 --- 
a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -47,11 +47,6 @@ type Getter interface { Get(ctx context.Context, path string) (model.Obj, error) } -type GetObjInfo interface { - // GetObjInfo get file info by path - GetObjInfo(ctx context.Context, path string) (model.Obj, error) -} - //type Writer interface { // Mkdir // Move diff --git a/internal/errs/unwrap.go b/internal/errs/unwrap.go index 1cec9651..f34953c7 100644 --- a/internal/errs/unwrap.go +++ b/internal/errs/unwrap.go @@ -1,12 +1,11 @@ package errs -import "errors" - func UnwrapOrSelf(err error) error { - // errors.Unwrap has no fallback mechanism - unwrapped := errors.Unwrap(err) - if unwrapped == nil { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { return err } - return unwrapped + return u.Unwrap() } diff --git a/internal/fs/copy_move.go b/internal/fs/copy_move.go index d8ecf98c..e9b70856 100644 --- a/internal/fs/copy_move.go +++ b/internal/fs/copy_move.go @@ -152,7 +152,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str if taskType == move { task_group.RefreshAndRemove(dstDirPath, task_group.SrcPathToRemove(srcObjPath)) } else { - op.DeleteCache(t.DstStorage, dstDirActualPath) + op.Cache.DeleteDirectory(t.DstStorage, dstDirActualPath) } } return nil, err @@ -186,7 +186,7 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName()) if t.TaskType == copy { if t.Ctx().Value(conf.NoTaskKey) != nil { - defer op.DeleteCache(t.DstStorage, dstActualPath) + defer op.Cache.DeleteDirectory(t.DstStorage, dstActualPath) } else { task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath)) } diff --git a/internal/fs/get.go b/internal/fs/get.go index 4e91c5bd..459282ce 100644 --- a/internal/fs/get.go +++ b/internal/fs/get.go @@ -15,7 +15,7 @@ func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) { path = 
utils.FixAndCleanPath(path) // maybe a virtual file if path != "/" { - virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails) + virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails, false) for _, f := range virtualFiles { if f.GetName() == stdpath.Base(path) { return f, nil diff --git a/internal/fs/list.go b/internal/fs/list.go index 5245795f..fc3b25ab 100644 --- a/internal/fs/list.go +++ b/internal/fs/list.go @@ -15,7 +15,7 @@ import ( func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) { meta, _ := ctx.Value(conf.MetaKey).(*model.Meta) user, _ := ctx.Value(conf.UserKey).(*model.User) - virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails) + virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails, args.Refresh) storage, actualPath, err := op.GetStorageAndActualPath(path) if err != nil && len(virtualFiles) == 0 { return nil, errors.WithMessage(err, "failed get storage") diff --git a/internal/model/args.go b/internal/model/args.go index 2ec95f14..3fd5b9e3 100644 --- a/internal/model/args.go +++ b/internal/model/args.go @@ -28,7 +28,6 @@ type Link struct { URL string `json:"url"` // most common way Header http.Header `json:"header"` // needed header (for url) RangeReader RangeReaderIF `json:"-"` // recommended way if can't use URL - MFile File `json:"-"` // best for local,smb... 
file system, which exposes MFile Expiration *time.Duration // local cache expire Duration @@ -38,6 +37,8 @@ type Link struct { ContentLength int64 `json:"-"` // 转码视频、缩略图 utils.SyncClosers `json:"-"` + // 如果SyncClosers中的资源被关闭后Link将不可用,则此值应为 true + RequireReference bool `json:"-"` } type OtherArgs struct { diff --git a/internal/net/request.go b/internal/net/request.go index 1306bc54..8d380ea4 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -12,6 +12,7 @@ import ( "time" "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/rclone/rclone/lib/mmap" @@ -403,7 +404,7 @@ var errInfiniteRetry = errors.New("infinite retry") func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { resp, err := d.cfg.HttpClient(d.ctx, params) if err != nil { - statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError) + statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError) if !ok { return 0, err } diff --git a/internal/op/archive.go b/internal/op/archive.go index ae920279..50e05141 100644 --- a/internal/op/archive.go +++ b/internal/op/archive.go @@ -10,6 +10,7 @@ import ( "time" "github.com/OpenListTeam/OpenList/v4/internal/archive/tool" + "github.com/OpenListTeam/OpenList/v4/internal/cache" "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/driver" @@ -17,12 +18,12 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/utils" - "github.com/OpenListTeam/go-cache" + gocache "github.com/OpenListTeam/go-cache" "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) -var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64)) +var archiveMetaCache = 
gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64)) var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider] func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { @@ -37,14 +38,14 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err) } if m.Expiration != nil { - archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration)) + archiveMetaCache.Set(key, m, gocache.WithEx[*model.ArchiveMetaProvider](*m.Expiration)) } return m, nil } - if storage.Config().OnlyLinkMFile { - meta, err := fn() - return meta, err - } + // if storage.Config().NoLinkSingleflight { + // meta, err := fn() + // return meta, err + // } if !args.Refresh { if meta, ok := archiveMetaCache.Get(key); ok { log.Debugf("use cache when get %s archive meta", path) @@ -158,7 +159,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg return obj, archiveMetaProvider, err } -var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64)) +var archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64)) var archiveListG singleflight.Group[[]model.Obj] func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) { @@ -199,7 +200,7 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m if !storage.Config().NoCache { if len(files) > 0 { log.Debugf("set cache: %s => %+v", key, files) - archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) + archiveListCache.Set(key, files, gocache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) } else { log.Debugf("del cache: %s", key) archiveListCache.Del(key) @@ -354,75 +355,50 @@ 
func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args mo return nil, nil, errors.WithStack(errs.ObjectNotFound) } -type extractLink struct { - *model.Link - Obj model.Obj +type objWithLink struct { + link *model.Link + obj model.Obj } -var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16)) -var extractG = singleflight.Group[*extractLink]{Remember: true} +var extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute) +var extractG = singleflight.Group[*objWithLink]{} func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) } key := stdpath.Join(Key(storage, path), args.InnerPath) - if link, ok := extractCache.Get(key); ok { - return link.Link, link.Obj, nil + if ol, ok := extractCache.Get(key); ok { + if ol.link.Expiration != nil || ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference { + return ol.link, ol.obj, nil + } } - var forget any - var linkM *extractLink - fn := func() (*extractLink, error) { - link, err := driverExtract(ctx, storage, path, args) + fn := func() (*objWithLink, error) { + ol, err := driverExtract(ctx, storage, path, args) if err != nil { return nil, errors.Wrapf(err, "failed extract archive") } - if link.MFile != nil && forget != nil { - linkM = link - return nil, errLinkMFileCache + if ol.link.Expiration != nil { + extractCache.SetWithTTL(key, ol, *ol.link.Expiration) + } else { + extractCache.SetWithExpirable(key, ol, &ol.link.SyncClosers) } - if link.Link.Expiration != nil { - extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration)) - } - link.AddIfCloser(forget) - return link, nil + return ol, nil } - if storage.Config().OnlyLinkMFile { - link, err := fn() + for { + ol, err, _ := 
extractG.Do(key, fn) if err != nil { return nil, nil, err } - return link.Link, link.Obj, nil - } - - forget = utils.CloseFunc(func() error { - if forget != nil { - forget = nil - linkG.Forget(key) + if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference { + return ol.link, ol.obj, nil } - return nil - }) - link, err, _ := extractG.Do(key, fn) - for err == nil && !link.AcquireReference() { - link, err, _ = extractG.Do(key, fn) } - if err == errLinkMFileCache { - if linkM != nil { - return linkM.Link, linkM.Obj, nil - } - forget = nil - link, err = fn() - } - - if err != nil { - return nil, nil, err - } - return link.Link, link.Obj, nil } -func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) { +func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*objWithLink, error) { storageAr, ok := storage.(driver.ArchiveReader) if !ok { return nil, errs.DriverExtractNotSupported @@ -438,7 +414,7 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args return nil, errors.WithStack(errs.NotFile) } link, err := storageAr.Extract(ctx, archiveFile, args) - return &extractLink{Link: link, Obj: extracted}, err + return &objWithLink{link: link, obj: extracted}, err } type streamWithParent struct { @@ -500,16 +476,16 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD if err == nil { if len(newObjs) > 0 { for _, newObj := range newObjs { - addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj)) } } else if !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + Cache.DeleteDirectory(storage, dstDirPath) } } case driver.ArchiveDecompress: err = s.ArchiveDecompress(ctx, srcObj, dstDir, args) if err == nil && !utils.IsBool(lazyCache...) 
{ - DeleteCache(storage, dstDirPath) + Cache.DeleteDirectory(storage, dstDirPath) } default: return errs.NotImplement diff --git a/internal/op/cache.go b/internal/op/cache.go new file mode 100644 index 00000000..52e430b1 --- /dev/null +++ b/internal/op/cache.go @@ -0,0 +1,257 @@ +package op + +import ( + stdpath "path" + "sync" + "time" + + "github.com/OpenListTeam/OpenList/v4/internal/cache" + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/model" +) + +type CacheManager struct { + dirCache *cache.KeyedCache[*directoryCache] // Cache for directory listings + linkCache *cache.TypedCache[*objWithLink] // Cache for file links + userCache *cache.KeyedCache[*model.User] // Cache for user data + settingCache *cache.KeyedCache[any] // Cache for settings + detailCache *cache.KeyedCache[*model.StorageDetails] // Cache for storage details +} + +func NewCacheManager() *CacheManager { + return &CacheManager{ + dirCache: cache.NewKeyedCache[*directoryCache](time.Minute * 5), + linkCache: cache.NewTypedCache[*objWithLink](time.Minute * 30), + userCache: cache.NewKeyedCache[*model.User](time.Hour), + settingCache: cache.NewKeyedCache[any](time.Hour), + detailCache: cache.NewKeyedCache[*model.StorageDetails](time.Minute * 30), + } +} + +// global instance +var Cache = NewCacheManager() + +func Key(storage driver.Driver, path string) string { + return stdpath.Join(storage.GetStorage().MountPath, path) +} + +// update object in dirCache. +// if it's a directory, remove all its children from dirCache too. +// if it's a file, remove its link from linkCache. 
+func (cm *CacheManager) updateDirectoryObject(storage driver.Driver, dirPath string, oldObj model.Obj, newObj model.Obj) { + key := Key(storage, dirPath) + if !oldObj.IsDir() { + cm.linkCache.DeleteKey(stdpath.Join(key, oldObj.GetName())) + cm.linkCache.DeleteKey(stdpath.Join(key, newObj.GetName())) + } + if storage.Config().NoCache { + return + } + + if cache, exist := cm.dirCache.Get(key); exist { + if oldObj.IsDir() { + cm.deleteDirectoryTree(stdpath.Join(key, oldObj.GetName())) + } + cache.UpdateObject(oldObj.GetName(), newObj) + } +} + +// add new object to dirCache +func (cm *CacheManager) addDirectoryObject(storage driver.Driver, dirPath string, newObj model.Obj) { + if storage.Config().NoCache { + return + } + cache, exist := cm.dirCache.Get(Key(storage, dirPath)) + if exist { + cache.UpdateObject(newObj.GetName(), newObj) + } +} + +// recursively delete directory and its children from dirCache +func (cm *CacheManager) DeleteDirectoryTree(storage driver.Driver, dirPath string) { + if storage.Config().NoCache { + return + } + cm.deleteDirectoryTree(Key(storage, dirPath)) +} +func (cm *CacheManager) deleteDirectoryTree(key string) { + if dirCache, exists := cm.dirCache.Take(key); exists { + for _, obj := range dirCache.objs { + if obj.IsDir() { + cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName())) + } + } + } +} + +// remove directory from dirCache +func (cm *CacheManager) DeleteDirectory(storage driver.Driver, dirPath string) { + if storage.Config().NoCache { + return + } + cm.dirCache.Delete(Key(storage, dirPath)) +} + +// remove object from dirCache. +// if it's a directory, remove all its children from dirCache too. +// if it's a file, remove its link from linkCache. 
+func (cm *CacheManager) removeDirectoryObject(storage driver.Driver, dirPath string, obj model.Obj) { + key := Key(storage, dirPath) + if !obj.IsDir() { + cm.linkCache.DeleteKey(stdpath.Join(key, obj.GetName())) + } + + if storage.Config().NoCache { + return + } + if cache, exist := cm.dirCache.Get(key); exist { + if obj.IsDir() { + cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName())) + } + cache.RemoveObject(obj.GetName()) + } +} + +// cache user data +func (cm *CacheManager) SetUser(username string, user *model.User) { + cm.userCache.Set(username, user) +} + +// cached user data +func (cm *CacheManager) GetUser(username string) (*model.User, bool) { + return cm.userCache.Get(username) +} + +// remove user data from cache +func (cm *CacheManager) DeleteUser(username string) { + cm.userCache.Delete(username) +} + +// caches setting +func (cm *CacheManager) SetSetting(key string, setting *model.SettingItem) { + cm.settingCache.Set(key, setting) +} + +// cached setting +func (cm *CacheManager) GetSetting(key string) (*model.SettingItem, bool) { + if data, exists := cm.settingCache.Get(key); exists { + if setting, ok := data.(*model.SettingItem); ok { + return setting, true + } + } + return nil, false +} + +// cache setting groups +func (cm *CacheManager) SetSettingGroup(key string, settings []model.SettingItem) { + cm.settingCache.Set(key, settings) +} + +// cached setting group +func (cm *CacheManager) GetSettingGroup(key string) ([]model.SettingItem, bool) { + if data, exists := cm.settingCache.Get(key); exists { + if settings, ok := data.([]model.SettingItem); ok { + return settings, true + } + } + return nil, false +} + +func (cm *CacheManager) SetStorageDetails(storage driver.Driver, details *model.StorageDetails) { + if storage.Config().NoCache { + return + } + expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration) + cm.detailCache.SetWithTTL(storage.GetStorage().MountPath, details, expiration) +} + +func (cm *CacheManager) 
GetStorageDetails(storage driver.Driver) (*model.StorageDetails, bool) { + return cm.detailCache.Get(storage.GetStorage().MountPath) +} + +func (cm *CacheManager) InvalidateStorageDetails(storage driver.Driver) { + cm.detailCache.Delete(storage.GetStorage().MountPath) +} + +// clears all caches +func (cm *CacheManager) ClearAll() { + cm.dirCache.Clear() + cm.linkCache.Clear() + cm.userCache.Clear() + cm.settingCache.Clear() + cm.detailCache.Clear() +} + +type directoryCache struct { + objs []model.Obj + sorted []model.Obj + mu sync.RWMutex + + dirtyFlags uint8 +} + +const ( + dirtyRemove uint8 = 1 << iota // 对象删除:刷新 sorted 副本,但不需要 full sort/extract + dirtyUpdate // 对象更新:需要执行 full sort + extract +) + +func newDirectoryCache(objs []model.Obj) *directoryCache { + sorted := make([]model.Obj, len(objs)) + copy(sorted, objs) + return &directoryCache{ + objs: objs, + sorted: sorted, + } +} + +func (dc *directoryCache) RemoveObject(name string) { + dc.mu.Lock() + defer dc.mu.Unlock() + for i, obj := range dc.objs { + if obj.GetName() == name { + dc.objs = append(dc.objs[:i], dc.objs[i+1:]...) 
+ dc.dirtyFlags |= dirtyRemove + break + } + } +} + +func (dc *directoryCache) UpdateObject(oldName string, newObj model.Obj) { + dc.mu.Lock() + defer dc.mu.Unlock() + if oldName != "" { + for i, obj := range dc.objs { + if obj.GetName() == oldName { + dc.objs[i] = newObj + dc.dirtyFlags |= dirtyUpdate + return + } + } + } + dc.objs = append(dc.objs, newObj) + dc.dirtyFlags |= dirtyUpdate +} + +func (dc *directoryCache) GetSortedObjects(meta driver.Meta) []model.Obj { + dc.mu.RLock() + if dc.dirtyFlags == 0 { + dc.mu.RUnlock() + return dc.sorted + } + dc.mu.RUnlock() + dc.mu.Lock() + defer dc.mu.Unlock() + + sorted := make([]model.Obj, len(dc.objs)) + copy(sorted, dc.objs) + dc.sorted = sorted + if dc.dirtyFlags&dirtyUpdate != 0 { + storage := meta.GetStorage() + if meta.Config().LocalSort { + model.SortFiles(sorted, storage.OrderBy, storage.OrderDirection) + } + model.ExtractFolder(sorted, storage.ExtractFolder) + } + dc.dirtyFlags = 0 + return sorted +} diff --git a/internal/op/fs.go b/internal/op/fs.go index ac867103..1624ee3c 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -4,115 +4,20 @@ import ( "context" stderrors "errors" stdpath "path" - "slices" - "strings" "time" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/stream" - "github.com/OpenListTeam/OpenList/v4/pkg/generic_sync" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/utils" - "github.com/OpenListTeam/go-cache" "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) -// In order to facilitate adding some other things before and after file op - -var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64)) var listG singleflight.Group[[]model.Obj] -func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) { - key := Key(storage, path) - objs, ok := 
listCache.Get(key) - if ok { - for i, obj := range objs { - if obj.GetName() == newObj.GetName() { - objs = slices.Delete(objs, i, i+1) - break - } - } - for i, obj := range objs { - if obj.GetName() == oldObj.GetName() { - objs[i] = newObj - break - } - } - listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) - } -} - -func delCacheObj(storage driver.Driver, path string, obj model.Obj) { - key := Key(storage, path) - objs, ok := listCache.Get(key) - if ok { - for i, oldObj := range objs { - if oldObj.GetName() == obj.GetName() { - objs = append(objs[:i], objs[i+1:]...) - break - } - } - listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) - } -} - -var addSortDebounceMap generic_sync.MapOf[string, func(func())] - -func addCacheObj(storage driver.Driver, path string, newObj model.Obj) { - key := Key(storage, path) - objs, ok := listCache.Get(key) - if ok { - for i, obj := range objs { - if obj.GetName() == newObj.GetName() { - objs[i] = newObj - return - } - } - - // Simple separation of files and folders - if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() { - objs = append(objs, newObj) - } else { - objs = append([]model.Obj{newObj}, objs...) 
- } - - if storage.Config().LocalSort { - debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute)) - log.Debug("addCacheObj: wait start sort") - debounce(func() { - log.Debug("addCacheObj: start sort") - model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection) - addSortDebounceMap.Delete(key) - }) - } - - listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) - } -} - -func ClearCache(storage driver.Driver, path string) { - objs, ok := listCache.Get(Key(storage, path)) - if ok { - for _, obj := range objs { - if obj.IsDir() { - ClearCache(storage, stdpath.Join(path, obj.GetName())) - } - } - } - listCache.Del(Key(storage, path)) -} - -func DeleteCache(storage driver.Driver, path string) { - listCache.Del(Key(storage, path)) -} - -func Key(storage driver.Driver, path string) string { - return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path)) -} - // List files in storage, not contains virtual file func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs) ([]model.Obj, error) { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { @@ -122,11 +27,12 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li log.Debugf("op.List %s", path) key := Key(storage, path) if !args.Refresh { - if files, ok := listCache.Get(key); ok { + if dirCache, exists := Cache.dirCache.Get(key); exists { log.Debugf("use cache when list %s", path) - return files, nil + return dirCache.GetSortedObjects(storage), nil } } + dir, err := GetUnwrap(ctx, storage, path) if err != nil { return nil, errors.WithMessage(err, "failed get dir") @@ -135,6 +41,7 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li if !dir.IsDir() { return nil, errors.WithStack(errs.NotFolder) } + objs, err, _ := listG.Do(key, func() ([]model.Obj, error) { files, err := 
storage.List(ctx, dir, args) if err != nil { @@ -162,10 +69,11 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li if !storage.Config().NoCache { if len(files) > 0 { log.Debugf("set cache: %s => %+v", key, files) - listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) + ttl := time.Minute * time.Duration(storage.GetStorage().CacheExpiration) + Cache.dirCache.SetWithTTL(key, newDirectoryCache(files), ttl) } else { log.Debugf("del cache: %s", key) - listCache.Del(key) + Cache.deleteDirectoryTree(key) } } return files, nil @@ -252,100 +160,72 @@ func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.O return model.UnwrapObj(obj), err } -var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16)) -var linkG = singleflight.Group[*model.Link]{Remember: true} -var errLinkMFileCache = stderrors.New("ErrLinkMFileCache") +var linkG = singleflight.Group[*objWithLink]{} // Link get link, if is an url. 
should have an expiry time func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) } - var ( - file model.Obj - err error - ) - // use cache directly - dir, name := stdpath.Split(stdpath.Join(storage.GetStorage().MountPath, path)) - if cacheFiles, ok := listCache.Get(strings.TrimSuffix(dir, "/")); ok { - for _, f := range cacheFiles { - if f.GetName() == name { - file = model.UnwrapObj(f) - break - } + + typeKey := args.Type + var typeKeys []string + switch storage.Config().LinkCacheType { + case 1: + if args.IP != "" { + typeKey += "/" + args.IP + typeKeys = []string{typeKey} } - } else { - if g, ok := storage.(driver.GetObjInfo); ok { - file, err = g.GetObjInfo(ctx, path) - } else { - file, err = GetUnwrap(ctx, storage, path) + case 2: + if ua := args.Header.Get("User-Agent"); ua != "" { + typeKey += "/" + ua + typeKeys = []string{typeKey} } } - if file == nil { + + key := Key(storage, path) + if ol, exists := Cache.linkCache.GetType(key, args.Type, typeKeys...); exists { + if ol.link.Expiration != nil || + ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference { + return ol.link, ol.obj, nil + } + } + + fn := func() (*objWithLink, error) { + file, err := GetUnwrap(ctx, storage, path) if err != nil { - return nil, nil, errors.WithMessage(err, "failed to get file") + return nil, errors.WithMessage(err, "failed to get file") + } + if file.IsDir() { + return nil, errors.WithStack(errs.NotFile) } - return nil, nil, errors.WithStack(errs.ObjectNotFound) - } - if file.IsDir() { - return nil, nil, errors.WithStack(errs.NotFile) - } - key := stdpath.Join(Key(storage, path), args.Type) - if link, ok := linkCache.Get(key); ok { - return link, file, nil - } - - var forget any - var linkM *model.Link - fn := func() 
(*model.Link, error) { link, err := storage.Link(ctx, file, args) if err != nil { return nil, errors.Wrapf(err, "failed get link") } - if link.MFile != nil && forget != nil { - linkM = link - return nil, errLinkMFileCache - } + ol := &objWithLink{link: link, obj: file} if link.Expiration != nil { - linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration)) + Cache.linkCache.SetTypeWithTTL(key, typeKey, ol, *link.Expiration) + } else { + Cache.linkCache.SetTypeWithExpirable(key, typeKey, ol, &link.SyncClosers) } - link.AddIfCloser(forget) - return link, nil + return ol, nil } - - if storage.Config().OnlyLinkMFile { - link, err := fn() + retry := 0 + for { + ol, err, _ := linkG.Do(key+"/"+typeKey, fn) if err != nil { return nil, nil, err } - return link, file, err - } - - forget = utils.CloseFunc(func() error { - if forget != nil { - forget = nil - linkG.Forget(key) + if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference { + if retry > 1 { + log.Warnf("Link retry succeeded after %d times: %s %s", retry, key, typeKey) + } + return ol.link, ol.obj, nil } - return nil - }) - link, err, _ := linkG.Do(key, fn) - for err == nil && !link.AcquireReference() { - link, err, _ = linkG.Do(key, fn) + retry++ } - - if err == errLinkMFileCache { - if linkM != nil { - return linkM, file, nil - } - forget = nil - link, err = fn() - } - - if err != nil { - return nil, nil, err - } - return link, file, nil } // Other api @@ -365,7 +245,7 @@ func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) ( } } -var mkdirG singleflight.Group[interface{}] +var mkdirG singleflight.Group[any] func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { @@ -373,7 +253,7 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache } path = utils.FixAndCleanPath(path) key := Key(storage, path) - _, err, _ := mkdirG.Do(key,
func() (interface{}, error) { + _, err, _ := mkdirG.Do(key, func() (any, error) { // check if dir exists f, err := GetUnwrap(ctx, storage, path) if err != nil { @@ -395,15 +275,19 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache newObj, err = s.MakeDir(ctx, parentDir, dirName) if err == nil { if newObj != nil { - addCacheObj(storage, parentPath, model.WrapObjName(newObj)) + if !storage.Config().NoCache { + if dirCache, exist := Cache.dirCache.Get(Key(storage, parentPath)); exist { + dirCache.UpdateObject("", newObj) + } + } } else if !utils.IsBool(lazyCache...) { - DeleteCache(storage, parentPath) + Cache.DeleteDirectory(storage, parentPath) } } case driver.Mkdir: err = s.MakeDir(ctx, parentDir, dirName) if err == nil && !utils.IsBool(lazyCache...) { - DeleteCache(storage, parentPath) + Cache.DeleteDirectory(storage, parentPath) } default: return nil, errs.NotImplement @@ -427,7 +311,11 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) } srcPath = utils.FixAndCleanPath(srcPath) + srcDirPath := stdpath.Dir(srcPath) dstDirPath = utils.FixAndCleanPath(dstDirPath) + if dstDirPath == srcDirPath { + return stderrors.New("move in place") + } srcRawObj, err := Get(ctx, storage, srcPath) if err != nil { return errors.WithMessage(err, "failed to get src object") @@ -437,26 +325,25 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string if err != nil { return errors.WithMessage(err, "failed to get dst dir") } - srcDirPath := stdpath.Dir(srcPath) switch s := storage.(type) { case driver.MoveResult: var newObj model.Obj newObj, err = s.Move(ctx, srcObj, dstDir) if err == nil { - delCacheObj(storage, srcDirPath, srcRawObj) + Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj) if newObj != nil { - addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + 
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj)) } else if !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + Cache.DeleteDirectory(storage, dstDirPath) } } case driver.Move: err = s.Move(ctx, srcObj, dstDir) if err == nil { - delCacheObj(storage, srcDirPath, srcRawObj) + Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj) if !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + Cache.DeleteDirectory(storage, dstDirPath) } } default: @@ -475,28 +362,29 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string, return errors.WithMessage(err, "failed to get src object") } srcObj := model.UnwrapObj(srcRawObj) - srcDirPath := stdpath.Dir(srcPath) switch s := storage.(type) { case driver.RenameResult: var newObj model.Obj newObj, err = s.Rename(ctx, srcObj, dstName) if err == nil { + srcDirPath := stdpath.Dir(srcPath) if newObj != nil { - updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj)) - } else if !utils.IsBool(lazyCache...) { - DeleteCache(storage, srcDirPath) - if srcRawObj.IsDir() { - ClearCache(storage, srcPath) + Cache.updateDirectoryObject(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj)) + } else { + Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj) + if !utils.IsBool(lazyCache...) { + Cache.DeleteDirectory(storage, srcDirPath) } } } case driver.Rename: err = s.Rename(ctx, srcObj, dstName) - if err == nil && !utils.IsBool(lazyCache...) { - DeleteCache(storage, srcDirPath) - if srcRawObj.IsDir() { - ClearCache(storage, srcPath) + if err == nil { + srcDirPath := stdpath.Dir(srcPath) + Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj) + if !utils.IsBool(lazyCache...) 
{ + Cache.DeleteDirectory(storage, srcDirPath) } } default: @@ -512,10 +400,14 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string } srcPath = utils.FixAndCleanPath(srcPath) dstDirPath = utils.FixAndCleanPath(dstDirPath) - srcObj, err := GetUnwrap(ctx, storage, srcPath) + if dstDirPath == stdpath.Dir(srcPath) { + return stderrors.New("copy in place") + } + srcRawObj, err := Get(ctx, storage, srcPath) if err != nil { return errors.WithMessage(err, "failed to get src object") } + srcObj := model.UnwrapObj(srcRawObj) dstDir, err := GetUnwrap(ctx, storage, dstDirPath) if err != nil { return errors.WithMessage(err, "failed to get dst dir") @@ -527,15 +419,17 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string newObj, err = s.Copy(ctx, srcObj, dstDir) if err == nil { if newObj != nil { - addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj)) } else if !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + Cache.DeleteDirectory(storage, dstDirPath) } } case driver.Copy: err = s.Copy(ctx, srcObj, dstDir) - if err == nil && !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + if err == nil { + if !utils.IsBool(lazyCache...) 
{ + Cache.DeleteDirectory(storage, dstDirPath) + } } default: return errs.NotImplement @@ -566,11 +460,7 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error { case driver.Remove: err = s.Remove(ctx, model.UnwrapObj(rawObj)) if err == nil { - delCacheObj(storage, dirPath, rawObj) - // clear folder cache recursively - if rawObj.IsDir() { - ClearCache(storage, path) - } + Cache.removeDirectoryObject(storage, dirPath, rawObj) } default: return errs.NotImplement @@ -640,16 +530,20 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod var newObj model.Obj newObj, err = s.Put(ctx, parentDir, file, up) if err == nil { + Cache.linkCache.DeleteKey(Key(storage, dstPath)) if newObj != nil { - addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj)) } else if !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + Cache.DeleteDirectory(storage, dstDirPath) } } case driver.Put: err = s.Put(ctx, parentDir, file, up) - if err == nil && !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + if err == nil { + Cache.linkCache.DeleteKey(Key(storage, dstPath)) + if !utils.IsBool(lazyCache...) 
{ + Cache.DeleteDirectory(storage, dstDirPath) + } } default: return errs.NotImplement @@ -664,13 +558,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod } } else { // upload success, remove old obj - err := Remove(ctx, storage, tempPath) - if err != nil { - return err - } else { - key := Key(storage, stdpath.Join(dstDirPath, file.GetName())) - linkCache.Del(key) - } + err = Remove(ctx, storage, tempPath) } } return errors.WithStack(err) @@ -681,7 +569,8 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) } dstDirPath = utils.FixAndCleanPath(dstDirPath) - _, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName)) + dstPath := stdpath.Join(dstDirPath, dstName) + _, err := GetUnwrap(ctx, storage, dstPath) if err == nil { return errors.New("obj already exists") } @@ -698,16 +587,20 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url var newObj model.Obj newObj, err = s.PutURL(ctx, dstDir, dstName, url) if err == nil { + Cache.linkCache.DeleteKey(Key(storage, dstPath)) if newObj != nil { - addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) + Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj)) } else if !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + Cache.DeleteDirectory(storage, dstDirPath) } } case driver.PutURL: err = s.PutURL(ctx, dstDir, dstName, url) - if err == nil && !utils.IsBool(lazyCache...) { - DeleteCache(storage, dstDirPath) + if err == nil { + Cache.linkCache.DeleteKey(Key(storage, dstPath)) + if !utils.IsBool(lazyCache...) 
{ + Cache.DeleteDirectory(storage, dstDirPath) + } } default: return errs.NotImplement diff --git a/internal/op/setting.go b/internal/op/setting.go index cbfa083f..bda4daaf 100644 --- a/internal/op/setting.go +++ b/internal/op/setting.go @@ -5,26 +5,21 @@ import ( "sort" "strconv" "strings" - "time" "github.com/OpenListTeam/OpenList/v4/internal/db" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" - "github.com/OpenListTeam/OpenList/v4/pkg/utils" - "github.com/OpenListTeam/go-cache" "github.com/pkg/errors" ) -var settingCache = cache.NewMemCache(cache.WithShards[*model.SettingItem](4)) var settingG singleflight.Group[*model.SettingItem] var settingCacheF = func(item *model.SettingItem) { - settingCache.Set(item.Key, item, cache.WithEx[*model.SettingItem](time.Hour)) + Cache.SetSetting(item.Key, item) } -var settingGroupCache = cache.NewMemCache(cache.WithShards[[]model.SettingItem](4)) var settingGroupG singleflight.Group[[]model.SettingItem] -var settingGroupCacheF = func(key string, item []model.SettingItem) { - settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour)) +var settingGroupCacheF = func(key string, items []model.SettingItem) { + Cache.SetSettingGroup(key, items) } var settingChangingCallbacks = make([]func(), 0) @@ -34,8 +29,7 @@ func RegisterSettingChangingCallback(f func()) { } func SettingCacheUpdate() { - settingCache.Clear() - settingGroupCache.Clear() + Cache.ClearAll() for _, cb := range settingChangingCallbacks { cb() } @@ -60,7 +54,7 @@ func GetSettingsMap() map[string]string { } func GetSettingItems() ([]model.SettingItem, error) { - if items, ok := settingGroupCache.Get("ALL_SETTING_ITEMS"); ok { + if items, exists := Cache.GetSettingGroup("ALL_SETTING_ITEMS"); exists { return items, nil } items, err, _ := settingGroupG.Do("ALL_SETTING_ITEMS", func() ([]model.SettingItem, error) { @@ -75,7 +69,7 @@ func GetSettingItems() ([]model.SettingItem, error) { } func 
GetPublicSettingItems() ([]model.SettingItem, error) { - if items, ok := settingGroupCache.Get("ALL_PUBLIC_SETTING_ITEMS"); ok { + if items, exists := Cache.GetSettingGroup("ALL_PUBLIC_SETTING_ITEMS"); exists { return items, nil } items, err, _ := settingGroupG.Do("ALL_PUBLIC_SETTING_ITEMS", func() ([]model.SettingItem, error) { @@ -90,7 +84,7 @@ func GetPublicSettingItems() ([]model.SettingItem, error) { } func GetSettingItemByKey(key string) (*model.SettingItem, error) { - if item, ok := settingCache.Get(key); ok { + if item, exists := Cache.GetSetting(key); exists { return item, nil } @@ -118,8 +112,8 @@ func GetSettingItemInKeys(keys []string) ([]model.SettingItem, error) { } func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) { - key := strconv.Itoa(group) - if items, ok := settingGroupCache.Get(key); ok { + key := fmt.Sprintf("GROUP_%d", group) + if items, exists := Cache.GetSettingGroup(key); exists { return items, nil } items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) { @@ -135,11 +129,14 @@ func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) { func GetSettingItemsInGroups(groups []int) ([]model.SettingItem, error) { sort.Ints(groups) - key := strings.Join(utils.MustSliceConvert(groups, func(i int) string { - return strconv.Itoa(i) - }), ",") - if items, ok := settingGroupCache.Get(key); ok { + keyParts := make([]string, 0, len(groups)) + for _, g := range groups { + keyParts = append(keyParts, strconv.Itoa(g)) + } + key := "GROUPS_" + strings.Join(keyParts, "_") + + if items, exists := Cache.GetSettingGroup(key); exists { return items, nil } items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) { @@ -165,10 +162,10 @@ func SaveSettingItems(items []model.SettingItem) error { } } err := db.SaveSettingItems(items) - if err != nil { + if err != nil { return fmt.Errorf("failed save setting: %+v", err) } - SettingCacheUpdate() + SettingCacheUpdate() return nil } diff --git 
a/internal/op/storage.go b/internal/op/storage.go index 127d107f..2bcd4ed7 100644 --- a/internal/op/storage.go +++ b/internal/op/storage.go @@ -15,6 +15,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/generic_sync" + "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -239,6 +240,8 @@ func UpdateStorage(ctx context.Context, storage model.Storage) error { if oldStorage.MountPath != storage.MountPath { // mount path renamed, need to drop the storage storagesMap.Delete(oldStorage.MountPath) + Cache.DeleteDirectoryTree(storageDriver, "/") + Cache.InvalidateStorageDetails(storageDriver) } if err != nil { return errors.WithMessage(err, "failed get storage driver") @@ -259,6 +262,7 @@ func DeleteStorageById(ctx context.Context, id uint) error { if err != nil { return errors.WithMessage(err, "failed get storage") } + var dropErr error = nil if !storage.Disabled { storageDriver, err := GetStorageByMountPath(storage.MountPath) if err != nil { @@ -266,17 +270,19 @@ func DeleteStorageById(ctx context.Context, id uint) error { } // drop the storage in the driver if err := storageDriver.Drop(ctx); err != nil { - return errors.Wrapf(err, "failed drop storage") + dropErr = errors.Wrapf(err, "failed drop storage") } // delete the storage in the memory storagesMap.Delete(storage.MountPath) + Cache.DeleteDirectoryTree(storageDriver, "/") + Cache.InvalidateStorageDetails(storageDriver) go callStorageHooks("del", storageDriver) } // delete the storage in the database if err := db.DeleteStorageById(id); err != nil { return errors.WithMessage(err, "failed delete storage in database") } - return nil + return dropErr } // MustSaveDriverStorage call from specific driver @@ -340,8 +346,8 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj { }) } -func 
GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj { - if utils.IsBool(hideDetails...) { +func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails, refresh bool) []model.Obj { + if hideDetails { return GetStorageVirtualFilesByPath(prefix) } return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj { @@ -354,7 +360,7 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, } timeoutCtx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() - details, err := GetStorageDetails(timeoutCtx, d) + details, err := GetStorageDetails(timeoutCtx, d, refresh) if err != nil { if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) { log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err) @@ -439,7 +445,9 @@ func GetBalancedStorage(path string) driver.Driver { } } -func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) { +var detailsG singleflight.Group[*model.StorageDetails] + +func GetStorageDetails(ctx context.Context, storage driver.Driver, refresh ...bool) (*model.StorageDetails, error) { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) } @@ -447,5 +455,18 @@ func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.Stora if !ok { return nil, errs.NotImplement } - return wd.GetDetails(ctx) + if !utils.IsBool(refresh...) 
{ + if ret, ok := Cache.GetStorageDetails(storage); ok { + return ret, nil + } + } + details, err, _ := detailsG.Do(storage.GetStorage().MountPath, func() (*model.StorageDetails, error) { + ret, err := wd.GetDetails(ctx) + if err != nil { + return nil, err + } + Cache.SetStorageDetails(storage, ret) + return ret, nil + }) + return details, err } diff --git a/internal/op/user.go b/internal/op/user.go index 3f526625..6f0b35e8 100644 --- a/internal/op/user.go +++ b/internal/op/user.go @@ -1,17 +1,13 @@ package op import ( - "time" - "github.com/OpenListTeam/OpenList/v4/internal/db" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/utils" - "github.com/OpenListTeam/go-cache" ) -var userCache = cache.NewMemCache(cache.WithShards[*model.User](2)) var userG singleflight.Group[*model.User] var guestUser *model.User var adminUser *model.User @@ -46,7 +42,7 @@ func GetUserByName(username string) (*model.User, error) { if username == "" { return nil, errs.EmptyUsername } - if user, ok := userCache.Get(username); ok { + if user, exists := Cache.GetUser(username); exists { return user, nil } user, err, _ := userG.Do(username, func() (*model.User, error) { @@ -54,7 +50,7 @@ func GetUserByName(username string) (*model.User, error) { if err != nil { return nil, err } - userCache.Set(username, _user, cache.WithEx[*model.User](time.Hour)) + Cache.SetUser(username, _user) return _user, nil }) return user, err @@ -81,7 +77,7 @@ func DeleteUserById(id uint) error { if old.IsAdmin() || old.IsGuest() { return errs.DeleteAdminOrGuest } - userCache.Del(old.Username) + Cache.DeleteUser(old.Username) return db.DeleteUserById(id) } @@ -96,7 +92,7 @@ func UpdateUser(u *model.User) error { if u.IsGuest() { guestUser = nil } - userCache.Del(old.Username) + Cache.DeleteUser(old.Username) u.BasePath = utils.FixAndCleanPath(u.BasePath) return 
db.UpdateUser(u) } @@ -125,6 +121,6 @@ func DelUserCache(username string) error { if user.IsGuest() { guestUser = nil } - userCache.Del(username) + Cache.DeleteUser(username) return nil } diff --git a/internal/stream/util.go b/internal/stream/util.go index cba29e78..95f56e8a 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -11,6 +11,7 @@ import ( "os" "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/net" "github.com/OpenListTeam/OpenList/v4/pkg/http_range" @@ -27,9 +28,6 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran } func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) { - if link.MFile != nil { - return GetRangeReaderFromMFile(size, link.MFile), nil - } if link.Concurrency > 0 || link.PartSize > 0 { down := net.NewDownloader(func(d *net.Downloader) { d.Concurrency = link.Concurrency @@ -66,7 +64,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, } if len(link.URL) == 0 { - return nil, errors.New("invalid link: must have at least one of MFile, URL, or RangeReader") + return nil, errors.New("invalid link: must have at least one of URL or RangeReader") } rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size { @@ -78,7 +76,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, response, err := net.RequestHttp(ctx, "GET", header, link.URL) if err != nil { - if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { + if _, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok { return nil, err } return nil, fmt.Errorf("http request failure, err:%w", err) diff --git a/internal/task_group/transfer.go b/internal/task_group/transfer.go index 
3cef5786..8c75b77d 100644 --- a/internal/task_group/transfer.go +++ b/internal/task_group/transfer.go @@ -24,16 +24,15 @@ func RefreshAndRemove(dstPath string, payloads ...any) { return } _, dstNeedRefresh := dstStorage.(driver.Put) - dstNeedRefresh = dstNeedRefresh && !dstStorage.Config().NoCache if dstNeedRefresh { - op.DeleteCache(dstStorage, dstActualPath) + op.Cache.DeleteDirectory(dstStorage, dstActualPath) } var ctx context.Context for _, payload := range payloads { switch p := payload.(type) { case DstPathToRefresh: if dstNeedRefresh { - op.DeleteCache(dstStorage, string(p)) + op.Cache.DeleteDirectory(dstStorage, string(p)) } case SrcPathToRemove: if ctx == nil { @@ -79,7 +78,7 @@ func verifyAndRemove(ctx context.Context, srcStorage, dstStorage driver.Driver, } if refresh { - op.DeleteCache(dstStorage, dstObjPath) + op.Cache.DeleteDirectory(dstStorage, dstObjPath) } hasErr := false for _, obj := range srcObjs { diff --git a/pkg/gowebdav/errors.go b/pkg/gowebdav/errors.go index bbf1e929..de03e7b9 100644 --- a/pkg/gowebdav/errors.go +++ b/pkg/gowebdav/errors.go @@ -1,8 +1,8 @@ package gowebdav import ( - "fmt" "os" + "strconv" ) // StatusError implements error and wraps @@ -12,7 +12,7 @@ type StatusError struct { } func (se StatusError) Error() string { - return fmt.Sprintf("%d", se.Status) + return strconv.Itoa(se.Status) } // IsErrCode returns true if the given error diff --git a/pkg/singleflight/singleflight.go b/pkg/singleflight/singleflight.go index 3555d5bd..48383478 100644 --- a/pkg/singleflight/singleflight.go +++ b/pkg/singleflight/singleflight.go @@ -73,9 +73,6 @@ type call[T any] struct { type Group[T any] struct { mu sync.Mutex // protects m m map[string]*call[T] // lazily initialized - - // Won't remember error - Remember bool } // Result holds the results of Do, so they can be passed @@ -159,7 +156,7 @@ func (g *Group[T]) doCall(c *call[T], key string, fn func() (T, error)) { g.mu.Lock() defer g.mu.Unlock() c.wg.Done() - if (!g.Remember || c.err 
!= nil) && g.m[key] == c { + if g.m[key] == c { delete(g.m, key) } diff --git a/pkg/utils/io.go b/pkg/utils/io.go index ff52f843..f398161f 100644 --- a/pkg/utils/io.go +++ b/pkg/utils/io.go @@ -187,52 +187,39 @@ func NewClosers(c ...io.Closer) Closers { return Closers(c) } -type SyncClosersIF interface { - ClosersIF - AcquireReference() bool -} - type SyncClosers struct { closers []io.Closer ref int32 } -var _ SyncClosersIF = (*SyncClosers)(nil) - +// if closed, return false func (c *SyncClosers) AcquireReference() bool { + ref := atomic.AddInt32(&c.ref, 1) + if ref > 0 { + // log.Debugf("AcquireReference %p: %d", c, ref) + return true + } + atomic.StoreInt32(&c.ref, closersClosed) + return false +} + +const closersClosed = math.MinInt32 + +func (c *SyncClosers) Close() error { for { ref := atomic.LoadInt32(&c.ref) if ref < 0 { - return false + return nil } - newRef := ref + 1 - if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) { - // log.Debugf("AcquireReference %p: %d", c, newRef) - return true + if ref > 1 { + if atomic.CompareAndSwapInt32(&c.ref, ref, ref-1) { + // log.Debugf("ReleaseReference %p: %d", c, ref) + return nil + } + } else if atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) { + break } } -} - -const closersClosed = math.MinInt16 - -func (c *SyncClosers) Close() error { - ref := atomic.AddInt32(&c.ref, -1) - if ref > 0 { - // log.Debugf("ReleaseReference %p: %d", c, ref) - return nil - } - - if ref < -1 { - atomic.StoreInt32(&c.ref, closersClosed) - return nil - } - - // Attempt to acquire FinalClose permission. - // At this point, ref must be 0 or -1. We try to atomically change it to the closersClosed state. - // Only the first successful goroutine gets the cleanup permission. 
- if !atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) { - return nil - } // log.Debugf("FinalClose %p", c) var errs []error @@ -264,6 +251,16 @@ func (c *SyncClosers) AddIfCloser(a any) { } } +var _ ClosersIF = (*SyncClosers)(nil) + +// 实现cache.Expirable接口 +func (c *SyncClosers) Expired() bool { + return atomic.LoadInt32(&c.ref) < 0 +} +func (c *SyncClosers) Length() int { + return len(c.closers) +} + func NewSyncClosers(c ...io.Closer) SyncClosers { return SyncClosers{closers: c} } diff --git a/server/common/proxy.go b/server/common/proxy.go index d352582e..c7c975d2 100644 --- a/server/common/proxy.go +++ b/server/common/proxy.go @@ -18,11 +18,11 @@ import ( ) func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error { - if link.MFile != nil { - attachHeader(w, file, link) - http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile) - return nil - } + // if link.MFile != nil { + // attachHeader(w, file, link) + // http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile) + // return nil + // } if link.Concurrency > 0 || link.PartSize > 0 { attachHeader(w, file, link) @@ -101,7 +101,7 @@ func GetEtag(file model.Obj, size int64) string { } func ProxyRange(ctx context.Context, link *model.Link, size int64) *model.Link { - if link.MFile == nil && link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") { + if link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") { if link.ContentLength > 0 { size = link.ContentLength } diff --git a/server/handles/down.go b/server/handles/down.go index 84ebdc44..d4d634cb 100644 --- a/server/handles/down.go +++ b/server/handles/down.go @@ -9,6 +9,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/fs" "github.com/OpenListTeam/OpenList/v4/internal/model" 
"github.com/OpenListTeam/OpenList/v4/internal/net" @@ -147,7 +148,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) { if Writer.IsWritten() { log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err) } else { - if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { + if statusCode, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok { common.ErrorPage(c, err, int(statusCode), true) } else { common.ErrorPage(c, err, 500, true) diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index f45da69b..3fe86726 100644 --- a/server/handles/fsmanage.go +++ b/server/handles/fsmanage.go @@ -386,7 +386,7 @@ func Link(c *gin.Context) { common.ErrorResp(c, err, 500) return } - if storage.Config().NoLinkURL || storage.Config().OnlyLinkMFile { + if storage.Config().NoLinkURL { common.SuccessResp(c, model.Link{ URL: fmt.Sprintf("%s/p%s?d&sign=%s", common.GetApiUrl(c), diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go index 0c4f0922..4612665b 100644 --- a/server/webdav/webdav.go +++ b/server/webdav/webdav.go @@ -272,7 +272,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta } err = common.Proxy(w, r, link, fi) if err != nil { - if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { + if statusCode, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok { return int(statusCode), err } return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)