feat(cache): improve cache management (#1339)

* feat(cache): improve cache management

* feat(disk-usage): add cache

* feat(disk-usage): add refresh

* fix(disk-usage): cache with ttl

* feat(cache): implement KeyedCache and TypedCache for improved caching mechanism

* fix(copy): update object retrieval to use Get instead of GetUnwrap

* refactor(cache): simplify DirectoryCache structure and improve object management

* fix(cache): correct cache entry initialization and key deletion logic in TypedCache

* refactor(driver): remove GetObjInfo interface and simplify Link function logic
https://github.com/OpenListTeam/OpenList/pull/888/files#r2430925783

* fix(link): optimize link retrieval and caching logic

* refactor(cache): consolidate cache management and improve directory cache handling

* fix(cache): add cache control based on storage configuration in List function

* .

* refactor: replace fmt.Sprintf with strconv for integer conversions

* refactor(cache): enhance cache entry management with Expirable interface

* fix(cache): improve link reference acquisition logic to handle expiration

* refactor: replace OnlyLinkMFile with NoLinkSF in driver configurations and logic

* refactor(link): enhance link caching logic with dynamic type keys based on IP and User-Agent

* feat(drivers): add LinkCacheType to driver configurations for enhanced caching

* refactor(cache): streamline directory object management in cache operations

* refactor(cache): remove unnecessary 'dirty' field from CacheEntry structure

* refactor(cache): replace 'dirty' field with bitwise flags

* refactor(io): 调高SyncClosers.AcquireReference的优先级

* refactor(link): 优化链接获取逻辑,增加重

* refactor(link): 添加RequireReference字段以增强链接管理

* refactor(link): 移除MFile字段,改用RangeReader

* refactor: 移除不必要的NoLinkSF字段

* refactor(cache): 修改目录缓存的脏标志定义和更新逻辑

* feat(cache): add expiration gc

---------

Co-authored-by: KirCute <951206789@qq.com>
Co-authored-by: KirCute <kircute@foxmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
This commit is contained in:
ILoveScratch
2025-10-18 21:47:18 +08:00
committed by GitHub
parent 549e60136b
commit febbcd6027
56 changed files with 880 additions and 509 deletions

View File

@@ -17,8 +17,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "115 Cloud", Name: "115 Cloud",
DefaultRoot: "0", DefaultRoot: "0",
// OnlyProxy: true, LinkCacheType: 2,
// NoOverwriteUpload: true,
} }
func init() { func init() {

View File

@@ -131,23 +131,6 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil }, nil
} }
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) { func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil { if err := d.WaitLimit(ctx); err != nil {
return nil, err return nil, err

View File

@@ -19,6 +19,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "115 Open", Name: "115 Open",
DefaultRoot: "0", DefaultRoot: "0",
LinkCacheType: 2,
} }
func init() { func init() {

View File

@@ -130,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath() path := dir.GetPath()
if utils.PathEqual(path, "/") && !d.autoFlatten { if utils.PathEqual(path, "/") && !d.autoFlatten {
return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough), nil return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil
} }
root, sub := d.getRootAndPath(path) root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root] dsts, ok := d.pathMap[root]
@@ -211,9 +211,6 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if resultLink.ContentLength == 0 { if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize() resultLink.ContentLength = fi.GetSize()
} }
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 { if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency resultLink.Concurrency = d.DownloadConcurrency
} }

View File

@@ -17,7 +17,7 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj { func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
var objs []model.Obj var objs []model.Obj
var wg sync.WaitGroup var wg sync.WaitGroup
for _, k := range d.rootOrder { for _, k := range d.rootOrder {
@@ -52,7 +52,7 @@ func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj {
defer wg.Done() defer wg.Done()
c, cancel := context.WithTimeout(ctx, time.Second) c, cancel := context.WithTimeout(ctx, time.Second)
defer cancel() defer cancel()
details, e := op.GetStorageDetails(c, remoteDriver) details, e := op.GetStorageDetails(c, remoteDriver, refresh)
if e != nil { if e != nil {
if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) { if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e) log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)

View File

@@ -20,6 +20,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "BaiduPhoto", Name: "BaiduPhoto",
LocalSort: true, LocalSort: true,
LinkCacheType: 2,
} }
func init() { func init() {

View File

@@ -10,6 +10,7 @@ import (
"mime/multipart" "mime/multipart"
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"strings" "strings"
"time" "time"
@@ -239,7 +240,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
if err != nil { if err != nil {
return err return err
} }
err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid)) err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid))
if err != nil { if err != nil {
fmt.Println("Error writing param2 to request body:", err) fmt.Println("Error writing param2 to request body:", err)
return err return err
@@ -260,7 +261,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
return err return err
} }
req.Header.Set("Content-Type", writer.FormDataContentType()) req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len())) req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
resps, err := http.DefaultClient.Do(req) resps, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return err return err

View File

@@ -258,7 +258,7 @@ type UploadDoneParam struct {
func fileToObj(f File) *model.Object { func fileToObj(f File) *model.Object {
if len(f.Content.FolderName) > 0 { if len(f.Content.FolderName) > 0 {
return &model.Object{ return &model.Object{
ID: fmt.Sprintf("%d", f.ID), ID: strconv.Itoa(f.ID),
Name: f.Content.FolderName, Name: f.Content.FolderName,
Size: 0, Size: 0,
Modified: time.UnixMilli(f.Inserttime), Modified: time.UnixMilli(f.Inserttime),

View File

@@ -9,6 +9,7 @@ import (
"fmt" "fmt"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
"strconv"
"strings" "strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -172,7 +173,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err return "", err
} }
req.Header.Set("Content-Type", writer.FormDataContentType()) req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len())) req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return "", err return "", err

View File

@@ -318,6 +318,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return readSeeker, nil return readSeeker, nil
}), }),
SyncClosers: utils.NewSyncClosers(remoteLink), SyncClosers: utils.NewSyncClosers(remoteLink),
RequireReference: remoteLink.RequireReference,
}, nil }, nil
} }

View File

@@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
"Authorization": {storeInfo.Auth}, "Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"}, "Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value}, "Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())}, "Content-Length": {strconv.FormatInt(file.GetSize(), 10)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))}, "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
} }
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)
@@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
"Authorization": {storeInfo.Auth}, "Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"}, "Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value}, "Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", size)}, "Content-Length": {strconv.FormatInt(size, 10)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))}, "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
} }
res, err := base.HttpClient.Do(req) res, err := base.HttpClient.Do(req)

View File

@@ -19,6 +19,7 @@ var config = driver.Config{
Name: "FebBox", Name: "FebBox",
NoUpload: true, NoUpload: true,
DefaultRoot: "0", DefaultRoot: "0",
LinkCacheType: 1,
} }
func init() { func init() {

View File

@@ -33,7 +33,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "FTP", Name: "FTP",
LocalSort: true, LocalSort: true,
OnlyLinkMFile: false, OnlyProxy: true,
DefaultRoot: "/", DefaultRoot: "/",
NoLinkURL: true, NoLinkURL: true,
} }

View File

@@ -141,7 +141,7 @@ func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUs
Header: map[string][]string{ Header: map[string][]string{
"Accept": {"application/json"}, "Accept": {"application/json"},
"Content-Type": {"application/json"}, "Content-Type": {"application/json"},
//"Content-Length": {fmt.Sprintf("%d", len(n))}, //"Content-Length": {strconv.Itoa(len(n))},
}, },
Body: io.NopCloser(bytes.NewReader(n)), Body: io.NopCloser(bytes.NewReader(n)),
} }
@@ -238,7 +238,7 @@ func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, prei
Header: map[string][]string{ Header: map[string][]string{
"Accept": {"application/json"}, "Accept": {"application/json"},
"Content-Type": {"application/octet-stream"}, "Content-Type": {"application/octet-stream"},
// "Content-Length": {fmt.Sprintf("%d", len(fileSlice))}, // "Content-Length": {strconv.Itoa(len(fileSlice))},
}, },
Body: io.NopCloser(bytes.NewReader(fileSlice)), Body: io.NopCloser(bytes.NewReader(fileSlice)),
} }

View File

@@ -235,6 +235,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fullPath := file.GetPath() fullPath := file.GetPath()
link := &model.Link{} link := &model.Link{}
var MFile model.File
if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" { if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" {
var buf *bytes.Buffer var buf *bytes.Buffer
var thumbPath *string var thumbPath *string
@@ -261,9 +262,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err return nil, err
} }
link.ContentLength = int64(stat.Size()) link.ContentLength = int64(stat.Size())
link.MFile = open MFile = open
} else { } else {
link.MFile = bytes.NewReader(buf.Bytes()) MFile = bytes.NewReader(buf.Bytes())
link.ContentLength = int64(buf.Len()) link.ContentLength = int64(buf.Len())
} }
} else { } else {
@@ -272,13 +273,11 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err return nil, err
} }
link.ContentLength = file.GetSize() link.ContentLength = file.GetSize()
link.MFile = open MFile = open
}
link.AddIfCloser(link.MFile)
if !d.Config().OnlyLinkMFile {
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
link.MFile = nil
} }
link.SyncClosers.AddIfCloser(MFile)
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, MFile)
link.RequireReference = link.SyncClosers.Length() > 0
return link, nil return link, nil
} }

View File

@@ -19,8 +19,8 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "Local", Name: "Local",
OnlyLinkMFile: false,
LocalSort: true, LocalSort: true,
OnlyProxy: true,
NoCache: true, NoCache: true,
DefaultRoot: "/", DefaultRoot: "/",
NoLinkURL: true, NoLinkURL: true,

View File

@@ -36,7 +36,6 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "MediaFire", Name: "MediaFire",
LocalSort: false, LocalSort: false,
OnlyLinkMFile: false,
OnlyProxy: false, OnlyProxy: false,
NoCache: false, NoCache: false,
NoUpload: false, NoUpload: false,

View File

@@ -26,6 +26,11 @@ type OpenList struct {
} }
func (d *OpenList) Config() driver.Config { func (d *OpenList) Config() driver.Config {
if d.PassUAToUpsteam {
c := config
c.LinkCacheType = 2 // add User-Agent to cache key
return c
}
return config return config
} }

View File

@@ -69,15 +69,10 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
Limiter: stream.ServerDownloadLimit, Limiter: stream.ServerDownloadLimit,
Ctx: ctx, Ctx: ctx,
} }
if !d.Config().OnlyLinkMFile {
return &model.Link{ return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile), RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile), SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil RequireReference: true,
}
return &model.Link{
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil }, nil
} }

View File

@@ -18,7 +18,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "SFTP", Name: "SFTP",
LocalSort: true, LocalSort: true,
OnlyLinkMFile: false, OnlyProxy: true,
DefaultRoot: "/", DefaultRoot: "/",
CheckStatus: true, CheckStatus: true,
NoLinkURL: true, NoLinkURL: true,

View File

@@ -86,15 +86,10 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
Limiter: stream.ServerDownloadLimit, Limiter: stream.ServerDownloadLimit,
Ctx: ctx, Ctx: ctx,
} }
if !d.Config().OnlyLinkMFile {
return &model.Link{ return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile), RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile), SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil RequireReference: true,
}
return &model.Link{
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil }, nil
} }

View File

@@ -16,7 +16,7 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "SMB", Name: "SMB",
LocalSort: true, LocalSort: true,
OnlyLinkMFile: false, OnlyProxy: true,
DefaultRoot: ".", DefaultRoot: ".",
NoCache: true, NoCache: true,
NoLinkURL: true, NoLinkURL: true,

View File

@@ -12,6 +12,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs" "github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/sign" "github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common" "github.com/OpenListTeam/OpenList/v4/server/common"
) )
@@ -156,7 +157,7 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
if file.GetID() == "strm" { if file.GetID() == "strm" {
link := d.getLink(ctx, file.GetPath()) link := d.getLink(ctx, file.GetPath())
return &model.Link{ return &model.Link{
MFile: strings.NewReader(link), RangeReader: stream.GetRangeReaderFromMFile(int64(len(link)), strings.NewReader(link)),
}, nil }, nil
} }
// ftp,s3 // ftp,s3

View File

@@ -17,11 +17,10 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "Strm", Name: "Strm",
LocalSort: true, LocalSort: true,
OnlyProxy: true,
NoCache: true, NoCache: true,
NoUpload: true, NoUpload: true,
DefaultRoot: "/", DefaultRoot: "/",
OnlyLinkMFile: true,
OnlyProxy: true,
NoLinkURL: true, NoLinkURL: true,
} }

View File

@@ -164,7 +164,7 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
if err := d.singleUploadRequest(fileId, func(req *resty.Request) { if err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{ uploadParams := map[string]string{
"partName": func() string { "partName": func() string {
digits := len(fmt.Sprintf("%d", totalParts)) digits := len(strconv.Itoa(totalParts))
return file.GetName() + fmt.Sprintf(".%0*d", digits, 1) return file.GetName() + fmt.Sprintf(".%0*d", digits, 1)
}(), }(),
"partNo": strconv.Itoa(1), "partNo": strconv.Itoa(1),
@@ -333,7 +333,7 @@ func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task ch
err := d.singleUploadRequest(fileId, func(req *resty.Request) { err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{ uploadParams := map[string]string{
"partName": func() string { "partName": func() string {
digits := len(fmt.Sprintf("%d", totalParts)) digits := len(strconv.Itoa(totalParts))
return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx) return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx)
}(), }(),
"partNo": strconv.Itoa(task.chunkIdx), "partNo": strconv.Itoa(task.chunkIdx),

View File

@@ -16,7 +16,6 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "Template", Name: "Template",
LocalSort: false, LocalSort: false,
OnlyLinkMFile: false,
OnlyProxy: false, OnlyProxy: false,
NoCache: false, NoCache: false,
NoUpload: false, NoUpload: false,

View File

@@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"strconv"
"strings" "strings"
"time" "time"
@@ -840,7 +841,7 @@ func (xc *XunLeiBrowserCommon) OfflineList(ctx context.Context, nextPageToken st
func (xc *XunLeiBrowserCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string) error { func (xc *XunLeiBrowserCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string) error {
queryParams := map[string]string{ queryParams := map[string]string{
"task_ids": strings.Join(taskIDs, ","), "task_ids": strings.Join(taskIDs, ","),
"_t": fmt.Sprintf("%d", time.Now().UnixMilli()), "_t": strconv.FormatInt(time.Now().UnixMilli(), 10),
} }
if xc.UseFluentPlay { if xc.UseFluentPlay {
queryParams["space"] = ThunderBrowserDriveFluentPlayFolderType queryParams["space"] = ThunderBrowserDriveFluentPlayFolderType

View File

@@ -2,11 +2,11 @@ package virtual
import ( import (
"context" "context"
"io"
"time" "time"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils/random" "github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
) )
@@ -42,16 +42,14 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return res, nil return res, nil
} }
type DummyMFile struct { type DummyMFile struct{}
io.Reader
}
func (f DummyMFile) Read(p []byte) (n int, err error) { func (f DummyMFile) Read(p []byte) (n int, err error) {
return f.Reader.Read(p) return random.Rand.Read(p)
} }
func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) { func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
return f.Reader.Read(p) return random.Rand.Read(p)
} }
func (DummyMFile) Seek(offset int64, whence int) (int64, error) { func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
@@ -60,7 +58,7 @@ func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{ return &model.Link{
MFile: DummyMFile{Reader: random.Rand}, RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), DummyMFile{}),
}, nil }, nil
} }

View File

@@ -15,8 +15,8 @@ type Addition struct {
var config = driver.Config{ var config = driver.Config{
Name: "Virtual", Name: "Virtual",
OnlyLinkMFile: true,
LocalSort: true, LocalSort: true,
OnlyProxy: true,
NeedMs: true, NeedMs: true,
NoLinkURL: true, NoLinkURL: true,
} }

101
internal/cache/keyed_cache.go vendored Normal file
View File

@@ -0,0 +1,101 @@
package cache
import (
"sync"
"time"
)
// KeyedCache is an in-memory, string-keyed cache with a default TTL.
// All operations are guarded by mu and safe for concurrent use.
type KeyedCache[T any] struct {
	// entries maps key -> cached entry; protected by mu.
	entries map[string]*CacheEntry[T]
	mu      sync.RWMutex
	// ttl is the default lifetime applied by Set.
	ttl time.Duration
}
// NewKeyedCache creates a KeyedCache with the given default TTL and
// registers its GC method with the package-wide GC scheduler.
func NewKeyedCache[T any](ttl time.Duration) *KeyedCache[T] {
	cache := &KeyedCache[T]{
		ttl:     ttl,
		entries: map[string]*CacheEntry[T]{},
	}
	gcFuncs = append(gcFuncs, cache.GC)
	return cache
}
// Set stores value under key, expiring after the cache's default TTL.
func (c *KeyedCache[T]) Set(key string, value T) {
	deadline := time.Now().Add(c.ttl)
	c.SetWithExpirable(key, value, ExpirationTime(deadline))
}
// SetWithTTL stores value under key with a caller-chosen TTL instead of
// the cache default.
func (c *KeyedCache[T]) SetWithTTL(key string, value T, ttl time.Duration) {
	deadline := time.Now().Add(ttl)
	c.SetWithExpirable(key, value, ExpirationTime(deadline))
}
// SetWithExpirable stores value under key with a caller-supplied
// expiration policy, replacing any existing entry.
func (c *KeyedCache[T]) SetWithExpirable(key string, value T, exp Expirable) {
	entry := &CacheEntry[T]{Expirable: exp, data: value}
	c.mu.Lock()
	c.entries[key] = entry
	c.mu.Unlock()
}
// Get returns the live value stored under key. An expired entry is
// evicted lazily and reported as a miss.
func (c *KeyedCache[T]) Get(key string) (T, bool) {
	var zero T
	c.mu.RLock()
	entry, ok := c.entries[key]
	if !ok {
		c.mu.RUnlock()
		return zero, false
	}
	expired := entry.Expired()
	c.mu.RUnlock()
	if !expired {
		return entry.data, true
	}
	// Evict the stale entry, but only if the slot still holds the same
	// entry — a concurrent Set may have replaced it after we released
	// the read lock.
	c.mu.Lock()
	if c.entries[key] == entry {
		delete(c.entries, key)
	}
	c.mu.Unlock()
	return zero, false
}
// Delete removes the entry stored under key, if any.
func (c *KeyedCache[T]) Delete(key string) {
	c.mu.Lock()
	delete(c.entries, key)
	c.mu.Unlock()
}
// Take removes and returns the value stored under key. Unlike Get it
// does not consult expiration: a stale but not-yet-collected entry is
// still handed back.
func (c *KeyedCache[T]) Take(key string) (T, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	entry, ok := c.entries[key]
	if !ok {
		var zero T
		return zero, false
	}
	delete(c.entries, key)
	return entry.data, true
}
// Clear drops every entry, resetting the cache to empty.
func (c *KeyedCache[T]) Clear() {
	c.mu.Lock()
	c.entries = map[string]*CacheEntry[T]{}
	c.mu.Unlock()
}
// GC removes every expired entry. It is registered with the package
// cron by NewKeyedCache and runs periodically.
//
// Simplified to a single pass: the Go spec allows deleting map entries
// while ranging over the map, so the previous collect-keys-then-delete
// double pass (and its slice allocation) was unnecessary.
func (c *KeyedCache[T]) GC() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for key, entry := range c.entries {
		if entry.Expired() {
			delete(c.entries, key)
		}
	}
}

18
internal/cache/type.go vendored Normal file
View File

@@ -0,0 +1,18 @@
package cache
import "time"
// Expirable reports whether a cached value should be treated as stale.
type Expirable interface {
	// Expired returns true once the value's lifetime has elapsed.
	Expired() bool
}
type ExpirationTime time.Time
func (e ExpirationTime) Expired() bool {
return time.Now().After(time.Time(e))
}
// CacheEntry pairs a cached value with its expiration policy.
// The embedded Expirable decides when the entry is stale.
type CacheEntry[T any] struct {
	Expirable
	// data is the cached value; accessed only through the owning cache.
	data T
}

122
internal/cache/typed_cache.go vendored Normal file
View File

@@ -0,0 +1,122 @@
package cache
import (
"sync"
"time"
)
// TypedCache is a two-level in-memory cache: each primary key holds a
// bucket of entries indexed by a secondary "type" key (e.g. per-IP or
// per-User-Agent link variants). Guarded by mu; safe for concurrent use.
type TypedCache[T any] struct {
	// entries maps key -> typeKey -> cached entry; protected by mu.
	entries map[string]map[string]*CacheEntry[T]
	mu      sync.RWMutex
	// ttl is the default lifetime applied by SetType.
	ttl time.Duration
}
// NewTypedCache creates a TypedCache with the given default TTL and
// registers its GC method with the package-wide GC scheduler.
func NewTypedCache[T any](ttl time.Duration) *TypedCache[T] {
	cache := &TypedCache[T]{
		ttl:     ttl,
		entries: map[string]map[string]*CacheEntry[T]{},
	}
	gcFuncs = append(gcFuncs, cache.GC)
	return cache
}
// SetType stores value under (key, typeKey), expiring after the cache's
// default TTL.
func (c *TypedCache[T]) SetType(key, typeKey string, value T) {
	deadline := time.Now().Add(c.ttl)
	c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(deadline))
}
// SetTypeWithTTL stores value under (key, typeKey) with a caller-chosen
// TTL instead of the cache default.
func (c *TypedCache[T]) SetTypeWithTTL(key, typeKey string, value T, ttl time.Duration) {
	deadline := time.Now().Add(ttl)
	c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(deadline))
}
// SetTypeWithExpirable stores value under (key, typeKey) with a
// caller-supplied expiration policy, creating the key's bucket on
// first use and replacing any existing entry.
func (c *TypedCache[T]) SetTypeWithExpirable(key, typeKey string, value T, exp Expirable) {
	entry := &CacheEntry[T]{Expirable: exp, data: value}
	c.mu.Lock()
	defer c.mu.Unlock()
	bucket, ok := c.entries[key]
	if !ok {
		bucket = map[string]*CacheEntry[T]{}
		c.entries[key] = bucket
	}
	bucket[typeKey] = entry
}
// GetType returns the live value stored under key, preferring the first
// entry matched by typeKeys and falling back to fallbackTypeKey when
// none of them is present. An expired entry is evicted lazily and
// reported as a miss.
//
// Bug fixed: the previous implementation looked up fallbackTypeKey
// first and then unconditionally overwrote that result while scanning
// typeKeys, so whenever typeKeys was non-empty and none matched, the
// fallback entry was discarded and the lookup always missed —
// contradicting its own documented contract.
func (c *TypedCache[T]) GetType(key, fallbackTypeKey string, typeKeys ...string) (T, bool) {
	var zero T
	c.mu.RLock()
	cache, ok := c.entries[key]
	if !ok {
		c.mu.RUnlock()
		return zero, false
	}
	var entry *CacheEntry[T]
	matched := fallbackTypeKey
	found := false
	for _, tk := range typeKeys {
		if e, hit := cache[tk]; hit {
			entry, matched, found = e, tk, true
			break
		}
	}
	if !found {
		entry, found = cache[fallbackTypeKey]
	}
	if !found {
		c.mu.RUnlock()
		return zero, false
	}
	expired := entry.Expired()
	c.mu.RUnlock()
	if !expired {
		return entry.data, true
	}
	// Evict the stale entry unless a concurrent writer replaced it
	// after we released the read lock; drop the bucket when it empties.
	c.mu.Lock()
	if cache[matched] == entry {
		delete(cache, matched)
		if len(cache) == 0 {
			delete(c.entries, key)
		}
	}
	c.mu.Unlock()
	return zero, false
}
// DeleteKey removes every typed entry stored under key, if any.
func (c *TypedCache[T]) DeleteKey(key string) {
	c.mu.Lock()
	delete(c.entries, key)
	c.mu.Unlock()
}
// Clear drops every entry, resetting the cache to empty.
func (c *TypedCache[T]) Clear() {
	c.mu.Lock()
	c.entries = map[string]map[string]*CacheEntry[T]{}
	c.mu.Unlock()
}
// GC removes every expired entry and drops key buckets that become
// empty. It is registered with the package cron by NewTypedCache.
//
// Simplified to a single pass: the Go spec allows deleting map entries
// while ranging, so the previous collect-then-delete double pass was
// unnecessary (and its loop variables mislabeled the primary key as
// "tk", inviting confusion with the typeKey level).
func (c *TypedCache[T]) GC() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for key, bucket := range c.entries {
		for typeKey, entry := range bucket {
			if entry.Expired() {
				delete(bucket, typeKey)
			}
		}
		if len(bucket) == 0 {
			delete(c.entries, key)
		}
	}
}

24
internal/cache/utils.go vendored Normal file
View File

@@ -0,0 +1,24 @@
package cache
import (
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
log "github.com/sirupsen/logrus"
)
var (
	// cacheGcCron drives the periodic garbage collection of all caches;
	// created in init below.
	cacheGcCron *cron.Cron
	// gcFuncs collects the GC method of every cache built via
	// NewKeyedCache/NewTypedCache. NOTE(review): appends are not
	// synchronized — assumes caches are only created during startup;
	// confirm no cache is constructed after init's cron starts.
	gcFuncs []func()
)
// init starts an hourly cron that runs every registered cache GC,
// evicting expired entries that were never touched again (lookups only
// evict lazily on access).
func init() {
	// TODO Move to bootstrap
	cacheGcCron = cron.NewCron(time.Hour)
	cacheGcCron.Do(func() {
		log.Infof("Start cache GC")
		for _, f := range gcFuncs {
			f()
		}
	})
}

View File

@@ -3,8 +3,6 @@ package driver
type Config struct { type Config struct {
Name string `json:"name"` Name string `json:"name"`
LocalSort bool `json:"local_sort"` LocalSort bool `json:"local_sort"`
// if the driver returns Link with MFile, this should be set to true
OnlyLinkMFile bool `json:"only_local"`
OnlyProxy bool `json:"only_proxy"` OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"` NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"` NoUpload bool `json:"no_upload"`
@@ -19,8 +17,12 @@ type Config struct {
ProxyRangeOption bool `json:"-"` ProxyRangeOption bool `json:"-"`
// if the driver returns Link without URL, this should be set to true // if the driver returns Link without URL, this should be set to true
NoLinkURL bool `json:"-"` NoLinkURL bool `json:"-"`
// LinkCacheType=1 add IP to cache key
//
// LinkCacheType=2 add UserAgent to cache key
LinkCacheType uint8 `json:"-"`
} }
func (c Config) MustProxy() bool { func (c Config) MustProxy() bool {
return c.OnlyProxy || c.OnlyLinkMFile || c.NoLinkURL return c.OnlyProxy || c.NoLinkURL
} }

View File

@@ -47,11 +47,6 @@ type Getter interface {
Get(ctx context.Context, path string) (model.Obj, error) Get(ctx context.Context, path string) (model.Obj, error)
} }
type GetObjInfo interface {
// GetObjInfo get file info by path
GetObjInfo(ctx context.Context, path string) (model.Obj, error)
}
//type Writer interface { //type Writer interface {
// Mkdir // Mkdir
// Move // Move

View File

@@ -1,12 +1,11 @@
package errs package errs
import "errors"
func UnwrapOrSelf(err error) error { func UnwrapOrSelf(err error) error {
// errors.Unwrap has no fallback mechanism u, ok := err.(interface {
unwrapped := errors.Unwrap(err) Unwrap() error
if unwrapped == nil { })
if !ok {
return err return err
} }
return unwrapped return u.Unwrap()
} }

View File

@@ -152,7 +152,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
if taskType == move { if taskType == move {
task_group.RefreshAndRemove(dstDirPath, task_group.SrcPathToRemove(srcObjPath)) task_group.RefreshAndRemove(dstDirPath, task_group.SrcPathToRemove(srcObjPath))
} else { } else {
op.DeleteCache(t.DstStorage, dstDirActualPath) op.Cache.DeleteDirectory(t.DstStorage, dstDirActualPath)
} }
} }
return nil, err return nil, err
@@ -186,7 +186,7 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName()) dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
if t.TaskType == copy { if t.TaskType == copy {
if t.Ctx().Value(conf.NoTaskKey) != nil { if t.Ctx().Value(conf.NoTaskKey) != nil {
defer op.DeleteCache(t.DstStorage, dstActualPath) defer op.Cache.DeleteDirectory(t.DstStorage, dstActualPath)
} else { } else {
task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath)) task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath))
} }

View File

@@ -15,7 +15,7 @@ func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
path = utils.FixAndCleanPath(path) path = utils.FixAndCleanPath(path)
// maybe a virtual file // maybe a virtual file
if path != "/" { if path != "/" {
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails) virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails, false)
for _, f := range virtualFiles { for _, f := range virtualFiles {
if f.GetName() == stdpath.Base(path) { if f.GetName() == stdpath.Base(path) {
return f, nil return f, nil

View File

@@ -15,7 +15,7 @@ import (
func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) { func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
meta, _ := ctx.Value(conf.MetaKey).(*model.Meta) meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
user, _ := ctx.Value(conf.UserKey).(*model.User) user, _ := ctx.Value(conf.UserKey).(*model.User)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails) virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails, args.Refresh)
storage, actualPath, err := op.GetStorageAndActualPath(path) storage, actualPath, err := op.GetStorageAndActualPath(path)
if err != nil && len(virtualFiles) == 0 { if err != nil && len(virtualFiles) == 0 {
return nil, errors.WithMessage(err, "failed get storage") return nil, errors.WithMessage(err, "failed get storage")

View File

@@ -28,7 +28,6 @@ type Link struct {
URL string `json:"url"` // most common way URL string `json:"url"` // most common way
Header http.Header `json:"header"` // needed header (for url) Header http.Header `json:"header"` // needed header (for url)
RangeReader RangeReaderIF `json:"-"` // recommended way if can't use URL RangeReader RangeReaderIF `json:"-"` // recommended way if can't use URL
MFile File `json:"-"` // best for local,smb... file system, which exposes MFile
Expiration *time.Duration // local cache expire Duration Expiration *time.Duration // local cache expire Duration
@@ -38,6 +37,8 @@ type Link struct {
ContentLength int64 `json:"-"` // 转码视频、缩略图 ContentLength int64 `json:"-"` // 转码视频、缩略图
utils.SyncClosers `json:"-"` utils.SyncClosers `json:"-"`
// 如果SyncClosers中的资源被关闭后Link将不可用则此值应为 true
RequireReference bool `json:"-"`
} }
type OtherArgs struct { type OtherArgs struct {

View File

@@ -12,6 +12,7 @@ import (
"time" "time"
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/rclone/rclone/lib/mmap" "github.com/rclone/rclone/lib/mmap"
@@ -403,7 +404,7 @@ var errInfiniteRetry = errors.New("infinite retry")
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
resp, err := d.cfg.HttpClient(d.ctx, params) resp, err := d.cfg.HttpClient(d.ctx, params)
if err != nil { if err != nil {
statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError) statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError)
if !ok { if !ok {
return 0, err return 0, err
} }

View File

@@ -10,6 +10,7 @@ import (
"time" "time"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool" "github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/cache"
"github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
@@ -17,12 +18,12 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache" gocache "github.com/OpenListTeam/go-cache"
"github.com/pkg/errors" "github.com/pkg/errors"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64)) var archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider] var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) { func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
@@ -37,14 +38,14 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err) return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err)
} }
if m.Expiration != nil { if m.Expiration != nil {
archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration)) archiveMetaCache.Set(key, m, gocache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
} }
return m, nil return m, nil
} }
if storage.Config().OnlyLinkMFile { // if storage.Config().NoLinkSingleflight {
meta, err := fn() // meta, err := fn()
return meta, err // return meta, err
} // }
if !args.Refresh { if !args.Refresh {
if meta, ok := archiveMetaCache.Get(key); ok { if meta, ok := archiveMetaCache.Get(key); ok {
log.Debugf("use cache when get %s archive meta", path) log.Debugf("use cache when get %s archive meta", path)
@@ -158,7 +159,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return obj, archiveMetaProvider, err return obj, archiveMetaProvider, err
} }
var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64)) var archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
var archiveListG singleflight.Group[[]model.Obj] var archiveListG singleflight.Group[[]model.Obj]
func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) { func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
@@ -199,7 +200,7 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m
if !storage.Config().NoCache { if !storage.Config().NoCache {
if len(files) > 0 { if len(files) > 0 {
log.Debugf("set cache: %s => %+v", key, files) log.Debugf("set cache: %s => %+v", key, files)
archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) archiveListCache.Set(key, files, gocache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
} else { } else {
log.Debugf("del cache: %s", key) log.Debugf("del cache: %s", key)
archiveListCache.Del(key) archiveListCache.Del(key)
@@ -354,75 +355,50 @@ func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args mo
return nil, nil, errors.WithStack(errs.ObjectNotFound) return nil, nil, errors.WithStack(errs.ObjectNotFound)
} }
type extractLink struct { type objWithLink struct {
*model.Link link *model.Link
Obj model.Obj obj model.Obj
} }
var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16)) var extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
var extractG = singleflight.Group[*extractLink]{Remember: true} var extractG = singleflight.Group[*objWithLink]{}
func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) { func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
} }
key := stdpath.Join(Key(storage, path), args.InnerPath) key := stdpath.Join(Key(storage, path), args.InnerPath)
if link, ok := extractCache.Get(key); ok { if ol, ok := extractCache.Get(key); ok {
return link.Link, link.Obj, nil if ol.link.Expiration != nil || ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
}
} }
var forget any fn := func() (*objWithLink, error) {
var linkM *extractLink ol, err := driverExtract(ctx, storage, path, args)
fn := func() (*extractLink, error) {
link, err := driverExtract(ctx, storage, path, args)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed extract archive") return nil, errors.Wrapf(err, "failed extract archive")
} }
if link.MFile != nil && forget != nil { if ol.link.Expiration != nil {
linkM = link extractCache.SetWithTTL(key, ol, *ol.link.Expiration)
return nil, errLinkMFileCache } else {
extractCache.SetWithExpirable(key, ol, &ol.link.SyncClosers)
} }
if link.Link.Expiration != nil { return ol, nil
extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
}
link.AddIfCloser(forget)
return link, nil
} }
if storage.Config().OnlyLinkMFile { for {
link, err := fn() ol, err, _ := extractG.Do(key, fn)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
return link.Link, link.Obj, nil if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
} }
forget = utils.CloseFunc(func() error {
if forget != nil {
forget = nil
linkG.Forget(key)
} }
return nil
})
link, err, _ := extractG.Do(key, fn)
for err == nil && !link.AcquireReference() {
link, err, _ = extractG.Do(key, fn)
}
if err == errLinkMFileCache {
if linkM != nil {
return linkM.Link, linkM.Obj, nil
}
forget = nil
link, err = fn()
}
if err != nil {
return nil, nil, err
}
return link.Link, link.Obj, nil
} }
func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) { func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*objWithLink, error) {
storageAr, ok := storage.(driver.ArchiveReader) storageAr, ok := storage.(driver.ArchiveReader)
if !ok { if !ok {
return nil, errs.DriverExtractNotSupported return nil, errs.DriverExtractNotSupported
@@ -438,7 +414,7 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args
return nil, errors.WithStack(errs.NotFile) return nil, errors.WithStack(errs.NotFile)
} }
link, err := storageAr.Extract(ctx, archiveFile, args) link, err := storageAr.Extract(ctx, archiveFile, args)
return &extractLink{Link: link, Obj: extracted}, err return &objWithLink{link: link, obj: extracted}, err
} }
type streamWithParent struct { type streamWithParent struct {
@@ -500,16 +476,16 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
if err == nil { if err == nil {
if len(newObjs) > 0 { if len(newObjs) > 0 {
for _, newObj := range newObjs { for _, newObj := range newObjs {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} }
} else if !utils.IsBool(lazyCache...) { } else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath) Cache.DeleteDirectory(storage, dstDirPath)
} }
} }
case driver.ArchiveDecompress: case driver.ArchiveDecompress:
err = s.ArchiveDecompress(ctx, srcObj, dstDir, args) err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
if err == nil && !utils.IsBool(lazyCache...) { if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath) Cache.DeleteDirectory(storage, dstDirPath)
} }
default: default:
return errs.NotImplement return errs.NotImplement

257
internal/op/cache.go Normal file
View File

@@ -0,0 +1,257 @@
package op
import (
stdpath "path"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/cache"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
// CacheManager bundles every cache used by the op layer behind one façade
// so that invalidation rules (directory listings, file links, users,
// settings, storage details) live in a single place.
type CacheManager struct {
	dirCache     *cache.KeyedCache[*directoryCache]       // Cache for directory listings
	linkCache    *cache.TypedCache[*objWithLink]          // Cache for file links, sub-keyed by link type (see Link)
	userCache    *cache.KeyedCache[*model.User]           // Cache for user data, keyed by username
	settingCache *cache.KeyedCache[any]                   // Cache for settings: holds both *model.SettingItem and []model.SettingItem
	detailCache  *cache.KeyedCache[*model.StorageDetails] // Cache for storage details, keyed by mount path
}
// NewCacheManager constructs a CacheManager with the default TTL for each
// underlying cache: directories 5m, links 30m, users and settings 1h,
// storage details 30m.
func NewCacheManager() *CacheManager {
	cm := &CacheManager{}
	cm.dirCache = cache.NewKeyedCache[*directoryCache](5 * time.Minute)
	cm.linkCache = cache.NewTypedCache[*objWithLink](30 * time.Minute)
	cm.userCache = cache.NewKeyedCache[*model.User](time.Hour)
	cm.settingCache = cache.NewKeyedCache[any](time.Hour)
	cm.detailCache = cache.NewKeyedCache[*model.StorageDetails](30 * time.Minute)
	return cm
}
// Cache is the process-wide CacheManager instance shared by the op package.
var Cache = NewCacheManager()
// Key builds the cache key for path inside storage: the storage's mount
// path joined with the in-storage path. stdpath.Join cleans the result.
//
// NOTE(review): the Key this replaces ran path through
// utils.FixAndCleanPath before joining; stdpath.Join's implicit Clean
// should be equivalent for already-rooted inputs — confirm no caller
// passes an unrooted or un-normalized path.
func Key(storage driver.Driver, path string) string {
	return stdpath.Join(storage.GetStorage().MountPath, path)
}
// updateDirectoryObject replaces oldObj with newObj in the cached listing
// of dirPath.
// If the object is a file, its cached links are dropped under both the old
// and the new name (a rename may overwrite an existing entry).
// If the object is a directory, the cached subtree under its old name is
// dropped, since those children are now only reachable under the new name.
func (cm *CacheManager) updateDirectoryObject(storage driver.Driver, dirPath string, oldObj model.Obj, newObj model.Obj) {
	key := Key(storage, dirPath)
	if !oldObj.IsDir() {
		cm.linkCache.DeleteKey(stdpath.Join(key, oldObj.GetName()))
		cm.linkCache.DeleteKey(stdpath.Join(key, newObj.GetName()))
	}
	if storage.Config().NoCache {
		return
	}
	// Local named dc (not "cache") so the imported cache package is not shadowed.
	if dc, exist := cm.dirCache.Get(key); exist {
		if oldObj.IsDir() {
			cm.deleteDirectoryTree(stdpath.Join(key, oldObj.GetName()))
		}
		dc.UpdateObject(oldObj.GetName(), newObj)
	}
}
// addDirectoryObject inserts (or replaces) newObj in the cached listing of
// dirPath, if a listing is currently cached and the storage allows caching.
func (cm *CacheManager) addDirectoryObject(storage driver.Driver, dirPath string, newObj model.Obj) {
	if storage.Config().NoCache {
		return
	}
	// Local named dc (not "cache") so the imported cache package is not shadowed.
	if dc, exist := cm.dirCache.Get(Key(storage, dirPath)); exist {
		dc.UpdateObject(newObj.GetName(), newObj)
	}
}
// DeleteDirectoryTree evicts the cached listing of dirPath and of every
// cached descendant directory. No-op when the storage disables caching.
func (cm *CacheManager) DeleteDirectoryTree(storage driver.Driver, dirPath string) {
	if storage.Config().NoCache {
		return
	}
	root := Key(storage, dirPath)
	cm.deleteDirectoryTree(root)
}
// deleteDirectoryTree removes key from dirCache and recurses into every
// directory entry of the evicted listing. key is a full cache key (see Key).
// The evicted entry's objs slice is read without its lock, matching the
// assumption that a Take-n entry has no further writers.
func (cm *CacheManager) deleteDirectoryTree(key string) {
	dc, ok := cm.dirCache.Take(key)
	if !ok {
		return
	}
	for _, child := range dc.objs {
		if !child.IsDir() {
			continue
		}
		cm.deleteDirectoryTree(stdpath.Join(key, child.GetName()))
	}
}
// DeleteDirectory evicts only the cached listing of dirPath itself;
// descendants keep their own entries (use DeleteDirectoryTree to drop
// those too). No-op when the storage disables caching.
func (cm *CacheManager) DeleteDirectory(storage driver.Driver, dirPath string) {
	if storage.Config().NoCache {
		return
	}
	key := Key(storage, dirPath)
	cm.dirCache.Delete(key)
}
// removeDirectoryObject removes obj from the cached listing of dirPath.
// If the object is a file, its cached link is dropped too.
// If it is a directory, its cached subtree is dropped recursively.
func (cm *CacheManager) removeDirectoryObject(storage driver.Driver, dirPath string, obj model.Obj) {
	key := Key(storage, dirPath)
	if !obj.IsDir() {
		cm.linkCache.DeleteKey(stdpath.Join(key, obj.GetName()))
	}
	if storage.Config().NoCache {
		return
	}
	// Local named dc (not "cache") so the imported cache package is not shadowed.
	if dc, exist := cm.dirCache.Get(key); exist {
		if obj.IsDir() {
			cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName()))
		}
		dc.RemoveObject(obj.GetName())
	}
}
// SetUser caches user under username, replacing any previous entry.
func (cm *CacheManager) SetUser(username string, user *model.User) {
	cm.userCache.Set(username, user)
}

// GetUser returns the cached user for username; the second result reports
// whether an entry was found.
func (cm *CacheManager) GetUser(username string) (*model.User, bool) {
	return cm.userCache.Get(username)
}

// DeleteUser evicts username from the user cache.
func (cm *CacheManager) DeleteUser(username string) {
	cm.userCache.Delete(username)
}
// SetSetting caches a single setting item under key.
func (cm *CacheManager) SetSetting(key string, setting *model.SettingItem) {
	cm.settingCache.Set(key, setting)
}

// GetSetting returns the cached *model.SettingItem stored under key.
// It reports false when key is absent or holds a value of another type
// (the setting cache also stores setting groups).
func (cm *CacheManager) GetSetting(key string) (*model.SettingItem, bool) {
	data, exists := cm.settingCache.Get(key)
	if !exists {
		return nil, false
	}
	setting, ok := data.(*model.SettingItem)
	if !ok {
		return nil, false
	}
	return setting, true
}
// SetSettingGroup caches a group of setting items under key.
func (cm *CacheManager) SetSettingGroup(key string, settings []model.SettingItem) {
	cm.settingCache.Set(key, settings)
}

// GetSettingGroup returns the cached []model.SettingItem stored under key.
// It reports false when key is absent or holds a value of another type
// (the setting cache also stores single items).
func (cm *CacheManager) GetSettingGroup(key string) ([]model.SettingItem, bool) {
	data, exists := cm.settingCache.Get(key)
	if !exists {
		return nil, false
	}
	settings, ok := data.([]model.SettingItem)
	if !ok {
		return nil, false
	}
	return settings, true
}
// SetStorageDetails caches details for storage, keyed by its mount path,
// using the storage's configured CacheExpiration (minutes) as the TTL.
// No-op when the storage disables caching.
func (cm *CacheManager) SetStorageDetails(storage driver.Driver, details *model.StorageDetails) {
	if storage.Config().NoCache {
		return
	}
	ttl := time.Duration(storage.GetStorage().CacheExpiration) * time.Minute
	cm.detailCache.SetWithTTL(storage.GetStorage().MountPath, details, ttl)
}

// GetStorageDetails returns the cached details for storage, if present.
func (cm *CacheManager) GetStorageDetails(storage driver.Driver) (*model.StorageDetails, bool) {
	return cm.detailCache.Get(storage.GetStorage().MountPath)
}

// InvalidateStorageDetails drops the cached details for storage.
func (cm *CacheManager) InvalidateStorageDetails(storage driver.Driver) {
	cm.detailCache.Delete(storage.GetStorage().MountPath)
}
// ClearAll empties every cache owned by this manager.
func (cm *CacheManager) ClearAll() {
	for _, c := range []interface{ Clear() }{
		cm.dirCache,
		cm.linkCache,
		cm.userCache,
		cm.settingCache,
		cm.detailCache,
	} {
		c.Clear()
	}
}
// directoryCache holds one cached directory listing: the raw objects plus
// a lazily refreshed snapshot used to serve sorted listings.
type directoryCache struct {
	objs       []model.Obj  // authoritative listing, mutated in place
	sorted     []model.Obj  // snapshot served to readers (see GetSortedObjects)
	mu         sync.RWMutex // guards objs, sorted and dirtyFlags
	dirtyFlags uint8        // bitwise OR of dirtyRemove/dirtyUpdate
}

const (
	dirtyRemove uint8 = 1 << iota // object removed: refresh the sorted snapshot, but no full sort/extract needed
	dirtyUpdate                   // object updated/added: a full sort + folder-extract pass is required
)
// newDirectoryCache wraps objs in a directoryCache. The sorted snapshot
// starts as an independent copy of objs so later in-place mutations of the
// listing cannot alias entries already handed to readers.
func newDirectoryCache(objs []model.Obj) *directoryCache {
	snapshot := append(make([]model.Obj, 0, len(objs)), objs...)
	return &directoryCache{objs: objs, sorted: snapshot}
}
// RemoveObject deletes the first entry named name from the listing and
// marks the sorted snapshot stale (remove-only: no re-sort is required).
// No-op when no entry has that name.
func (dc *directoryCache) RemoveObject(name string) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	idx := -1
	for i := range dc.objs {
		if dc.objs[i].GetName() == name {
			idx = i
			break
		}
	}
	if idx < 0 {
		return
	}
	dc.objs = append(dc.objs[:idx], dc.objs[idx+1:]...)
	dc.dirtyFlags |= dirtyRemove
}
// UpdateObject replaces the entry named oldName with newObj, or appends
// newObj when oldName is empty or not present. Either way the listing
// needs a full sort + folder-extract, so dirtyUpdate is set.
func (dc *directoryCache) UpdateObject(oldName string, newObj model.Obj) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	dc.dirtyFlags |= dirtyUpdate
	if oldName == "" {
		dc.objs = append(dc.objs, newObj)
		return
	}
	for i := range dc.objs {
		if dc.objs[i].GetName() == oldName {
			dc.objs[i] = newObj
			return
		}
	}
	dc.objs = append(dc.objs, newObj)
}
// GetSortedObjects returns the listing as served to clients: a snapshot of
// objs, run through sort + folder-extract when entries were added or
// renamed since the last call (dirtyUpdate), or merely re-copied when
// entries were only removed (dirtyRemove).
//
// Fast path: when nothing is dirty the cached snapshot is returned under
// the read lock. Fixes over the previous version: the snapshot is captured
// before RUnlock (reading dc.sorted after unlocking raced concurrent
// writers reassigning it), and the dirty flags are re-checked after
// acquiring the write lock so racing readers do not rebuild redundantly.
func (dc *directoryCache) GetSortedObjects(meta driver.Meta) []model.Obj {
	dc.mu.RLock()
	if dc.dirtyFlags == 0 {
		sorted := dc.sorted
		dc.mu.RUnlock()
		return sorted
	}
	dc.mu.RUnlock()

	dc.mu.Lock()
	defer dc.mu.Unlock()
	// Double-check: another goroutine may have rebuilt the snapshot between
	// the RUnlock above and acquiring the write lock.
	if dc.dirtyFlags == 0 {
		return dc.sorted
	}
	sorted := make([]model.Obj, len(dc.objs))
	copy(sorted, dc.objs)
	dc.sorted = sorted
	if dc.dirtyFlags&dirtyUpdate != 0 {
		storage := meta.GetStorage()
		if meta.Config().LocalSort {
			model.SortFiles(sorted, storage.OrderBy, storage.OrderDirection)
		}
		model.ExtractFolder(sorted, storage.ExtractFolder)
	}
	dc.dirtyFlags = 0
	return sorted
}

View File

@@ -4,115 +4,20 @@ import (
"context" "context"
stderrors "errors" stderrors "errors"
stdpath "path" stdpath "path"
"slices"
"strings"
"time" "time"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors" "github.com/pkg/errors"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// In order to facilitate adding some other things before and after file op
var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var listG singleflight.Group[[]model.Obj] var listG singleflight.Group[[]model.Obj]
func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, obj := range objs {
if obj.GetName() == newObj.GetName() {
objs = slices.Delete(objs, i, i+1)
break
}
}
for i, obj := range objs {
if obj.GetName() == oldObj.GetName() {
objs[i] = newObj
break
}
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
func delCacheObj(storage driver.Driver, path string, obj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, oldObj := range objs {
if oldObj.GetName() == obj.GetName() {
objs = append(objs[:i], objs[i+1:]...)
break
}
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
var addSortDebounceMap generic_sync.MapOf[string, func(func())]
func addCacheObj(storage driver.Driver, path string, newObj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, obj := range objs {
if obj.GetName() == newObj.GetName() {
objs[i] = newObj
return
}
}
// Simple separation of files and folders
if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() {
objs = append(objs, newObj)
} else {
objs = append([]model.Obj{newObj}, objs...)
}
if storage.Config().LocalSort {
debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute))
log.Debug("addCacheObj: wait start sort")
debounce(func() {
log.Debug("addCacheObj: start sort")
model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
addSortDebounceMap.Delete(key)
})
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
func ClearCache(storage driver.Driver, path string) {
objs, ok := listCache.Get(Key(storage, path))
if ok {
for _, obj := range objs {
if obj.IsDir() {
ClearCache(storage, stdpath.Join(path, obj.GetName()))
}
}
}
listCache.Del(Key(storage, path))
}
func DeleteCache(storage driver.Driver, path string) {
listCache.Del(Key(storage, path))
}
func Key(storage driver.Driver, path string) string {
return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
}
// List files in storage, not contains virtual file // List files in storage, not contains virtual file
func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs) ([]model.Obj, error) { func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs) ([]model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -122,11 +27,12 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
log.Debugf("op.List %s", path) log.Debugf("op.List %s", path)
key := Key(storage, path) key := Key(storage, path)
if !args.Refresh { if !args.Refresh {
if files, ok := listCache.Get(key); ok { if dirCache, exists := Cache.dirCache.Get(key); exists {
log.Debugf("use cache when list %s", path) log.Debugf("use cache when list %s", path)
return files, nil return dirCache.GetSortedObjects(storage), nil
} }
} }
dir, err := GetUnwrap(ctx, storage, path) dir, err := GetUnwrap(ctx, storage, path)
if err != nil { if err != nil {
return nil, errors.WithMessage(err, "failed get dir") return nil, errors.WithMessage(err, "failed get dir")
@@ -135,6 +41,7 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
if !dir.IsDir() { if !dir.IsDir() {
return nil, errors.WithStack(errs.NotFolder) return nil, errors.WithStack(errs.NotFolder)
} }
objs, err, _ := listG.Do(key, func() ([]model.Obj, error) { objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
files, err := storage.List(ctx, dir, args) files, err := storage.List(ctx, dir, args)
if err != nil { if err != nil {
@@ -162,10 +69,11 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
if !storage.Config().NoCache { if !storage.Config().NoCache {
if len(files) > 0 { if len(files) > 0 {
log.Debugf("set cache: %s => %+v", key, files) log.Debugf("set cache: %s => %+v", key, files)
listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration))) ttl := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
Cache.dirCache.SetWithTTL(key, newDirectoryCache(files), ttl)
} else { } else {
log.Debugf("del cache: %s", key) log.Debugf("del cache: %s", key)
listCache.Del(key) Cache.deleteDirectoryTree(key)
} }
} }
return files, nil return files, nil
@@ -252,100 +160,72 @@ func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.O
return model.UnwrapObj(obj), err return model.UnwrapObj(obj), err
} }
var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16)) var linkG = singleflight.Group[*objWithLink]{}
var linkG = singleflight.Group[*model.Link]{Remember: true}
var errLinkMFileCache = stderrors.New("ErrLinkMFileCache")
// Link get link, if is an url. should have an expiry time // Link get link, if is an url. should have an expiry time
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) { func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
} }
var (
file model.Obj typeKey := args.Type
err error var typeKeys []string
) switch storage.Config().LinkCacheType {
// use cache directly case 1:
dir, name := stdpath.Split(stdpath.Join(storage.GetStorage().MountPath, path)) if args.IP != "" {
if cacheFiles, ok := listCache.Get(strings.TrimSuffix(dir, "/")); ok { typeKey += "/" + args.IP
for _, f := range cacheFiles { typeKeys = []string{typeKey}
if f.GetName() == name { }
file = model.UnwrapObj(f) case 2:
break if ua := args.Header.Get("User-Agent"); ua != "" {
typeKey += "/" + ua
typeKeys = []string{typeKey}
} }
} }
} else {
if g, ok := storage.(driver.GetObjInfo); ok { key := Key(storage, path)
file, err = g.GetObjInfo(ctx, path) if ol, exists := Cache.linkCache.GetType(key, args.Type, typeKeys...); exists {
} else { if ol.link.Expiration != nil ||
file, err = GetUnwrap(ctx, storage, path) ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
} }
} }
if file == nil {
fn := func() (*objWithLink, error) {
file, err := GetUnwrap(ctx, storage, path)
if err != nil { if err != nil {
return nil, nil, errors.WithMessage(err, "failed to get file") return nil, errors.WithMessage(err, "failed to get file")
}
return nil, nil, errors.WithStack(errs.ObjectNotFound)
} }
if file.IsDir() { if file.IsDir() {
return nil, nil, errors.WithStack(errs.NotFile) return nil, errors.WithStack(errs.NotFile)
} }
key := stdpath.Join(Key(storage, path), args.Type)
if link, ok := linkCache.Get(key); ok {
return link, file, nil
}
var forget any
var linkM *model.Link
fn := func() (*model.Link, error) {
link, err := storage.Link(ctx, file, args) link, err := storage.Link(ctx, file, args)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed get link") return nil, errors.Wrapf(err, "failed get link")
} }
if link.MFile != nil && forget != nil { ol := &objWithLink{link: link, obj: file}
linkM = link
return nil, errLinkMFileCache
}
if link.Expiration != nil { if link.Expiration != nil {
linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration)) Cache.linkCache.SetTypeWithTTL(key, typeKey, ol, *link.Expiration)
} else {
Cache.linkCache.SetTypeWithExpirable(key, typeKey, ol, &link.SyncClosers)
} }
link.AddIfCloser(forget) return ol, nil
return link, nil
} }
retry := 0
if storage.Config().OnlyLinkMFile { for {
link, err := fn() ol, err, _ := linkG.Do(key+"/"+typeKey, fn)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
return link, file, err if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
if retry > 1 {
log.Warnf("Link retry successed after %d times: %s %s", retry, key, typeKey)
} }
return ol.link, ol.obj, nil
forget = utils.CloseFunc(func() error {
if forget != nil {
forget = nil
linkG.Forget(key)
} }
return nil retry++
})
link, err, _ := linkG.Do(key, fn)
for err == nil && !link.AcquireReference() {
link, err, _ = linkG.Do(key, fn)
} }
if err == errLinkMFileCache {
if linkM != nil {
return linkM, file, nil
}
forget = nil
link, err = fn()
}
if err != nil {
return nil, nil, err
}
return link, file, nil
} }
// Other api // Other api
@@ -365,7 +245,7 @@ func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) (
} }
} }
var mkdirG singleflight.Group[interface{}] var mkdirG singleflight.Group[any]
func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error { func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -373,7 +253,7 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
} }
path = utils.FixAndCleanPath(path) path = utils.FixAndCleanPath(path)
key := Key(storage, path) key := Key(storage, path)
_, err, _ := mkdirG.Do(key, func() (interface{}, error) { _, err, _ := mkdirG.Do(key, func() (any, error) {
// check if dir exists // check if dir exists
f, err := GetUnwrap(ctx, storage, path) f, err := GetUnwrap(ctx, storage, path)
if err != nil { if err != nil {
@@ -395,15 +275,19 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
newObj, err = s.MakeDir(ctx, parentDir, dirName) newObj, err = s.MakeDir(ctx, parentDir, dirName)
if err == nil { if err == nil {
if newObj != nil { if newObj != nil {
addCacheObj(storage, parentPath, model.WrapObjName(newObj)) if !storage.Config().NoCache {
if dirCache, exist := Cache.dirCache.Get(Key(storage, parentPath)); exist {
dirCache.UpdateObject("", newObj)
}
}
} else if !utils.IsBool(lazyCache...) { } else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, parentPath) Cache.DeleteDirectory(storage, parentPath)
} }
} }
case driver.Mkdir: case driver.Mkdir:
err = s.MakeDir(ctx, parentDir, dirName) err = s.MakeDir(ctx, parentDir, dirName)
if err == nil && !utils.IsBool(lazyCache...) { if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, parentPath) Cache.DeleteDirectory(storage, parentPath)
} }
default: default:
return nil, errs.NotImplement return nil, errs.NotImplement
@@ -427,7 +311,11 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
} }
srcPath = utils.FixAndCleanPath(srcPath) srcPath = utils.FixAndCleanPath(srcPath)
srcDirPath := stdpath.Dir(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath) dstDirPath = utils.FixAndCleanPath(dstDirPath)
if dstDirPath == srcDirPath {
return stderrors.New("move in place")
}
srcRawObj, err := Get(ctx, storage, srcPath) srcRawObj, err := Get(ctx, storage, srcPath)
if err != nil { if err != nil {
return errors.WithMessage(err, "failed to get src object") return errors.WithMessage(err, "failed to get src object")
@@ -437,26 +325,25 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
if err != nil { if err != nil {
return errors.WithMessage(err, "failed to get dst dir") return errors.WithMessage(err, "failed to get dst dir")
} }
srcDirPath := stdpath.Dir(srcPath)
switch s := storage.(type) { switch s := storage.(type) {
case driver.MoveResult: case driver.MoveResult:
var newObj model.Obj var newObj model.Obj
newObj, err = s.Move(ctx, srcObj, dstDir) newObj, err = s.Move(ctx, srcObj, dstDir)
if err == nil { if err == nil {
delCacheObj(storage, srcDirPath, srcRawObj) Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if newObj != nil { if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) { } else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath) Cache.DeleteDirectory(storage, dstDirPath)
} }
} }
case driver.Move: case driver.Move:
err = s.Move(ctx, srcObj, dstDir) err = s.Move(ctx, srcObj, dstDir)
if err == nil { if err == nil {
delCacheObj(storage, srcDirPath, srcRawObj) Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if !utils.IsBool(lazyCache...) { if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath) Cache.DeleteDirectory(storage, dstDirPath)
} }
} }
default: default:
@@ -475,28 +362,29 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string,
return errors.WithMessage(err, "failed to get src object") return errors.WithMessage(err, "failed to get src object")
} }
srcObj := model.UnwrapObj(srcRawObj) srcObj := model.UnwrapObj(srcRawObj)
srcDirPath := stdpath.Dir(srcPath)
switch s := storage.(type) { switch s := storage.(type) {
case driver.RenameResult: case driver.RenameResult:
var newObj model.Obj var newObj model.Obj
newObj, err = s.Rename(ctx, srcObj, dstName) newObj, err = s.Rename(ctx, srcObj, dstName)
if err == nil { if err == nil {
srcDirPath := stdpath.Dir(srcPath)
if newObj != nil { if newObj != nil {
updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj)) Cache.updateDirectoryObject(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) { } else {
DeleteCache(storage, srcDirPath) Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if srcRawObj.IsDir() { if !utils.IsBool(lazyCache...) {
ClearCache(storage, srcPath) Cache.DeleteDirectory(storage, srcDirPath)
} }
} }
} }
case driver.Rename: case driver.Rename:
err = s.Rename(ctx, srcObj, dstName) err = s.Rename(ctx, srcObj, dstName)
if err == nil && !utils.IsBool(lazyCache...) { if err == nil {
DeleteCache(storage, srcDirPath) srcDirPath := stdpath.Dir(srcPath)
if srcRawObj.IsDir() { Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
ClearCache(storage, srcPath) if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, srcDirPath)
} }
} }
default: default:
@@ -512,10 +400,14 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
} }
srcPath = utils.FixAndCleanPath(srcPath) srcPath = utils.FixAndCleanPath(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath) dstDirPath = utils.FixAndCleanPath(dstDirPath)
srcObj, err := GetUnwrap(ctx, storage, srcPath) if dstDirPath == stdpath.Dir(srcPath) {
return stderrors.New("copy in place")
}
srcRawObj, err := Get(ctx, storage, srcPath)
if err != nil { if err != nil {
return errors.WithMessage(err, "failed to get src object") return errors.WithMessage(err, "failed to get src object")
} }
srcObj := model.UnwrapObj(srcRawObj)
dstDir, err := GetUnwrap(ctx, storage, dstDirPath) dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
if err != nil { if err != nil {
return errors.WithMessage(err, "failed to get dst dir") return errors.WithMessage(err, "failed to get dst dir")
@@ -527,15 +419,17 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
newObj, err = s.Copy(ctx, srcObj, dstDir) newObj, err = s.Copy(ctx, srcObj, dstDir)
if err == nil { if err == nil {
if newObj != nil { if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) { } else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath) Cache.DeleteDirectory(storage, dstDirPath)
} }
} }
case driver.Copy: case driver.Copy:
err = s.Copy(ctx, srcObj, dstDir) err = s.Copy(ctx, srcObj, dstDir)
if err == nil && !utils.IsBool(lazyCache...) { if err == nil {
DeleteCache(storage, dstDirPath) if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
} }
default: default:
return errs.NotImplement return errs.NotImplement
@@ -566,11 +460,7 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
case driver.Remove: case driver.Remove:
err = s.Remove(ctx, model.UnwrapObj(rawObj)) err = s.Remove(ctx, model.UnwrapObj(rawObj))
if err == nil { if err == nil {
delCacheObj(storage, dirPath, rawObj) Cache.removeDirectoryObject(storage, dirPath, rawObj)
// clear folder cache recursively
if rawObj.IsDir() {
ClearCache(storage, path)
}
} }
default: default:
return errs.NotImplement return errs.NotImplement
@@ -640,16 +530,20 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
var newObj model.Obj var newObj model.Obj
newObj, err = s.Put(ctx, parentDir, file, up) newObj, err = s.Put(ctx, parentDir, file, up)
if err == nil { if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if newObj != nil { if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) { } else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath) Cache.DeleteDirectory(storage, dstDirPath)
} }
} }
case driver.Put: case driver.Put:
err = s.Put(ctx, parentDir, file, up) err = s.Put(ctx, parentDir, file, up)
if err == nil && !utils.IsBool(lazyCache...) { if err == nil {
DeleteCache(storage, dstDirPath) Cache.linkCache.DeleteKey(Key(storage, dstPath))
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
} }
default: default:
return errs.NotImplement return errs.NotImplement
@@ -664,13 +558,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
} }
} else { } else {
// upload success, remove old obj // upload success, remove old obj
err := Remove(ctx, storage, tempPath) err = Remove(ctx, storage, tempPath)
if err != nil {
return err
} else {
key := Key(storage, stdpath.Join(dstDirPath, file.GetName()))
linkCache.Del(key)
}
} }
} }
return errors.WithStack(err) return errors.WithStack(err)
@@ -681,7 +569,8 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
} }
dstDirPath = utils.FixAndCleanPath(dstDirPath) dstDirPath = utils.FixAndCleanPath(dstDirPath)
_, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName)) dstPath := stdpath.Join(dstDirPath, dstName)
_, err := GetUnwrap(ctx, storage, dstPath)
if err == nil { if err == nil {
return errors.New("obj already exists") return errors.New("obj already exists")
} }
@@ -698,16 +587,20 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
var newObj model.Obj var newObj model.Obj
newObj, err = s.PutURL(ctx, dstDir, dstName, url) newObj, err = s.PutURL(ctx, dstDir, dstName, url)
if err == nil { if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if newObj != nil { if newObj != nil {
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj)) Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) { } else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath) Cache.DeleteDirectory(storage, dstDirPath)
} }
} }
case driver.PutURL: case driver.PutURL:
err = s.PutURL(ctx, dstDir, dstName, url) err = s.PutURL(ctx, dstDir, dstName, url)
if err == nil && !utils.IsBool(lazyCache...) { if err == nil {
DeleteCache(storage, dstDirPath) Cache.linkCache.DeleteKey(Key(storage, dstPath))
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
} }
default: default:
return errs.NotImplement return errs.NotImplement

View File

@@ -5,26 +5,21 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/db" "github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
var settingCache = cache.NewMemCache(cache.WithShards[*model.SettingItem](4))
var settingG singleflight.Group[*model.SettingItem] var settingG singleflight.Group[*model.SettingItem]
var settingCacheF = func(item *model.SettingItem) { var settingCacheF = func(item *model.SettingItem) {
settingCache.Set(item.Key, item, cache.WithEx[*model.SettingItem](time.Hour)) Cache.SetSetting(item.Key, item)
} }
var settingGroupCache = cache.NewMemCache(cache.WithShards[[]model.SettingItem](4))
var settingGroupG singleflight.Group[[]model.SettingItem] var settingGroupG singleflight.Group[[]model.SettingItem]
var settingGroupCacheF = func(key string, item []model.SettingItem) { var settingGroupCacheF = func(key string, items []model.SettingItem) {
settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour)) Cache.SetSettingGroup(key, items)
} }
var settingChangingCallbacks = make([]func(), 0) var settingChangingCallbacks = make([]func(), 0)
@@ -34,8 +29,7 @@ func RegisterSettingChangingCallback(f func()) {
} }
func SettingCacheUpdate() { func SettingCacheUpdate() {
settingCache.Clear() Cache.ClearAll()
settingGroupCache.Clear()
for _, cb := range settingChangingCallbacks { for _, cb := range settingChangingCallbacks {
cb() cb()
} }
@@ -60,7 +54,7 @@ func GetSettingsMap() map[string]string {
} }
func GetSettingItems() ([]model.SettingItem, error) { func GetSettingItems() ([]model.SettingItem, error) {
if items, ok := settingGroupCache.Get("ALL_SETTING_ITEMS"); ok { if items, exists := Cache.GetSettingGroup("ALL_SETTING_ITEMS"); exists {
return items, nil return items, nil
} }
items, err, _ := settingGroupG.Do("ALL_SETTING_ITEMS", func() ([]model.SettingItem, error) { items, err, _ := settingGroupG.Do("ALL_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -75,7 +69,7 @@ func GetSettingItems() ([]model.SettingItem, error) {
} }
func GetPublicSettingItems() ([]model.SettingItem, error) { func GetPublicSettingItems() ([]model.SettingItem, error) {
if items, ok := settingGroupCache.Get("ALL_PUBLIC_SETTING_ITEMS"); ok { if items, exists := Cache.GetSettingGroup("ALL_PUBLIC_SETTING_ITEMS"); exists {
return items, nil return items, nil
} }
items, err, _ := settingGroupG.Do("ALL_PUBLIC_SETTING_ITEMS", func() ([]model.SettingItem, error) { items, err, _ := settingGroupG.Do("ALL_PUBLIC_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -90,7 +84,7 @@ func GetPublicSettingItems() ([]model.SettingItem, error) {
} }
func GetSettingItemByKey(key string) (*model.SettingItem, error) { func GetSettingItemByKey(key string) (*model.SettingItem, error) {
if item, ok := settingCache.Get(key); ok { if item, exists := Cache.GetSetting(key); exists {
return item, nil return item, nil
} }
@@ -118,8 +112,8 @@ func GetSettingItemInKeys(keys []string) ([]model.SettingItem, error) {
} }
func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) { func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {
key := strconv.Itoa(group) key := fmt.Sprintf("GROUP_%d", group)
if items, ok := settingGroupCache.Get(key); ok { if items, exists := Cache.GetSettingGroup(key); exists {
return items, nil return items, nil
} }
items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) { items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {
@@ -135,11 +129,14 @@ func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {
func GetSettingItemsInGroups(groups []int) ([]model.SettingItem, error) { func GetSettingItemsInGroups(groups []int) ([]model.SettingItem, error) {
sort.Ints(groups) sort.Ints(groups)
key := strings.Join(utils.MustSliceConvert(groups, func(i int) string {
return strconv.Itoa(i)
}), ",")
if items, ok := settingGroupCache.Get(key); ok { keyParts := make([]string, 0, len(groups))
for _, g := range groups {
keyParts = append(keyParts, strconv.Itoa(g))
}
key := "GROUPS_" + strings.Join(keyParts, "_")
if items, exists := Cache.GetSettingGroup(key); exists {
return items, nil return items, nil
} }
items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) { items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {

View File

@@ -15,6 +15,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync" "github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors" "github.com/pkg/errors"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
@@ -239,6 +240,8 @@ func UpdateStorage(ctx context.Context, storage model.Storage) error {
if oldStorage.MountPath != storage.MountPath { if oldStorage.MountPath != storage.MountPath {
// mount path renamed, need to drop the storage // mount path renamed, need to drop the storage
storagesMap.Delete(oldStorage.MountPath) storagesMap.Delete(oldStorage.MountPath)
Cache.DeleteDirectoryTree(storageDriver, "/")
Cache.InvalidateStorageDetails(storageDriver)
} }
if err != nil { if err != nil {
return errors.WithMessage(err, "failed get storage driver") return errors.WithMessage(err, "failed get storage driver")
@@ -259,6 +262,7 @@ func DeleteStorageById(ctx context.Context, id uint) error {
if err != nil { if err != nil {
return errors.WithMessage(err, "failed get storage") return errors.WithMessage(err, "failed get storage")
} }
var dropErr error = nil
if !storage.Disabled { if !storage.Disabled {
storageDriver, err := GetStorageByMountPath(storage.MountPath) storageDriver, err := GetStorageByMountPath(storage.MountPath)
if err != nil { if err != nil {
@@ -266,17 +270,19 @@ func DeleteStorageById(ctx context.Context, id uint) error {
} }
// drop the storage in the driver // drop the storage in the driver
if err := storageDriver.Drop(ctx); err != nil { if err := storageDriver.Drop(ctx); err != nil {
return errors.Wrapf(err, "failed drop storage") dropErr = errors.Wrapf(err, "failed drop storage")
} }
// delete the storage in the memory // delete the storage in the memory
storagesMap.Delete(storage.MountPath) storagesMap.Delete(storage.MountPath)
Cache.DeleteDirectoryTree(storageDriver, "/")
Cache.InvalidateStorageDetails(storageDriver)
go callStorageHooks("del", storageDriver) go callStorageHooks("del", storageDriver)
} }
// delete the storage in the database // delete the storage in the database
if err := db.DeleteStorageById(id); err != nil { if err := db.DeleteStorageById(id); err != nil {
return errors.WithMessage(err, "failed delete storage in database") return errors.WithMessage(err, "failed delete storage in database")
} }
return nil return dropErr
} }
// MustSaveDriverStorage call from specific driver // MustSaveDriverStorage call from specific driver
@@ -340,8 +346,8 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
}) })
} }
func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj { func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails, refresh bool) []model.Obj {
if utils.IsBool(hideDetails...) { if hideDetails {
return GetStorageVirtualFilesByPath(prefix) return GetStorageVirtualFilesByPath(prefix)
} }
return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj { return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
@@ -354,7 +360,7 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
} }
timeoutCtx, cancel := context.WithTimeout(ctx, time.Second) timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel() defer cancel()
details, err := GetStorageDetails(timeoutCtx, d) details, err := GetStorageDetails(timeoutCtx, d, refresh)
if err != nil { if err != nil {
if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) { if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err) log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
@@ -439,7 +445,9 @@ func GetBalancedStorage(path string) driver.Driver {
} }
} }
func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) { var detailsG singleflight.Group[*model.StorageDetails]
func GetStorageDetails(ctx context.Context, storage driver.Driver, refresh ...bool) (*model.StorageDetails, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
} }
@@ -447,5 +455,18 @@ func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.Stora
if !ok { if !ok {
return nil, errs.NotImplement return nil, errs.NotImplement
} }
return wd.GetDetails(ctx) if !utils.IsBool(refresh...) {
if ret, ok := Cache.GetStorageDetails(storage); ok {
return ret, nil
}
}
details, err, _ := detailsG.Do(storage.GetStorage().MountPath, func() (*model.StorageDetails, error) {
ret, err := wd.GetDetails(ctx)
if err != nil {
return nil, err
}
Cache.SetStorageDetails(storage, ret)
return ret, nil
})
return details, err
} }

View File

@@ -1,17 +1,13 @@
package op package op
import ( import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/db" "github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
) )
var userCache = cache.NewMemCache(cache.WithShards[*model.User](2))
var userG singleflight.Group[*model.User] var userG singleflight.Group[*model.User]
var guestUser *model.User var guestUser *model.User
var adminUser *model.User var adminUser *model.User
@@ -46,7 +42,7 @@ func GetUserByName(username string) (*model.User, error) {
if username == "" { if username == "" {
return nil, errs.EmptyUsername return nil, errs.EmptyUsername
} }
if user, ok := userCache.Get(username); ok { if user, exists := Cache.GetUser(username); exists {
return user, nil return user, nil
} }
user, err, _ := userG.Do(username, func() (*model.User, error) { user, err, _ := userG.Do(username, func() (*model.User, error) {
@@ -54,7 +50,7 @@ func GetUserByName(username string) (*model.User, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
userCache.Set(username, _user, cache.WithEx[*model.User](time.Hour)) Cache.SetUser(username, _user)
return _user, nil return _user, nil
}) })
return user, err return user, err
@@ -81,7 +77,7 @@ func DeleteUserById(id uint) error {
if old.IsAdmin() || old.IsGuest() { if old.IsAdmin() || old.IsGuest() {
return errs.DeleteAdminOrGuest return errs.DeleteAdminOrGuest
} }
userCache.Del(old.Username) Cache.DeleteUser(old.Username)
return db.DeleteUserById(id) return db.DeleteUserById(id)
} }
@@ -96,7 +92,7 @@ func UpdateUser(u *model.User) error {
if u.IsGuest() { if u.IsGuest() {
guestUser = nil guestUser = nil
} }
userCache.Del(old.Username) Cache.DeleteUser(old.Username)
u.BasePath = utils.FixAndCleanPath(u.BasePath) u.BasePath = utils.FixAndCleanPath(u.BasePath)
return db.UpdateUser(u) return db.UpdateUser(u)
} }
@@ -125,6 +121,6 @@ func DelUserCache(username string) error {
if user.IsGuest() { if user.IsGuest() {
guestUser = nil guestUser = nil
} }
userCache.Del(username) Cache.DeleteUser(username)
return nil return nil
} }

View File

@@ -11,6 +11,7 @@ import (
"os" "os"
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/net" "github.com/OpenListTeam/OpenList/v4/internal/net"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -27,9 +28,6 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran
} }
func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) { func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
if link.MFile != nil {
return GetRangeReaderFromMFile(size, link.MFile), nil
}
if link.Concurrency > 0 || link.PartSize > 0 { if link.Concurrency > 0 || link.PartSize > 0 {
down := net.NewDownloader(func(d *net.Downloader) { down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency d.Concurrency = link.Concurrency
@@ -66,7 +64,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
} }
if len(link.URL) == 0 { if len(link.URL) == 0 {
return nil, errors.New("invalid link: must have at least one of MFile, URL, or RangeReader") return nil, errors.New("invalid link: must have at least one of URL or RangeReader")
} }
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size { if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
@@ -78,7 +76,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
response, err := net.RequestHttp(ctx, "GET", header, link.URL) response, err := net.RequestHttp(ctx, "GET", header, link.URL)
if err != nil { if err != nil {
if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { if _, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
return nil, err return nil, err
} }
return nil, fmt.Errorf("http request failure, err:%w", err) return nil, fmt.Errorf("http request failure, err:%w", err)

View File

@@ -24,16 +24,15 @@ func RefreshAndRemove(dstPath string, payloads ...any) {
return return
} }
_, dstNeedRefresh := dstStorage.(driver.Put) _, dstNeedRefresh := dstStorage.(driver.Put)
dstNeedRefresh = dstNeedRefresh && !dstStorage.Config().NoCache
if dstNeedRefresh { if dstNeedRefresh {
op.DeleteCache(dstStorage, dstActualPath) op.Cache.DeleteDirectory(dstStorage, dstActualPath)
} }
var ctx context.Context var ctx context.Context
for _, payload := range payloads { for _, payload := range payloads {
switch p := payload.(type) { switch p := payload.(type) {
case DstPathToRefresh: case DstPathToRefresh:
if dstNeedRefresh { if dstNeedRefresh {
op.DeleteCache(dstStorage, string(p)) op.Cache.DeleteDirectory(dstStorage, string(p))
} }
case SrcPathToRemove: case SrcPathToRemove:
if ctx == nil { if ctx == nil {
@@ -79,7 +78,7 @@ func verifyAndRemove(ctx context.Context, srcStorage, dstStorage driver.Driver,
} }
if refresh { if refresh {
op.DeleteCache(dstStorage, dstObjPath) op.Cache.DeleteDirectory(dstStorage, dstObjPath)
} }
hasErr := false hasErr := false
for _, obj := range srcObjs { for _, obj := range srcObjs {

View File

@@ -1,8 +1,8 @@
package gowebdav package gowebdav
import ( import (
"fmt"
"os" "os"
"strconv"
) )
// StatusError implements error and wraps // StatusError implements error and wraps
@@ -12,7 +12,7 @@ type StatusError struct {
} }
func (se StatusError) Error() string { func (se StatusError) Error() string {
return fmt.Sprintf("%d", se.Status) return strconv.Itoa(se.Status)
} }
// IsErrCode returns true if the given error // IsErrCode returns true if the given error

View File

@@ -73,9 +73,6 @@ type call[T any] struct {
type Group[T any] struct { type Group[T any] struct {
mu sync.Mutex // protects m mu sync.Mutex // protects m
m map[string]*call[T] // lazily initialized m map[string]*call[T] // lazily initialized
// Won't remember error
Remember bool
} }
// Result holds the results of Do, so they can be passed // Result holds the results of Do, so they can be passed
@@ -159,7 +156,7 @@ func (g *Group[T]) doCall(c *call[T], key string, fn func() (T, error)) {
g.mu.Lock() g.mu.Lock()
defer g.mu.Unlock() defer g.mu.Unlock()
c.wg.Done() c.wg.Done()
if (!g.Remember || c.err != nil) && g.m[key] == c { if g.m[key] == c {
delete(g.m, key) delete(g.m, key)
} }

View File

@@ -187,51 +187,38 @@ func NewClosers(c ...io.Closer) Closers {
return Closers(c) return Closers(c)
} }
type SyncClosersIF interface {
ClosersIF
AcquireReference() bool
}
type SyncClosers struct { type SyncClosers struct {
closers []io.Closer closers []io.Closer
ref int32 ref int32
} }
var _ SyncClosersIF = (*SyncClosers)(nil) // if closed, return false
func (c *SyncClosers) AcquireReference() bool { func (c *SyncClosers) AcquireReference() bool {
ref := atomic.AddInt32(&c.ref, 1)
if ref > 0 {
// log.Debugf("AcquireReference %p: %d", c, ref)
return true
}
atomic.StoreInt32(&c.ref, closersClosed)
return false
}
const closersClosed = math.MinInt32
func (c *SyncClosers) Close() error {
for { for {
ref := atomic.LoadInt32(&c.ref) ref := atomic.LoadInt32(&c.ref)
if ref < 0 { if ref < 0 {
return false return nil
} }
newRef := ref + 1 if ref > 1 {
if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) { if atomic.CompareAndSwapInt32(&c.ref, ref, ref-1) {
// log.Debugf("AcquireReference %p: %d", c, newRef)
return true
}
}
}
const closersClosed = math.MinInt16
func (c *SyncClosers) Close() error {
ref := atomic.AddInt32(&c.ref, -1)
if ref > 0 {
// log.Debugf("ReleaseReference %p: %d", c, ref) // log.Debugf("ReleaseReference %p: %d", c, ref)
return nil return nil
} }
} else if atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) {
if ref < -1 { break
atomic.StoreInt32(&c.ref, closersClosed)
return nil
} }
// Attempt to acquire FinalClose permission.
// At this point, ref must be 0 or -1. We try to atomically change it to the closersClosed state.
// Only the first successful goroutine gets the cleanup permission.
if !atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) {
return nil
} }
// log.Debugf("FinalClose %p", c) // log.Debugf("FinalClose %p", c)
@@ -264,6 +251,16 @@ func (c *SyncClosers) AddIfCloser(a any) {
} }
} }
var _ ClosersIF = (*SyncClosers)(nil)
// 实现cache.Expirable接口
func (c *SyncClosers) Expired() bool {
return atomic.LoadInt32(&c.ref) < 0
}
func (c *SyncClosers) Length() int {
return len(c.closers)
}
func NewSyncClosers(c ...io.Closer) SyncClosers { func NewSyncClosers(c ...io.Closer) SyncClosers {
return SyncClosers{closers: c} return SyncClosers{closers: c}
} }

View File

@@ -18,11 +18,11 @@ import (
) )
func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error { func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
if link.MFile != nil { // if link.MFile != nil {
attachHeader(w, file, link) // attachHeader(w, file, link)
http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile) // http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
return nil // return nil
} // }
if link.Concurrency > 0 || link.PartSize > 0 { if link.Concurrency > 0 || link.PartSize > 0 {
attachHeader(w, file, link) attachHeader(w, file, link)
@@ -101,7 +101,7 @@ func GetEtag(file model.Obj, size int64) string {
} }
func ProxyRange(ctx context.Context, link *model.Link, size int64) *model.Link { func ProxyRange(ctx context.Context, link *model.Link, size int64) *model.Link {
if link.MFile == nil && link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") { if link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") {
if link.ContentLength > 0 { if link.ContentLength > 0 {
size = link.ContentLength size = link.ContentLength
} }

View File

@@ -9,6 +9,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs" "github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/net" "github.com/OpenListTeam/OpenList/v4/internal/net"
@@ -147,7 +148,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
if Writer.IsWritten() { if Writer.IsWritten() {
log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err) log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
} else { } else {
if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { if statusCode, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
common.ErrorPage(c, err, int(statusCode), true) common.ErrorPage(c, err, int(statusCode), true)
} else { } else {
common.ErrorPage(c, err, 500, true) common.ErrorPage(c, err, 500, true)

View File

@@ -386,7 +386,7 @@ func Link(c *gin.Context) {
common.ErrorResp(c, err, 500) common.ErrorResp(c, err, 500)
return return
} }
if storage.Config().NoLinkURL || storage.Config().OnlyLinkMFile { if storage.Config().NoLinkURL {
common.SuccessResp(c, model.Link{ common.SuccessResp(c, model.Link{
URL: fmt.Sprintf("%s/p%s?d&sign=%s", URL: fmt.Sprintf("%s/p%s?d&sign=%s",
common.GetApiUrl(c), common.GetApiUrl(c),

View File

@@ -272,7 +272,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
} }
err = common.Proxy(w, r, link, fi) err = common.Proxy(w, r, link, fi)
if err != nil { if err != nil {
if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { if statusCode, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
return int(statusCode), err return int(statusCode), err
} }
return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err) return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)