mirror of
https://github.com/OpenListTeam/OpenList.git
synced 2025-11-25 19:37:41 +08:00
* feat(cache): improve cache management * feat(disk-usage): add cache * feat(disk-usage): add refresh * fix(disk-usage): cache with ttl * feat(cache): implement KeyedCache and TypedCache for improved caching mechanism * fix(copy): update object retrieval to use Get instead of GetUnwrap * refactor(cache): simplify DirectoryCache structure and improve object management * fix(cache): correct cache entry initialization and key deletion logic in TypedCache * refactor(driver): remove GetObjInfo interface and simplify Link function logic https://github.com/OpenListTeam/OpenList/pull/888/files#r2430925783 * fix(link): optimize link retrieval and caching logic * refactor(cache): consolidate cache management and improve directory cache handling * fix(cache): add cache control based on storage configuration in List function * . * refactor: replace fmt.Sprintf with strconv for integer conversions * refactor(cache): enhance cache entry management with Expirable interface * fix(cache): improve link reference acquisition logic to handle expiration * refactor: replace OnlyLinkMFile with NoLinkSF in driver configurations and logic * refactor(link): enhance link caching logic with dynamic type keys based on IP and User-Agent * feat(drivers): add LinkCacheType to driver configurations for enhanced caching * refactor(cache): streamline directory object management in cache operations * refactor(cache): remove unnecessary 'dirty' field from CacheEntry structure * refactor(cache): replace 'dirty' field with bitwise flags * refactor(io): 调高SyncClosers.AcquireReference的优先级 * refactor(link): 优化链接获取逻辑,增加重 * refactor(link): 添加RequireReference字段以增强链接管理 * refactor(link): 移除MFile字段,改用RangeReader * refactor: 移除不必要的NoLinkSF字段 * refactor(cache): 修改目录缓存的脏标志定义和更新逻辑 * feat(cache): add expiration gc --------- Co-authored-by: KirCute <951206789@qq.com> Co-authored-by: KirCute <kircute@foxmail.com> Co-authored-by: j2rong4cn <j2rong@qq.com>
216 lines
9.0 KiB
Go
216 lines
9.0 KiB
Go
package driver
|
|
|
|
import (
|
|
"context"
|
|
|
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
|
)
|
|
|
|
// Driver is the base contract every storage backend must implement:
// Meta (configuration and lifecycle) plus Reader (listing and link
// resolution). Write-related capabilities are opt-in via the separate
// interfaces declared below (Mkdir, Move, Rename, Copy, Remove, Put, ...).
type Driver interface {
	Meta
	Reader
	// Writer
	// Other
}
|
|
|
|
// Meta exposes a driver's static configuration, its raw storage record,
// its JSON-configurable additions, and its init/teardown lifecycle hooks.
type Meta interface {
	// Config returns the driver's static configuration.
	Config() Config
	// GetStorage just get raw storage, no need to implement, because model.Storage have implemented
	GetStorage() *model.Storage
	// SetStorage replaces the raw storage record backing this driver.
	SetStorage(model.Storage)
	// GetAddition Additional is used for unmarshal of JSON, so need return pointer
	GetAddition() Additional
	// Init If already initialized, drop first
	Init(ctx context.Context) error
	// Drop tears the driver down and releases whatever Init acquired.
	Drop(ctx context.Context) error
}
|
|
|
|
type Other interface {
|
|
Other(ctx context.Context, args model.OtherArgs) (interface{}, error)
|
|
}
|
|
|
|
// Reader provides read-only access to a storage: directory listing and
// resolution of a file into a downloadable/streamable link.
type Reader interface {
	// List files in the path
	// if identify files by path, need to set ID with path,like path.Join(dir.GetID(), obj.GetName())
	// if identify files by id, need to set ID with corresponding id
	List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error)
	// Link get url/filepath/reader of file
	Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error)
}
|
|
|
|
// GetRooter is implemented by drivers that resolve their root object
// dynamically at runtime rather than from a static root path/ID.
type GetRooter interface {
	// GetRoot returns the storage's root object.
	GetRoot(ctx context.Context) (model.Obj, error)
}
|
|
|
|
// Getter is implemented by drivers that can resolve a single object by path
// directly, without listing its parent directory.
type Getter interface {
	// Get file by path, the path haven't been joined with root path
	Get(ctx context.Context, path string) (model.Obj, error)
}
|
|
|
|
//type Writer interface {
|
|
// Mkdir
|
|
// Move
|
|
// Rename
|
|
// Copy
|
|
// Remove
|
|
// Put
|
|
//}
|
|
|
|
// Mkdir is implemented by drivers that can create directories.
type Mkdir interface {
	// MakeDir creates a directory named dirName inside parentDir.
	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error
}
|
|
|
|
// Move is implemented by drivers that can move an object to another directory.
type Move interface {
	// Move relocates srcObj into dstDir.
	Move(ctx context.Context, srcObj, dstDir model.Obj) error
}
|
|
|
|
// Rename is implemented by drivers that can rename an object in place.
type Rename interface {
	// Rename changes srcObj's name to newName without moving it.
	Rename(ctx context.Context, srcObj model.Obj, newName string) error
}
|
|
|
|
// Copy is implemented by drivers that can copy an object into another directory.
type Copy interface {
	// Copy duplicates srcObj into dstDir.
	Copy(ctx context.Context, srcObj, dstDir model.Obj) error
}
|
|
|
|
// Remove is implemented by drivers that can delete an object.
type Remove interface {
	// Remove deletes obj from the storage.
	Remove(ctx context.Context, obj model.Obj) error
}
|
|
|
|
// Put is implemented by drivers that can upload a file stream. Implementations
// must honor cancellation, progress reporting, and server-side rate limiting
// as detailed in the method comment below.
type Put interface {
	// Put a file (provided as a FileStreamer) into the driver
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
	// (1) Use request methods that carry context, such as the following:
	// a. http.NewRequestWithContext
	// b. resty.Request.SetContext
	// c. s3manager.Uploader.UploadWithContext
	// d. utils.CopyWithCtx
	// (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	// (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
	// this is typically applicable to chunked uploads.
	// 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
	// (1) Use `utils.CopyWithCtx`
	// (2) Use `driver.ReaderUpdatingProgress`
	// (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
	// in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	// before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
	// if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use a `errgroup.Group` to upload each chunk in parallel, you should use `Group.SetLimit` to
	// limit the maximum number of upload threads, preventing excessive memory usage caused by buffering
	// too many file chunks awaiting upload.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error
}
|
|
|
|
// PutURL is implemented by drivers that can store a bare URL as an entry.
type PutURL interface {
	// PutURL directly put a URL into the storage
	// Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs
	// Called when using SimpleHttp for offline downloading, skipping creating a download task
	PutURL(ctx context.Context, dstDir model.Obj, name, url string) error
}
|
|
|
|
//type WriteResult interface {
|
|
// MkdirResult
|
|
// MoveResult
|
|
// RenameResult
|
|
// CopyResult
|
|
// PutResult
|
|
// Remove
|
|
//}
|
|
|
|
// MkdirResult is the variant of Mkdir whose MakeDir additionally returns the
// newly created directory object.
type MkdirResult interface {
	// MakeDir creates dirName inside parentDir and returns the created object.
	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error)
}
|
|
|
|
// MoveResult is the variant of Move whose Move additionally returns the
// object at its new location.
type MoveResult interface {
	// Move relocates srcObj into dstDir and returns the moved object.
	Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
}
|
|
|
|
// RenameResult is the variant of Rename whose Rename additionally returns the
// renamed object.
type RenameResult interface {
	// Rename changes srcObj's name to newName and returns the renamed object.
	Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error)
}
|
|
|
|
// CopyResult is the variant of Copy whose Copy additionally returns the
// newly created copy.
type CopyResult interface {
	// Copy duplicates srcObj into dstDir and returns the new object.
	Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
}
|
|
|
|
// PutResult is the variant of Put whose Put additionally returns the uploaded
// object. The same cancellation, progress, and rate-limit requirements apply.
type PutResult interface {
	// Put a file (provided as a FileStreamer) into the driver and return the put obj
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
	// (1) Use request methods that carry context, such as the following:
	// a. http.NewRequestWithContext
	// b. resty.Request.SetContext
	// c. s3manager.Uploader.UploadWithContext
	// d. utils.CopyWithCtx
	// (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	// (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
	// this is typically applicable to chunked uploads.
	// 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
	// (1) Use `utils.CopyWithCtx`
	// (2) Use `driver.ReaderUpdatingProgress`
	// (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
	// in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	// before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
	// if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use a `errgroup.Group` to upload each chunk in parallel, you should use `Group.SetLimit` to
	// limit the maximum number of upload threads, preventing excessive memory usage caused by buffering
	// too many file chunks awaiting upload.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error)
}
|
|
|
|
// PutURLResult is the variant of PutURL whose PutURL additionally returns the
// created object.
type PutURLResult interface {
	// PutURL directly put a URL into the storage
	// Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs
	// Called when using SimpleHttp for offline downloading, skipping creating a download task
	PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error)
}
|
|
|
|
// ArchiveReader is implemented by drivers that can inspect and extract
// archives server-side. Each method may return errs.NotImplement to fall back
// to the internal archive tools.
type ArchiveReader interface {
	// GetArchiveMeta get the meta-info of an archive
	// return errs.WrongArchivePassword if the meta-info is also encrypted but provided password is wrong or empty
	// return errs.NotImplement to use internal archive tools to get the meta-info, such as the following cases:
	// 1. the driver do not support the format of the archive but there may be an internal tool do
	// 2. handling archives is a VIP feature, but the driver does not have VIP access
	GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error)
	// ListArchive list the children of model.ArchiveArgs.InnerPath in the archive
	// return errs.NotImplement to use internal archive tools to list the children
	// return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree
	ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error)
	// Extract get url/filepath/reader of a file in the archive
	// return errs.NotImplement to use internal archive tools to extract
	Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error)
}
|
|
|
|
// ArchiveGetter is implemented by drivers that can resolve a single entry
// inside an archive by its inner path.
type ArchiveGetter interface {
	// ArchiveGet get file by inner path
	// return errs.NotImplement to use internal archive tools to get the children
	// return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree
	ArchiveGet(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (model.Obj, error)
}
|
|
|
|
// ArchiveDecompress is implemented by drivers that can decompress an archive
// server-side into a destination directory.
type ArchiveDecompress interface {
	// ArchiveDecompress extracts srcObj (an archive) into dstDir per args.
	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error
}
|
|
|
|
// ArchiveDecompressResult is the variant of ArchiveDecompress whose
// ArchiveDecompress additionally returns the decompressed objects.
type ArchiveDecompressResult interface {
	// ArchiveDecompress decompress an archive
	// when args.PutIntoNewDir, the new sub-folder should be named the same to the archive but without the extension
	// return each decompressed obj from the root path of the archive when args.PutIntoNewDir is false
	// return only the newly created folder when args.PutIntoNewDir is true
	// return errs.NotImplement to use internal archive tools to decompress
	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
}
|
|
|
|
// WithDetails is implemented by drivers that can report storage details.
type WithDetails interface {
	// GetDetails get storage details (total space, free space, etc.)
	GetDetails(ctx context.Context) (*model.StorageDetails, error)
}
|
|
|
|
// Reference is implemented by drivers that delegate to another Driver.
type Reference interface {
	// InitReference receives the driver this one references.
	// NOTE(review): exact wiring semantics are not visible in this file —
	// confirm against callers before relying on this description.
	InitReference(storage Driver) error
}
|