Compare commits


2 Commits

Author    SHA1         Message                                               Date
ShenLin   749fc07cb2   fix(typo): grammar                                    2025-10-14 09:38:22 +08:00
          Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
          Signed-off-by: ShenLin <773933146@qq.com>
jyxjjj    18173878c4   feat(preview): support Browser built-in PDF Preview   2025-10-14 09:38:22 +08:00
99 changed files with 573 additions and 3290 deletions

View File

@@ -64,9 +64,8 @@ Thank you for your support and understanding of the OpenList project.
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

View File

@@ -64,9 +64,8 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [分秒帧](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [百度网盘](http://pan.baidu.com)

View File

@@ -65,7 +65,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国際](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com)(個人、家族、グループ)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

View File

@@ -66,7 +66,6 @@ Dank u voor uw ondersteuning en begrip
- [x] Teambition([China](https://www.teambition.com), [Internationaal](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Persoonlijk, Familie, Groep)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

View File

@@ -2,7 +2,6 @@ package flags
var (
DataDir string
ConfigPath string
Debug bool
NoPrefix bool
Dev bool

View File

@@ -27,8 +27,7 @@ func Execute() {
}
func init() {
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
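The --config help text above means an empty value falls back to config.json inside the --data directory, with relative paths resolved against the working directory. A minimal sketch of that rule (illustrative only; resolveConfigPath is not a function in the OpenList code base):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// resolveConfigPath sketches the rule stated in the --config help text:
// an empty value defaults to <data dir>/config.json, and relative paths
// are resolved against the current working directory.
func resolveConfigPath(dataDir, configPath string) (string, error) {
	if configPath == "" {
		configPath = filepath.Join(dataDir, "config.json")
	}
	return filepath.Abs(configPath) // already-absolute paths pass through unchanged
}

func main() {
	p, _ := resolveConfigPath("data", "")
	fmt.Println(p) // e.g. <current working directory>/data/config.json
}
```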

View File

@@ -27,8 +27,6 @@ import (
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"github.com/quic-go/quic-go/http3"
)
// ServerCmd represents the server command
@@ -65,7 +63,6 @@ the address is defined in config file`,
httpHandler = h2c.NewHandler(r, &http2.Server{})
}
var httpSrv, httpsSrv, unixSrv *http.Server
var quicSrv *http3.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
@@ -89,24 +86,6 @@ the address is defined in config file`,
utils.Log.Fatalf("failed to start https: %s", err.Error())
}
}()
if conf.Conf.Scheme.EnableH3 {
fmt.Printf("start HTTP3 (quic) server @ %s\n", httpsBase)
utils.Log.Infof("start HTTP3 (quic) server @ %s", httpsBase)
r.Use(func(c *gin.Context) {
if c.Request.TLS != nil {
port := conf.Conf.Scheme.HttpsPort
c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", port))
}
c.Next()
})
quicSrv = &http3.Server{Addr: httpsBase, Handler: r}
go func() {
err := quicSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
utils.Log.Fatalf("failed to start http3 (quic): %s", err.Error())
}
}()
}
}
if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
@@ -224,15 +203,6 @@ the address is defined in config file`,
utils.Log.Fatal("HTTPS server shutdown err: ", err)
}
}()
if conf.Conf.Scheme.EnableH3 {
wg.Add(1)
go func() {
defer wg.Done()
if err := quicSrv.Shutdown(ctx); err != nil {
utils.Log.Fatal("HTTP3 (quic) server shutdown err: ", err)
}
}()
}
}
if conf.Conf.Scheme.UnixFile != "" {
wg.Add(1)

View File

@@ -15,9 +15,10 @@ type Addition struct {
}
var config = driver.Config{
Name: "115 Cloud",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
Name: "115 Cloud",
DefaultRoot: "0",
// OnlyProxy: true,
// NoOverwriteUpload: true,
}
func init() {

View File

@@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err

View File

@@ -17,9 +17,8 @@ type Addition struct {
}
var config = driver.Config{
Name: "115 Open",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
Name: "115 Open",
DefaultRoot: "0",
}
func init() {

View File

@@ -130,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
if utils.PathEqual(path, "/") && !d.autoFlatten {
return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil
return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough), nil
}
root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root]
@@ -211,6 +211,9 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
@@ -524,25 +527,4 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
}
}
func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root]
if !ok {
return 0
}
for _, dst := range dsts {
storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
if err != nil {
continue
}
mode := storage.Config().LinkCacheMode
if mode == -1 {
return storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(actualPath)
} else {
return mode
}
}
return 0
}
var _ driver.Driver = (*Alias)(nil)

View File

@@ -26,7 +26,6 @@ var config = driver.Config{
NoUpload: false,
DefaultRoot: "/",
ProxyRangeOption: true,
LinkCacheMode: driver.LinkCacheAuto,
}
func init() {

View File

@@ -17,7 +17,7 @@ import (
log "github.com/sirupsen/logrus"
)
func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj {
var objs []model.Obj
var wg sync.WaitGroup
for _, k := range d.rootOrder {
@@ -52,7 +52,7 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model
defer wg.Done()
c, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
details, e := op.GetStorageDetails(c, remoteDriver, refresh)
details, e := op.GetStorageDetails(c, remoteDriver)
if e != nil {
if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)

View File

@@ -299,7 +299,10 @@ func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: total - used,
},
}, nil
}
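Both versions in the hunk above express the same result, so DiskUsageFromUsedAndTotal presumably just derives FreeSpace from the used/total pair. A sketch under that assumption (not the project's actual implementation; in OpenList the DiskUsage type lives in the model package and the helper in the driver package):

```go
package main

import "fmt"

type DiskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

// DiskUsageFromUsedAndTotal mirrors the explicit struct literal shown in the hunk.
func DiskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
	if used > total { // guard against an API reporting used > total
		used = total
	}
	return DiskUsage{TotalSpace: total, FreeSpace: total - used}
}

func main() {
	fmt.Println(DiskUsageFromUsedAndTotal(40, 100)) // {100 60}
}
```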

View File

@@ -36,7 +36,6 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud_open"
_ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
_ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
_ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
@@ -56,7 +55,6 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/proton_drive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc_tv"

View File

@@ -18,9 +18,8 @@ type Addition struct {
}
var config = driver.Config{
Name: "BaiduPhoto",
LocalSort: true,
LinkCacheMode: driver.LinkCacheUA,
Name: "BaiduPhoto",
LocalSort: true,
}
func init() {

View File

@@ -10,7 +10,6 @@ import (
"mime/multipart"
"net/http"
"net/url"
"strconv"
"strings"
"time"
@@ -240,7 +239,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
if err != nil {
return err
}
err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid))
err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
if err != nil {
fmt.Println("Error writing param2 to request body:", err)
return err
@@ -261,7 +260,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
return err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
resps, err := http.DefaultClient.Do(req)
if err != nil {
return err
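Either form in the hunk above yields the same header value for integer inputs; strconv.Itoa is simply the more direct conversion than fmt.Sprintf("%d", ...). A minimal check:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	n := 1048576
	// Both conversions produce the same "Content-Length"-style string.
	fmt.Println(strconv.Itoa(n) == fmt.Sprintf("%d", n)) // true
}
```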

View File

@@ -258,7 +258,7 @@ type UploadDoneParam struct {
func fileToObj(f File) *model.Object {
if len(f.Content.FolderName) > 0 {
return &model.Object{
ID: strconv.Itoa(f.ID),
ID: fmt.Sprintf("%d", f.ID),
Name: f.Content.FolderName,
Size: 0,
Modified: time.UnixMilli(f.Inserttime),

View File

@@ -9,7 +9,6 @@ import (
"fmt"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -173,7 +172,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err

View File

@@ -317,8 +317,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
RequireReference: remoteLink.RequireReference,
SyncClosers: utils.NewSyncClosers(remoteLink),
}, nil
}

View File

@@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {strconv.FormatInt(file.GetSize(), 10)},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
@@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {strconv.FormatInt(size, 10)},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)

View File

@@ -16,10 +16,9 @@ type Addition struct {
}
var config = driver.Config{
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheIP,
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
}
func init() {

View File

@@ -31,11 +31,11 @@ type Addition struct {
}
var config = driver.Config{
Name: "FTP",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: "/",
NoLinkURL: true,
Name: "FTP",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {

View File

@@ -1,111 +0,0 @@
package halalcloudopen
import (
"sync"
"time"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
)
var (
slicePostErrorRetryInterval = time.Second * 120
retryTimes = 5
)
type halalCommon struct {
// *AuthService // login info
UserInfo *sdkUser.User // user info
refreshTokenFunc func(token string) error
// serv *AuthService
configs sync.Map
}
func (m *halalCommon) GetAccessToken() (string, error) {
value, exists := m.configs.Load("access_token")
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// GetRefreshToken implements ConfigStore.
func (m *halalCommon) GetRefreshToken() (string, error) {
value, exists := m.configs.Load("refresh_token")
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// SetAccessToken implements ConfigStore.
func (m *halalCommon) SetAccessToken(token string) error {
m.configs.Store("access_token", token)
return nil
}
// SetRefreshToken implements ConfigStore.
func (m *halalCommon) SetRefreshToken(token string) error {
m.configs.Store("refresh_token", token)
if m.refreshTokenFunc != nil {
return m.refreshTokenFunc(token)
}
return nil
}
// SetToken implements ConfigStore.
func (m *halalCommon) SetToken(accessToken string, refreshToken string, expiresIn int64) error {
m.configs.Store("access_token", accessToken)
m.configs.Store("refresh_token", refreshToken)
m.configs.Store("expires_in", expiresIn)
if m.refreshTokenFunc != nil {
return m.refreshTokenFunc(refreshToken)
}
return nil
}
// ClearConfigs implements ConfigStore.
func (m *halalCommon) ClearConfigs() error {
m.configs = sync.Map{} // reset the map
return nil
}
// DeleteConfig implements ConfigStore.
func (m *halalCommon) DeleteConfig(key string) error {
_, exists := m.configs.Load(key)
if !exists {
return nil // not present: nothing to delete
}
m.configs.Delete(key) // remove the given config entry
return nil
}
// GetConfig implements ConfigStore.
func (m *halalCommon) GetConfig(key string) (string, error) {
value, exists := m.configs.Load(key)
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// ListConfigs implements ConfigStore.
func (m *halalCommon) ListConfigs() (map[string]string, error) {
configs := make(map[string]string)
m.configs.Range(func(key, value interface{}) bool {
configs[key.(string)] = value.(string) // copy each entry into the result map
return true // keep iterating
})
return configs, nil // return all config entries
}
// SetConfig implements ConfigStore.
func (m *halalCommon) SetConfig(key string, value string) error {
m.configs.Store(key, value) // set or update the config entry
return nil // nil on success
}
func NewHalalCommon() *halalCommon {
return &halalCommon{
configs: sync.Map{},
}
}

View File

@@ -1,29 +0,0 @@
package halalcloudopen
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkClient "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
type HalalCloudOpen struct {
*halalCommon
model.Storage
Addition
sdkClient *sdkClient.Client
sdkUserFileService *sdkUserFile.UserFileService
sdkUserService *sdkUser.UserService
uploadThread int
}
func (d *HalalCloudOpen) Config() driver.Config {
return config
}
func (d *HalalCloudOpen) GetAddition() driver.Additional {
return &d.Addition
}
var _ driver.Driver = (*HalalCloudOpen)(nil)

View File

@@ -1,131 +0,0 @@
package halalcloudopen
import (
"context"
"strconv"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkModel "github.com/halalcloud/golang-sdk-lite/halalcloud/model"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
func (d *HalalCloudOpen) getFiles(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files := make([]model.Obj, 0)
limit := int64(100)
token := ""
for {
result, err := d.sdkUserFileService.List(ctx, &sdkUserFile.FileListRequest{
Parent: &sdkUserFile.File{Path: dir.GetPath()},
ListInfo: &sdkModel.ScanListRequest{
Limit: strconv.FormatInt(limit, 10),
Token: token,
},
})
if err != nil {
return nil, err
}
for i := 0; len(result.Files) > i; i++ {
files = append(files, NewObjFile(result.Files[i]))
}
if result.ListInfo == nil || result.ListInfo.Token == "" {
break
}
token = result.ListInfo.Token
}
return files, nil
}
func (d *HalalCloudOpen) makeDir(ctx context.Context, dir model.Obj, name string) (model.Obj, error) {
_, err := d.sdkUserFileService.Create(ctx, &sdkUserFile.File{
Path: dir.GetPath(),
Name: name,
})
return nil, err
}
func (d *HalalCloudOpen) move(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
oldDir := obj.GetPath()
newDir := dir.GetPath()
_, err := d.sdkUserFileService.Move(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Path: oldDir,
},
},
Dest: &sdkUserFile.File{
Path: newDir,
},
})
return nil, err
}
func (d *HalalCloudOpen) rename(ctx context.Context, obj model.Obj, name string) (model.Obj, error) {
_, err := d.sdkUserFileService.Rename(ctx, &sdkUserFile.File{
Path: obj.GetPath(),
Name: name,
})
return nil, err
}
func (d *HalalCloudOpen) copy(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
id := obj.GetID()
sourcePath := obj.GetPath()
if len(id) > 0 {
sourcePath = ""
}
destID := dir.GetID()
destPath := dir.GetPath()
if len(destID) > 0 {
destPath = ""
}
dest := &sdkUserFile.File{
Path: destPath,
Identity: destID,
}
_, err := d.sdkUserFileService.Copy(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Path: sourcePath,
Identity: id,
},
},
Dest: dest,
})
return nil, err
}
func (d *HalalCloudOpen) remove(ctx context.Context, obj model.Obj) error {
id := obj.GetID()
_, err := d.sdkUserFileService.Delete(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Identity: id,
Path: obj.GetPath(),
},
},
})
return err
}
func (d *HalalCloudOpen) details(ctx context.Context) (*model.StorageDetails, error) {
ret, err := d.sdkUserService.GetStatisticsAndQuota(ctx)
if err != nil {
return nil, err
}
total := uint64(ret.DiskStatisticsQuota.BytesQuota)
free := uint64(ret.DiskStatisticsQuota.BytesFree)
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

View File

@@ -1,108 +0,0 @@
package halalcloudopen
import (
"context"
"crypto/sha1"
"io"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/rclone/rclone/lib/readers"
)
func (d *HalalCloudOpen) getLink(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if args.Redirect {
// return nil, model.ErrUnsupported
fid := file.GetID()
fpath := file.GetPath()
if fid != "" {
fpath = ""
}
fi, err := d.sdkUserFileService.GetDirectDownloadAddress(ctx, &sdkUserFile.DirectDownloadRequest{
Identity: fid,
Path: fpath,
})
if err != nil {
return nil, err
}
expireAt := fi.ExpireAt
duration := time.Until(time.UnixMilli(expireAt))
return &model.Link{
URL: fi.DownloadAddress,
Expiration: &duration,
}, nil
}
result, err := d.sdkUserFileService.ParseFileSlice(ctx, &sdkUserFile.File{
Identity: file.GetID(),
Path: file.GetPath(),
})
if err != nil {
return nil, err
}
fileAddrs := []*sdkUserFile.SliceDownloadInfo{}
var addressDuration int64
nodesNumber := len(result.RawNodes)
nodesIndex := nodesNumber - 1
startIndex, endIndex := 0, nodesIndex
for nodesIndex >= 0 {
if nodesIndex >= 200 {
endIndex = 200
} else {
endIndex = nodesNumber
}
for ; endIndex <= nodesNumber; endIndex += 200 {
if endIndex == 0 {
endIndex = 1
}
sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
Identity: result.RawNodes[startIndex:endIndex],
Version: 1,
})
if err != nil {
return nil, err
}
addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
startIndex = endIndex
nodesIndex -= 200
}
}
size, _ := strconv.ParseInt(result.FileSize, 10, 64)
chunks := getChunkSizes(result.Sizes)
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
length = size - httpRange.Start
}
oo := &openObject{
ctx: ctx,
d: fileAddrs,
chunk: []byte{},
chunks: chunks,
skip: httpRange.Start,
sha: result.Sha1,
shaTemp: sha1.New(),
}
return readers.NewLimitedReadCloser(oo, length), nil
}
var duration time.Duration
if addressDuration != 0 {
duration = time.Until(time.UnixMilli(addressDuration))
} else {
duration = time.Until(time.Now().Add(time.Hour))
}
return &model.Link{
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
Expiration: &duration,
}, nil
}

View File

@@ -1,50 +0,0 @@
package halalcloudopen
import (
"context"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
func (d *HalalCloudOpen) Init(ctx context.Context) error {
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, 3
}
if d.halalCommon == nil {
d.halalCommon = &halalCommon{
UserInfo: &sdkUser.User{},
refreshTokenFunc: func(token string) error {
d.Addition.RefreshToken = token
op.MustSaveDriverStorage(d)
return nil
},
}
}
if d.Addition.RefreshToken != "" {
d.halalCommon.SetRefreshToken(d.Addition.RefreshToken)
}
timeout := d.Addition.TimeOut
if timeout <= 0 {
timeout = 60
}
host := d.Addition.Host
if host == "" {
host = "openapi.2dland.cn"
}
client := apiclient.NewClient(nil, host, d.Addition.ClientID, d.Addition.ClientSecret, d.halalCommon, apiclient.WithTimeout(time.Second*time.Duration(timeout)))
d.sdkClient = client
d.sdkUserFileService = sdkUserFile.NewUserFileService(client)
d.sdkUserService = sdkUser.NewUserService(client)
userInfo, err := d.sdkUserService.Get(ctx, &sdkUser.User{})
if err != nil {
return err
}
d.halalCommon.UserInfo = userInfo
// Fetching the user info succeeded, which already proves the RefreshToken is valid; no further check is needed
return nil
}

View File

@@ -1,48 +0,0 @@
package halalcloudopen
import (
"context"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
func (d *HalalCloudOpen) Drop(ctx context.Context) error {
return nil
}
func (d *HalalCloudOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
return d.getFiles(ctx, dir)
}
func (d *HalalCloudOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return d.getLink(ctx, file, args)
}
func (d *HalalCloudOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
return d.makeDir(ctx, parentDir, dirName)
}
func (d *HalalCloudOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.move(ctx, srcObj, dstDir)
}
func (d *HalalCloudOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
return d.rename(ctx, srcObj, newName)
}
func (d *HalalCloudOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.copy(ctx, srcObj, dstDir)
}
func (d *HalalCloudOpen) Remove(ctx context.Context, obj model.Obj) error {
return d.remove(ctx, obj)
}
func (d *HalalCloudOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.put(ctx, dstDir, stream, up)
}
func (d *HalalCloudOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
return d.details(ctx)
}

View File

@@ -1,258 +0,0 @@
package halalcloudopen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/ipfs/go-cid"
)
func (d *HalalCloudOpen) put(ctx context.Context, dstDir model.Obj, fileStream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
newPath := path.Join(dstDir.GetPath(), fileStream.GetName())
uploadTask, err := d.sdkUserFileService.CreateUploadTask(ctx, &sdkUserFile.File{
Path: newPath,
Size: fileStream.GetSize(),
})
if err != nil {
return nil, err
}
if uploadTask.Created {
return nil, nil
}
slicesList := make([]string, 0)
codec := uint64(0x55)
if uploadTask.BlockCodec > 0 {
codec = uint64(uploadTask.BlockCodec)
}
blockHashType := uploadTask.BlockHashType
mhType := uint64(0x12)
if blockHashType > 0 {
mhType = uint64(blockHashType)
}
prefix := cid.Prefix{
Codec: codec,
MhLength: -1,
MhType: mhType,
Version: 1,
}
blockSize := uploadTask.BlockSize
useSingleUpload := true
//
if fileStream.GetSize() <= int64(blockSize) || d.uploadThread <= 1 {
useSingleUpload = true
}
// Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.
// read file
if useSingleUpload {
bufferSize := int(blockSize)
buffer := make([]byte, bufferSize)
reader := driver.NewLimitedUploadStream(ctx, fileStream)
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
// fileStream.Seek(0, os.SEEK_SET)
for {
n, err := teeReader.Read(buffer)
if n > 0 {
data := buffer[:n]
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
if err != nil {
return nil, err
}
slicesList = append(slicesList, uploadCid.String())
}
if err == io.EOF || n == 0 {
break
}
}
} else {
// TODO: implement multipart upload, currently using single-threaded upload to ensure safety.
bufferSize := int(blockSize)
buffer := make([]byte, bufferSize)
reader := driver.NewLimitedUploadStream(ctx, fileStream)
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
for {
n, err := teeReader.Read(buffer)
if n > 0 {
data := buffer[:n]
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
if err != nil {
return nil, err
}
slicesList = append(slicesList, uploadCid.String())
}
if err == io.EOF || n == 0 {
break
}
}
}
newFile, err := makeFile(ctx, slicesList, uploadTask.Task, uploadTask.UploadAddress, retryTimes)
if err != nil {
return nil, err
}
return NewObjFile(newFile), nil
}
func makeFile(ctx context.Context, fileSlice []string, taskID string, uploadAddress string, retry int) (*sdkUserFile.File, error) {
var lastError error = nil
for range retry {
newFile, err := doMakeFile(fileSlice, taskID, uploadAddress)
if err == nil {
return newFile, nil
}
if ctx.Err() != nil {
return nil, err
}
if strings.Contains(err.Error(), "not found") {
return nil, err
}
lastError = err
time.Sleep(slicePostErrorRetryInterval)
}
return nil, fmt.Errorf("mk file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUserFile.File, error) {
accessUrl := uploadAddress + "/" + taskID
getTimeOut := time.Minute * 2
u, err := url.Parse(accessUrl)
if err != nil {
return nil, err
}
n, _ := json.Marshal(fileSlice)
httpRequest := http.Request{
Method: http.MethodPost,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
"Content-Type": {"application/json"},
//"Content-Length": {strconv.Itoa(len(n))},
},
Body: io.NopCloser(bytes.NewReader(n)),
}
httpClient := http.Client{
Timeout: getTimeOut,
}
httpResponse, err := httpClient.Do(&httpRequest)
if err != nil {
return nil, err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
b, _ := io.ReadAll(httpResponse.Body)
message := string(b)
return nil, fmt.Errorf("mk file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
}
b, _ := io.ReadAll(httpResponse.Body)
var result *sdkUserFile.File
err = json.Unmarshal(b, &result)
if err != nil {
return nil, err
}
return result, nil
}
func postFileSlice(ctx context.Context, fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix, retry int) (cid.Cid, error) {
var lastError error = nil
for range retry {
newCid, err := doPostFileSlice(fileSlice, taskID, uploadAddress, preix)
if err == nil {
return newCid, nil
}
if ctx.Err() != nil {
return cid.Undef, err
}
time.Sleep(slicePostErrorRetryInterval)
lastError = err
}
return cid.Undef, fmt.Errorf("upload file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix) (cid.Cid, error) {
// 1. sum file slice
newCid, err := preix.Sum(fileSlice)
if err != nil {
return cid.Undef, err
}
// 2. post file slice
sliceCidString := newCid.String()
// /{taskID}/{sliceID}
accessUrl := uploadAddress + "/" + taskID + "/" + sliceCidString
getTimeOut := time.Second * 30
// get {accessUrl} in {getTimeOut}
u, err := url.Parse(accessUrl)
if err != nil {
return cid.Undef, err
}
// header: accept: application/json
// header: content-type: application/octet-stream
// header: content-length: {fileSlice.length}
// header: x-content-cid: {sliceCidString}
// header: x-task-id: {taskID}
httpRequest := http.Request{
Method: http.MethodGet,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
},
}
httpClient := http.Client{
Timeout: getTimeOut,
}
httpResponse, err := httpClient.Do(&httpRequest)
if err != nil {
return cid.Undef, err
}
if httpResponse.StatusCode != http.StatusOK {
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d", httpResponse.StatusCode)
}
var result bool
b, err := io.ReadAll(httpResponse.Body)
if err != nil {
return cid.Undef, err
}
err = json.Unmarshal(b, &result)
if err != nil {
return cid.Undef, err
}
if result {
return newCid, nil
}
httpRequest = http.Request{
Method: http.MethodPost,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
"Content-Type": {"application/octet-stream"},
// "Content-Length": {strconv.Itoa(len(fileSlice))},
},
Body: io.NopCloser(bytes.NewReader(fileSlice)),
}
httpResponse, err = httpClient.Do(&httpRequest)
if err != nil {
return cid.Undef, err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
b, _ := io.ReadAll(httpResponse.Body)
message := string(b)
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
}
//
return newCid, nil
}
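For context on the removed upload path: each block is content-addressed with a CIDv1 built from the prefix set up above (codec 0x55 is the raw multicodec, multihash type 0x12 is SHA2-256), and the resulting CID string becomes the slice ID appended to the upload URL. A standalone sketch of that same go-cid call:

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

func main() {
	// Same prefix defaults as the removed code: CIDv1, raw codec (0x55),
	// SHA2-256 multihash (0x12), digest length chosen by the hash (-1).
	prefix := cid.Prefix{Version: 1, Codec: 0x55, MhType: 0x12, MhLength: -1}
	c, err := prefix.Sum([]byte("example block"))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.String()) // this string is appended to the upload URL as the slice ID
}
```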

View File

@@ -1,32 +0,0 @@
package halalcloudopen
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootPath
// define other
RefreshToken string `json:"refresh_token" required:"false" help:"If using a personal API approach, the RefreshToken is not required."`
UploadThread int `json:"upload_thread" type:"number" default:"3" help:"1 <= thread <= 32"`
ClientID string `json:"client_id" required:"true" default:""`
ClientSecret string `json:"client_secret" required:"true" default:""`
Host string `json:"host" required:"false" default:"openapi.2dland.cn"`
TimeOut int `json:"timeout" type:"number" default:"60" help:"timeout in seconds"`
}
var config = driver.Config{
Name: "HalalCloudOpen",
OnlyProxy: false,
DefaultRoot: "/",
NoLinkURL: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &HalalCloudOpen{}
})
}

View File

@@ -1,60 +0,0 @@
package halalcloudopen
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
type ObjFile struct {
sdkFile *sdkUserFile.File
fileSize int64
modTime time.Time
createTime time.Time
}
func NewObjFile(f *sdkUserFile.File) model.Obj {
ofile := &ObjFile{sdkFile: f}
ofile.fileSize = f.Size
modTimeTs := f.UpdateTs
ofile.modTime = time.UnixMilli(modTimeTs)
createTimeTs := f.CreateTs
ofile.createTime = time.UnixMilli(createTimeTs)
return ofile
}
func (f *ObjFile) GetSize() int64 {
return f.fileSize
}
func (f *ObjFile) GetName() string {
return f.sdkFile.Name
}
func (f *ObjFile) ModTime() time.Time {
return f.modTime
}
func (f *ObjFile) IsDir() bool {
return f.sdkFile.Dir
}
func (f *ObjFile) GetHash() utils.HashInfo {
return utils.HashInfo{
// TODO: support more hash types
}
}
func (f *ObjFile) GetID() string {
return f.sdkFile.Identity
}
func (f *ObjFile) GetPath() string {
return f.sdkFile.Path
}
func (f *ObjFile) CreateTime() time.Time {
return f.createTime
}

View File

@@ -1,185 +0,0 @@
package halalcloudopen
import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"net/http"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/ipfs/go-cid"
)
// get the next chunk
func (oo *openObject) getChunk(_ context.Context) (err error) {
if oo.id >= len(oo.chunks) {
return io.EOF
}
var chunk []byte
err = utils.Retry(3, time.Second, func() (err error) {
chunk, err = getRawFiles(oo.d[oo.id])
return err
})
if err != nil {
return err
}
oo.id++
oo.chunk = chunk
return nil
}
// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return 0, fmt.Errorf("read on closed file")
}
// Skip data at the start if requested
for oo.skip > 0 {
//size := 1024 * 1024
_, size, err := oo.ChunkLocation(oo.id)
if err != nil {
return 0, err
}
if oo.skip < int64(size) {
break
}
oo.id++
oo.skip -= int64(size)
}
if len(oo.chunk) == 0 {
err = oo.getChunk(oo.ctx)
if err != nil {
return 0, err
}
if oo.skip > 0 {
oo.chunk = (oo.chunk)[oo.skip:]
oo.skip = 0
}
}
n = copy(p, oo.chunk)
oo.shaTemp.Write(p[:n])
oo.chunk = (oo.chunk)[n:]
return n, nil
}
// Close closed the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return nil
}
// verify the SHA-1
if string(oo.shaTemp.Sum(nil)) != oo.sha {
return fmt.Errorf("failed to finish download: SHA mismatch")
}
oo.closed = true
return nil
}
func GetMD5Hash(text string) string {
tHash := md5.Sum([]byte(text))
return hex.EncodeToString(tHash[:])
}
type chunkSize struct {
position int64
size int
}
type openObject struct {
ctx context.Context
mu sync.Mutex
d []*sdkUserFile.SliceDownloadInfo
id int
skip int64
chunk []byte
chunks []chunkSize
closed bool
sha string
shaTemp hash.Hash
}
func getChunkSizes(sliceSize []*sdkUserFile.SliceSize) (chunks []chunkSize) {
chunks = make([]chunkSize, 0)
for _, s := range sliceSize {
// special-case the last slice
endIndex := s.EndIndex
startIndex := s.StartIndex
if endIndex == 0 {
endIndex = startIndex
}
for j := startIndex; j <= endIndex; j++ {
size := s.Size
chunks = append(chunks, chunkSize{position: j, size: int(size)})
}
}
return chunks
}
func (oo *openObject) ChunkLocation(id int) (position int64, size int, err error) {
if id < 0 || id >= len(oo.chunks) {
return 0, 0, errors.New("invalid arguments")
}
return (oo.chunks)[id].position, (oo.chunks)[id].size, nil
}
func getRawFiles(addr *sdkUserFile.SliceDownloadInfo) ([]byte, error) {
if addr == nil {
return nil, errors.New("addr is nil")
}
client := http.Client{
Timeout: time.Duration(60 * time.Second), // Set timeout to 60 seconds
}
resp, err := client.Get(addr.DownloadAddress)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("bad status: %s, body: %s", resp.Status, body)
}
if addr.Encrypt > 0 {
cd := uint8(addr.Encrypt)
for idx := 0; idx < len(body); idx++ {
body[idx] = body[idx] ^ cd
}
}
storeType := addr.StoreType
if storeType != 10 {
sourceCid, err := cid.Decode(addr.Identity)
if err != nil {
return nil, err
}
checkCid, err := sourceCid.Prefix().Sum(body)
if err != nil {
return nil, err
}
if !checkCid.Equals(sourceCid) {
return nil, fmt.Errorf("bad cid: %s, body: %s", checkCid.String(), body)
}
}
return body, nil
}

View File

@@ -235,7 +235,6 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fullPath := file.GetPath()
link := &model.Link{}
var MFile model.File
if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" {
var buf *bytes.Buffer
var thumbPath *string
@@ -262,9 +261,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
link.ContentLength = int64(stat.Size())
MFile = open
link.MFile = open
} else {
MFile = bytes.NewReader(buf.Bytes())
link.MFile = bytes.NewReader(buf.Bytes())
link.ContentLength = int64(buf.Len())
}
} else {
@@ -273,11 +272,13 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
link.ContentLength = file.GetSize()
MFile = open
link.MFile = open
}
link.AddIfCloser(link.MFile)
if !d.Config().OnlyLinkMFile {
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
link.MFile = nil
}
link.SyncClosers.AddIfCloser(MFile)
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, MFile)
link.RequireReference = link.SyncClosers.Length() > 0
return link, nil
}
@@ -374,26 +375,18 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
err = os.Remove(obj.GetPath())
}
} else {
objPath := obj.GetPath()
objName := obj.GetName()
var relPath string
relPath, err = filepath.Rel(d.GetRootPath(), filepath.Dir(objPath))
if err != nil {
return err
}
recycleBinPath := filepath.Join(d.RecycleBinPath, relPath)
if !utils.Exists(recycleBinPath) {
err = os.MkdirAll(recycleBinPath, 0o755)
if !utils.Exists(d.RecycleBinPath) {
err = os.MkdirAll(d.RecycleBinPath, 0o755)
if err != nil {
return err
}
}
dstPath := filepath.Join(recycleBinPath, objName)
dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
if utils.Exists(dstPath) {
dstPath = filepath.Join(recycleBinPath, objName+"_"+time.Now().Format("20060102150405"))
dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
}
err = os.Rename(objPath, dstPath)
err = os.Rename(obj.GetPath(), dstPath)
}
if err != nil {
return err

View File

@@ -18,12 +18,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "Local",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
Name: "Local",
OnlyLinkMFile: false,
LocalSort: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {

View File

@@ -36,6 +36,7 @@ type Addition struct {
var config = driver.Config{
Name: "MediaFire",
LocalSort: false,
OnlyLinkMFile: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,

View File

@@ -22,7 +22,6 @@ type Onedrive struct {
AccessToken string
root *Object
mutex sync.Mutex
ref *Onedrive
}
func (d *Onedrive) Config() driver.Config {
@@ -37,22 +36,10 @@ func (d *Onedrive) Init(ctx context.Context) error {
if d.ChunkSize < 1 {
d.ChunkSize = 5
}
if d.ref != nil {
return nil
}
return d.refreshToken()
}
func (d *Onedrive) InitReference(refStorage driver.Driver) error {
if ref, ok := refStorage.(*Onedrive); ok {
d.ref = ref
return nil
}
return errs.NotSupport
}
func (d *Onedrive) Drop(ctx context.Context) error {
d.ref = nil
return nil
}

View File

@@ -134,9 +134,6 @@ func (d *Onedrive) _refreshToken() error {
}
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
if d.ref != nil {
return d.ref.Request(url, method, callback, resp)
}
req := base.RestyClient.R()
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if callback != nil {

View File

@@ -110,29 +110,19 @@ func (d *OpenList) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *OpenList) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var resp common.Resp[FsGetResp]
headers := map[string]string{
"User-Agent": base.UserAgent,
}
// if PassUAToUpsteam is true, then pass the user-agent to the upstream
userAgent := base.UserAgent
if d.PassUAToUpsteam {
userAgent := args.Header.Get("user-agent")
if userAgent != "" {
headers["User-Agent"] = base.UserAgent
}
}
// if PassIPToUpsteam is true, then pass the ip address to the upstream
if d.PassIPToUpsteam {
ip := args.IP
if ip != "" {
headers["X-Forwarded-For"] = ip
headers["X-Real-Ip"] = ip
userAgent = args.Header.Get("user-agent")
if userAgent == "" {
userAgent = base.UserAgent
}
}
_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(FsGetReq{
Path: file.GetPath(),
Password: d.MetaPassword,
}).SetHeaders(headers)
}).SetHeader("user-agent", userAgent)
})
if err != nil {
return nil, err
@@ -365,15 +355,8 @@ func (d *OpenList) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
return err
}
func (d *OpenList) ResolveLinkCacheMode(_ string) driver.LinkCacheMode {
var mode driver.LinkCacheMode
if d.PassIPToUpsteam {
mode |= driver.LinkCacheIP
}
if d.PassUAToUpsteam {
mode |= driver.LinkCacheUA
}
return mode
}
//func (d *OpenList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*OpenList)(nil)
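The removed ResolveLinkCacheMode combines flags with |=, and the Alias driver treats -1 as resolve-per-path, so LinkCacheMode evidently acts as a small bit set. An illustrative sketch with assumed constant values (the real ones are defined in OpenList's internal driver package):

```go
package main

import "fmt"

type LinkCacheMode int8

// Assumed values, for illustration only.
const (
	LinkCacheAuto LinkCacheMode = -1     // delegate to ResolveLinkCacheMode
	LinkCacheIP   LinkCacheMode = 1 << 0 // cache key varies by client IP
	LinkCacheUA   LinkCacheMode = 1 << 1 // cache key varies by User-Agent
)

func main() {
	var mode LinkCacheMode
	mode |= LinkCacheIP
	mode |= LinkCacheUA
	fmt.Println(mode&LinkCacheUA != 0) // true: links are cached per User-Agent
}
```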

View File

@@ -12,7 +12,6 @@ type Addition struct {
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
PassIPToUpsteam bool `json:"pass_ip_to_upsteam" default:"true"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}
@@ -23,7 +22,6 @@ var config = driver.Config{
DefaultRoot: "/",
CheckStatus: true,
ProxyRangeOption: true,
LinkCacheMode: driver.LinkCacheAuto,
}
func init() {

View File

@@ -1,290 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Да здравствует Родина))
*/
import (
"context"
"fmt"
"io"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/ProtonMail/gopenpgp/v2/crypto"
proton_api_bridge "github.com/henrybear327/Proton-API-Bridge"
"github.com/henrybear327/Proton-API-Bridge/common"
"github.com/henrybear327/go-proton-api"
)
type ProtonDrive struct {
model.Storage
Addition
protonDrive *proton_api_bridge.ProtonDrive
apiBase string
appVersion string
protonJson string
userAgent string
sdkVersion string
webDriveAV string
c *proton.Client
// userKR *crypto.KeyRing
addrKRs map[string]*crypto.KeyRing
addrData map[string]proton.Address
MainShare *proton.Share
DefaultAddrKR *crypto.KeyRing
MainShareKR *crypto.KeyRing
}
func (d *ProtonDrive) Config() driver.Config {
return config
}
func (d *ProtonDrive) GetAddition() driver.Additional {
return &d.Addition
}
func (d *ProtonDrive) Init(ctx context.Context) (err error) {
defer func() {
if r := recover(); err == nil && r != nil {
err = fmt.Errorf("ProtonDrive initialization panic: %v", r)
}
}()
if d.Email == "" {
return fmt.Errorf("email is required")
}
if d.Password == "" {
return fmt.Errorf("password is required")
}
config := &common.Config{
AppVersion: d.appVersion,
UserAgent: d.userAgent,
FirstLoginCredential: &common.FirstLoginCredentialData{
Username: d.Email,
Password: d.Password,
TwoFA: d.TwoFACode,
},
EnableCaching: true,
ConcurrentBlockUploadCount: setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers),
//ConcurrentFileCryptoCount: 2,
UseReusableLogin: d.UseReusableLogin && d.ReusableCredential != (common.ReusableCredentialData{}),
ReplaceExistingDraft: true,
ReusableCredential: &d.ReusableCredential,
}
protonDrive, _, err := proton_api_bridge.NewProtonDrive(
ctx,
config,
d.authHandler,
func() {},
)
if err != nil && config.UseReusableLogin {
config.UseReusableLogin = false
protonDrive, _, err = proton_api_bridge.NewProtonDrive(ctx,
config,
d.authHandler,
func() {},
)
if err == nil {
op.MustSaveDriverStorage(d)
}
}
if err != nil {
return fmt.Errorf("failed to initialize ProtonDrive: %w", err)
}
if err := d.initClient(ctx); err != nil {
return err
}
d.protonDrive = protonDrive
d.MainShare = protonDrive.MainShare
if d.RootFolderID == "root" || d.RootFolderID == "" {
d.RootFolderID = protonDrive.RootLink.LinkID
}
d.MainShareKR = protonDrive.MainShareKR
d.DefaultAddrKR = protonDrive.DefaultAddrKR
return nil
}
func (d *ProtonDrive) Drop(ctx context.Context) error {
return nil
}
func (d *ProtonDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
entries, err := d.protonDrive.ListDirectory(ctx, dir.GetID())
if err != nil {
return nil, fmt.Errorf("failed to list directory: %w", err)
}
objects := make([]model.Obj, 0, len(entries))
for _, entry := range entries {
obj := &model.Object{
ID: entry.Link.LinkID,
Name: entry.Name,
Size: entry.Link.Size,
Modified: time.Unix(entry.Link.ModifyTime, 0),
IsFolder: entry.IsFolder,
}
objects = append(objects, obj)
}
return objects, nil
}
func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
link, err := d.getLink(ctx, file.GetID())
if err != nil {
return nil, fmt.Errorf("failed get file link: %+v", err)
}
fileSystemAttrs, err := d.protonDrive.GetActiveRevisionAttrs(ctx, link)
if err != nil {
return nil, fmt.Errorf("failed get file revision: %+v", err)
}
// size of the file after decryption
size := fileSystemAttrs.Size
rangeReaderFunc := func(rangeCtx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if length < 0 || httpRange.Start+length > size {
length = size - httpRange.Start
}
reader, _, _, err := d.protonDrive.DownloadFile(rangeCtx, link, httpRange.Start)
if err != nil {
return nil, fmt.Errorf("failed start download: %+v", err)
}
return utils.ReadCloser{
Reader: io.LimitReader(reader, length),
Closer: reader,
}, nil
}
expiration := time.Minute
return &model.Link{
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
},
ContentLength: size,
Expiration: &expiration,
}, nil
}
func (d *ProtonDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
id, err := d.protonDrive.CreateNewFolderByID(ctx, parentDir.GetID(), dirName)
if err != nil {
return nil, fmt.Errorf("failed to create directory: %w", err)
}
newDir := &model.Object{
ID: id,
Name: dirName,
IsFolder: true,
Modified: time.Now(),
}
return newDir, nil
}
func (d *ProtonDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.DirectMove(ctx, srcObj, dstDir)
}
func (d *ProtonDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if d.protonDrive == nil {
return nil, fmt.Errorf("protonDrive bridge is nil")
}
return d.DirectRename(ctx, srcObj, newName)
}
func (d *ProtonDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if srcObj.IsDir() {
return nil, fmt.Errorf("directory copy not supported")
}
srcLink, err := d.getLink(ctx, srcObj.GetID())
if err != nil {
return nil, err
}
reader, linkSize, fileSystemAttrs, err := d.protonDrive.DownloadFile(ctx, srcLink, 0)
if err != nil {
return nil, fmt.Errorf("failed to download source file: %w", err)
}
defer reader.Close()
actualSize := linkSize
if fileSystemAttrs != nil && fileSystemAttrs.Size > 0 {
actualSize = fileSystemAttrs.Size
}
file := &stream.FileStream{
Ctx: ctx,
Obj: &model.Object{
Name: srcObj.GetName(),
// Use the accurate and real size
Size: actualSize,
Modified: srcObj.ModTime(),
},
Reader: reader,
}
defer file.Close()
return d.Put(ctx, dstDir, file, func(percentage float64) {})
}
func (d *ProtonDrive) Remove(ctx context.Context, obj model.Obj) error {
if obj.IsDir() {
return d.protonDrive.MoveFolderToTrashByID(ctx, obj.GetID(), false)
} else {
return d.protonDrive.MoveFileToTrashByID(ctx, obj.GetID())
}
}
func (d *ProtonDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.uploadFile(ctx, dstDir.GetID(), file, up)
}
func (d *ProtonDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
about, err := d.protonDrive.About(ctx)
if err != nil {
return nil, err
}
total := uint64(about.MaxSpace)
free := total - uint64(about.UsedSpace)
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}
var _ driver.Driver = (*ProtonDrive)(nil)

View File

@@ -1,56 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Да здравствует Родина))
*/
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/henrybear327/Proton-API-Bridge/common"
)
type Addition struct {
driver.RootID
Email string `json:"email" required:"true" type:"string"`
Password string `json:"password" required:"true" type:"string"`
TwoFACode string `json:"two_fa_code" type:"string"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"100"`
UseReusableLogin bool `json:"use_reusable_login" type:"bool" default:"true" help:"Use reusable login credentials instead of username/password"`
ReusableCredential common.ReusableCredentialData
}
var config = driver.Config{
Name: "ProtonDrive",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: "root",
NoLinkURL: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &ProtonDrive{
Addition: Addition{
UseReusableLogin: true,
},
apiBase: "https://drive.proton.me/api",
appVersion: "windows-drive@1.11.3+rclone+proton",
protonJson: "application/vnd.protonmail.v1+json",
sdkVersion: "js@0.3.0",
userAgent: "ProtonDrive/v1.70.0 (Windows NT 10.0.22000; Win64; x64)",
webDriveAV: "web-drive@5.2.0+0f69f7a8",
}
})
}

View File

@@ -1,38 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Да здравствует Родина))
*/
type MoveRequest struct {
ParentLinkID string `json:"ParentLinkID"`
NodePassphrase string `json:"NodePassphrase"`
NodePassphraseSignature *string `json:"NodePassphraseSignature"`
Name string `json:"Name"`
NameSignatureEmail string `json:"NameSignatureEmail"`
Hash string `json:"Hash"`
OriginalHash string `json:"OriginalHash"`
ContentHash *string `json:"ContentHash"` // Maybe null
}
type RenameRequest struct {
Name string `json:"Name"` // PGP encrypted name
NameSignatureEmail string `json:"NameSignatureEmail"` // User's signature email
Hash string `json:"Hash"` // New name hash
OriginalHash string `json:"OriginalHash"` // Current name hash
}
type RenameResponse struct {
Code int `json:"Code"`
}

View File

@@ -1,670 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Да здравствует Родина))
*/
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/henrybear327/go-proton-api"
)
func (d *ProtonDrive) uploadFile(ctx context.Context, parentLinkID string, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
_, err := d.getLink(ctx, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to get parent link: %w", err)
}
var reader io.Reader
// Use buffered reader with larger buffer for better performance
var bufferSize int
// File > 100MB (default)
if file.GetSize() > d.ChunkSize*1024*1024 {
// 256KB for large files
bufferSize = 256 * 1024
// File > 10MB
} else if file.GetSize() > 10*1024*1024 {
// 128KB for medium files
bufferSize = 128 * 1024
} else {
// 64KB for small files
bufferSize = 64 * 1024
}
// reader = bufio.NewReader(file)
reader = bufio.NewReaderSize(file, bufferSize)
reader = &driver.ReaderUpdatingProgress{
Reader: &stream.SimpleReaderWithSize{
Reader: reader,
Size: file.GetSize(),
},
UpdateProgress: up,
}
reader = driver.NewLimitedUploadStream(ctx, reader)
id, _, err := d.protonDrive.UploadFileByReader(ctx, parentLinkID, file.GetName(), file.ModTime(), reader, 0)
if err != nil {
return nil, fmt.Errorf("failed to upload file: %w", err)
}
return &model.Object{
ID: id,
Name: file.GetName(),
Size: file.GetSize(),
Modified: file.ModTime(),
IsFolder: false,
}, nil
}
func (d *ProtonDrive) encryptFileName(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
// Temporary file (request)
tempReq := proton.CreateFileReq{
SignatureAddress: d.MainShare.Creator,
}
// Encrypt the filename
err = tempReq.SetName(name, d.DefaultAddrKR, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to encrypt filename: %w", err)
}
return tempReq.Name, nil
}
func (d *ProtonDrive) generateFileNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
}
parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
if err != nil {
return "", fmt.Errorf("failed to get parent hash key: %w", err)
}
nameHash, err := proton.GetNameHash(name, parentHashKey)
if err != nil {
return "", fmt.Errorf("failed to generate name hash: %w", err)
}
return nameHash, nil
}
func (d *ProtonDrive) getOriginalNameHash(link *proton.Link) (string, error) {
if link == nil {
return "", fmt.Errorf("link cannot be nil")
}
if link.Hash == "" {
return "", fmt.Errorf("link hash is empty")
}
return link.Hash, nil
}
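// getLink fetches the link metadata for linkID from the main share.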
func (d *ProtonDrive) getLink(ctx context.Context, linkID string) (*proton.Link, error) {
if linkID == "" {
return nil, fmt.Errorf("linkID cannot be empty")
}
link, err := d.c.GetLink(ctx, d.MainShare.ShareID, linkID)
if err != nil {
return nil, err
}
return &link, nil
}
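// getLinkKR resolves the keyring of a link by recursing through its parents;
// the root link is unlocked with the main share keyring.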
func (d *ProtonDrive) getLinkKR(ctx context.Context, link *proton.Link) (*crypto.KeyRing, error) {
if link == nil {
return nil, fmt.Errorf("link cannot be nil")
}
// Root Link or Root Dir
if link.ParentLinkID == "" {
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
if err != nil {
return nil, err
}
return link.GetKeyRing(d.MainShareKR, signatureVerificationKR)
}
// Get parent keyring recursively
parentLink, err := d.getLink(ctx, link.ParentLinkID)
if err != nil {
return nil, err
}
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return nil, err
}
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
if err != nil {
return nil, err
}
return link.GetKeyRing(parentNodeKR, signatureVerificationKR)
}
var (
ErrKeyPassOrSaltedKeyPassMustBeNotNil = errors.New("either keyPass or saltedKeyPass must not be nil")
ErrFailedToUnlockUserKeys = errors.New("failed to unlock user keys")
)
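// getAccountKRs fetches the user and address data, derives the salted key
// password when only the plain key password is available, and unlocks the
// user keyring and the per-address keyrings.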
func getAccountKRs(ctx context.Context, c *proton.Client, keyPass, saltedKeyPass []byte) (*crypto.KeyRing, map[string]*crypto.KeyRing, map[string]proton.Address, []byte, error) {
user, err := c.GetUser(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("user %#v", user)
addrsArr, err := c.GetAddresses(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("addr %#v", addr)
if saltedKeyPass == nil {
if keyPass == nil {
return nil, nil, nil, nil, ErrKeyPassOrSaltedKeyPassMustBeNotNil
}
// Due to limitations, salts are stored using cacheCredentialToFile
salts, err := c.GetSalts(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("salts %#v", salts)
saltedKeyPass, err = salts.SaltForKey(keyPass, user.Keys.Primary().ID)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("saltedKeyPass ok")
}
userKR, addrKRs, err := proton.Unlock(user, addrsArr, saltedKeyPass, nil)
if err != nil {
return nil, nil, nil, nil, err
} else if userKR.CountDecryptionEntities() == 0 {
return nil, nil, nil, nil, ErrFailedToUnlockUserKeys
}
addrs := make(map[string]proton.Address)
for _, addr := range addrsArr {
addrs[addr.Email] = addr
}
return userKR, addrKRs, addrs, saltedKeyPass, nil
}
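// getSignatureVerificationKeyring aggregates the keys of the given email
// addresses, plus any extra keyrings passed in, into a single keyring used
// for signature verification.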
func (d *ProtonDrive) getSignatureVerificationKeyring(emailAddresses []string, verificationAddrKRs ...*crypto.KeyRing) (*crypto.KeyRing, error) {
ret, err := crypto.NewKeyRing(nil)
if err != nil {
return nil, err
}
for _, emailAddress := range emailAddresses {
if addr, ok := d.addrData[emailAddress]; ok {
if addrKR, exists := d.addrKRs[addr.ID]; exists {
err = d.addKeysFromKR(ret, addrKR)
if err != nil {
return nil, err
}
}
}
}
for _, kr := range verificationAddrKRs {
err = d.addKeysFromKR(ret, kr)
if err != nil {
return nil, err
}
}
if ret.CountEntities() == 0 {
return nil, fmt.Errorf("no keyring for signature verification")
}
return ret, nil
}
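// addKeysFromKR copies every key of the given keyrings into kr.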
func (d *ProtonDrive) addKeysFromKR(kr *crypto.KeyRing, newKRs ...*crypto.KeyRing) error {
for i := range newKRs {
for _, key := range newKRs[i].GetKeys() {
err := kr.AddKey(key)
if err != nil {
return err
}
}
}
return nil
}
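// DirectRename renames a link in place: the new name is encrypted and hashed
// for the existing parent folder and submitted through the rename endpoint.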
func (d *ProtonDrive) DirectRename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
// fmt.Printf("DEBUG DirectRename: path=%s, newName=%s", srcObj.GetPath(), newName)
if d.MainShare == nil || d.DefaultAddrKR == nil {
return nil, fmt.Errorf("missing required fields: MainShare=%v, DefaultAddrKR=%v",
d.MainShare != nil, d.DefaultAddrKR != nil)
}
if d.protonDrive == nil {
return nil, fmt.Errorf("protonDrive bridge is nil")
}
srcLink, err := d.getLink(ctx, srcObj.GetID())
if err != nil {
return nil, fmt.Errorf("failed to find source: %w", err)
}
parentLinkID := srcLink.ParentLinkID
if parentLinkID == "" {
return nil, fmt.Errorf("cannot rename root folder")
}
encryptedName, err := d.encryptFileName(ctx, newName, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to encrypt filename: %w", err)
}
newHash, err := d.generateFileNameHash(ctx, newName, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to generate new hash: %w", err)
}
originalHash, err := d.getOriginalNameHash(srcLink)
if err != nil {
return nil, fmt.Errorf("failed to get original hash: %w", err)
}
renameReq := RenameRequest{
Name: encryptedName,
NameSignatureEmail: d.MainShare.Creator,
Hash: newHash,
OriginalHash: originalHash,
}
err = d.executeRenameAPI(ctx, srcLink.LinkID, renameReq)
if err != nil {
return nil, fmt.Errorf("rename API call failed: %w", err)
}
return &model.Object{
ID: srcLink.LinkID,
Name: newName,
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
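// executeRenameAPI sends the raw PUT /drive/v2/volumes/{volume}/links/{link}/rename
// request with the stored session credentials and validates the response code.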
func (d *ProtonDrive) executeRenameAPI(ctx context.Context, linkID string, req RenameRequest) error {
renameURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/rename",
d.MainShare.VolumeID, linkID)
reqBody, err := json.Marshal(req)
if err != nil {
return fmt.Errorf("failed to marshal rename request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "PUT", renameURL, bytes.NewReader(reqBody))
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
}
httpReq.Header.Set("Content-Type", "application/json")
httpReq.Header.Set("Accept", d.protonJson)
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)
client := &http.Client{}
resp, err := client.Do(httpReq)
if err != nil {
return fmt.Errorf("failed to execute rename request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("rename failed with status %d", resp.StatusCode)
}
var renameResp RenameResponse
if err := json.NewDecoder(resp.Body).Decode(&renameResp); err != nil {
return fmt.Errorf("failed to decode rename response: %w", err)
}
if renameResp.Code != 1000 {
return fmt.Errorf("rename failed with code %d", renameResp.Code)
}
return nil
}
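// executeMoveAPI sends the raw PUT /drive/v2/volumes/{volume}/links/{link}/move
// request; moving a link into its current parent is rejected up front.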
func (d *ProtonDrive) executeMoveAPI(ctx context.Context, linkID string, req MoveRequest) error {
// fmt.Printf("DEBUG Move Request - Name: %s\n", req.Name)
// fmt.Printf("DEBUG Move Request - Hash: %s\n", req.Hash)
// fmt.Printf("DEBUG Move Request - OriginalHash: %s\n", req.OriginalHash)
// fmt.Printf("DEBUG Move Request - ParentLinkID: %s\n", req.ParentLinkID)
// fmt.Printf("DEBUG Move Request - Name length: %d\n", len(req.Name))
// fmt.Printf("DEBUG Move Request - NameSignatureEmail: %s\n", req.NameSignatureEmail)
// fmt.Printf("DEBUG Move Request - ContentHash: %v\n", req.ContentHash)
// fmt.Printf("DEBUG Move Request - NodePassphrase length: %d\n", len(req.NodePassphrase))
// fmt.Printf("DEBUG Move Request - NodePassphraseSignature length: %d\n", len(req.NodePassphraseSignature))
// fmt.Printf("DEBUG Move Request - SrcLinkID: %s\n", linkID)
// fmt.Printf("DEBUG Move Request - DstParentLinkID: %s\n", req.ParentLinkID)
// fmt.Printf("DEBUG Move Request - ShareID: %s\n", d.MainShare.ShareID)
srcLink, _ := d.getLink(ctx, linkID)
if srcLink != nil && srcLink.ParentLinkID == req.ParentLinkID {
return fmt.Errorf("cannot move to same parent directory")
}
moveURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/move",
d.MainShare.VolumeID, linkID)
reqBody, err := json.Marshal(req)
if err != nil {
return fmt.Errorf("failed to marshal move request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "PUT", moveURL, bytes.NewReader(reqBody))
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
}
httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)
httpReq.Header.Set("Accept", d.protonJson)
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
httpReq.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(httpReq)
if err != nil {
return fmt.Errorf("failed to execute move request: %w", err)
}
defer resp.Body.Close()
var moveResp RenameResponse
if err := json.NewDecoder(resp.Body).Decode(&moveResp); err != nil {
return fmt.Errorf("failed to decode move response: %w", err)
}
if moveResp.Code != 1000 {
return fmt.Errorf("move operation failed with code: %d", moveResp.Code)
}
return nil
}
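// DirectMove moves a link to a new parent folder: the name is re-encrypted and
// re-hashed for the destination, and the node passphrase key packet is
// re-encrypted for the destination parent's keyring.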
func (d *ProtonDrive) DirectMove(ctx context.Context, srcObj model.Obj, dstDir model.Obj) (model.Obj, error) {
// fmt.Printf("DEBUG DirectMove: srcPath=%s, dstPath=%s", srcObj.GetPath(), dstDir.GetPath())
srcLink, err := d.getLink(ctx, srcObj.GetID())
if err != nil {
return nil, fmt.Errorf("failed to find source: %w", err)
}
dstParentLinkID := dstDir.GetID()
if srcObj.IsDir() {
// Check if destination is a descendant of source
if err := d.checkCircularMove(ctx, srcLink.LinkID, dstParentLinkID); err != nil {
return nil, err
}
}
// Encrypt the filename for the new location
encryptedName, err := d.encryptFileName(ctx, srcObj.GetName(), dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to encrypt filename: %w", err)
}
newHash, err := d.generateNameHash(ctx, srcObj.GetName(), dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to generate new hash: %w", err)
}
originalHash, err := d.getOriginalNameHash(srcLink)
if err != nil {
return nil, fmt.Errorf("failed to get original hash: %w", err)
}
// Re-encrypt node passphrase for new parent context
reencryptedPassphrase, err := d.reencryptNodePassphrase(ctx, srcLink, dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to re-encrypt node passphrase: %w", err)
}
moveReq := MoveRequest{
ParentLinkID: dstParentLinkID,
NodePassphrase: reencryptedPassphrase,
Name: encryptedName,
NameSignatureEmail: d.MainShare.Creator,
Hash: newHash,
OriginalHash: originalHash,
ContentHash: nil,
// *** Causes rejection ***
/* NodePassphraseSignature: srcLink.NodePassphraseSignature, */
}
//fmt.Printf("DEBUG MoveRequest validation:\n")
//fmt.Printf(" Name length: %d\n", len(moveReq.Name))
//fmt.Printf(" Hash: %s\n", moveReq.Hash)
//fmt.Printf(" OriginalHash: %s\n", moveReq.OriginalHash)
//fmt.Printf(" NodePassphrase length: %d\n", len(moveReq.NodePassphrase))
/* fmt.Printf(" NodePassphraseSignature length: %d\n", len(moveReq.NodePassphraseSignature)) */
//fmt.Printf(" NameSignatureEmail: %s\n", moveReq.NameSignatureEmail)
err = d.executeMoveAPI(ctx, srcLink.LinkID, moveReq)
if err != nil {
return nil, fmt.Errorf("move API call failed: %w", err)
}
return &model.Object{
ID: srcLink.LinkID,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
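// reencryptNodePassphrase re-encrypts the link's node passphrase from the
// source parent's keyring context to the destination parent's keyring context.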
func (d *ProtonDrive) reencryptNodePassphrase(ctx context.Context, srcLink *proton.Link, dstParentLinkID string) (string, error) {
// Get source parent link with metadata
srcParentLink, err := d.getLink(ctx, srcLink.ParentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get source parent link: %w", err)
}
// Get source parent keyring using link object
srcParentKR, err := d.getLinkKR(ctx, srcParentLink)
if err != nil {
return "", fmt.Errorf("failed to get source parent keyring: %w", err)
}
// Get destination parent link with metadata
dstParentLink, err := d.getLink(ctx, dstParentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get destination parent link: %w", err)
}
// Get destination parent keyring using link object
dstParentKR, err := d.getLinkKR(ctx, dstParentLink)
if err != nil {
return "", fmt.Errorf("failed to get destination parent keyring: %w", err)
}
// Re-encrypt the node passphrase from source parent context to destination parent context
reencryptedPassphrase, err := reencryptKeyPacket(srcParentKR, dstParentKR, d.DefaultAddrKR, srcLink.NodePassphrase)
if err != nil {
return "", fmt.Errorf("failed to re-encrypt key packet: %w", err)
}
return reencryptedPassphrase, nil
}
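// generateNameHash computes the name hash of a cleartext name using the given
// parent folder's hash key; DirectMove uses it for the destination folder.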
func (d *ProtonDrive) generateNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
// Get signature verification keyring
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
}
parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
if err != nil {
return "", fmt.Errorf("failed to get parent hash key: %w", err)
}
nameHash, err := proton.GetNameHash(name, parentHashKey)
if err != nil {
return "", fmt.Errorf("failed to generate name hash: %w", err)
}
return nameHash, nil
}
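// reencryptKeyPacket decrypts the session key of an armored PGP split message
// with srcKR and re-encrypts it with dstKR, leaving the data packet unchanged.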
func reencryptKeyPacket(srcKR, dstKR, _ *crypto.KeyRing, passphrase string) (string, error) { // third parameter (addrKR) is currently unused
oldSplitMessage, err := crypto.NewPGPSplitMessageFromArmored(passphrase)
if err != nil {
return "", err
}
sessionKey, err := srcKR.DecryptSessionKey(oldSplitMessage.KeyPacket)
if err != nil {
return "", err
}
newKeyPacket, err := dstKR.EncryptSessionKey(sessionKey)
if err != nil {
return "", err
}
newSplitMessage := crypto.NewPGPSplitMessage(newKeyPacket, oldSplitMessage.DataPacket)
return newSplitMessage.GetArmored()
}
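// checkCircularMove walks up from the destination folder to the root and
// returns an error if the source folder is encountered, preventing a folder
// from being moved into itself or one of its descendants.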
func (d *ProtonDrive) checkCircularMove(ctx context.Context, srcLinkID, dstParentLinkID string) error {
currentLinkID := dstParentLinkID
for currentLinkID != "" && currentLinkID != d.RootFolderID {
if currentLinkID == srcLinkID {
return fmt.Errorf("cannot move folder into itself or its subfolder")
}
currentLink, err := d.getLink(ctx, currentLinkID)
if err != nil {
return err
}
currentLinkID = currentLink.ParentLinkID
}
return nil
}
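// authHandler persists refreshed Proton session tokens and reinitializes the
// client whenever the access or refresh token changes.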
func (d *ProtonDrive) authHandler(auth proton.Auth) {
if auth.AccessToken != d.ReusableCredential.AccessToken || auth.RefreshToken != d.ReusableCredential.RefreshToken {
d.ReusableCredential.UID = auth.UID
d.ReusableCredential.AccessToken = auth.AccessToken
d.ReusableCredential.RefreshToken = auth.RefreshToken
if err := d.initClient(context.Background()); err != nil {
fmt.Printf("ProtonDrive: failed to reinitialize client after auth refresh: %v\n", err)
}
op.MustSaveDriverStorage(d)
}
}
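// initClient rebuilds the Proton client from the stored reusable credential
// and refreshes the cached address keyrings and address data.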
func (d *ProtonDrive) initClient(ctx context.Context) error {
clientOptions := []proton.Option{
proton.WithAppVersion(d.appVersion),
proton.WithUserAgent(d.userAgent),
}
manager := proton.New(clientOptions...)
d.c = manager.NewClient(d.ReusableCredential.UID, d.ReusableCredential.AccessToken, d.ReusableCredential.RefreshToken)
saltedKeyPassBytes, err := base64.StdEncoding.DecodeString(d.ReusableCredential.SaltedKeyPass)
if err != nil {
return fmt.Errorf("failed to decode salted key pass: %w", err)
}
_, addrKRs, addrs, _, err := getAccountKRs(ctx, d.c, nil, saltedKeyPassBytes)
if err != nil {
return fmt.Errorf("failed to get account keyrings: %w", err)
}
d.addrKRs = addrKRs
d.addrData = addrs
return nil
}

View File

@@ -69,10 +69,15 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
}
if !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
RequireReference: true,
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}

View File

@@ -16,12 +16,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "SFTP",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: "/",
CheckStatus: true,
NoLinkURL: true,
Name: "SFTP",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
CheckStatus: true,
NoLinkURL: true,
}
func init() {

View File

@@ -86,10 +86,15 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
}
if !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
RequireReference: true,
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}

View File

@@ -14,12 +14,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "SMB",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: ".",
NoCache: true,
NoLinkURL: true,
Name: "SMB",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: ".",
NoCache: true,
NoLinkURL: true,
}
func init() {

View File

@@ -12,7 +12,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
)
@@ -157,7 +156,7 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
if file.GetID() == "strm" {
link := d.getLink(ctx, file.GetPath())
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(int64(len(link)), strings.NewReader(link)),
MFile: strings.NewReader(link),
}, nil
}
// ftp,s3

View File

@@ -15,13 +15,14 @@ type Addition struct {
}
var config = driver.Config{
Name: "Strm",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
NoUpload: true,
DefaultRoot: "/",
NoLinkURL: true,
Name: "Strm",
LocalSort: true,
NoCache: true,
NoUpload: true,
DefaultRoot: "/",
OnlyLinkMFile: true,
OnlyProxy: true,
NoLinkURL: true,
}
func init() {

View File

@@ -164,7 +164,7 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
if err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{
"partName": func() string {
digits := len(strconv.Itoa(totalParts))
digits := len(fmt.Sprintf("%d", totalParts))
return file.GetName() + fmt.Sprintf(".%0*d", digits, 1)
}(),
"partNo": strconv.Itoa(1),
@@ -333,7 +333,7 @@ func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task ch
err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{
"partName": func() string {
digits := len(strconv.Itoa(totalParts))
digits := len(fmt.Sprintf("%d", totalParts))
return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx)
}(),
"partNo": strconv.Itoa(task.chunkIdx),

View File

@@ -16,6 +16,7 @@ type Addition struct {
var config = driver.Config{
Name: "Template",
LocalSort: false,
OnlyLinkMFile: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
@@ -841,7 +840,7 @@ func (xc *XunLeiBrowserCommon) OfflineList(ctx context.Context, nextPageToken st
func (xc *XunLeiBrowserCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string) error {
queryParams := map[string]string{
"task_ids": strings.Join(taskIDs, ","),
"_t": strconv.FormatInt(time.Now().UnixMilli(), 10),
"_t": fmt.Sprintf("%d", time.Now().UnixMilli()),
}
if xc.UseFluentPlay {
queryParams["space"] = ThunderBrowserDriveFluentPlayFolderType

View File

@@ -2,11 +2,11 @@ package virtual
import (
"context"
"io"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
)
@@ -42,14 +42,16 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return res, nil
}
type DummyMFile struct{}
type DummyMFile struct {
io.Reader
}
func (f DummyMFile) Read(p []byte) (n int, err error) {
return random.Rand.Read(p)
return f.Reader.Read(p)
}
func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
return random.Rand.Read(p)
return f.Reader.Read(p)
}
func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
@@ -58,7 +60,7 @@ func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), DummyMFile{}),
MFile: DummyMFile{Reader: random.Rand},
}, nil
}

View File

@@ -14,11 +14,11 @@ type Addition struct {
}
var config = driver.Config{
Name: "Virtual",
LocalSort: true,
OnlyProxy: true,
NeedMs: true,
NoLinkURL: true,
Name: "Virtual",
OnlyLinkMFile: true,
LocalSort: true,
NeedMs: true,
NoLinkURL: true,
}
func init() {

29
go.mod
View File

@@ -20,7 +20,7 @@ require (
github.com/charmbracelet/bubbles v0.21.0
github.com/charmbracelet/bubbletea v1.3.6
github.com/charmbracelet/lipgloss v1.1.0
github.com/city404/v6-public-rpc-proto/go d0e94501d242
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc
github.com/coreos/go-oidc v2.3.0+incompatible
github.com/deckarep/golang-set/v2 v2.8.0
@@ -38,7 +38,6 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499
github.com/hekmon/transmissionrpc/v3 v3.0.0
github.com/ipfs/go-ipfs-api v0.7.0
github.com/itsHenry35/gofakes3 v0.0.8
@@ -53,7 +52,6 @@ require (
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.9
github.com/pquerna/otp v1.5.0
github.com/quic-go/quic-go v0.54.1
github.com/rclone/rclone v1.70.3
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
github.com/shirou/gopsutil/v4 v4.25.5
@@ -84,24 +82,12 @@ require (
require (
cloud.google.com/go/compute/metadata v0.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
github.com/ProtonMail/go-srp v0.0.7 // indirect
github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
github.com/PuerkitoBio/goquery v1.10.3 // indirect
github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/bradenaw/juniper v0.15.3 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/emersion/go-message v0.18.2 // indirect
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
github.com/henrybear327/go-proton-api v1.0.0 // indirect
github.com/geoffgarside/ber v1.2.0 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -116,13 +102,7 @@ require (
github.com/minio/xxml v0.0.3 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/otiai10/mint v1.6.3 // indirect
github.com/relvacode/iso8601 v1.6.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.uber.org/mock v0.5.0 // indirect
golang.org/x/mod v0.27.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)
@@ -214,7 +194,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/henrybear327/Proton-API-Bridge v1.0.0
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/go-cid v0.5.0
github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -288,7 +267,7 @@ require (
golang.org/x/sys v0.34.0
golang.org/x/term v0.33.0 // indirect
golang.org/x/text v0.27.0
golang.org/x/tools v0.35.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6 // indirect
@@ -298,8 +277,4 @@ require (
lukechampine.com/blake3 v1.1.7 // indirect
)
replace github.com/ProtonMail/go-proton-api => github.com/henrybear327/go-proton-api v1.0.0
replace github.com/cronokirby/saferith => github.com/Da3zKi7/saferith v0.33.0-fixed
// replace github.com/OpenListTeam/115-sdk-go => ../../OpenListTeam/115-sdk-go

56
go.sum
View File

@@ -37,8 +37,6 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
@@ -55,22 +53,8 @@ github.com/OpenListTeam/times v0.1.0 h1:qknxw+qj5CYKgXAwydA102UEpPcpU8TYNGRmwRyP
github.com/OpenListTeam/times v0.1.0/go.mod h1:Jx7qen5NCYzKk2w14YuvU48YYMcPa1P9a+EJePC15Pc=
github.com/OpenListTeam/wopan-sdk-go v0.1.5 h1:iKKcVzIqBgtGDbn0QbdWrCazSGxXFmYFyrnFBG+U8dI=
github.com/OpenListTeam/wopan-sdk-go v0.1.5/go.mod h1:otynv0CgSNUClPpUgZ44qCZGcMRe0dc83Pkk65xAunI=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
github.com/ProtonMail/gopenpgp/v2 v2.9.0 h1:ruLzBmwe4dR1hdnrsEJ/S7psSBmV15gFttFUPP/+/kE=
github.com/ProtonMail/gopenpgp/v2 v2.9.0/go.mod h1:IldDyh9Hv1ZCCYatTuuEt1XZJ0OPjxLpTarDfglih7s=
github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo=
github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y=
github.com/RoaringBitmap/roaring/v2 v2.4.5 h1:uGrrMreGjvAtTBobc0g5IrW1D5ldxDQYe2JW2gggRdg=
github.com/RoaringBitmap/roaring/v2 v2.4.5/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
@@ -88,8 +72,6 @@ github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAP
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -182,9 +164,6 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo=
github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0=
github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
@@ -219,7 +198,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg=
@@ -261,10 +239,6 @@ github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJL
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg=
github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
@@ -390,10 +364,6 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261 h1:47L8SHM80cXszQydLrpp9MhVkFLLWCvrU9XmJ6XtRu0=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499 h1:4ovnBdiGDFi8putQGxhipuuhXItAgh4/YnzufPYkZkQ=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -415,12 +385,6 @@ github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -621,14 +585,8 @@ github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQP
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg=
github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/rclone/rclone v1.70.3 h1:rg/WNh4DmSVZyKP2tHZ4lAaWEyMi7h/F0r7smOMA3IE=
github.com/rclone/rclone v1.70.3/go.mod h1:nLyN+hpxAsQn9Rgt5kM774lcRDad82x/KqQeBZ83cMo=
github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -744,8 +702,6 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
@@ -758,7 +714,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
@@ -801,8 +756,6 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -822,12 +775,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -870,7 +821,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -889,7 +839,6 @@ golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXct
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
@@ -905,7 +854,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -947,8 +895,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -39,21 +39,7 @@ func InitConfig() {
if !filepath.IsAbs(dataDir) {
flags.DataDir = filepath.Join(pwd, flags.DataDir)
}
// Determine config file path: use flags.ConfigPath if provided, otherwise default to <dataDir>/config.json
configPath := flags.ConfigPath
if configPath == "" {
configPath = filepath.Join(flags.DataDir, "config.json")
} else {
// if relative, resolve relative to working directory
if !filepath.IsAbs(configPath) {
if absPath, err := filepath.Abs(configPath); err == nil {
configPath = absPath
} else {
configPath = filepath.Join(pwd, configPath)
}
}
}
configPath = filepath.Clean(configPath)
configPath := filepath.Join(flags.DataDir, "config.json")
log.Infof("reading config file: %s", configPath)
if !utils.Exists(configPath) {
log.Infof("config file not exists, creating default config file")

View File

@@ -132,7 +132,8 @@ func InitialSettings() []model.SettingItem {
"Google":"https://docs.google.com/gview?url=$e_url&embedded=true"
},
"pdf": {
"PDF.js":"https://res.oplist.org/pdf.js/web/viewer.html?file=$e_url"
"PDF.js":"https://res.oplist.org/pdf.js/web/viewer.html?file=$e_url",
"Browser":"$url?inline_preview=true"
},
"epub": {
"EPUB.js":"https://res.oplist.org/epub.js/viewer.html?url=$e_url"
@@ -148,10 +149,7 @@ func InitialSettings() []model.SettingItem {
{Key: "audio_cover", Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW},
{Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.PreviewDownloadByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.SharePreviewDownloadByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.SharePreviewArchivesByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
// global settings

View File

@@ -25,6 +25,6 @@ func LoadStorages() {
storages[i].MountPath, storages[i].Driver, storages[i].Order)
}
}
conf.SendStoragesLoadedSignal()
conf.StoragesLoaded = true
}(storages)
}

View File

@@ -1,101 +0,0 @@
package cache
import (
"sync"
"time"
)
type KeyedCache[T any] struct {
entries map[string]*CacheEntry[T]
mu sync.RWMutex
ttl time.Duration
}
func NewKeyedCache[T any](ttl time.Duration) *KeyedCache[T] {
c := &KeyedCache[T]{
entries: make(map[string]*CacheEntry[T]),
ttl: ttl,
}
gcFuncs = append(gcFuncs, c.GC)
return c
}
func (c *KeyedCache[T]) Set(key string, value T) {
c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(c.ttl)))
}
func (c *KeyedCache[T]) SetWithTTL(key string, value T, ttl time.Duration) {
c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(ttl)))
}
func (c *KeyedCache[T]) SetWithExpirable(key string, value T, exp Expirable) {
c.mu.Lock()
defer c.mu.Unlock()
c.entries[key] = &CacheEntry[T]{
data: value,
Expirable: exp,
}
}
func (c *KeyedCache[T]) Get(key string) (T, bool) {
c.mu.RLock()
entry, exists := c.entries[key]
if !exists {
c.mu.RUnlock()
return *new(T), false
}
expired := entry.Expired()
c.mu.RUnlock()
if !expired {
return entry.data, true
}
c.mu.Lock()
if c.entries[key] == entry {
delete(c.entries, key)
c.mu.Unlock()
return *new(T), false
}
c.mu.Unlock()
return *new(T), false
}
func (c *KeyedCache[T]) Delete(key string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.entries, key)
}
func (c *KeyedCache[T]) Take(key string) (T, bool) {
c.mu.Lock()
defer c.mu.Unlock()
if entry, exists := c.entries[key]; exists {
delete(c.entries, key)
return entry.data, true
}
return *new(T), false
}
func (c *KeyedCache[T]) Clear() {
c.mu.Lock()
defer c.mu.Unlock()
c.entries = make(map[string]*CacheEntry[T])
}
func (c *KeyedCache[T]) GC() {
c.mu.Lock()
defer c.mu.Unlock()
expiredKeys := make([]string, 0, len(c.entries))
for key, entry := range c.entries {
if entry.Expired() {
expiredKeys = append(expiredKeys, key)
}
}
for _, key := range expiredKeys {
delete(c.entries, key)
}
}

View File

@@ -1,18 +0,0 @@
package cache
import "time"
type Expirable interface {
Expired() bool
}
type ExpirationTime time.Time
func (e ExpirationTime) Expired() bool {
return time.Now().After(time.Time(e))
}
type CacheEntry[T any] struct {
Expirable
data T
}

View File

@@ -1,113 +0,0 @@
package cache
import (
"sync"
"time"
)
type TypedCache[T any] struct {
entries map[string]map[string]*CacheEntry[T]
mu sync.RWMutex
ttl time.Duration
}
func NewTypedCache[T any](ttl time.Duration) *TypedCache[T] {
c := &TypedCache[T]{
entries: make(map[string]map[string]*CacheEntry[T]),
ttl: ttl,
}
gcFuncs = append(gcFuncs, c.GC)
return c
}
func (c *TypedCache[T]) SetType(key, typeKey string, value T) {
c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(time.Now().Add(c.ttl)))
}
func (c *TypedCache[T]) SetTypeWithTTL(key, typeKey string, value T, ttl time.Duration) {
c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(time.Now().Add(ttl)))
}
func (c *TypedCache[T]) SetTypeWithExpirable(key, typeKey string, value T, exp Expirable) {
c.mu.Lock()
defer c.mu.Unlock()
cache, exists := c.entries[key]
if !exists {
cache = make(map[string]*CacheEntry[T])
c.entries[key] = cache
}
cache[typeKey] = &CacheEntry[T]{
data: value,
Expirable: exp,
}
}
func (c *TypedCache[T]) GetType(key, typeKey string) (T, bool) {
c.mu.RLock()
cache, exists := c.entries[key]
if !exists {
c.mu.RUnlock()
return *new(T), false
}
entry, exists := cache[typeKey]
if !exists {
c.mu.RUnlock()
return *new(T), false
}
expired := entry.Expired()
c.mu.RUnlock()
if !expired {
return entry.data, true
}
c.mu.Lock()
if cache[typeKey] == entry {
delete(cache, typeKey)
if len(cache) == 0 {
delete(c.entries, key)
}
c.mu.Unlock()
return *new(T), false
}
c.mu.Unlock()
return *new(T), false
}
func (c *TypedCache[T]) DeleteKey(key string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.entries, key)
}
func (c *TypedCache[T]) Clear() {
c.mu.Lock()
defer c.mu.Unlock()
c.entries = make(map[string]map[string]*CacheEntry[T])
}
func (c *TypedCache[T]) GC() {
c.mu.Lock()
defer c.mu.Unlock()
expiredKeys := make(map[string][]string)
for tk, entries := range c.entries {
for key, entry := range entries {
if !entry.Expired() {
continue
}
if _, ok := expiredKeys[tk]; !ok {
expiredKeys[tk] = make([]string, 0, len(entries))
}
expiredKeys[tk] = append(expiredKeys[tk], key)
}
}
for tk, keys := range expiredKeys {
for _, key := range keys {
delete(c.entries[tk], key)
}
if len(c.entries[tk]) == 0 {
delete(c.entries, tk)
}
}
}

View File

@@ -1,24 +0,0 @@
package cache
import (
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
log "github.com/sirupsen/logrus"
)
var (
cacheGcCron *cron.Cron
gcFuncs []func()
)
func init() {
// TODO Move to bootstrap
cacheGcCron = cron.NewCron(time.Hour)
cacheGcCron.Do(func() {
log.Infof("Start cache GC")
for _, f := range gcFuncs {
f()
}
})
}

View File

@@ -35,7 +35,6 @@ type Scheme struct {
UnixFile string `json:"unix_file" env:"UNIX_FILE"`
UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"`
EnableH2c bool `json:"enable_h2c" env:"ENABLE_H2C"`
EnableH3 bool `json:"enable_h3" env:"ENABLE_H3"`
}
type LogConfig struct {

View File

@@ -24,20 +24,17 @@ const (
HideStorageDetailsInManagePage = "hide_storage_details_in_manage_page"
// preview
TextTypes = "text_types"
AudioTypes = "audio_types"
VideoTypes = "video_types"
ImageTypes = "image_types"
ProxyTypes = "proxy_types"
ProxyIgnoreHeaders = "proxy_ignore_headers"
AudioAutoplay = "audio_autoplay"
VideoAutoplay = "video_autoplay"
PreviewDownloadByDefault = "preview_download_by_default"
PreviewArchivesByDefault = "preview_archives_by_default"
SharePreviewDownloadByDefault = "share_preview_download_by_default"
SharePreviewArchivesByDefault = "share_preview_archives_by_default"
ReadMeAutoRender = "readme_autorender"
FilterReadMeScripts = "filter_readme_scripts"
TextTypes = "text_types"
AudioTypes = "audio_types"
VideoTypes = "video_types"
ImageTypes = "image_types"
ProxyTypes = "proxy_types"
ProxyIgnoreHeaders = "proxy_ignore_headers"
AudioAutoplay = "audio_autoplay"
VideoAutoplay = "video_autoplay"
PreviewArchivesByDefault = "preview_archives_by_default"
ReadMeAutoRender = "readme_autorender"
FilterReadMeScripts = "filter_readme_scripts"
// global
HideFiles = "hide_files"

View File

@@ -3,7 +3,6 @@ package conf
import (
"net/url"
"regexp"
"sync"
)
var (
@@ -24,6 +23,8 @@ var FilenameCharMap = make(map[string]string)
var PrivacyReg []*regexp.Regexp
var (
// StoragesLoaded indicates whether all storages have finished loading
StoragesLoaded = false
// Maximum size of a single buffer
MaxBufferLimit = 16 * 1024 * 1024
// Buffers above this threshold are allocated with mmap so the memory can be released proactively
@@ -34,39 +35,3 @@ var (
ManageHtml string
IndexHtml string
)
var (
// StoragesLoaded indicates whether all storages have finished loading
StoragesLoaded = false
storagesLoadMu sync.RWMutex
storagesLoadSignal chan struct{} = make(chan struct{})
)
func StoragesLoadSignal() <-chan struct{} {
storagesLoadMu.RLock()
ch := storagesLoadSignal
storagesLoadMu.RUnlock()
return ch
}
func SendStoragesLoadedSignal() {
storagesLoadMu.Lock()
select {
case <-storagesLoadSignal:
// already closed
default:
StoragesLoaded = true
close(storagesLoadSignal)
}
storagesLoadMu.Unlock()
}
func ResetStoragesLoadSignal() {
storagesLoadMu.Lock()
select {
case <-storagesLoadSignal:
StoragesLoaded = false
storagesLoadSignal = make(chan struct{})
default:
// not closed -> nothing to do
}
storagesLoadMu.Unlock()
}

View File

@@ -60,7 +60,3 @@ func DeleteSharingById(id string) error {
s := model.SharingDB{ID: id}
return errors.WithStack(db.Where(s).Delete(&s).Error)
}
func DeleteSharingsByCreatorId(creatorId uint) error {
return errors.WithStack(db.Where("creator_id = ?", creatorId).Delete(&model.SharingDB{}).Error)
}

View File

@@ -1,7 +1,6 @@
package db
import (
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/pkg/errors"
)
@@ -31,7 +30,6 @@ func GetTaskDataFunc(type_s string, enabled bool) func() ([]byte, error) {
return nil
}
return func() ([]byte, error) {
<-conf.StoragesLoadSignal()
return []byte(task.PersistData), nil
}
}

View File

@@ -3,9 +3,11 @@ package driver
type Config struct {
Name string `json:"name"`
LocalSort bool `json:"local_sort"`
OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"`
// if the driver returns Link with MFile, this should be set to true
OnlyLinkMFile bool `json:"only_local"`
OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"`
// if need get message from user, such as validate code
NeedMs bool `json:"need_ms"`
DefaultRoot string `json:"default_root"`
@@ -17,24 +19,8 @@ type Config struct {
ProxyRangeOption bool `json:"-"`
// if the driver returns Link without URL, this should be set to true
NoLinkURL bool `json:"-"`
// Link cache behaviour:
// - LinkCacheAuto: let driver decide per-path (implement driver.LinkCacheModeResolver)
// - LinkCacheNone: no extra info added to cache key (default)
// - flags (OR-able) can add more attributes to cache key (IP, UA, ...)
LinkCacheMode `json:"-"`
}
type LinkCacheMode int8
const (
LinkCacheAuto LinkCacheMode = -1 // Let the driver decide per-path (use driver.LinkCacheModeResolver)
LinkCacheNone LinkCacheMode = 0 // No extra info added to cache key (default)
)
const (
LinkCacheIP LinkCacheMode = 1 << iota // include client IP in cache key
LinkCacheUA // include User-Agent in cache key
)
func (c Config) MustProxy() bool {
return c.OnlyProxy || c.NoLinkURL
return c.OnlyProxy || c.OnlyLinkMFile || c.NoLinkURL
}

View File

@@ -47,6 +47,11 @@ type Getter interface {
Get(ctx context.Context, path string) (model.Obj, error)
}
type GetObjInfo interface {
// GetObjInfo get file info by path
GetObjInfo(ctx context.Context, path string) (model.Obj, error)
}
//type Writer interface {
// Mkdir
// Move
@@ -213,8 +218,3 @@ type WithDetails interface {
type Reference interface {
InitReference(storage Driver) error
}
type LinkCacheModeResolver interface {
// ResolveLinkCacheMode returns the LinkCacheMode for the given path.
ResolveLinkCacheMode(path string) LinkCacheMode
}

View File

@@ -1,11 +1,12 @@
package errs
import "errors"
func UnwrapOrSelf(err error) error {
u, ok := err.(interface {
Unwrap() error
})
if !ok {
// errors.Unwrap has no fallback mechanism
unwrapped := errors.Unwrap(err)
if unwrapped == nil {
return err
}
return u.Unwrap()
return unwrapped
}

View File

@@ -41,18 +41,6 @@ func (t *ArchiveDownloadTask) Run() error {
if err := t.ReinitCtx(); err != nil {
return err
}
if t.SrcStorage == nil {
if srcStorage, _, err := op.GetStorageAndActualPath(t.SrcStorageMp); err == nil {
t.SrcStorage = srcStorage
} else {
return err
}
if dstStorage, _, err := op.GetStorageAndActualPath(t.DstStorageMp); err == nil {
t.DstStorage = dstStorage
} else {
return err
}
}
t.ClearEndTime()
t.SetStartTime(time.Now())
defer func() { t.SetEndTime(time.Now()) }()

View File

@@ -48,19 +48,6 @@ func (t *FileTransferTask) Run() error {
if err := t.ReinitCtx(); err != nil {
return err
}
if t.SrcStorage == nil {
if srcStorage, _, err := op.GetStorageAndActualPath(t.SrcStorageMp); err == nil {
t.SrcStorage = srcStorage
} else {
return err
}
if dstStorage, _, err := op.GetStorageAndActualPath(t.DstStorageMp); err == nil {
t.DstStorage = dstStorage
} else {
return err
}
}
t.ClearEndTime()
t.SetStartTime(time.Now())
defer func() { t.SetEndTime(time.Now()) }()
@@ -152,7 +139,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
if taskType == move {
task_group.RefreshAndRemove(dstDirPath, task_group.SrcPathToRemove(srcObjPath))
} else {
op.Cache.DeleteDirectory(t.DstStorage, dstDirActualPath)
op.DeleteCache(t.DstStorage, dstDirActualPath)
}
}
return nil, err
@@ -186,7 +173,7 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
if t.TaskType == copy {
if t.Ctx().Value(conf.NoTaskKey) != nil {
defer op.Cache.DeleteDirectory(t.DstStorage, dstActualPath)
defer op.DeleteCache(t.DstStorage, dstActualPath)
} else {
task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath))
}

View File

@@ -15,7 +15,7 @@ func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
path = utils.FixAndCleanPath(path)
// maybe a virtual file
if path != "/" {
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails, false)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails)
for _, f := range virtualFiles {
if f.GetName() == stdpath.Base(path) {
return f, nil

View File

@@ -15,7 +15,7 @@ import (
func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
user, _ := ctx.Value(conf.UserKey).(*model.User)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails, args.Refresh)
virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails)
storage, actualPath, err := op.GetStorageAndActualPath(path)
if err != nil && len(virtualFiles) == 0 {
return nil, errors.WithMessage(err, "failed get storage")

View File

@@ -28,6 +28,7 @@ type Link struct {
URL string `json:"url"` // most common way
Header http.Header `json:"header"` // needed header (for url)
RangeReader RangeReaderIF `json:"-"` // recommended way if can't use URL
MFile File `json:"-"` // best for local,smb... file system, which exposes MFile
Expiration *time.Duration // local cache expire Duration
@@ -37,8 +38,6 @@ type Link struct {
ContentLength int64 `json:"-"` // 转码视频、缩略图
utils.SyncClosers `json:"-"`
// 如果SyncClosers中的资源被关闭后Link将不可用则此值应为 true
RequireReference bool `json:"-"`
}
type OtherArgs struct {
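The hunk above restores MFile on model.Link as the preferred representation for local and SMB-style backends, alongside URL for remote drivers. A simplified sketch of the two shapes a driver might return, using local stand-in types rather than the real model package:

```go
package main

import (
	"net/http"
	"os"
)

// Simplified stand-in for model.Link with two of the delivery options shown above.
type Link struct {
	URL    string      // remote drivers: a (possibly signed) download URL
	Header http.Header // headers required by that URL
	MFile  *os.File    // local/SMB-style drivers: hand the open file to the proxy
}

// linkFromLocalFile is what a filesystem-backed driver might return: no URL at all,
// just an open handle the HTTP layer can serve directly.
func linkFromLocalFile(path string) (*Link, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	return &Link{MFile: f}, nil
}

// linkFromSignedURL is what a cloud driver might return instead.
func linkFromSignedURL(url string) *Link {
	return &Link{URL: url, Header: http.Header{"Referer": []string{"https://example.invalid"}}}
}

func main() {
	_ = linkFromSignedURL("https://example.invalid/download?sig=abc")
	if l, err := linkFromLocalFile("/etc/hostname"); err == nil {
		defer l.MFile.Close()
	}
}
```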

View File

@@ -33,7 +33,7 @@ func (s *Sharing) Valid() bool {
if len(s.Files) == 0 {
return false
}
if s.Creator == nil || !s.Creator.CanShare() {
if !s.Creator.CanShare() {
return false
}
if s.Expires != nil && !s.Expires.IsZero() && s.Expires.Before(time.Now()) {

View File

@@ -12,7 +12,6 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/rclone/rclone/lib/mmap"
@@ -404,7 +403,7 @@ var errInfiniteRetry = errors.New("infinite retry")
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
resp, err := d.cfg.HttpClient(d.ctx, params)
if err != nil {
statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError)
statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError)
if !ok {
return 0, err
}

View File

@@ -34,20 +34,6 @@ func (t *TransferTask) Run() error {
if err := t.ReinitCtx(); err != nil {
return err
}
if t.SrcStorage == nil && t.SrcStorageMp != "" {
if srcStorage, _, err := op.GetStorageAndActualPath(t.SrcStorageMp); err == nil {
t.SrcStorage = srcStorage
} else {
return err
}
if t.DstStorage == nil {
if dstStorage, _, err := op.GetStorageAndActualPath(t.DstStorageMp); err == nil {
t.DstStorage = dstStorage
} else {
return err
}
}
}
t.ClearEndTime()
t.SetStartTime(time.Now())
defer func() { t.SetEndTime(time.Now()) }()
@@ -78,8 +64,9 @@ func (t *TransferTask) Run() error {
return op.Put(t.Ctx(), t.DstStorage, t.DstActualPath, s, t.SetProgress)
}
return transferStdPath(t)
} else {
return transferObjPath(t)
}
return transferObjPath(t)
}
func (t *TransferTask) GetName() string {

View File

@@ -10,7 +10,6 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/cache"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
@@ -18,12 +17,12 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
gocache "github.com/OpenListTeam/go-cache"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
var archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
@@ -38,14 +37,14 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return nil, errors.Wrapf(err, "failed to get %s archive meta: %+v", path, err)
}
if m.Expiration != nil {
archiveMetaCache.Set(key, m, gocache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
}
return m, nil
}
// if storage.Config().NoLinkSingleflight {
// meta, err := fn()
// return meta, err
// }
if storage.Config().OnlyLinkMFile {
meta, err := fn()
return meta, err
}
if !args.Refresh {
if meta, ok := archiveMetaCache.Get(key); ok {
log.Debugf("use cache when get %s archive meta", path)
@@ -159,7 +158,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return obj, archiveMetaProvider, err
}
var archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var archiveListG singleflight.Group[[]model.Obj]
func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
@@ -200,7 +199,7 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m
if !storage.Config().NoCache {
if len(files) > 0 {
log.Debugf("set cache: %s => %+v", key, files)
archiveListCache.Set(key, files, gocache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
} else {
log.Debugf("del cache: %s", key)
archiveListCache.Del(key)
@@ -355,50 +354,75 @@ func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args mo
return nil, nil, errors.WithStack(errs.ObjectNotFound)
}
type objWithLink struct {
link *model.Link
obj model.Obj
type extractLink struct {
*model.Link
Obj model.Obj
}
var extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
var extractG = singleflight.Group[*objWithLink]{}
var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16))
var extractG = singleflight.Group[*extractLink]{Remember: true}
func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
key := stdpath.Join(Key(storage, path), args.InnerPath)
if ol, ok := extractCache.Get(key); ok {
if ol.link.Expiration != nil || ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
}
if link, ok := extractCache.Get(key); ok {
return link.Link, link.Obj, nil
}
fn := func() (*objWithLink, error) {
ol, err := driverExtract(ctx, storage, path, args)
var forget any
var linkM *extractLink
fn := func() (*extractLink, error) {
link, err := driverExtract(ctx, storage, path, args)
if err != nil {
return nil, errors.Wrapf(err, "failed extract archive")
}
if ol.link.Expiration != nil {
extractCache.SetWithTTL(key, ol, *ol.link.Expiration)
} else {
extractCache.SetWithExpirable(key, ol, &ol.link.SyncClosers)
if link.MFile != nil && forget != nil {
linkM = link
return nil, errLinkMFileCache
}
return ol, nil
if link.Link.Expiration != nil {
extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
}
link.AddIfCloser(forget)
return link, nil
}
for {
ol, err, _ := extractG.Do(key, fn)
if storage.Config().OnlyLinkMFile {
link, err := fn()
if err != nil {
return nil, nil, err
}
if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
}
return link.Link, link.Obj, nil
}
forget = utils.CloseFunc(func() error {
if forget != nil {
forget = nil
linkG.Forget(key)
}
return nil
})
link, err, _ := extractG.Do(key, fn)
for err == nil && !link.AcquireReference() {
link, err, _ = extractG.Do(key, fn)
}
if err == errLinkMFileCache {
if linkM != nil {
return linkM.Link, linkM.Obj, nil
}
forget = nil
link, err = fn()
}
if err != nil {
return nil, nil, err
}
return link.Link, link.Obj, nil
}
func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*objWithLink, error) {
func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) {
storageAr, ok := storage.(driver.ArchiveReader)
if !ok {
return nil, errs.DriverExtractNotSupported
@@ -414,7 +438,7 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args
return nil, errors.WithStack(errs.NotFile)
}
link, err := storageAr.Extract(ctx, archiveFile, args)
return &objWithLink{link: link, obj: extracted}, err
return &extractLink{Link: link, Obj: extracted}, err
}
type streamWithParent struct {
@@ -476,16 +500,16 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
if err == nil {
if len(newObjs) > 0 {
for _, newObj := range newObjs {
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
}
} else if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
DeleteCache(storage, dstDirPath)
}
}
case driver.ArchiveDecompress:
err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
if err == nil && !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
DeleteCache(storage, dstDirPath)
}
default:
return errs.NotImplement

View File

@@ -1,257 +0,0 @@
package op
import (
stdpath "path"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/cache"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
type CacheManager struct {
dirCache *cache.KeyedCache[*directoryCache] // Cache for directory listings
linkCache *cache.TypedCache[*objWithLink] // Cache for file links
userCache *cache.KeyedCache[*model.User] // Cache for user data
settingCache *cache.KeyedCache[any] // Cache for settings
detailCache *cache.KeyedCache[*model.StorageDetails] // Cache for storage details
}
func NewCacheManager() *CacheManager {
return &CacheManager{
dirCache: cache.NewKeyedCache[*directoryCache](time.Minute * 5),
linkCache: cache.NewTypedCache[*objWithLink](time.Minute * 30),
userCache: cache.NewKeyedCache[*model.User](time.Hour),
settingCache: cache.NewKeyedCache[any](time.Hour),
detailCache: cache.NewKeyedCache[*model.StorageDetails](time.Minute * 30),
}
}
// global instance
var Cache = NewCacheManager()
func Key(storage driver.Driver, path string) string {
return stdpath.Join(storage.GetStorage().MountPath, path)
}
// update object in dirCache.
// if it's a directory, remove all its children from dirCache too.
// if it's a file, remove its link from linkCache.
func (cm *CacheManager) updateDirectoryObject(storage driver.Driver, dirPath string, oldObj model.Obj, newObj model.Obj) {
key := Key(storage, dirPath)
if !oldObj.IsDir() {
cm.linkCache.DeleteKey(stdpath.Join(key, oldObj.GetName()))
cm.linkCache.DeleteKey(stdpath.Join(key, newObj.GetName()))
}
if storage.Config().NoCache {
return
}
if cache, exist := cm.dirCache.Get(key); exist {
if oldObj.IsDir() {
cm.deleteDirectoryTree(stdpath.Join(key, oldObj.GetName()))
}
cache.UpdateObject(oldObj.GetName(), newObj)
}
}
// add new object to dirCache
func (cm *CacheManager) addDirectoryObject(storage driver.Driver, dirPath string, newObj model.Obj) {
if storage.Config().NoCache {
return
}
cache, exist := cm.dirCache.Get(Key(storage, dirPath))
if exist {
cache.UpdateObject(newObj.GetName(), newObj)
}
}
// recursively delete directory and its children from dirCache
func (cm *CacheManager) DeleteDirectoryTree(storage driver.Driver, dirPath string) {
if storage.Config().NoCache {
return
}
cm.deleteDirectoryTree(Key(storage, dirPath))
}
func (cm *CacheManager) deleteDirectoryTree(key string) {
if dirCache, exists := cm.dirCache.Take(key); exists {
for _, obj := range dirCache.objs {
if obj.IsDir() {
cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName()))
}
}
}
}
// remove directory from dirCache
func (cm *CacheManager) DeleteDirectory(storage driver.Driver, dirPath string) {
if storage.Config().NoCache {
return
}
cm.dirCache.Delete(Key(storage, dirPath))
}
// remove object from dirCache.
// if it's a directory, remove all its children from dirCache too.
// if it's a file, remove its link from linkCache.
func (cm *CacheManager) removeDirectoryObject(storage driver.Driver, dirPath string, obj model.Obj) {
key := Key(storage, dirPath)
if !obj.IsDir() {
cm.linkCache.DeleteKey(stdpath.Join(key, obj.GetName()))
}
if storage.Config().NoCache {
return
}
if cache, exist := cm.dirCache.Get(key); exist {
if obj.IsDir() {
cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName()))
}
cache.RemoveObject(obj.GetName())
}
}
// cache user data
func (cm *CacheManager) SetUser(username string, user *model.User) {
cm.userCache.Set(username, user)
}
// cached user data
func (cm *CacheManager) GetUser(username string) (*model.User, bool) {
return cm.userCache.Get(username)
}
// remove user data from cache
func (cm *CacheManager) DeleteUser(username string) {
cm.userCache.Delete(username)
}
// caches setting
func (cm *CacheManager) SetSetting(key string, setting *model.SettingItem) {
cm.settingCache.Set(key, setting)
}
// cached setting
func (cm *CacheManager) GetSetting(key string) (*model.SettingItem, bool) {
if data, exists := cm.settingCache.Get(key); exists {
if setting, ok := data.(*model.SettingItem); ok {
return setting, true
}
}
return nil, false
}
// cache setting groups
func (cm *CacheManager) SetSettingGroup(key string, settings []model.SettingItem) {
cm.settingCache.Set(key, settings)
}
// cached setting group
func (cm *CacheManager) GetSettingGroup(key string) ([]model.SettingItem, bool) {
if data, exists := cm.settingCache.Get(key); exists {
if settings, ok := data.([]model.SettingItem); ok {
return settings, true
}
}
return nil, false
}
func (cm *CacheManager) SetStorageDetails(storage driver.Driver, details *model.StorageDetails) {
if storage.Config().NoCache {
return
}
expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
cm.detailCache.SetWithTTL(storage.GetStorage().MountPath, details, expiration)
}
func (cm *CacheManager) GetStorageDetails(storage driver.Driver) (*model.StorageDetails, bool) {
return cm.detailCache.Get(storage.GetStorage().MountPath)
}
func (cm *CacheManager) InvalidateStorageDetails(storage driver.Driver) {
cm.detailCache.Delete(storage.GetStorage().MountPath)
}
// clears all caches
func (cm *CacheManager) ClearAll() {
cm.dirCache.Clear()
cm.linkCache.Clear()
cm.userCache.Clear()
cm.settingCache.Clear()
cm.detailCache.Clear()
}
type directoryCache struct {
objs []model.Obj
sorted []model.Obj
mu sync.RWMutex
dirtyFlags uint8
}
const (
dirtyRemove uint8 = 1 << iota // 对象删除:刷新 sorted 副本,但不需要 full sort/extract
dirtyUpdate // 对象更新:需要执行 full sort + extract
)
func newDirectoryCache(objs []model.Obj) *directoryCache {
sorted := make([]model.Obj, len(objs))
copy(sorted, objs)
return &directoryCache{
objs: objs,
sorted: sorted,
}
}
func (dc *directoryCache) RemoveObject(name string) {
dc.mu.Lock()
defer dc.mu.Unlock()
for i, obj := range dc.objs {
if obj.GetName() == name {
dc.objs = append(dc.objs[:i], dc.objs[i+1:]...)
dc.dirtyFlags |= dirtyRemove
break
}
}
}
func (dc *directoryCache) UpdateObject(oldName string, newObj model.Obj) {
dc.mu.Lock()
defer dc.mu.Unlock()
if oldName != "" {
for i, obj := range dc.objs {
if obj.GetName() == oldName {
dc.objs[i] = newObj
dc.dirtyFlags |= dirtyUpdate
return
}
}
}
dc.objs = append(dc.objs, newObj)
dc.dirtyFlags |= dirtyUpdate
}
func (dc *directoryCache) GetSortedObjects(meta driver.Meta) []model.Obj {
dc.mu.RLock()
if dc.dirtyFlags == 0 {
dc.mu.RUnlock()
return dc.sorted
}
dc.mu.RUnlock()
dc.mu.Lock()
defer dc.mu.Unlock()
sorted := make([]model.Obj, len(dc.objs))
copy(sorted, dc.objs)
dc.sorted = sorted
if dc.dirtyFlags&dirtyUpdate != 0 {
storage := meta.GetStorage()
if meta.Config().LocalSort {
model.SortFiles(sorted, storage.OrderBy, storage.OrderDirection)
}
model.ExtractFolder(sorted, storage.ExtractFolder)
}
dc.dirtyFlags = 0
return sorted
}
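The deleted cache_manager.go above keeps a pre-sorted copy of each directory listing and uses two dirty bits to decide how much work a later read must redo: removals only need a fresh copy, while updates need a full re-sort. A compact, self-contained sketch of that lazy re-sort pattern, with plain name sorting standing in for the storage-specific ordering:

```go
package main

import (
	"fmt"
	"sort"
	"sync"
)

const (
	dirtyRemove uint8 = 1 << iota // removal: refresh the sorted copy, no re-sort needed
	dirtyUpdate                   // update/insert: refresh and fully re-sort
)

type dirCache struct {
	mu     sync.RWMutex
	objs   []string // the authoritative listing
	sorted []string // lazily maintained sorted view
	dirty  uint8
}

func (dc *dirCache) Add(name string) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	dc.objs = append(dc.objs, name)
	dc.dirty |= dirtyUpdate
}

// Sorted returns the cached view when nothing changed, and rebuilds it otherwise.
func (dc *dirCache) Sorted() []string {
	dc.mu.RLock()
	if dc.dirty == 0 {
		defer dc.mu.RUnlock()
		return dc.sorted
	}
	dc.mu.RUnlock()

	dc.mu.Lock()
	defer dc.mu.Unlock()
	dc.sorted = append([]string(nil), dc.objs...)
	if dc.dirty&dirtyUpdate != 0 {
		sort.Strings(dc.sorted) // stand-in for model.SortFiles + ExtractFolder
	}
	dc.dirty = 0
	return dc.sorted
}

func main() {
	dc := &dirCache{objs: []string{"b.txt", "a.txt"}, sorted: []string{"b.txt", "a.txt"}}
	dc.Add("c.txt")
	fmt.Println(dc.Sorted()) // [a.txt b.txt c.txt]
}
```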

View File

@@ -4,20 +4,115 @@ import (
"context"
stderrors "errors"
stdpath "path"
"slices"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// In order to facilitate adding some other things before and after file op
var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var listG singleflight.Group[[]model.Obj]
func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, obj := range objs {
if obj.GetName() == newObj.GetName() {
objs = slices.Delete(objs, i, i+1)
break
}
}
for i, obj := range objs {
if obj.GetName() == oldObj.GetName() {
objs[i] = newObj
break
}
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
func delCacheObj(storage driver.Driver, path string, obj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, oldObj := range objs {
if oldObj.GetName() == obj.GetName() {
objs = append(objs[:i], objs[i+1:]...)
break
}
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
var addSortDebounceMap generic_sync.MapOf[string, func(func())]
func addCacheObj(storage driver.Driver, path string, newObj model.Obj) {
key := Key(storage, path)
objs, ok := listCache.Get(key)
if ok {
for i, obj := range objs {
if obj.GetName() == newObj.GetName() {
objs[i] = newObj
return
}
}
// Simple separation of files and folders
if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() {
objs = append(objs, newObj)
} else {
objs = append([]model.Obj{newObj}, objs...)
}
if storage.Config().LocalSort {
debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute))
log.Debug("addCacheObj: wait start sort")
debounce(func() {
log.Debug("addCacheObj: start sort")
model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
addSortDebounceMap.Delete(key)
})
}
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
}
}
func ClearCache(storage driver.Driver, path string) {
objs, ok := listCache.Get(Key(storage, path))
if ok {
for _, obj := range objs {
if obj.IsDir() {
ClearCache(storage, stdpath.Join(path, obj.GetName()))
}
}
}
listCache.Del(Key(storage, path))
}
func DeleteCache(storage driver.Driver, path string) {
listCache.Del(Key(storage, path))
}
func Key(storage driver.Driver, path string) string {
return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
}
// List files in storage, not contains virtual file
func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs) ([]model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -27,12 +122,11 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
log.Debugf("op.List %s", path)
key := Key(storage, path)
if !args.Refresh {
if dirCache, exists := Cache.dirCache.Get(key); exists {
if files, ok := listCache.Get(key); ok {
log.Debugf("use cache when list %s", path)
return dirCache.GetSortedObjects(storage), nil
return files, nil
}
}
dir, err := GetUnwrap(ctx, storage, path)
if err != nil {
return nil, errors.WithMessage(err, "failed get dir")
@@ -41,7 +135,6 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
if !dir.IsDir() {
return nil, errors.WithStack(errs.NotFolder)
}
objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
files, err := storage.List(ctx, dir, args)
if err != nil {
@@ -69,11 +162,10 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
if !storage.Config().NoCache {
if len(files) > 0 {
log.Debugf("set cache: %s => %+v", key, files)
ttl := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
Cache.dirCache.SetWithTTL(key, newDirectoryCache(files), ttl)
listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
} else {
log.Debugf("del cache: %s", key)
Cache.deleteDirectoryTree(key)
listCache.Del(key)
}
}
return files, nil
@@ -160,68 +252,100 @@ func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.O
return model.UnwrapObj(obj), err
}
var linkG = singleflight.Group[*objWithLink]{}
var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))
var linkG = singleflight.Group[*model.Link]{Remember: true}
var errLinkMFileCache = stderrors.New("ErrLinkMFileCache")
// Link get link, if is an url. should have an expiry time
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
mode := storage.Config().LinkCacheMode
if mode == -1 {
mode = storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(path)
}
typeKey := args.Type
if mode&driver.LinkCacheIP == 1 {
typeKey += "/" + args.IP
}
if mode&driver.LinkCacheUA == 1 {
typeKey += "/" + args.Header.Get("User-Agent")
}
key := Key(storage, path)
if ol, exists := Cache.linkCache.GetType(key, typeKey); exists {
if ol.link.Expiration != nil ||
ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
var (
file model.Obj
err error
)
// use cache directly
dir, name := stdpath.Split(stdpath.Join(storage.GetStorage().MountPath, path))
if cacheFiles, ok := listCache.Get(strings.TrimSuffix(dir, "/")); ok {
for _, f := range cacheFiles {
if f.GetName() == name {
file = model.UnwrapObj(f)
break
}
}
} else {
if g, ok := storage.(driver.GetObjInfo); ok {
file, err = g.GetObjInfo(ctx, path)
} else {
file, err = GetUnwrap(ctx, storage, path)
}
}
fn := func() (*objWithLink, error) {
file, err := GetUnwrap(ctx, storage, path)
if file == nil {
if err != nil {
return nil, errors.WithMessage(err, "failed to get file")
}
if file.IsDir() {
return nil, errors.WithStack(errs.NotFile)
return nil, nil, errors.WithMessage(err, "failed to get file")
}
return nil, nil, errors.WithStack(errs.ObjectNotFound)
}
if file.IsDir() {
return nil, nil, errors.WithStack(errs.NotFile)
}
key := stdpath.Join(Key(storage, path), args.Type)
if link, ok := linkCache.Get(key); ok {
return link, file, nil
}
var forget any
var linkM *model.Link
fn := func() (*model.Link, error) {
link, err := storage.Link(ctx, file, args)
if err != nil {
return nil, errors.Wrapf(err, "failed get link")
}
ol := &objWithLink{link: link, obj: file}
if link.Expiration != nil {
Cache.linkCache.SetTypeWithTTL(key, typeKey, ol, *link.Expiration)
} else {
Cache.linkCache.SetTypeWithExpirable(key, typeKey, ol, &link.SyncClosers)
if link.MFile != nil && forget != nil {
linkM = link
return nil, errLinkMFileCache
}
return ol, nil
if link.Expiration != nil {
linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
}
link.AddIfCloser(forget)
return link, nil
}
retry := 0
for {
ol, err, _ := linkG.Do(key+"/"+typeKey, fn)
if storage.Config().OnlyLinkMFile {
link, err := fn()
if err != nil {
return nil, nil, err
}
if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
if retry > 1 {
log.Warnf("Link retry successed after %d times: %s %s", retry, key, typeKey)
}
return ol.link, ol.obj, nil
}
retry++
return link, file, err
}
forget = utils.CloseFunc(func() error {
if forget != nil {
forget = nil
linkG.Forget(key)
}
return nil
})
link, err, _ := linkG.Do(key, fn)
for err == nil && !link.AcquireReference() {
link, err, _ = linkG.Do(key, fn)
}
if err == errLinkMFileCache {
if linkM != nil {
return linkM, file, nil
}
forget = nil
link, err = fn()
}
if err != nil {
return nil, nil, err
}
return link, file, nil
}
// Other api
@@ -241,7 +365,7 @@ func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) (
}
}
var mkdirG singleflight.Group[any]
var mkdirG singleflight.Group[interface{}]
func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -249,7 +373,7 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
}
path = utils.FixAndCleanPath(path)
key := Key(storage, path)
_, err, _ := mkdirG.Do(key, func() (any, error) {
_, err, _ := mkdirG.Do(key, func() (interface{}, error) {
// check if dir exists
f, err := GetUnwrap(ctx, storage, path)
if err != nil {
@@ -271,19 +395,15 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
newObj, err = s.MakeDir(ctx, parentDir, dirName)
if err == nil {
if newObj != nil {
if !storage.Config().NoCache {
if dirCache, exist := Cache.dirCache.Get(Key(storage, parentPath)); exist {
dirCache.UpdateObject("", newObj)
}
}
addCacheObj(storage, parentPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, parentPath)
DeleteCache(storage, parentPath)
}
}
case driver.Mkdir:
err = s.MakeDir(ctx, parentDir, dirName)
if err == nil && !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, parentPath)
DeleteCache(storage, parentPath)
}
default:
return nil, errs.NotImplement
@@ -307,11 +427,7 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
srcPath = utils.FixAndCleanPath(srcPath)
srcDirPath := stdpath.Dir(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath)
if dstDirPath == srcDirPath {
return stderrors.New("move in place")
}
srcRawObj, err := Get(ctx, storage, srcPath)
if err != nil {
return errors.WithMessage(err, "failed to get src object")
@@ -321,25 +437,26 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
if err != nil {
return errors.WithMessage(err, "failed to get dst dir")
}
srcDirPath := stdpath.Dir(srcPath)
switch s := storage.(type) {
case driver.MoveResult:
var newObj model.Obj
newObj, err = s.Move(ctx, srcObj, dstDir)
if err == nil {
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
delCacheObj(storage, srcDirPath, srcRawObj)
if newObj != nil {
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
DeleteCache(storage, dstDirPath)
}
}
case driver.Move:
err = s.Move(ctx, srcObj, dstDir)
if err == nil {
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
delCacheObj(storage, srcDirPath, srcRawObj)
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
DeleteCache(storage, dstDirPath)
}
}
default:
@@ -358,29 +475,28 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string,
return errors.WithMessage(err, "failed to get src object")
}
srcObj := model.UnwrapObj(srcRawObj)
srcDirPath := stdpath.Dir(srcPath)
switch s := storage.(type) {
case driver.RenameResult:
var newObj model.Obj
newObj, err = s.Rename(ctx, srcObj, dstName)
if err == nil {
srcDirPath := stdpath.Dir(srcPath)
if newObj != nil {
Cache.updateDirectoryObject(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
} else {
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, srcDirPath)
updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
DeleteCache(storage, srcDirPath)
if srcRawObj.IsDir() {
ClearCache(storage, srcPath)
}
}
}
case driver.Rename:
err = s.Rename(ctx, srcObj, dstName)
if err == nil {
srcDirPath := stdpath.Dir(srcPath)
Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, srcDirPath)
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, srcDirPath)
if srcRawObj.IsDir() {
ClearCache(storage, srcPath)
}
}
default:
@@ -396,14 +512,10 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
}
srcPath = utils.FixAndCleanPath(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath)
if dstDirPath == stdpath.Dir(srcPath) {
return stderrors.New("copy in place")
}
srcRawObj, err := Get(ctx, storage, srcPath)
srcObj, err := GetUnwrap(ctx, storage, srcPath)
if err != nil {
return errors.WithMessage(err, "failed to get src object")
}
srcObj := model.UnwrapObj(srcRawObj)
dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
if err != nil {
return errors.WithMessage(err, "failed to get dst dir")
@@ -415,17 +527,15 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
newObj, err = s.Copy(ctx, srcObj, dstDir)
if err == nil {
if newObj != nil {
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
DeleteCache(storage, dstDirPath)
}
}
case driver.Copy:
err = s.Copy(ctx, srcObj, dstDir)
if err == nil {
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
}
default:
return errs.NotImplement
@@ -456,7 +566,11 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
case driver.Remove:
err = s.Remove(ctx, model.UnwrapObj(rawObj))
if err == nil {
Cache.removeDirectoryObject(storage, dirPath, rawObj)
delCacheObj(storage, dirPath, rawObj)
// clear folder cache recursively
if rawObj.IsDir() {
ClearCache(storage, path)
}
}
default:
return errs.NotImplement
@@ -526,20 +640,16 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
var newObj model.Obj
newObj, err = s.Put(ctx, parentDir, file, up)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if newObj != nil {
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
DeleteCache(storage, dstDirPath)
}
}
case driver.Put:
err = s.Put(ctx, parentDir, file, up)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
}
default:
return errs.NotImplement
@@ -554,7 +664,13 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
}
} else {
// upload success, remove old obj
err = Remove(ctx, storage, tempPath)
err := Remove(ctx, storage, tempPath)
if err != nil {
return err
} else {
key := Key(storage, stdpath.Join(dstDirPath, file.GetName()))
linkCache.Del(key)
}
}
}
return errors.WithStack(err)
@@ -565,8 +681,7 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
dstDirPath = utils.FixAndCleanPath(dstDirPath)
dstPath := stdpath.Join(dstDirPath, dstName)
_, err := GetUnwrap(ctx, storage, dstPath)
_, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName))
if err == nil {
return errors.New("obj already exists")
}
@@ -583,20 +698,16 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
var newObj model.Obj
newObj, err = s.PutURL(ctx, dstDir, dstName, url)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if newObj != nil {
Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
} else if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
DeleteCache(storage, dstDirPath)
}
}
case driver.PutURL:
err = s.PutURL(ctx, dstDir, dstName, url)
if err == nil {
Cache.linkCache.DeleteKey(Key(storage, dstPath))
if !utils.IsBool(lazyCache...) {
Cache.DeleteDirectory(storage, dstDirPath)
}
if err == nil && !utils.IsBool(lazyCache...) {
DeleteCache(storage, dstDirPath)
}
default:
return errs.NotImplement
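The restored addCacheObj earlier in this file defers its LocalSort work with a per-key debounce, so a burst of cached insertions triggers only one sort. A small sketch of that pattern is below; newDebounce is an assumed stand-in for utils.NewDebounce, whose shape (an interval in, a func(func()) out) is inferred from the call site.

```go
package main

import (
	"fmt"
	"sort"
	"sync"
	"time"
)

// newDebounce is an assumed stand-in for utils.NewDebounce: each call to the returned
// function re-arms a timer, and the callback runs once after the interval elapses
// with no further calls.
func newDebounce(interval time.Duration) func(func()) {
	var mu sync.Mutex
	var timer *time.Timer
	return func(f func()) {
		mu.Lock()
		defer mu.Unlock()
		if timer != nil {
			timer.Stop()
		}
		timer = time.AfterFunc(interval, f)
	}
}

func main() {
	objs := []string{"b", "a"}
	debounce := newDebounce(50 * time.Millisecond)

	// Several quick inserts trigger only one sort, mirroring how addCacheObj defers
	// the LocalSort work instead of re-sorting on every cached insertion.
	for _, name := range []string{"d", "c"} {
		objs = append(objs, name)
		debounce(func() {
			sort.Strings(objs)
			fmt.Println("sorted once:", objs)
		})
	}
	time.Sleep(100 * time.Millisecond)
}
```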

View File

@@ -5,21 +5,26 @@ import (
"sort"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
)
var settingCache = cache.NewMemCache(cache.WithShards[*model.SettingItem](4))
var settingG singleflight.Group[*model.SettingItem]
var settingCacheF = func(item *model.SettingItem) {
Cache.SetSetting(item.Key, item)
settingCache.Set(item.Key, item, cache.WithEx[*model.SettingItem](time.Hour))
}
var settingGroupCache = cache.NewMemCache(cache.WithShards[[]model.SettingItem](4))
var settingGroupG singleflight.Group[[]model.SettingItem]
var settingGroupCacheF = func(key string, items []model.SettingItem) {
Cache.SetSettingGroup(key, items)
var settingGroupCacheF = func(key string, item []model.SettingItem) {
settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour))
}
var settingChangingCallbacks = make([]func(), 0)
@@ -29,7 +34,8 @@ func RegisterSettingChangingCallback(f func()) {
}
func SettingCacheUpdate() {
Cache.ClearAll()
settingCache.Clear()
settingGroupCache.Clear()
for _, cb := range settingChangingCallbacks {
cb()
}
@@ -54,7 +60,7 @@ func GetSettingsMap() map[string]string {
}
func GetSettingItems() ([]model.SettingItem, error) {
if items, exists := Cache.GetSettingGroup("ALL_SETTING_ITEMS"); exists {
if items, ok := settingGroupCache.Get("ALL_SETTING_ITEMS"); ok {
return items, nil
}
items, err, _ := settingGroupG.Do("ALL_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -69,7 +75,7 @@ func GetSettingItems() ([]model.SettingItem, error) {
}
func GetPublicSettingItems() ([]model.SettingItem, error) {
if items, exists := Cache.GetSettingGroup("ALL_PUBLIC_SETTING_ITEMS"); exists {
if items, ok := settingGroupCache.Get("ALL_PUBLIC_SETTING_ITEMS"); ok {
return items, nil
}
items, err, _ := settingGroupG.Do("ALL_PUBLIC_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -84,7 +90,7 @@ func GetPublicSettingItems() ([]model.SettingItem, error) {
}
func GetSettingItemByKey(key string) (*model.SettingItem, error) {
if item, exists := Cache.GetSetting(key); exists {
if item, ok := settingCache.Get(key); ok {
return item, nil
}
@@ -112,8 +118,8 @@ func GetSettingItemInKeys(keys []string) ([]model.SettingItem, error) {
}
func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {
key := fmt.Sprintf("GROUP_%d", group)
if items, exists := Cache.GetSettingGroup(key); exists {
key := strconv.Itoa(group)
if items, ok := settingGroupCache.Get(key); ok {
return items, nil
}
items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {
@@ -129,14 +135,11 @@ func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {
func GetSettingItemsInGroups(groups []int) ([]model.SettingItem, error) {
sort.Ints(groups)
key := strings.Join(utils.MustSliceConvert(groups, func(i int) string {
return strconv.Itoa(i)
}), ",")
keyParts := make([]string, 0, len(groups))
for _, g := range groups {
keyParts = append(keyParts, strconv.Itoa(g))
}
key := "GROUPS_" + strings.Join(keyParts, "_")
if items, exists := Cache.GetSettingGroup(key); exists {
if items, ok := settingGroupCache.Get(key); ok {
return items, nil
}
items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {
@@ -162,10 +165,10 @@ func SaveSettingItems(items []model.SettingItem) error {
}
}
err := db.SaveSettingItems(items)
if err != nil {
if err != nil {
return fmt.Errorf("failed save setting: %+v", err)
}
SettingCacheUpdate()
SettingCacheUpdate()
return nil
}

View File

@@ -137,7 +137,3 @@ func DeleteSharing(sid string) error {
sharingCache.Del(sid)
return db.DeleteSharingById(sid)
}
func DeleteSharingsByCreatorId(creatorId uint) error {
return db.DeleteSharingsByCreatorId(creatorId)
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
@@ -240,8 +239,6 @@ func UpdateStorage(ctx context.Context, storage model.Storage) error {
if oldStorage.MountPath != storage.MountPath {
// mount path renamed, need to drop the storage
storagesMap.Delete(oldStorage.MountPath)
Cache.DeleteDirectoryTree(storageDriver, "/")
Cache.InvalidateStorageDetails(storageDriver)
}
if err != nil {
return errors.WithMessage(err, "failed get storage driver")
@@ -262,7 +259,6 @@ func DeleteStorageById(ctx context.Context, id uint) error {
if err != nil {
return errors.WithMessage(err, "failed get storage")
}
var dropErr error = nil
if !storage.Disabled {
storageDriver, err := GetStorageByMountPath(storage.MountPath)
if err != nil {
@@ -270,19 +266,17 @@ func DeleteStorageById(ctx context.Context, id uint) error {
}
// drop the storage in the driver
if err := storageDriver.Drop(ctx); err != nil {
dropErr = errors.Wrapf(err, "failed drop storage")
return errors.Wrapf(err, "failed drop storage")
}
// delete the storage in the memory
storagesMap.Delete(storage.MountPath)
Cache.DeleteDirectoryTree(storageDriver, "/")
Cache.InvalidateStorageDetails(storageDriver)
go callStorageHooks("del", storageDriver)
}
// delete the storage in the database
if err := db.DeleteStorageById(id); err != nil {
return errors.WithMessage(err, "failed delete storage in database")
}
return dropErr
return nil
}
// MustSaveDriverStorage call from specific driver
@@ -346,8 +340,8 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
})
}
func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails, refresh bool) []model.Obj {
if hideDetails {
func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj {
if utils.IsBool(hideDetails...) {
return GetStorageVirtualFilesByPath(prefix)
}
return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
@@ -360,7 +354,7 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
}
timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
details, err := GetStorageDetails(timeoutCtx, d, refresh)
details, err := GetStorageDetails(timeoutCtx, d)
if err != nil {
if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
@@ -445,9 +439,7 @@ func GetBalancedStorage(path string) driver.Driver {
}
}
var detailsG singleflight.Group[*model.StorageDetails]
func GetStorageDetails(ctx context.Context, storage driver.Driver, refresh ...bool) (*model.StorageDetails, error) {
func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
@@ -455,18 +447,5 @@ func GetStorageDetails(ctx context.Context, storage driver.Driver, refresh ...bo
if !ok {
return nil, errs.NotImplement
}
if !utils.IsBool(refresh...) {
if ret, ok := Cache.GetStorageDetails(storage); ok {
return ret, nil
}
}
details, err, _ := detailsG.Do(storage.GetStorage().MountPath, func() (*model.StorageDetails, error) {
ret, err := wd.GetDetails(ctx)
if err != nil {
return nil, err
}
Cache.SetStorageDetails(storage, ret)
return ret, nil
})
return details, err
return wd.GetDetails(ctx)
}

View File

@@ -1,14 +1,17 @@
package op
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/db"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
"github.com/OpenListTeam/go-cache"
)
var userCache = cache.NewMemCache(cache.WithShards[*model.User](2))
var userG singleflight.Group[*model.User]
var guestUser *model.User
var adminUser *model.User
@@ -43,7 +46,7 @@ func GetUserByName(username string) (*model.User, error) {
if username == "" {
return nil, errs.EmptyUsername
}
if user, exists := Cache.GetUser(username); exists {
if user, ok := userCache.Get(username); ok {
return user, nil
}
user, err, _ := userG.Do(username, func() (*model.User, error) {
@@ -51,7 +54,7 @@ func GetUserByName(username string) (*model.User, error) {
if err != nil {
return nil, err
}
Cache.SetUser(username, _user)
userCache.Set(username, _user, cache.WithEx[*model.User](time.Hour))
return _user, nil
})
return user, err
@@ -78,10 +81,7 @@ func DeleteUserById(id uint) error {
if old.IsAdmin() || old.IsGuest() {
return errs.DeleteAdminOrGuest
}
Cache.DeleteUser(old.Username)
if err := DeleteSharingsByCreatorId(id); err != nil {
return errors.WithMessage(err, "failed to delete user's sharings")
}
userCache.Del(old.Username)
return db.DeleteUserById(id)
}
@@ -96,7 +96,7 @@ func UpdateUser(u *model.User) error {
if u.IsGuest() {
guestUser = nil
}
Cache.DeleteUser(old.Username)
userCache.Del(old.Username)
u.BasePath = utils.FixAndCleanPath(u.BasePath)
return db.UpdateUser(u)
}
@@ -125,6 +125,6 @@ func DelUserCache(username string) error {
if user.IsGuest() {
guestUser = nil
}
Cache.DeleteUser(username)
userCache.Del(username)
return nil
}

View File

@@ -11,7 +11,6 @@ import (
"os"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/net"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -28,6 +27,9 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran
}
func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
if link.MFile != nil {
return GetRangeReaderFromMFile(size, link.MFile), nil
}
if link.Concurrency > 0 || link.PartSize > 0 {
down := net.NewDownloader(func(d *net.Downloader) {
d.Concurrency = link.Concurrency
@@ -64,7 +66,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
}
if len(link.URL) == 0 {
return nil, errors.New("invalid link: must have at least one of URL or RangeReader")
return nil, errors.New("invalid link: must have at least one of MFile, URL, or RangeReader")
}
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
@@ -76,7 +78,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
response, err := net.RequestHttp(ctx, "GET", header, link.URL)
if err != nil {
if _, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
return nil, err
}
return nil, fmt.Errorf("http request failure, err:%w", err)
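The change above short-circuits GetRangeReaderFromLink when the driver handed over an MFile: range reads come straight from the open handle instead of an HTTP request. Roughly, that amounts to slicing an io.ReaderAt with io.NewSectionReader, as in this simplified sketch (the callback signature is reduced from the real model.RangeReaderIF):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// rangeReaderFunc is a simplified range-read callback: given an offset and length,
// return a reader over just that slice of the file.
type rangeReaderFunc func(ctx context.Context, start, length int64) (io.ReadCloser, error)

// rangeReaderFromReaderAt is roughly what serving ranges from an MFile boils down to:
// no HTTP round trip, just a section of the already-open handle.
func rangeReaderFromReaderAt(r io.ReaderAt, size int64) rangeReaderFunc {
	return func(_ context.Context, start, length int64) (io.ReadCloser, error) {
		if length < 0 || start+length > size {
			length = size - start
		}
		return io.NopCloser(io.NewSectionReader(r, start, length)), nil
	}
}

func main() {
	content := "hello, range reads"
	rr := rangeReaderFromReaderAt(strings.NewReader(content), int64(len(content)))
	rc, err := rr(context.Background(), 7, 5)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b)) // "range"
}
```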

View File

@@ -14,7 +14,7 @@ type TaskExtension struct {
Creator *model.User
startTime *time.Time
endTime *time.Time
TotalBytes int64
totalBytes int64
ApiUrl string
}
@@ -58,11 +58,11 @@ func (t *TaskExtension) ClearEndTime() {
}
func (t *TaskExtension) SetTotalBytes(totalBytes int64) {
t.TotalBytes = totalBytes
t.totalBytes = totalBytes
}
func (t *TaskExtension) GetTotalBytes() int64 {
return t.TotalBytes
return t.totalBytes
}
func (t *TaskExtension) ReinitCtx() error {

View File

@@ -24,15 +24,16 @@ func RefreshAndRemove(dstPath string, payloads ...any) {
return
}
_, dstNeedRefresh := dstStorage.(driver.Put)
dstNeedRefresh = dstNeedRefresh && !dstStorage.Config().NoCache
if dstNeedRefresh {
op.Cache.DeleteDirectory(dstStorage, dstActualPath)
op.DeleteCache(dstStorage, dstActualPath)
}
var ctx context.Context
for _, payload := range payloads {
switch p := payload.(type) {
case DstPathToRefresh:
if dstNeedRefresh {
op.Cache.DeleteDirectory(dstStorage, string(p))
op.DeleteCache(dstStorage, string(p))
}
case SrcPathToRemove:
if ctx == nil {
@@ -78,7 +79,7 @@ func verifyAndRemove(ctx context.Context, srcStorage, dstStorage driver.Driver,
}
if refresh {
op.Cache.DeleteDirectory(dstStorage, dstObjPath)
op.DeleteCache(dstStorage, dstObjPath)
}
hasErr := false
for _, obj := range srcObjs {

View File

@@ -1,8 +1,8 @@
package gowebdav
import (
"fmt"
"os"
"strconv"
)
// StatusError implements error and wraps
@@ -12,7 +12,7 @@ type StatusError struct {
}
func (se StatusError) Error() string {
return strconv.Itoa(se.Status)
return fmt.Sprintf("%d", se.Status)
}
// IsErrCode returns true if the given error

View File

@@ -73,6 +73,9 @@ type call[T any] struct {
type Group[T any] struct {
mu sync.Mutex // protects m
m map[string]*call[T] // lazily initialized
// Won't remember error
Remember bool
}
// Result holds the results of Do, so they can be passed
@@ -156,7 +159,7 @@ func (g *Group[T]) doCall(c *call[T], key string, fn func() (T, error)) {
g.mu.Lock()
defer g.mu.Unlock()
c.wg.Done()
if g.m[key] == c {
if (!g.Remember || c.err != nil) && g.m[key] == c {
delete(g.m, key)
}

View File

@@ -187,39 +187,52 @@ func NewClosers(c ...io.Closer) Closers {
return Closers(c)
}
type SyncClosersIF interface {
ClosersIF
AcquireReference() bool
}
type SyncClosers struct {
closers []io.Closer
ref int32
}
// if closed, return false
var _ SyncClosersIF = (*SyncClosers)(nil)
func (c *SyncClosers) AcquireReference() bool {
ref := atomic.AddInt32(&c.ref, 1)
if ref > 0 {
// log.Debugf("AcquireReference %p: %d", c, ref)
return true
}
atomic.StoreInt32(&c.ref, closersClosed)
return false
}
const closersClosed = math.MinInt32
func (c *SyncClosers) Close() error {
for {
ref := atomic.LoadInt32(&c.ref)
if ref < 0 {
return nil
return false
}
if ref > 1 {
if atomic.CompareAndSwapInt32(&c.ref, ref, ref-1) {
// log.Debugf("ReleaseReference %p: %d", c, ref)
return nil
}
} else if atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) {
break
newRef := ref + 1
if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
// log.Debugf("AcquireReference %p: %d", c, newRef)
return true
}
}
}
const closersClosed = math.MinInt16
func (c *SyncClosers) Close() error {
ref := atomic.AddInt32(&c.ref, -1)
if ref > 0 {
// log.Debugf("ReleaseReference %p: %d", c, ref)
return nil
}
if ref < -1 {
atomic.StoreInt32(&c.ref, closersClosed)
return nil
}
// Attempt to acquire FinalClose permission.
// At this point, ref must be 0 or -1. We try to atomically change it to the closersClosed state.
// Only the first successful goroutine gets the cleanup permission.
if !atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) {
return nil
}
// log.Debugf("FinalClose %p", c)
var errs []error
@@ -251,16 +264,6 @@ func (c *SyncClosers) AddIfCloser(a any) {
}
}
var _ ClosersIF = (*SyncClosers)(nil)
// 实现cache.Expirable接口
func (c *SyncClosers) Expired() bool {
return atomic.LoadInt32(&c.ref) < 0
}
func (c *SyncClosers) Length() int {
return len(c.closers)
}
func NewSyncClosers(c ...io.Closer) SyncClosers {
return SyncClosers{closers: c}
}
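The rewritten SyncClosers above is essentially a reference counter in which Close acts as release: each successful AcquireReference is paired with a Close, only the final release closes the wrapped resources, and a closed instance refuses new references so a stale cached link cannot be reused. A small sketch of that contract, simplified to a mutex instead of the lock-free CAS loop:

```go
package main

import (
	"fmt"
	"io"
	"sync"
)

// refClosers is a simplified model of SyncClosers: acquire/release semantics where
// the last release closes everything and a closed instance refuses new references.
type refClosers struct {
	mu      sync.Mutex
	ref     int
	closed  bool
	closers []io.Closer
}

func (c *refClosers) AcquireReference() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return false
	}
	c.ref++
	return true
}

// Close releases one reference; the final release performs the real cleanup.
func (c *refClosers) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil
	}
	if c.ref > 1 {
		c.ref--
		return nil
	}
	c.closed = true
	var firstErr error
	for _, cl := range c.closers {
		if err := cl.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	rc := &refClosers{}
	fmt.Println(rc.AcquireReference()) // true: link handed out to a reader
	fmt.Println(rc.Close())            // <nil>: reader done, last ref, resources closed
	fmt.Println(rc.AcquireReference()) // false: cached link can no longer be reused
}
```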

View File

@@ -18,14 +18,18 @@ import (
)
func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
// if link.MFile != nil {
// attachHeader(w, file, link)
// http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
// return nil
// }
// check if query param "inline_preview" is true
inlinePreview := r.URL.Query().Get("inline_preview") == "true"
if link.MFile != nil {
attachHeader(w, file, link, inlinePreview)
http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
return nil
}
if link.Concurrency > 0 || link.PartSize > 0 {
attachHeader(w, file, link)
attachHeader(w, file, link, inlinePreview)
size := link.ContentLength
if size <= 0 {
size = file.GetSize()
@@ -40,7 +44,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
}
if link.RangeReader != nil {
attachHeader(w, file, link)
attachHeader(w, file, link, inlinePreview)
size := link.ContentLength
if size <= 0 {
size = file.GetSize()
@@ -70,9 +74,11 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
})
return err
}
func attachHeader(w http.ResponseWriter, file model.Obj, link *model.Link) {
func attachHeader(w http.ResponseWriter, file model.Obj, link *model.Link, inlinePreview bool) {
fileName := file.GetName()
w.Header().Set("Content-Disposition", utils.GenerateContentDisposition(fileName))
if !inlinePreview {
w.Header().Set("Content-Disposition", utils.GenerateContentDisposition(fileName))
}
w.Header().Set("Content-Type", utils.GetMimeType(fileName))
size := link.ContentLength
if size <= 0 {
@@ -101,7 +107,7 @@ func GetEtag(file model.Obj, size int64) string {
}
func ProxyRange(ctx context.Context, link *model.Link, size int64) *model.Link {
if link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") {
if link.MFile == nil && link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") {
if link.ContentLength > 0 {
size = link.ContentLength
}
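The proxy change above is what enables the browser's built-in PDF preview mentioned in the commit list: when the query parameter inline_preview=true is present, the handler skips the attachment Content-Disposition header so the response renders inline rather than downloading. A stripped-down sketch of that decision for the MFile branch (the real code builds the disposition with GenerateContentDisposition and proper escaping; the Sprintf here is a simplification):

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// serveFile mirrors the MFile branch above: honor ?inline_preview=true by omitting
// the attachment disposition so browsers can render PDFs (and similar types) inline.
func serveFile(w http.ResponseWriter, r *http.Request, name string, f *os.File) {
	inlinePreview := r.URL.Query().Get("inline_preview") == "true"
	if !inlinePreview {
		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, name))
	}
	// http.ServeContent handles Range requests, Content-Type selection and conditional
	// request headers, which is why the MFile path can hand the file to it directly.
	http.ServeContent(w, r, name, time.Now(), f)
}

func main() {
	http.HandleFunc("/p/doc.pdf", func(w http.ResponseWriter, r *http.Request) {
		f, err := os.Open("doc.pdf") // assumed to exist next to the binary
		if err != nil {
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		defer f.Close()
		serveFile(w, r, "doc.pdf", f)
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```

Requesting /p/doc.pdf?inline_preview=true then opens the file in the browser's viewer, while the plain URL still downloads it.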

View File

@@ -9,7 +9,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/net"
@@ -148,7 +147,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) {
if Writer.IsWritten() {
log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err)
} else {
if statusCode, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
common.ErrorPage(c, err, int(statusCode), true)
} else {
common.ErrorPage(c, err, 500, true)

View File

@@ -386,7 +386,7 @@ func Link(c *gin.Context) {
common.ErrorResp(c, err, 500)
return
}
if storage.Config().NoLinkURL {
if storage.Config().NoLinkURL || storage.Config().OnlyLinkMFile {
common.SuccessResp(c, model.Link{
URL: fmt.Sprintf("%s/p%s?d&sign=%s",
common.GetApiUrl(c),

View File

@@ -175,7 +175,7 @@ func LoadAllStorages(c *gin.Context) {
common.ErrorResp(c, err, 500, true)
return
}
conf.ResetStoragesLoadSignal()
conf.StoragesLoaded = false
go func(storages []model.Storage) {
for _, storage := range storages {
storageDriver, err := op.GetStorageByMountPath(storage.MountPath)
@@ -195,7 +195,7 @@ func LoadAllStorages(c *gin.Context) {
log.Infof("success load storage: [%s], driver: [%s]",
storage.MountPath, storage.Driver)
}
conf.SendStoragesLoadedSignal()
conf.StoragesLoaded = true
}(storages)
common.SuccessResp(c)
}

View File

@@ -22,12 +22,9 @@ func StoragesLoaded(c *gin.Context) {
return
}
}
select {
case <-conf.StoragesLoadSignal():
case <-c.Request.Context().Done():
c.Abort()
return
}
common.ErrorStrResp(c, "Loading storage, please wait", 500)
c.Abort()
return
}
common.GinWithValue(c,
conf.ApiUrlKey, common.GetApiUrlFromRequest(c.Request),

View File

@@ -272,7 +272,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
}
err = common.Proxy(w, r, link, fi)
if err != nil {
if statusCode, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok {
return int(statusCode), err
}
return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err)