Compare commits

..

1 Commit

Author: renovate[bot]
SHA1: 43a9764caa
Message: fix(deps): update module gorm.io/driver/postgres to v1.6.0
Date: 2025-10-06 17:15:48 +00:00
165 changed files with 1317 additions and 5142 deletions

View File

@@ -13,7 +13,7 @@ body:
attributes:
label: 请确认以下事项
description: |
您必须确认、同意并勾选以下内容,否则您的问题一定会被直接关闭。
您必须勾选以下内容,否则您的问题可能会被直接关闭。
或者您可以去[讨论区](https://github.com/OpenListTeam/OpenList/discussions)。
options:
- label: |
@@ -59,14 +59,6 @@ body:
label: 问题描述(必填)
validations:
required: true
- type: textarea
id: logs
attributes:
label: 日志(必填)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
validations:
required: true
- type: textarea
id: config
attributes:
@@ -75,6 +67,12 @@ body:
请提供您的`OpenList`应用的配置文件,并截图相关存储配置。(可隐藏隐私字段)
validations:
required: true
- type: textarea
id: logs
attributes:
label: 日志(可选)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
- type: textarea
id: reproduction
attributes:

View File

@@ -13,7 +13,7 @@ body:
attributes:
label: Please confirm the following
description: |
You must confirm, agree, and check all the following, otherwise your issue will definitely be closed directly.
You must check all the following, otherwise your issue may be closed directly.
Or you can go to the [discussions](https://github.com/OpenListTeam/OpenList/discussions).
options:
- label: |
@@ -59,14 +59,6 @@ body:
label: Bug Description (required)
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs (required)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
validations:
required: true
- type: textarea
id: config
attributes:
@@ -75,6 +67,12 @@ body:
Please provide your `OpenList` application's configuration file and a screenshot of the relevant storage configuration. (You may mask sensitive fields)
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs (optional)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
- type: textarea
id: reproduction
attributes:

View File

@@ -64,9 +64,8 @@ Thank you for your support and understanding of the OpenList project.
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

View File

@@ -64,9 +64,8 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [分秒帧](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [百度网盘](http://pan.baidu.com)

View File

@@ -65,7 +65,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国際](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com)(個人、家族、グループ)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

View File

@@ -66,7 +66,6 @@ Dank u voor uw ondersteuning en begrip
- [x] Teambition([China](https://www.teambition.com), [Internationaal](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Persoonlijk, Familie, Groep)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

View File

@@ -2,7 +2,6 @@ package flags
var (
DataDir string
ConfigPath string
Debug bool
NoPrefix bool
Dev bool

View File

@@ -27,8 +27,7 @@ func Execute() {
}
func init() {
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
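
The `--config` help text above states that, when the flag is unset, the path defaults to `[data directory]/config.json`. A minimal sketch of that resolution rule, with `resolveConfigPath` as a hypothetical helper (the real wiring lives in the cmd/flags packages):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// resolveConfigPath sketches the default described in the --config help text:
// when no explicit path is given, config.json lives under the data directory.
// (Hypothetical helper; not part of the diffed code.)
func resolveConfigPath(dataDir, configFlag string) string {
	if configFlag != "" {
		return configFlag
	}
	return filepath.Join(dataDir, "config.json")
}

func main() {
	fmt.Println(resolveConfigPath("data", ""))             // data/config.json
	fmt.Println(resolveConfigPath("data", "/etc/op.json")) // /etc/op.json
}
```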

View File

@@ -27,8 +27,6 @@ import (
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"github.com/quic-go/quic-go/http3"
)
// ServerCmd represents the server command
@@ -65,7 +63,6 @@ the address is defined in config file`,
httpHandler = h2c.NewHandler(r, &http2.Server{})
}
var httpSrv, httpsSrv, unixSrv *http.Server
var quicSrv *http3.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
@@ -89,24 +86,6 @@ the address is defined in config file`,
utils.Log.Fatalf("failed to start https: %s", err.Error())
}
}()
if conf.Conf.Scheme.EnableH3 {
fmt.Printf("start HTTP3 (quic) server @ %s\n", httpsBase)
utils.Log.Infof("start HTTP3 (quic) server @ %s", httpsBase)
r.Use(func(c *gin.Context) {
if c.Request.TLS != nil {
port := conf.Conf.Scheme.HttpsPort
c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", port))
}
c.Next()
})
quicSrv = &http3.Server{Addr: httpsBase, Handler: r}
go func() {
err := quicSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
utils.Log.Fatalf("failed to start http3 (quic): %s", err.Error())
}
}()
}
}
if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
@@ -224,15 +203,6 @@ the address is defined in config file`,
utils.Log.Fatal("HTTPS server shutdown err: ", err)
}
}()
if conf.Conf.Scheme.EnableH3 {
wg.Add(1)
go func() {
defer wg.Done()
if err := quicSrv.Shutdown(ctx); err != nil {
utils.Log.Fatal("HTTP3 (quic) server shutdown err: ", err)
}
}()
}
}
if conf.Conf.Scheme.UnixFile != "" {
wg.Add(1)
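
The removed H3 block advertises the QUIC endpoint by attaching an `Alt-Svc` header to TLS responses before starting an `http3.Server` on the HTTPS address. A minimal sketch of just that middleware, assuming gin is wired up as in the hunk (the QUIC listener itself is omitted):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
)

// altSvcMiddleware mirrors the removed H3 block: when a request arrives over
// TLS, advertise the HTTP/3 endpoint on the HTTPS port via Alt-Svc so clients
// can switch to QUIC on later requests.
func altSvcMiddleware(httpsPort int) gin.HandlerFunc {
	return func(c *gin.Context) {
		if c.Request.TLS != nil {
			c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", httpsPort))
		}
		c.Next()
	}
}

func main() {
	r := gin.New()
	r.Use(altSvcMiddleware(443))
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	// r.RunTLS(":443", "cert.pem", "key.pem") // HTTPS listener; the http3.Server runs alongside it
}
```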

View File

@@ -15,9 +15,10 @@ type Addition struct {
}
var config = driver.Config{
Name: "115 Cloud",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
Name: "115 Cloud",
DefaultRoot: "0",
// OnlyProxy: true,
// NoOverwriteUpload: true,
}
func init() {

View File

@@ -131,6 +131,23 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err

View File

@@ -17,9 +17,8 @@ type Addition struct {
}
var config = driver.Config{
Name: "115 Open",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
Name: "115 Open",
DefaultRoot: "0",
}
func init() {

View File

@@ -41,9 +41,7 @@ func (d *Pan123) GetAddition() driver.Additional {
}
func (d *Pan123) Init(ctx context.Context) error {
_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
req.SetHeader("platform", "web")
}, nil)
_, err := d.Request(UserInfo, http.MethodGet, nil, nil)
return err
}

View File

@@ -12,8 +12,7 @@ type Addition struct {
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
Platform string `json:"platform" type:"string" default:"web" help:"the platform header value, sent with API requests"`
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
}
var config = driver.Config{
@@ -28,7 +27,6 @@ func init() {
return &Pan123{
Addition: Addition{
UploadThread: 3,
Platform: "web",
},
}
})

View File

@@ -203,7 +203,7 @@ do:
"referer": "https://www.123pan.com/",
"authorization": "Bearer " + d.AccessToken,
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) openlist-client",
"platform": d.Platform,
"platform": "web",
"app-version": "3",
//"user-agent": base.UserAgent,
})

View File

@@ -200,7 +200,10 @@ func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error
return nil, err
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(capacityInfo.CloudCapacityInfo.UsedSize, capacityInfo.CloudCapacityInfo.TotalSize),
DiskUsage: model.DiskUsage{
TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
FreeSpace: capacityInfo.CloudCapacityInfo.FreeSize,
},
}, nil
}
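
Both sides of this hunk (and the matching 189 TV/PC hunks below) report the same quota, just built differently: one derives DiskUsage from used and total via a helper, the other fills TotalSpace/FreeSpace straight from the capacity response's freeSize. A self-contained sketch of the two constructions, with stand-in types assumed to match model.DiskUsage and driver.DiskUsageFromUsedAndTotal:

```go
package main

import "fmt"

// DiskUsage is a stand-in assumed to match the shape of model.DiskUsage in the hunks.
type DiskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

// diskUsageFromUsedAndTotal plays the role of driver.DiskUsageFromUsedAndTotal:
// derive free space from used and total, guarding against underflow.
func diskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
	var free uint64
	if total > used {
		free = total - used
	}
	return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
	// One side of the diff derives the struct from used+total...
	fmt.Printf("%+v\n", diskUsageFromUsedAndTotal(40<<30, 100<<30))
	// ...the other fills TotalSpace/FreeSpace directly from the capacity API's freeSize.
	fmt.Printf("%+v\n", DiskUsage{TotalSpace: 100 << 30, FreeSpace: 60 << 30})
}
```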

View File

@@ -72,13 +72,13 @@ type CapacityResp struct {
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`

View File

@@ -284,15 +284,18 @@ func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, err
if err != nil {
return nil, err
}
var total, used uint64
var total, free uint64
if y.isFamily() {
total = capacityInfo.FamilyCapacityInfo.TotalSize
used = capacityInfo.FamilyCapacityInfo.UsedSize
free = capacityInfo.FamilyCapacityInfo.FreeSize
} else {
total = capacityInfo.CloudCapacityInfo.TotalSize
used = capacityInfo.CloudCapacityInfo.UsedSize
free = capacityInfo.CloudCapacityInfo.FreeSize
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

View File

@@ -322,13 +322,13 @@ type CapacityResp struct {
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`

View File

@@ -416,15 +416,18 @@ func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, err
if err != nil {
return nil, err
}
var total, used uint64
var total, free uint64
if y.isFamily() {
total = capacityInfo.FamilyCapacityInfo.TotalSize
used = capacityInfo.FamilyCapacityInfo.UsedSize
free = capacityInfo.FamilyCapacityInfo.FreeSize
} else {
total = capacityInfo.CloudCapacityInfo.TotalSize
used = capacityInfo.CloudCapacityInfo.UsedSize
free = capacityInfo.CloudCapacityInfo.FreeSize
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

View File

@@ -415,13 +415,13 @@ type CapacityResp struct {
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`

View File

@@ -130,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
if utils.PathEqual(path, "/") && !d.autoFlatten {
return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil
return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough), nil
}
root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root]
@@ -211,6 +211,9 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
@@ -524,25 +527,4 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
}
}
func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root]
if !ok {
return 0
}
for _, dst := range dsts {
storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
if err != nil {
continue
}
mode := storage.Config().LinkCacheMode
if mode == -1 {
return storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(actualPath)
} else {
return mode
}
}
return 0
}
var _ driver.Driver = (*Alias)(nil)

View File

@@ -26,7 +26,6 @@ var config = driver.Config{
NoUpload: false,
DefaultRoot: "/",
ProxyRangeOption: true,
LinkCacheMode: driver.LinkCacheAuto,
}
func init() {

View File

@@ -5,6 +5,7 @@ import (
"errors"
stdpath "path"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
@@ -16,15 +17,9 @@ import (
log "github.com/sirupsen/logrus"
)
type detailWithIndex struct {
idx int
val *model.StorageDetails
}
func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj {
var objs []model.Obj
detailsChan := make(chan detailWithIndex, len(d.pathMap))
workerCount := 0
var wg sync.WaitGroup
for _, k := range d.rootOrder {
obj := model.Object{
Name: k,
@@ -52,26 +47,22 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model
DriverName: remoteDriver.Config().Name,
},
}
workerCount++
go func(dri driver.Driver, i int) {
details, e := op.GetStorageDetails(ctx, dri, refresh)
wg.Add(1)
go func() {
defer wg.Done()
c, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
details, e := op.GetStorageDetails(c, remoteDriver)
if e != nil {
if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, e)
log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
}
return
}
detailsChan <- detailWithIndex{idx: i, val: details}
}(remoteDriver, idx)
}
for workerCount > 0 {
select {
case r := <-detailsChan:
objs[r.idx].(*model.ObjStorageDetails).StorageDetails = r.val
workerCount--
case <-time.After(time.Second):
workerCount = 0
}
objs[idx].(*model.ObjStorageDetails).StorageDetails = details
}()
}
wg.Wait()
return objs
}
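
One side of this hunk collects per-storage details through an indexed channel with a shared one-second timeout; the other fans out with a sync.WaitGroup and gives each lookup its own one-second context. A minimal sketch of the WaitGroup variant, with fetchDetails as a hypothetical stand-in for op.GetStorageDetails:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// fetchDetails is a hypothetical stand-in for op.GetStorageDetails in the hunk above.
func fetchDetails(ctx context.Context, mount string) (string, error) {
	select {
	case <-time.After(200 * time.Millisecond): // simulated remote call
		return "details for " + mount, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	mounts := []string{"/a", "/b", "/c"}
	results := make([]string, len(mounts))

	var wg sync.WaitGroup
	for i, m := range mounts {
		wg.Add(1)
		go func(i int, m string) {
			defer wg.Done()
			// Each lookup gets its own one-second budget, as in the WaitGroup variant above.
			c, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			if d, err := fetchDetails(c, m); err == nil {
				results[i] = d
			}
		}(i, m)
	}
	wg.Wait()
	fmt.Println(results) // slow or failed lookups simply leave their slot empty
}
```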

View File

@@ -299,7 +299,10 @@ func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: total - used,
},
}, nil
}

View File

@@ -36,7 +36,6 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud_open"
_ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
_ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
_ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
@@ -56,7 +55,6 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/proton_drive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc_tv"

View File

@@ -5,15 +5,11 @@ import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
stdpath "path"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -22,10 +18,8 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@@ -35,20 +29,8 @@ type BaiduNetdisk struct {
uploadThread int
vipType int // 会员类型0普通用户(4G/4M)、1普通会员(10G/16M)、2超级会员(20G/32M)
upClient *resty.Client // 上传文件使用的http客户端
uploadUrlG singleflight.Group[string]
uploadUrlMu sync.RWMutex
uploadUrlCache map[string]uploadURLCacheEntry
}
type uploadURLCacheEntry struct {
url string
updateTime time.Time
}
var ErrUploadIDExpired = errors.New("uploadid expired")
func (d *BaiduNetdisk) Config() driver.Config {
return config
}
@@ -58,27 +40,19 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
}
func (d *BaiduNetdisk) Init(ctx context.Context) error {
d.upClient = base.NewRestyClient().
SetTimeout(UPLOAD_TIMEOUT).
SetRetryCount(UPLOAD_RETRY_COUNT).
SetRetryWaitTime(UPLOAD_RETRY_WAIT_TIME).
SetRetryMaxWaitTime(UPLOAD_RETRY_MAX_WAIT_TIME)
d.uploadUrlCache = make(map[string]uploadURLCacheEntry)
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
if d.uploadThread < 1 {
d.uploadThread, d.UploadThread = 1, "1"
} else if d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 32, "32"
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, "3"
}
if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
d.UploadAPI = UPLOAD_FALLBACK_API
d.UploadAPI = "https://d.pcs.baidu.com"
}
res, err := d.get("/xpan/nas", map[string]string{
"method": "uinfo",
}, nil)
log.Debugf("[baidu_netdisk] get uinfo: %s", string(res))
log.Debugf("[baidu] get uinfo: %s", string(res))
if err != nil {
return err
}
@@ -205,11 +179,6 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
// **注意**: 截至 2024/04/20 百度云盘 api 接口返回的时间永远是当前时间,而不是文件时间。
// 而实际上云盘存储的时间是文件时间,所以此处需要覆盖时间,保证缓存与云盘的数据一致
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// 百度网盘不允许上传空文件
if stream.GetSize() < 1 {
return nil, ErrBaiduEmptyFilesNotAllowed
}
// rapid upload
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
return newObj, nil
@@ -245,6 +214,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
// cal md5 for first 256k data
const SliceSize int64 = 256 * utils.KB
// cal md5
blockList := make([]string, 0, count)
byteSize := sliceSize
fileMd5H := md5.New()
@@ -274,7 +244,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
if tmpF != nil {
if written != streamSize {
return nil, errs.NewErr(err, "CreateTempFile failed, size mismatch: %d != %d ", written, streamSize)
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
@@ -288,14 +258,31 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
mtime := stream.ModTime().Unix()
ctime := stream.CreateTime().Unix()
// step.1 尝试读取已保存进度
// step.1 预上传
// 尝试获取之前的进度
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
// 没有进度,走预上传
precreateResp, err = d.precreate(ctx, path, streamSize, blockListStr, contentMd5, sliceMd5, ctime, mtime)
params := map[string]string{
"method": "precreate",
}
form := map[string]string{
"path": path,
"size": strconv.FormatInt(streamSize, 10),
"isdir": "0",
"autoinit": "1",
"rtype": "3",
"block_list": blockListStr,
"content-md5": contentMd5,
"slice-md5": sliceMd5,
}
joinTime(form, ctime, mtime)
log.Debugf("[baidu_netdisk] precreate data: %s", form)
_, err = d.postForm("/xpan/file", params, form, &precreateResp)
if err != nil {
return nil, err
}
log.Debugf("%+v", precreateResp)
if precreateResp.ReturnType == 2 {
// rapid upload, since got md5 match from baidu server
// 修复时间,具体原因见 Put 方法注释的 **注意**
@@ -304,96 +291,45 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
return fileToObj(precreateResp.File), nil
}
}
ensureUploadURL := func() {
if precreateResp.UploadURL != "" {
return
}
precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
}
ensureUploadURL()
// step.2 上传分片
uploadLoop:
for attempt := 0; attempt < 2; attempt++ {
// 获取上传域名
if precreateResp.UploadURL == "" {
ensureUploadURL()
}
uploadUrl := precreateResp.UploadURL
// 并发上传
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
cacheReaderAt, okReaderAt := cache.(io.ReaderAt)
if !okReaderAt {
return nil, fmt.Errorf("cache object must implement io.ReaderAt interface for upload operations")
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
}
totalParts := len(precreateResp.BlockList)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) || partseq < 0 {
continue
i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
"type": "tmpfile",
"path": path,
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
i, partseq := i, partseq
offset, size := int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
size = lastBlockSize
err := d.uploadSlice(ctx, params, stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
if err != nil {
return err
}
threadG.Go(func(ctx context.Context) error {
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
"type": "tmpfile",
"path": path,
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
section := io.NewSectionReader(cacheReaderAt, offset, size)
err := d.uploadSlice(ctx, uploadUrl, params, stream.GetName(), driver.NewLimitedUploadStream(ctx, section))
if err != nil {
return err
}
precreateResp.BlockList[i] = -1
// 当前goroutine还没退出+1才是真正成功的数量
success := threadG.Success() + 1
progress := float64(success) * 100 / float64(totalParts)
up(progress)
return nil
})
}
err = threadG.Wait()
if err == nil {
break uploadLoop
}
// 保存进度(所有错误都会保存)
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
precreateResp.BlockList[i] = -1
return nil
})
}
if err = threadG.Wait(); err != nil {
// 如果属于用户主动取消,则保存上传进度
if errors.Is(err, context.Canceled) {
return nil, err
}
if errors.Is(err, ErrUploadIDExpired) {
log.Warn("[baidu_netdisk] uploadid expired, will restart from scratch")
d.clearUploadUrlCache(precreateResp.Uploadid)
// 重新 precreate所有分片都要重传
newPre, err2 := d.precreate(ctx, path, streamSize, blockListStr, "", "", ctime, mtime)
if err2 != nil {
return nil, err2
}
if newPre.ReturnType == 2 {
return fileToObj(newPre.File), nil
}
precreateResp = newPre
precreateResp.UploadURL = ""
ensureUploadURL()
// 覆盖掉旧的进度
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
continue uploadLoop
}
return nil, err
}
@@ -407,72 +343,23 @@ uploadLoop:
// 修复时间,具体原因见 Put 方法注释的 **注意**
newFile.Ctime = ctime
newFile.Mtime = mtime
// 上传成功清理进度
base.SaveUploadProgress(d, nil, d.AccessToken, contentMd5)
d.clearUploadUrlCache(precreateResp.Uploadid)
return fileToObj(newFile), nil
}
// precreate 执行预上传操作,支持首次上传和 uploadid 过期重试
func (d *BaiduNetdisk) precreate(ctx context.Context, path string, streamSize int64, blockListStr, contentMd5, sliceMd5 string, ctime, mtime int64) (*PrecreateResp, error) {
params := map[string]string{"method": "precreate"}
form := map[string]string{
"path": path,
"size": strconv.FormatInt(streamSize, 10),
"isdir": "0",
"autoinit": "1",
"rtype": "3",
"block_list": blockListStr,
}
// 只有在首次上传时才包含 content-md5 和 slice-md5
if contentMd5 != "" && sliceMd5 != "" {
form["content-md5"] = contentMd5
form["slice-md5"] = sliceMd5
}
joinTime(form, ctime, mtime)
var precreateResp PrecreateResp
_, err := d.postForm("/xpan/file", params, form, &precreateResp)
if err != nil {
return nil, err
}
// 修复时间,具体原因见 Put 方法注释的 **注意**
if precreateResp.ReturnType == 2 {
precreateResp.File.Ctime = ctime
precreateResp.File.Mtime = mtime
}
return &precreateResp, nil
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, uploadUrl string, params map[string]string, fileName string, file io.Reader) error {
res, err := d.upClient.R().
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
res, err := base.RestyClient.R().
SetContext(ctx).
SetQueryParams(params).
SetFileReader("file", fileName, file).
Post(uploadUrl + "/rest/2.0/pcs/superfile2")
Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
if err != nil {
return err
}
log.Debugln(res.RawResponse.Status + res.String())
if res.StatusCode() != http.StatusOK {
return errs.NewErr(errs.StreamIncomplete, "baidu upload failed, status=%d, body=%s", res.StatusCode(), res.String())
}
errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
errNo := utils.Json.Get(res.Body(), "errno").ToInt()
respStr := res.String()
lower := strings.ToLower(respStr)
// 合并 uploadid 过期检测逻辑
if strings.Contains(lower, "uploadid") &&
(strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
return ErrUploadIDExpired
}
if errCode != 0 || errNo != 0 {
return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", res.String())
return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
}
return nil
}
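
In both versions of Put, BlockList holds the part sequence numbers still to upload: a finished part is marked -1, and the longer variant filters those out (utils.SliceFilter) before persisting progress so a resumed upload retries only the missing parts. A minimal stand-in for that filter step:

```go
package main

import "fmt"

// filterRemaining keeps only the part indices that have not been uploaded yet;
// finished parts were marked -1, mirroring the utils.SliceFilter call above.
func filterRemaining(blocks []int) []int {
	remaining := make([]int, 0, len(blocks))
	for _, b := range blocks {
		if b >= 0 {
			remaining = append(remaining, b)
		}
	}
	return remaining
}

func main() {
	// Parts 0..4, where parts 1 and 3 already uploaded and were marked -1.
	blocks := []int{0, -1, 2, -1, 4}
	fmt.Println(filterRemaining(blocks)) // [0 2 4]: only these are retried on resume
}
```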

View File

@@ -3,7 +3,6 @@ package baidu_netdisk
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"time"
)
type Addition struct {
@@ -20,21 +19,11 @@ type Addition struct {
RefreshToken string `json:"refresh_token" required:"true"`
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
UseDynamicUploadAPI bool `json:"use_dynamic_upload_api" default:"true" help:"dynamically get upload api domain, when enabled, the 'Upload API' setting will be used as a fallback if failed to get"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
}
const (
UPLOAD_FALLBACK_API = "https://d.pcs.baidu.com" // 备用上传地址
UPLOAD_URL_EXPIRE_TIME = time.Minute * 60 // 上传地址有效期(分钟)
UPLOAD_TIMEOUT = time.Minute * 30 // 上传请求超时时间
UPLOAD_RETRY_COUNT = 3
UPLOAD_RETRY_WAIT_TIME = time.Second * 1
UPLOAD_RETRY_MAX_WAIT_TIME = time.Second * 5
)
var config = driver.Config{
Name: "BaiduNetdisk",
DefaultRoot: "/",

View File

@@ -1,7 +1,6 @@
package baidu_netdisk
import (
"errors"
"path"
"strconv"
"time"
@@ -10,10 +9,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
var (
ErrBaiduEmptyFilesNotAllowed = errors.New("empty files are not allowed by baidu netdisk")
)
type TokenErrResp struct {
ErrorDescription string `json:"error_description"`
Error string `json:"error"`
@@ -193,32 +188,6 @@ type PrecreateResp struct {
// return_type=2
File File `json:"info"`
UploadURL string `json:"-"` // 保存断点续传对应的上传域名
}
type UploadServerResp struct {
BakServer []any `json:"bak_server"`
BakServers []struct {
Server string `json:"server"`
} `json:"bak_servers"`
ClientIP string `json:"client_ip"`
ErrorCode int `json:"error_code"`
ErrorMsg string `json:"error_msg"`
Expire int `json:"expire"`
Host string `json:"host"`
Newno string `json:"newno"`
QuicServer []any `json:"quic_server"`
QuicServers []struct {
Server string `json:"server"`
} `json:"quic_servers"`
RequestID int64 `json:"request_id"`
Server []any `json:"server"`
ServerTime int `json:"server_time"`
Servers []struct {
Server string `json:"server"`
} `json:"servers"`
Sl int `json:"sl"`
}
type QuotaResp struct {

View File

@@ -115,7 +115,7 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
errno := utils.Json.Get(res.Body(), "errno").ToInt()
if errno != 0 {
if utils.SliceContains([]int{111, -6}, errno) {
log.Info("[baidu_netdisk] refreshing baidu_netdisk token.")
log.Info("refreshing baidu_netdisk token.")
err2 := d.refreshToken()
if err2 != nil {
return retry.Unrecoverable(err2)
@@ -326,10 +326,10 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
// 非会员固定为 4MB
if d.vipType == 0 {
if d.CustomUploadPartSize != 0 {
log.Warnf("[baidu_netdisk] CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
}
if filesize > MaxSliceNum*DefaultSliceSize {
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return DefaultSliceSize
@@ -337,17 +337,17 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
if d.CustomUploadPartSize != 0 {
if d.CustomUploadPartSize < DefaultSliceSize {
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
return DefaultSliceSize
}
if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
return VipSliceSize
}
if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
return SVipSliceSize
}
@@ -377,7 +377,7 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
}
if filesize > MaxSliceNum*maxSliceSize {
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return maxSliceSize
@@ -394,97 +394,6 @@ func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
}
// getUploadUrl 从开放平台获取上传域名/地址,并发请求会被合并,结果会在 uploadid 生命周期内复用。
// 如果获取失败,则返回 Upload API设置项。
func (d *BaiduNetdisk) getUploadUrl(path, uploadId string) string {
if !d.UseDynamicUploadAPI || uploadId == "" {
return d.UploadAPI
}
getCachedUrlFunc := func() (string, bool) {
d.uploadUrlMu.RLock()
defer d.uploadUrlMu.RUnlock()
if entry, ok := d.uploadUrlCache[uploadId]; ok {
return entry.url, true
}
return "", false
}
// 检查地址缓存
if uploadUrl, ok := getCachedUrlFunc(); ok {
return uploadUrl
}
uploadUrlGetFunc := func() (string, error) {
// 双重检查缓存
if uploadUrl, ok := getCachedUrlFunc(); ok {
return uploadUrl, nil
}
uploadUrl, err := d.requestForUploadUrl(path, uploadId)
if err != nil {
return "", err
}
d.uploadUrlMu.Lock()
d.uploadUrlCache[uploadId] = uploadURLCacheEntry{
url: uploadUrl,
updateTime: time.Now(),
}
d.uploadUrlMu.Unlock()
return uploadUrl, nil
}
uploadUrl, err, _ := d.uploadUrlG.Do(uploadId, uploadUrlGetFunc)
if err != nil {
fallback := d.UploadAPI
log.Warnf("[baidu_netdisk] get upload URL failed (%v), will use fallback URL: %s", err, fallback)
return fallback
}
return uploadUrl
}
func (d *BaiduNetdisk) clearUploadUrlCache(uploadId string) {
if uploadId == "" {
return
}
d.uploadUrlMu.Lock()
if _, ok := d.uploadUrlCache[uploadId]; ok {
delete(d.uploadUrlCache, uploadId)
}
d.uploadUrlMu.Unlock()
}
// requestForUploadUrl 请求获取上传地址。
// 实测此接口不需要认证传method和upload_version就行不过还是按文档规范调用。
// https://pan.baidu.com/union/doc/Mlvw5hfnr
func (d *BaiduNetdisk) requestForUploadUrl(path, uploadId string) (string, error) {
params := map[string]string{
"method": "locateupload",
"appid": "250528",
"path": path,
"uploadid": uploadId,
"upload_version": "2.0",
}
apiUrl := "https://d.pcs.baidu.com/rest/2.0/pcs/file"
var resp UploadServerResp
_, err := d.request(apiUrl, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(params)
}, &resp)
if err != nil {
return "", err
}
// 应该是https开头的一个地址
var uploadUrl string
if len(resp.Servers) > 0 {
uploadUrl = resp.Servers[0].Server
} else if len(resp.BakServers) > 0 {
uploadUrl = resp.BakServers[0].Server
}
if uploadUrl == "" {
return "", errors.New("upload URL is empty")
}
return uploadUrl, nil
}
// func encodeURIComponent(str string) string {
// r := url.QueryEscape(str)
// r = strings.ReplaceAll(r, "+", "%20")
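
The removed getUploadUrl merges concurrent lookups for the same uploadid and caches the result, falling back to the configured Upload API on failure. A compact sketch of that merge-and-cache pattern, substituting golang.org/x/sync/singleflight for the project's generic pkg/singleflight and a hypothetical resolve callback for requestForUploadUrl:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

// uploadURLCache mirrors the removed pattern: concurrent lookups for the same
// uploadid are merged via singleflight and the result is reused until cleared.
type uploadURLCache struct {
	g    singleflight.Group
	mu   sync.RWMutex
	byID map[string]string
}

// get returns the cached URL for uploadID, resolving it once if needed and
// falling back to fallback (the "Upload API" setting) on error.
func (c *uploadURLCache) get(uploadID string, resolve func() (string, error), fallback string) string {
	c.mu.RLock()
	if u, ok := c.byID[uploadID]; ok {
		c.mu.RUnlock()
		return u
	}
	c.mu.RUnlock()

	v, err, _ := c.g.Do(uploadID, func() (interface{}, error) {
		u, err := resolve()
		if err != nil {
			return "", err
		}
		c.mu.Lock()
		c.byID[uploadID] = u
		c.mu.Unlock()
		return u, nil
	})
	if err != nil {
		return fallback
	}
	return v.(string)
}

func main() {
	c := &uploadURLCache{byID: map[string]string{}}
	// Hypothetical resolver standing in for the locateupload request.
	resolve := func() (string, error) { return "https://upload.example.com", nil }
	fmt.Println(c.get("upload-1", resolve, "https://d.pcs.baidu.com"))
}
```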

View File

@@ -18,9 +18,8 @@ type Addition struct {
}
var config = driver.Config{
Name: "BaiduPhoto",
LocalSort: true,
LinkCacheMode: driver.LinkCacheUA,
Name: "BaiduPhoto",
LocalSort: true,
}
func init() {

View File

@@ -25,7 +25,6 @@ func InitClient() {
}),
).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
NoRedirectClient.SetHeader("user-agent", UserAgent)
net.SetRestyProxyIfConfigured(NoRedirectClient)
RestyClient = NewRestyClient()
HttpClient = net.NewHttpClient()
@@ -38,7 +37,5 @@ func NewRestyClient() *resty.Client {
SetRetryResetReaders(true).
SetTimeout(DefaultTimeout).
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
net.SetRestyProxyIfConfigured(client)
return client
}

View File

@@ -10,7 +10,6 @@ import (
"mime/multipart"
"net/http"
"net/url"
"strconv"
"strings"
"time"
@@ -240,7 +239,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
if err != nil {
return err
}
err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid))
err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
if err != nil {
fmt.Println("Error writing param2 to request body:", err)
return err
@@ -261,7 +260,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
return err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
resps, err := http.DefaultClient.Do(req)
if err != nil {
return err

View File

@@ -258,7 +258,7 @@ type UploadDoneParam struct {
func fileToObj(f File) *model.Object {
if len(f.Content.FolderName) > 0 {
return &model.Object{
ID: strconv.Itoa(f.ID),
ID: fmt.Sprintf("%d", f.ID),
Name: f.Content.FolderName,
Size: 0,
Modified: time.UnixMilli(f.Inserttime),

View File

@@ -9,7 +9,6 @@ import (
"fmt"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -173,7 +172,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err

View File

@@ -317,8 +317,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
RequireReference: remoteLink.RequireReference,
SyncClosers: utils.NewSyncClosers(remoteLink),
}, nil
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
"golang.org/x/time/rate"
)
type Doubao struct {
@@ -24,7 +23,6 @@ type Doubao struct {
*UploadToken
UserId string
uploadThread int
limiter *rate.Limiter
}
func (d *Doubao) Config() driver.Config {
@@ -63,17 +61,6 @@ func (d *Doubao) Init(ctx context.Context) error {
d.UploadToken = uploadToken
}
if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
}
return nil
}
func (d *Doubao) WaitLimit(ctx context.Context) error {
if d.limiter != nil {
return d.limiter.Wait(ctx)
}
return nil
}
@@ -82,10 +69,6 @@ func (d *Doubao) Drop(ctx context.Context) error {
}
func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var files []model.Obj
fileList, err := d.getFiles(dir.GetID(), "")
if err != nil {
@@ -112,10 +95,6 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
}
func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var downloadUrl string
if u, ok := file.(*Object); ok {
@@ -181,10 +160,6 @@ func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r UploadNodeResp
_, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -202,10 +177,6 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
}
func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r UploadNodeResp
_, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -220,10 +191,6 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r BaseResp
_, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -240,10 +207,6 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
}
func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r BaseResp
_, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
@@ -252,10 +215,6 @@ func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
// 根据MIME类型确定数据类型
mimetype := file.GetMimetype()
dataType := FileDataType
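
The fuller variant of this driver gates every API call behind WaitLimit, backed by a golang.org/x/time/rate limiter built from the LimitRate setting (default 2 requests per second). A minimal sketch of that guard:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// waitLimit mirrors the WaitLimit guard: every API call first waits on a shared
// limiter (here 2 requests per second with burst 1).
func waitLimit(ctx context.Context, l *rate.Limiter) error {
	if l != nil {
		return l.Wait(ctx)
	}
	return nil
}

func main() {
	limiter := rate.NewLimiter(rate.Limit(2), 1)
	start := time.Now()
	for i := 0; i < 4; i++ {
		if err := waitLimit(context.Background(), limiter); err != nil {
			fmt.Println("cancelled:", err)
			return
		}
		fmt.Printf("call %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
}
```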

View File

@@ -10,10 +10,9 @@ type Addition struct {
// driver.RootPath
driver.RootID
// define other
Cookie string `json:"cookie" type:"text"`
UploadThread string `json:"upload_thread" default:"3"`
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
Cookie string `json:"cookie" type:"text"`
UploadThread string `json:"upload_thread" default:"3"`
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
}
var config = driver.Config{
@@ -24,10 +23,6 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Doubao{
Addition: Addition{
LimitRate: 2,
},
}
return &Doubao{}
})
}

View File

@@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {strconv.FormatInt(file.GetSize(), 10)},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
@@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {strconv.FormatInt(size, 10)},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)

View File

@@ -16,10 +16,9 @@ type Addition struct {
}
var config = driver.Config{
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheIP,
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
}
func init() {

View File

@@ -113,7 +113,9 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
}
return &model.Link{
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
},
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}

View File

@@ -31,11 +31,11 @@ type Addition struct {
}
var config = driver.Config{
Name: "FTP",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: "/",
NoLinkURL: true,
Name: "FTP",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {

View File

@@ -51,9 +51,6 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
if d.Addition.ShowReadme {
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
if d.Addition.ShowSourceCode{
files = append(files, point.GetSourceCode()...)
}
} else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录
nextDir := GetNextDir(point.Point, path)
if nextDir == "" {
@@ -120,10 +117,6 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
}
files = append(files, point.GetReleaseByTagName(tagName)...)
if d.Addition.ShowSourceCode{
files = append(files, point.GetSourceCodeByTagName(tagName)...)
}
}
}
}

View File

@@ -10,7 +10,6 @@ type Addition struct {
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"OpenListTeam/OpenList" help:"structure:[path:]org/repo"`
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
ShowSourceCode bool `json:"show_source_code" type:"bool" default:"false" help:"show Source code (zip/tar.gz)"`
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
}

View File

@@ -143,60 +143,6 @@ func (m *MountPoint) GetAllVersionSize() int64 {
return size
}
func (m *MountPoint) GetSourceCode() []File {
files := make([]File, 0)
// 无法获取文件大小,此处设为 1
files = append(files, File{
Path: m.Point + "/" + "Source code (zip)",
FileName: "Source code (zip)",
Size: 1,
Type: "file",
UpdateAt: m.Release.CreatedAt,
CreateAt: m.Release.CreatedAt,
Url: m.Release.ZipballUrl,
})
files = append(files, File{
Path: m.Point + "/" + "Source code (tar.gz)",
FileName: "Source code (tar.gz)",
Size: 1,
Type: "file",
UpdateAt: m.Release.CreatedAt,
CreateAt: m.Release.CreatedAt,
Url: m.Release.TarballUrl,
})
return files
}
func (m *MountPoint) GetSourceCodeByTagName(tagName string) []File {
for _, item := range *m.Releases {
if item.TagName == tagName {
files := make([]File, 0)
files = append(files, File{
Path: m.Point + "/" + "Source code (zip)",
FileName: "Source code (zip)",
Size: 1,
Type: "file",
UpdateAt: item.CreatedAt,
CreateAt: item.CreatedAt,
Url: item.ZipballUrl,
})
files = append(files, File{
Path: m.Point + "/" + "Source code (tar.gz)",
FileName: "Source code (tar.gz)",
Size: 1,
Type: "file",
UpdateAt: item.CreatedAt,
CreateAt: item.CreatedAt,
Url: item.TarballUrl,
})
return files
}
}
return nil
}
func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
if m.OtherFile == nil || refresh {
resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")

View File

@@ -27,14 +27,6 @@ import (
// do others that not defined in Driver interface
// Google Drive API field constants
const (
// File list query fields
FilesListFields = "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken"
// Single file query fields
FileInfoFields = "id,name,mimeType,size,md5Checksum,sha1Checksum,sha256Checksum"
)
type googleDriveServiceAccount struct {
// Type string `json:"type"`
// ProjectID string `json:"project_id"`
@@ -243,7 +235,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
}
query := map[string]string{
"orderBy": orderBy,
"fields": FilesListFields,
"fields": "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
"pageSize": "1000",
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
//"includeItemsFromAllDrives": "true",
@@ -257,82 +249,11 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
return nil, err
}
pageToken = resp.NextPageToken
// Batch process shortcuts, API calls only for file shortcuts
shortcutTargetIds := make([]string, 0)
shortcutIndices := make([]int, 0)
// Collect target IDs of all file shortcuts (skip folder shortcuts)
for i := range resp.Files {
if resp.Files[i].MimeType == "application/vnd.google-apps.shortcut" &&
resp.Files[i].ShortcutDetails.TargetId != "" &&
resp.Files[i].ShortcutDetails.TargetMimeType != "application/vnd.google-apps.folder" {
shortcutTargetIds = append(shortcutTargetIds, resp.Files[i].ShortcutDetails.TargetId)
shortcutIndices = append(shortcutIndices, i)
}
}
// Batch get target file info (only for file shortcuts)
if len(shortcutTargetIds) > 0 {
targetFiles := d.batchGetTargetFilesInfo(shortcutTargetIds)
// Update shortcut file info
for j, targetId := range shortcutTargetIds {
if targetFile, exists := targetFiles[targetId]; exists {
fileIndex := shortcutIndices[j]
if targetFile.Size != "" {
resp.Files[fileIndex].Size = targetFile.Size
}
if targetFile.MD5Checksum != "" {
resp.Files[fileIndex].MD5Checksum = targetFile.MD5Checksum
}
if targetFile.SHA1Checksum != "" {
resp.Files[fileIndex].SHA1Checksum = targetFile.SHA1Checksum
}
if targetFile.SHA256Checksum != "" {
resp.Files[fileIndex].SHA256Checksum = targetFile.SHA256Checksum
}
}
}
}
res = append(res, resp.Files...)
}
return res, nil
}
// getTargetFileInfo gets target file details for shortcuts
func (d *GoogleDrive) getTargetFileInfo(targetId string) (File, error) {
var targetFile File
url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s", targetId)
query := map[string]string{
"fields": FileInfoFields,
}
_, err := d.request(url, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &targetFile)
if err != nil {
return File{}, err
}
return targetFile, nil
}
// batchGetTargetFilesInfo batch gets target file info, sequential processing to avoid concurrency complexity
func (d *GoogleDrive) batchGetTargetFilesInfo(targetIds []string) map[string]File {
if len(targetIds) == 0 {
return make(map[string]File)
}
result := make(map[string]File)
// Sequential processing to avoid concurrency complexity
for _, targetId := range targetIds {
file, err := d.getTargetFileInfo(targetId)
if err == nil {
result[targetId] = file
}
}
return result
}
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
defaultChunkSize := d.ChunkSize * 1024 * 1024
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)

View File

@@ -1,111 +0,0 @@
package halalcloudopen
import (
"sync"
"time"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
)
var (
slicePostErrorRetryInterval = time.Second * 120
retryTimes = 5
)
type halalCommon struct {
// *AuthService // 登录信息
UserInfo *sdkUser.User // 用户信息
refreshTokenFunc func(token string) error
// serv *AuthService
configs sync.Map
}
func (m *halalCommon) GetAccessToken() (string, error) {
value, exists := m.configs.Load("access_token")
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// GetRefreshToken implements ConfigStore.
func (m *halalCommon) GetRefreshToken() (string, error) {
value, exists := m.configs.Load("refresh_token")
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// SetAccessToken implements ConfigStore.
func (m *halalCommon) SetAccessToken(token string) error {
m.configs.Store("access_token", token)
return nil
}
// SetRefreshToken implements ConfigStore.
func (m *halalCommon) SetRefreshToken(token string) error {
m.configs.Store("refresh_token", token)
if m.refreshTokenFunc != nil {
return m.refreshTokenFunc(token)
}
return nil
}
// SetToken implements ConfigStore.
func (m *halalCommon) SetToken(accessToken string, refreshToken string, expiresIn int64) error {
m.configs.Store("access_token", accessToken)
m.configs.Store("refresh_token", refreshToken)
m.configs.Store("expires_in", expiresIn)
if m.refreshTokenFunc != nil {
return m.refreshTokenFunc(refreshToken)
}
return nil
}
// ClearConfigs implements ConfigStore.
func (m *halalCommon) ClearConfigs() error {
m.configs = sync.Map{} // 清空map
return nil
}
// DeleteConfig implements ConfigStore.
func (m *halalCommon) DeleteConfig(key string) error {
_, exists := m.configs.Load(key)
if !exists {
return nil // 如果不存在,直接返回
}
m.configs.Delete(key) // 删除指定的配置项
return nil
}
// GetConfig implements ConfigStore.
func (m *halalCommon) GetConfig(key string) (string, error) {
value, exists := m.configs.Load(key)
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// ListConfigs implements ConfigStore.
func (m *halalCommon) ListConfigs() (map[string]string, error) {
configs := make(map[string]string)
m.configs.Range(func(key, value interface{}) bool {
configs[key.(string)] = value.(string) // 将每个配置项添加到map中
return true // 继续遍历
})
return configs, nil // 返回所有配置项
}
// SetConfig implements ConfigStore.
func (m *halalCommon) SetConfig(key string, value string) error {
m.configs.Store(key, value) // 使用Store方法设置或更新配置项
return nil // 成功设置配置项后返回nil
}
func NewHalalCommon() *halalCommon {
return &halalCommon{
configs: sync.Map{},
}
}

View File

@@ -1,29 +0,0 @@
package halalcloudopen
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkClient "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
type HalalCloudOpen struct {
*halalCommon
model.Storage
Addition
sdkClient *sdkClient.Client
sdkUserFileService *sdkUserFile.UserFileService
sdkUserService *sdkUser.UserService
uploadThread int
}
func (d *HalalCloudOpen) Config() driver.Config {
return config
}
func (d *HalalCloudOpen) GetAddition() driver.Additional {
return &d.Addition
}
var _ driver.Driver = (*HalalCloudOpen)(nil)

View File

@@ -1,131 +0,0 @@
package halalcloudopen
import (
"context"
"strconv"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkModel "github.com/halalcloud/golang-sdk-lite/halalcloud/model"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
func (d *HalalCloudOpen) getFiles(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files := make([]model.Obj, 0)
limit := int64(100)
token := ""
for {
result, err := d.sdkUserFileService.List(ctx, &sdkUserFile.FileListRequest{
Parent: &sdkUserFile.File{Path: dir.GetPath()},
ListInfo: &sdkModel.ScanListRequest{
Limit: strconv.FormatInt(limit, 10),
Token: token,
},
})
if err != nil {
return nil, err
}
for i := 0; len(result.Files) > i; i++ {
files = append(files, NewObjFile(result.Files[i]))
}
if result.ListInfo == nil || result.ListInfo.Token == "" {
break
}
token = result.ListInfo.Token
}
return files, nil
}
func (d *HalalCloudOpen) makeDir(ctx context.Context, dir model.Obj, name string) (model.Obj, error) {
_, err := d.sdkUserFileService.Create(ctx, &sdkUserFile.File{
Path: dir.GetPath(),
Name: name,
})
return nil, err
}
func (d *HalalCloudOpen) move(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
oldDir := obj.GetPath()
newDir := dir.GetPath()
_, err := d.sdkUserFileService.Move(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Path: oldDir,
},
},
Dest: &sdkUserFile.File{
Path: newDir,
},
})
return nil, err
}
func (d *HalalCloudOpen) rename(ctx context.Context, obj model.Obj, name string) (model.Obj, error) {
_, err := d.sdkUserFileService.Rename(ctx, &sdkUserFile.File{
Path: obj.GetPath(),
Name: name,
})
return nil, err
}
func (d *HalalCloudOpen) copy(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
id := obj.GetID()
sourcePath := obj.GetPath()
if len(id) > 0 {
sourcePath = ""
}
destID := dir.GetID()
destPath := dir.GetPath()
if len(destID) > 0 {
destPath = ""
}
dest := &sdkUserFile.File{
Path: destPath,
Identity: destID,
}
_, err := d.sdkUserFileService.Copy(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Path: sourcePath,
Identity: id,
},
},
Dest: dest,
})
return nil, err
}
func (d *HalalCloudOpen) remove(ctx context.Context, obj model.Obj) error {
id := obj.GetID()
_, err := d.sdkUserFileService.Delete(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Identity: id,
Path: obj.GetPath(),
},
},
})
return err
}
func (d *HalalCloudOpen) details(ctx context.Context) (*model.StorageDetails, error) {
ret, err := d.sdkUserService.GetStatisticsAndQuota(ctx)
if err != nil {
return nil, err
}
total := uint64(ret.DiskStatisticsQuota.BytesQuota)
free := uint64(ret.DiskStatisticsQuota.BytesFree)
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

View File

@@ -1,108 +0,0 @@
package halalcloudopen
import (
"context"
"crypto/sha1"
"io"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/rclone/rclone/lib/readers"
)
func (d *HalalCloudOpen) getLink(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if args.Redirect {
// return nil, model.ErrUnsupported
fid := file.GetID()
fpath := file.GetPath()
if fid != "" {
fpath = ""
}
fi, err := d.sdkUserFileService.GetDirectDownloadAddress(ctx, &sdkUserFile.DirectDownloadRequest{
Identity: fid,
Path: fpath,
})
if err != nil {
return nil, err
}
expireAt := fi.ExpireAt
duration := time.Until(time.UnixMilli(expireAt))
return &model.Link{
URL: fi.DownloadAddress,
Expiration: &duration,
}, nil
}
result, err := d.sdkUserFileService.ParseFileSlice(ctx, &sdkUserFile.File{
Identity: file.GetID(),
Path: file.GetPath(),
})
if err != nil {
return nil, err
}
fileAddrs := []*sdkUserFile.SliceDownloadInfo{}
var addressDuration int64
nodesNumber := len(result.RawNodes)
nodesIndex := nodesNumber - 1
startIndex, endIndex := 0, nodesIndex
for nodesIndex >= 0 {
if nodesIndex >= 200 {
endIndex = 200
} else {
endIndex = nodesNumber
}
for ; endIndex <= nodesNumber; endIndex += 200 {
if endIndex == 0 {
endIndex = 1
}
sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
Identity: result.RawNodes[startIndex:endIndex],
Version: 1,
})
if err != nil {
return nil, err
}
addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
startIndex = endIndex
nodesIndex -= 200
}
}
size, _ := strconv.ParseInt(result.FileSize, 10, 64)
chunks := getChunkSizes(result.Sizes)
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
length = size - httpRange.Start
}
oo := &openObject{
ctx: ctx,
d: fileAddrs,
chunk: []byte{},
chunks: chunks,
skip: httpRange.Start,
sha: result.Sha1,
shaTemp: sha1.New(),
}
return readers.NewLimitedReadCloser(oo, length), nil
}
var duration time.Duration
if addressDuration != 0 {
duration = time.Until(time.UnixMilli(addressDuration))
} else {
duration = time.Until(time.Now().Add(time.Hour))
}
return &model.Link{
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
Expiration: &duration,
}, nil
}
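
Both branches of getLink convert an absolute expiry, returned by the API as milliseconds since the Unix epoch, into a remaining duration stored on the link. A minimal sketch of that conversion (the 90-minute figure is just demo input):

package main

import (
	"fmt"
	"time"
)

// linkTTL converts an absolute expiry in milliseconds since the Unix epoch
// into the remaining time-to-live, as the driver does for download links.
func linkTTL(expireAtMillis int64) time.Duration {
	return time.Until(time.UnixMilli(expireAtMillis))
}

func main() {
	expireAt := time.Now().Add(90 * time.Minute).UnixMilli()
	fmt.Println(linkTTL(expireAt).Round(time.Minute)) // 1h30m0s
}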

View File

@@ -1,50 +0,0 @@
package halalcloudopen
import (
"context"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
func (d *HalalCloudOpen) Init(ctx context.Context) error {
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, 3
}
if d.halalCommon == nil {
d.halalCommon = &halalCommon{
UserInfo: &sdkUser.User{},
refreshTokenFunc: func(token string) error {
d.Addition.RefreshToken = token
op.MustSaveDriverStorage(d)
return nil
},
}
}
if d.Addition.RefreshToken != "" {
d.halalCommon.SetRefreshToken(d.Addition.RefreshToken)
}
timeout := d.Addition.TimeOut
if timeout <= 0 {
timeout = 60
}
host := d.Addition.Host
if host == "" {
host = "openapi.2dland.cn"
}
client := apiclient.NewClient(nil, host, d.Addition.ClientID, d.Addition.ClientSecret, d.halalCommon, apiclient.WithTimeout(time.Second*time.Duration(timeout)))
d.sdkClient = client
d.sdkUserFileService = sdkUserFile.NewUserFileService(client)
d.sdkUserService = sdkUser.NewUserService(client)
userInfo, err := d.sdkUserService.Get(ctx, &sdkUser.User{})
if err != nil {
return err
}
d.halalCommon.UserInfo = userInfo
// Being able to fetch the user info already proves the RefreshToken is valid, so no further check is needed
return nil
}
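
Init hands the SDK a callback so that a rotated refresh token is immediately written back to the stored driver configuration. A minimal sketch of that callback pattern; tokenStore stands in for the persisted Addition, and the print stands in for op.MustSaveDriverStorage:

package main

import "fmt"

// tokenStore is a hypothetical stand-in for the persisted driver Addition.
type tokenStore struct {
	RefreshToken string
}

// newRefreshTokenFunc returns the callback handed to the SDK: whenever the
// SDK rotates the refresh token, the new value is saved immediately.
func newRefreshTokenFunc(s *tokenStore) func(string) error {
	return func(token string) error {
		s.RefreshToken = token
		fmt.Println("persisting refresh token") // op.MustSaveDriverStorage(d) in the driver
		return nil
	}
}

func main() {
	store := &tokenStore{}
	cb := newRefreshTokenFunc(store)
	_ = cb("new-token-from-sdk")
	fmt.Println(store.RefreshToken) // new-token-from-sdk
}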

View File

@@ -1,48 +0,0 @@
package halalcloudopen
import (
"context"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
func (d *HalalCloudOpen) Drop(ctx context.Context) error {
return nil
}
func (d *HalalCloudOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
return d.getFiles(ctx, dir)
}
func (d *HalalCloudOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return d.getLink(ctx, file, args)
}
func (d *HalalCloudOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
return d.makeDir(ctx, parentDir, dirName)
}
func (d *HalalCloudOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.move(ctx, srcObj, dstDir)
}
func (d *HalalCloudOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
return d.rename(ctx, srcObj, newName)
}
func (d *HalalCloudOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.copy(ctx, srcObj, dstDir)
}
func (d *HalalCloudOpen) Remove(ctx context.Context, obj model.Obj) error {
return d.remove(ctx, obj)
}
func (d *HalalCloudOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.put(ctx, dstDir, stream, up)
}
func (d *HalalCloudOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
return d.details(ctx)
}

View File

@@ -1,258 +0,0 @@
package halalcloudopen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/ipfs/go-cid"
)
func (d *HalalCloudOpen) put(ctx context.Context, dstDir model.Obj, fileStream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
newPath := path.Join(dstDir.GetPath(), fileStream.GetName())
uploadTask, err := d.sdkUserFileService.CreateUploadTask(ctx, &sdkUserFile.File{
Path: newPath,
Size: fileStream.GetSize(),
})
if err != nil {
return nil, err
}
if uploadTask.Created {
return nil, nil
}
slicesList := make([]string, 0)
codec := uint64(0x55)
if uploadTask.BlockCodec > 0 {
codec = uint64(uploadTask.BlockCodec)
}
blockHashType := uploadTask.BlockHashType
mhType := uint64(0x12)
if blockHashType > 0 {
mhType = uint64(blockHashType)
}
prefix := cid.Prefix{
Codec: codec,
MhLength: -1,
MhType: mhType,
Version: 1,
}
blockSize := uploadTask.BlockSize
useSingleUpload := true
//
if fileStream.GetSize() <= int64(blockSize) || d.uploadThread <= 1 {
useSingleUpload = true
}
// It is unclear whether FileStream supports concurrent reads and writes, so a single-threaded upload is used for safety.
// read file
if useSingleUpload {
bufferSize := int(blockSize)
buffer := make([]byte, bufferSize)
reader := driver.NewLimitedUploadStream(ctx, fileStream)
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
// fileStream.Seek(0, os.SEEK_SET)
for {
n, err := teeReader.Read(buffer)
if n > 0 {
data := buffer[:n]
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
if err != nil {
return nil, err
}
slicesList = append(slicesList, uploadCid.String())
}
if err == io.EOF || n == 0 {
break
}
}
} else {
// TODO: implement multipart upload, currently using single-threaded upload to ensure safety.
bufferSize := int(blockSize)
buffer := make([]byte, bufferSize)
reader := driver.NewLimitedUploadStream(ctx, fileStream)
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
for {
n, err := teeReader.Read(buffer)
if n > 0 {
data := buffer[:n]
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
if err != nil {
return nil, err
}
slicesList = append(slicesList, uploadCid.String())
}
if err == io.EOF || n == 0 {
break
}
}
}
newFile, err := makeFile(ctx, slicesList, uploadTask.Task, uploadTask.UploadAddress, retryTimes)
if err != nil {
return nil, err
}
return NewObjFile(newFile), nil
}
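
The upload path above reads fixed-size blocks through an io.TeeReader so that progress is reported as a side effect of reading. A runnable sketch of that loop follows; progressCounter stands in for driver.NewProgress, and counting a block stands in for posting a slice. Like the driver loop, it stops on io.EOF or an empty read; a stricter version would also surface non-EOF errors.

package main

import (
	"fmt"
	"io"
	"strings"
)

// progressCounter is a hypothetical stand-in for driver.NewProgress.
type progressCounter struct{ done int64 }

func (p *progressCounter) Write(b []byte) (int, error) {
	p.done += int64(len(b))
	return len(b), nil
}

func main() {
	src := strings.NewReader("hello, chunked upload world")
	var prog progressCounter
	tee := io.TeeReader(src, &prog) // every read also advances the progress counter

	const blockSize = 8
	buf := make([]byte, blockSize)
	blocks := 0
	for {
		n, err := tee.Read(buf)
		if n > 0 {
			// the driver posts buf[:n] as one slice and records its CID here
			blocks++
		}
		if err == io.EOF || n == 0 {
			break
		}
	}
	fmt.Println(blocks, "blocks,", prog.done, "bytes reported")
}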
func makeFile(ctx context.Context, fileSlice []string, taskID string, uploadAddress string, retry int) (*sdkUserFile.File, error) {
var lastError error = nil
for range retry {
newFile, err := doMakeFile(fileSlice, taskID, uploadAddress)
if err == nil {
return newFile, nil
}
if ctx.Err() != nil {
return nil, err
}
if strings.Contains(err.Error(), "not found") {
return nil, err
}
lastError = err
time.Sleep(slicePostErrorRetryInterval)
}
return nil, fmt.Errorf("mk file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUserFile.File, error) {
accessUrl := uploadAddress + "/" + taskID
getTimeOut := time.Minute * 2
u, err := url.Parse(accessUrl)
if err != nil {
return nil, err
}
n, _ := json.Marshal(fileSlice)
httpRequest := http.Request{
Method: http.MethodPost,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
"Content-Type": {"application/json"},
//"Content-Length": {strconv.Itoa(len(n))},
},
Body: io.NopCloser(bytes.NewReader(n)),
}
httpClient := http.Client{
Timeout: getTimeOut,
}
httpResponse, err := httpClient.Do(&httpRequest)
if err != nil {
return nil, err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
b, _ := io.ReadAll(httpResponse.Body)
message := string(b)
return nil, fmt.Errorf("mk file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
}
b, _ := io.ReadAll(httpResponse.Body)
var result *sdkUserFile.File
err = json.Unmarshal(b, &result)
if err != nil {
return nil, err
}
return result, nil
}
func postFileSlice(ctx context.Context, fileSlice []byte, taskID string, uploadAddress string, prefix cid.Prefix, retry int) (cid.Cid, error) {
var lastError error = nil
for range retry {
newCid, err := doPostFileSlice(fileSlice, taskID, uploadAddress, prefix)
if err == nil {
return newCid, nil
}
if ctx.Err() != nil {
return cid.Undef, err
}
time.Sleep(slicePostErrorRetryInterval)
lastError = err
}
return cid.Undef, fmt.Errorf("upload file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, prefix cid.Prefix) (cid.Cid, error) {
// 1. sum file slice
newCid, err := prefix.Sum(fileSlice)
if err != nil {
return cid.Undef, err
}
// 2. post file slice
sliceCidString := newCid.String()
// /{taskID}/{sliceID}
accessUrl := uploadAddress + "/" + taskID + "/" + sliceCidString
getTimeOut := time.Second * 30
// get {accessUrl} in {getTimeOut}
u, err := url.Parse(accessUrl)
if err != nil {
return cid.Undef, err
}
// header: accept: application/json
// header: content-type: application/octet-stream
// header: content-length: {fileSlice.length}
// header: x-content-cid: {sliceCidString}
// header: x-task-id: {taskID}
httpRequest := http.Request{
Method: http.MethodGet,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
},
}
httpClient := http.Client{
Timeout: getTimeOut,
}
httpResponse, err := httpClient.Do(&httpRequest)
if err != nil {
return cid.Undef, err
}
if httpResponse.StatusCode != http.StatusOK {
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d", httpResponse.StatusCode)
}
var result bool
b, err := io.ReadAll(httpResponse.Body)
if err != nil {
return cid.Undef, err
}
err = json.Unmarshal(b, &result)
if err != nil {
return cid.Undef, err
}
if result {
return newCid, nil
}
httpRequest = http.Request{
Method: http.MethodPost,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
"Content-Type": {"application/octet-stream"},
// "Content-Length": {strconv.Itoa(len(fileSlice))},
},
Body: io.NopCloser(bytes.NewReader(fileSlice)),
}
httpResponse, err = httpClient.Do(&httpRequest)
if err != nil {
return cid.Undef, err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
b, _ := io.ReadAll(httpResponse.Body)
message := string(b)
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
}
//
return newCid, nil
}
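
Each slice identifier is a CID computed client-side from the block bytes with the prefix assembled in put(). A runnable sketch using the same go-cid call; codec 0x55 is raw and multihash type 0x12 is sha2-256, matching the defaults above:

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

func main() {
	// Same defaults as the upload path: CIDv1, raw codec, sha2-256, full digest.
	prefix := cid.Prefix{
		Version:  1,
		Codec:    0x55,
		MhType:   0x12,
		MhLength: -1,
	}
	block := []byte("example slice contents")
	c, err := prefix.Sum(block) // hash the block and wrap it in a CID
	if err != nil {
		panic(err)
	}
	fmt.Println(c.String()) // a base32 CIDv1 string, e.g. "bafkrei..."
}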

View File

@@ -1,32 +0,0 @@
package halalcloudopen
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootPath
// define other
RefreshToken string `json:"refresh_token" required:"false" help:"If using a personal API approach, the RefreshToken is not required."`
UploadThread int `json:"upload_thread" type:"number" default:"3" help:"1 <= thread <= 32"`
ClientID string `json:"client_id" required:"true" default:""`
ClientSecret string `json:"client_secret" required:"true" default:""`
Host string `json:"host" required:"false" default:"openapi.2dland.cn"`
TimeOut int `json:"timeout" type:"number" default:"60" help:"timeout in seconds"`
}
var config = driver.Config{
Name: "HalalCloudOpen",
OnlyProxy: false,
DefaultRoot: "/",
NoLinkURL: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &HalalCloudOpen{}
})
}

View File

@@ -1,60 +0,0 @@
package halalcloudopen
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
type ObjFile struct {
sdkFile *sdkUserFile.File
fileSize int64
modTime time.Time
createTime time.Time
}
func NewObjFile(f *sdkUserFile.File) model.Obj {
ofile := &ObjFile{sdkFile: f}
ofile.fileSize = f.Size
modTimeTs := f.UpdateTs
ofile.modTime = time.UnixMilli(modTimeTs)
createTimeTs := f.CreateTs
ofile.createTime = time.UnixMilli(createTimeTs)
return ofile
}
func (f *ObjFile) GetSize() int64 {
return f.fileSize
}
func (f *ObjFile) GetName() string {
return f.sdkFile.Name
}
func (f *ObjFile) ModTime() time.Time {
return f.modTime
}
func (f *ObjFile) IsDir() bool {
return f.sdkFile.Dir
}
func (f *ObjFile) GetHash() utils.HashInfo {
return utils.HashInfo{
// TODO: support more hash types
}
}
func (f *ObjFile) GetID() string {
return f.sdkFile.Identity
}
func (f *ObjFile) GetPath() string {
return f.sdkFile.Path
}
func (f *ObjFile) CreateTime() time.Time {
return f.createTime
}

View File

@@ -1,185 +0,0 @@
package halalcloudopen
import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"net/http"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/ipfs/go-cid"
)
// get the next chunk
func (oo *openObject) getChunk(_ context.Context) (err error) {
if oo.id >= len(oo.chunks) {
return io.EOF
}
var chunk []byte
err = utils.Retry(3, time.Second, func() (err error) {
chunk, err = getRawFiles(oo.d[oo.id])
return err
})
if err != nil {
return err
}
oo.id++
oo.chunk = chunk
return nil
}
// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return 0, fmt.Errorf("read on closed file")
}
// Skip data at the start if requested
for oo.skip > 0 {
//size := 1024 * 1024
_, size, err := oo.ChunkLocation(oo.id)
if err != nil {
return 0, err
}
if oo.skip < int64(size) {
break
}
oo.id++
oo.skip -= int64(size)
}
if len(oo.chunk) == 0 {
err = oo.getChunk(oo.ctx)
if err != nil {
return 0, err
}
if oo.skip > 0 {
oo.chunk = (oo.chunk)[oo.skip:]
oo.skip = 0
}
}
n = copy(p, oo.chunk)
oo.shaTemp.Write(p[:n])
oo.chunk = (oo.chunk)[n:]
return n, nil
}
// Close closed the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return nil
}
// Verify the SHA-1 checksum
if string(oo.shaTemp.Sum(nil)) != oo.sha {
return fmt.Errorf("failed to finish download: SHA mismatch")
}
oo.closed = true
return nil
}
func GetMD5Hash(text string) string {
tHash := md5.Sum([]byte(text))
return hex.EncodeToString(tHash[:])
}
type chunkSize struct {
position int64
size int
}
type openObject struct {
ctx context.Context
mu sync.Mutex
d []*sdkUserFile.SliceDownloadInfo
id int
skip int64
chunk []byte
chunks []chunkSize
closed bool
sha string
shaTemp hash.Hash
}
func getChunkSizes(sliceSize []*sdkUserFile.SliceSize) (chunks []chunkSize) {
chunks = make([]chunkSize, 0)
for _, s := range sliceSize {
// Special-case the last slice
endIndex := s.EndIndex
startIndex := s.StartIndex
if endIndex == 0 {
endIndex = startIndex
}
for j := startIndex; j <= endIndex; j++ {
size := s.Size
chunks = append(chunks, chunkSize{position: j, size: int(size)})
}
}
return chunks
}
func (oo *openObject) ChunkLocation(id int) (position int64, size int, err error) {
if id < 0 || id >= len(oo.chunks) {
return 0, 0, errors.New("invalid arguments")
}
return (oo.chunks)[id].position, (oo.chunks)[id].size, nil
}
func getRawFiles(addr *sdkUserFile.SliceDownloadInfo) ([]byte, error) {
if addr == nil {
return nil, errors.New("addr is nil")
}
client := http.Client{
Timeout: time.Duration(60 * time.Second), // Set timeout to 60 seconds
}
resp, err := client.Get(addr.DownloadAddress)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("bad status: %s, body: %s", resp.Status, body)
}
if addr.Encrypt > 0 {
cd := uint8(addr.Encrypt)
for idx := 0; idx < len(body); idx++ {
body[idx] = body[idx] ^ cd
}
}
storeType := addr.StoreType
if storeType != 10 {
sourceCid, err := cid.Decode(addr.Identity)
if err != nil {
return nil, err
}
checkCid, err := sourceCid.Prefix().Sum(body)
if err != nil {
return nil, err
}
if !checkCid.Equals(sourceCid) {
return nil, fmt.Errorf("bad cid: %s, body: %s", checkCid.String(), body)
}
}
return body, nil
}
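
getRawFiles de-obfuscates the body by XORing every byte with the Encrypt value from the slice metadata. A tiny standalone sketch of that transform; since XOR with the same key is its own inverse, decoding twice restores the original bytes:

package main

import "fmt"

// xorDecode applies the same single-byte XOR used above for slices whose
// metadata carries a non-zero Encrypt value.
func xorDecode(body []byte, key uint8) []byte {
	out := make([]byte, len(body))
	for i, b := range body {
		out[i] = b ^ key
	}
	return out
}

func main() {
	plain := []byte("slice data")
	key := uint8(0x5a)
	obfuscated := xorDecode(plain, key)
	fmt.Printf("%q\n", xorDecode(obfuscated, key)) // "slice data"
}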

View File

@@ -235,7 +235,6 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fullPath := file.GetPath()
link := &model.Link{}
var MFile model.File
if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" {
var buf *bytes.Buffer
var thumbPath *string
@@ -262,9 +261,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
link.ContentLength = int64(stat.Size())
MFile = open
link.MFile = open
} else {
MFile = bytes.NewReader(buf.Bytes())
link.MFile = bytes.NewReader(buf.Bytes())
link.ContentLength = int64(buf.Len())
}
} else {
@@ -273,11 +272,13 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
link.ContentLength = file.GetSize()
MFile = open
link.MFile = open
}
link.AddIfCloser(link.MFile)
if !d.Config().OnlyLinkMFile {
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
link.MFile = nil
}
link.SyncClosers.AddIfCloser(MFile)
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, MFile)
link.RequireReference = link.SyncClosers.Length() > 0
return link, nil
}
@@ -374,26 +375,18 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
err = os.Remove(obj.GetPath())
}
} else {
objPath := obj.GetPath()
objName := obj.GetName()
var relPath string
relPath, err = filepath.Rel(d.GetRootPath(), filepath.Dir(objPath))
if err != nil {
return err
}
recycleBinPath := filepath.Join(d.RecycleBinPath, relPath)
if !utils.Exists(recycleBinPath) {
err = os.MkdirAll(recycleBinPath, 0o755)
if !utils.Exists(d.RecycleBinPath) {
err = os.MkdirAll(d.RecycleBinPath, 0o755)
if err != nil {
return err
}
}
dstPath := filepath.Join(recycleBinPath, objName)
dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
if utils.Exists(dstPath) {
dstPath = filepath.Join(recycleBinPath, objName+"_"+time.Now().Format("20060102150405"))
dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
}
err = os.Rename(objPath, dstPath)
err = os.Rename(obj.GetPath(), dstPath)
}
if err != nil {
return err
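
The hunk above moves a deleted object into the recycle bin and appends a timestamp suffix when a file of the same name already exists there. A compilable sketch of the flat recycle-bin variant on the right-hand side of the hunk; the function name and the temp-dir demo are mine, not from the driver:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// moveToRecycleBin renames objPath into recycleBin, suffixing a timestamp
// if the target name is already taken.
func moveToRecycleBin(objPath, recycleBin string) error {
	if err := os.MkdirAll(recycleBin, 0o755); err != nil {
		return err
	}
	dst := filepath.Join(recycleBin, filepath.Base(objPath))
	if _, err := os.Stat(dst); err == nil {
		dst = filepath.Join(recycleBin, filepath.Base(objPath)+"_"+time.Now().Format("20060102150405"))
	}
	return os.Rename(objPath, dst)
}

func main() {
	dir, _ := os.MkdirTemp("", "recycle-demo")
	defer os.RemoveAll(dir)
	src := filepath.Join(dir, "a.txt")
	_ = os.WriteFile(src, []byte("x"), 0o644)
	if err := moveToRecycleBin(src, filepath.Join(dir, ".recycle")); err != nil {
		fmt.Println("move failed:", err)
		return
	}
	fmt.Println("moved into recycle bin")
}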

View File

@@ -18,12 +18,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "Local",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
Name: "Local",
OnlyLinkMFile: false,
LocalSort: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {

View File

@@ -36,6 +36,7 @@ type Addition struct {
var config = driver.Config{
Name: "MediaFire",
LocalSort: false,
OnlyLinkMFile: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,

View File

@@ -22,7 +22,6 @@ type Onedrive struct {
AccessToken string
root *Object
mutex sync.Mutex
ref *Onedrive
}
func (d *Onedrive) Config() driver.Config {
@@ -37,22 +36,10 @@ func (d *Onedrive) Init(ctx context.Context) error {
if d.ChunkSize < 1 {
d.ChunkSize = 5
}
if d.ref != nil {
return nil
}
return d.refreshToken()
}
func (d *Onedrive) InitReference(refStorage driver.Driver) error {
if ref, ok := refStorage.(*Onedrive); ok {
d.ref = ref
return nil
}
return errs.NotSupport
}
func (d *Onedrive) Drop(ctx context.Context) error {
d.ref = nil
return nil
}
@@ -236,19 +223,4 @@ func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error
}, nil
}
func (d *Onedrive) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
// GetDirectUploadInfo returns the direct upload info for OneDrive
func (d *Onedrive) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
}
var _ driver.Driver = (*Onedrive)(nil)

View File

@@ -7,19 +7,18 @@ import (
type Addition struct {
driver.RootPath
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
IsSharepoint bool `json:"is_sharepoint"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
IsSharepoint bool `json:"is_sharepoint"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
}
var config = driver.Config{

View File

@@ -133,10 +133,7 @@ func (d *Onedrive) _refreshToken() error {
return nil
}
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
if d.ref != nil {
return d.ref.Request(url, method, callback, resp)
}
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if callback != nil {
@@ -152,7 +149,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback,
return nil, err
}
if e.Error.Code != "" {
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
if e.Error.Code == "InvalidAuthenticationToken" {
err = d.refreshToken()
if err != nil {
return nil, err
@@ -310,36 +307,9 @@ func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
var resp DriveResp
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp, true)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Onedrive) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
// Create upload session
url := d.GetMetaUrl(false, path) + "/createUploadSession"
metadata := map[string]any{
"item": map[string]any{
"@microsoft.graph.conflictBehavior": "rename",
},
}
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
if err != nil {
return nil, err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
if uploadUrl == "" {
return nil, fmt.Errorf("failed to get upload URL from response")
}
return &model.HttpDirectUploadInfo{
UploadURL: uploadUrl,
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
Method: "PUT",
}, nil
}
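
The Request changes in this file revolve around one pattern: on an InvalidAuthenticationToken error the token is refreshed and the call retried once, with an optional noRetry flag (added or removed in this diff) to suppress the retry. A standalone sketch of that pattern; the names are mine, and the real code inspects the Graph error body rather than a sentinel error:

package main

import (
	"errors"
	"fmt"
)

var errInvalidToken = errors.New("InvalidAuthenticationToken")

// requestWithRefresh calls the API once and, on an auth error, refreshes the
// token and retries a single time unless noRetry is set.
func requestWithRefresh(call func() error, refresh func() error, noRetry bool) error {
	err := call()
	if errors.Is(err, errInvalidToken) && !noRetry {
		if rerr := refresh(); rerr != nil {
			return rerr
		}
		return call()
	}
	return err
}

func main() {
	expired := true
	call := func() error {
		if expired {
			return errInvalidToken
		}
		return nil
	}
	refresh := func() error { expired = false; return nil }
	fmt.Println(requestWithRefresh(call, refresh, false)) // <nil>
}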

View File

@@ -222,18 +222,4 @@ func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, er
}, nil
}
func (d *OnedriveAPP) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
func (d *OnedriveAPP) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
}
var _ driver.Driver = (*OnedriveAPP)(nil)

View File

@@ -7,15 +7,14 @@ import (
type Addition struct {
driver.RootPath
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
TenantID string `json:"tenant_id"`
Email string `json:"email"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
TenantID string `json:"tenant_id"`
Email string `json:"email"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
}
var config = driver.Config{

View File

@@ -88,7 +88,7 @@ func (d *OnedriveAPP) _accessToken() error {
return nil
}
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if callback != nil {
@@ -104,7 +104,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallba
return nil, err
}
if e.Error.Code != "" {
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
if e.Error.Code == "InvalidAuthenticationToken" {
err = d.accessToken()
if err != nil {
return nil, err
@@ -216,36 +216,9 @@ func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
var resp DriveResp
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp, true)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *OnedriveAPP) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
// Create upload session
url := d.GetMetaUrl(false, path) + "/createUploadSession"
metadata := map[string]any{
"item": map[string]any{
"@microsoft.graph.conflictBehavior": "rename",
},
}
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
if err != nil {
return nil, err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
if uploadUrl == "" {
return nil, fmt.Errorf("failed to get upload URL from response")
}
return &model.HttpDirectUploadInfo{
UploadURL: uploadUrl,
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
Method: "PUT",
}, nil
}

View File

@@ -110,29 +110,19 @@ func (d *OpenList) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *OpenList) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var resp common.Resp[FsGetResp]
headers := map[string]string{
"User-Agent": base.UserAgent,
}
// if PassUAToUpsteam is true, then pass the user-agent to the upstream
userAgent := base.UserAgent
if d.PassUAToUpsteam {
userAgent := args.Header.Get("user-agent")
if userAgent != "" {
headers["User-Agent"] = base.UserAgent
}
}
// if PassIPToUpsteam is true, then pass the ip address to the upstream
if d.PassIPToUpsteam {
ip := args.IP
if ip != "" {
headers["X-Forwarded-For"] = ip
headers["X-Real-Ip"] = ip
userAgent = args.Header.Get("user-agent")
if userAgent == "" {
userAgent = base.UserAgent
}
}
_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(FsGetReq{
Path: file.GetPath(),
Password: d.MetaPassword,
}).SetHeaders(headers)
}).SetHeader("user-agent", userAgent)
})
if err != nil {
return nil, err
@@ -365,15 +355,8 @@ func (d *OpenList) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
return err
}
func (d *OpenList) ResolveLinkCacheMode(_ string) driver.LinkCacheMode {
var mode driver.LinkCacheMode
if d.PassIPToUpsteam {
mode |= driver.LinkCacheIP
}
if d.PassUAToUpsteam {
mode |= driver.LinkCacheUA
}
return mode
}
//func (d *OpenList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*OpenList)(nil)
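
The Link hunk above decides which client headers are forwarded to the upstream OpenList instance based on the pass_ua/pass_ip options; the fuller of the two variants forwards both the User-Agent and the client IP. A small sketch of that decision, where the default UA string is a placeholder for base.UserAgent:

package main

import "fmt"

// upstreamHeaders builds the headers sent to the upstream instance: the
// client User-Agent is forwarded only when passUA is set, and the client IP
// only when passIP is set.
func upstreamHeaders(clientUA, clientIP string, passUA, passIP bool) map[string]string {
	h := map[string]string{"User-Agent": "openlist-default-ua"} // placeholder for base.UserAgent
	if passUA && clientUA != "" {
		h["User-Agent"] = clientUA
	}
	if passIP && clientIP != "" {
		h["X-Forwarded-For"] = clientIP
		h["X-Real-Ip"] = clientIP
	}
	return h
}

func main() {
	fmt.Println(upstreamHeaders("curl/8.0", "203.0.113.7", true, true))
	fmt.Println(upstreamHeaders("curl/8.0", "203.0.113.7", false, false))
}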

View File

@@ -12,7 +12,6 @@ type Addition struct {
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
PassIPToUpsteam bool `json:"pass_ip_to_upsteam" default:"true"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}
@@ -23,7 +22,6 @@ var config = driver.Config{
DefaultRoot: "/",
CheckStatus: true,
ProxyRangeOption: true,
LinkCacheMode: driver.LinkCacheAuto,
}
func init() {

View File

@@ -1,288 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
import (
"context"
"fmt"
"io"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/ProtonMail/gopenpgp/v2/crypto"
proton_api_bridge "github.com/henrybear327/Proton-API-Bridge"
"github.com/henrybear327/Proton-API-Bridge/common"
"github.com/henrybear327/go-proton-api"
)
type ProtonDrive struct {
model.Storage
Addition
protonDrive *proton_api_bridge.ProtonDrive
apiBase string
appVersion string
protonJson string
userAgent string
sdkVersion string
webDriveAV string
c *proton.Client
// userKR *crypto.KeyRing
addrKRs map[string]*crypto.KeyRing
addrData map[string]proton.Address
MainShare *proton.Share
DefaultAddrKR *crypto.KeyRing
MainShareKR *crypto.KeyRing
}
func (d *ProtonDrive) Config() driver.Config {
return config
}
func (d *ProtonDrive) GetAddition() driver.Additional {
return &d.Addition
}
func (d *ProtonDrive) Init(ctx context.Context) (err error) {
defer func() {
if r := recover(); err == nil && r != nil {
err = fmt.Errorf("ProtonDrive initialization panic: %v", r)
}
}()
if d.Email == "" {
return fmt.Errorf("email is required")
}
if d.Password == "" {
return fmt.Errorf("password is required")
}
config := &common.Config{
AppVersion: d.appVersion,
UserAgent: d.userAgent,
FirstLoginCredential: &common.FirstLoginCredentialData{
Username: d.Email,
Password: d.Password,
TwoFA: d.TwoFACode,
},
EnableCaching: true,
ConcurrentBlockUploadCount: setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers),
//ConcurrentFileCryptoCount: 2,
UseReusableLogin: d.UseReusableLogin && d.ReusableCredential != (common.ReusableCredentialData{}),
ReplaceExistingDraft: true,
ReusableCredential: &d.ReusableCredential,
}
protonDrive, _, err := proton_api_bridge.NewProtonDrive(
ctx,
config,
d.authHandler,
func() {},
)
if err != nil && config.UseReusableLogin {
config.UseReusableLogin = false
protonDrive, _, err = proton_api_bridge.NewProtonDrive(ctx,
config,
d.authHandler,
func() {},
)
if err == nil {
op.MustSaveDriverStorage(d)
}
}
if err != nil {
return fmt.Errorf("failed to initialize ProtonDrive: %w", err)
}
if err := d.initClient(ctx); err != nil {
return err
}
d.protonDrive = protonDrive
d.MainShare = protonDrive.MainShare
if d.RootFolderID == "root" || d.RootFolderID == "" {
d.RootFolderID = protonDrive.RootLink.LinkID
}
d.MainShareKR = protonDrive.MainShareKR
d.DefaultAddrKR = protonDrive.DefaultAddrKR
return nil
}
func (d *ProtonDrive) Drop(ctx context.Context) error {
return nil
}
func (d *ProtonDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
entries, err := d.protonDrive.ListDirectory(ctx, dir.GetID())
if err != nil {
return nil, fmt.Errorf("failed to list directory: %w", err)
}
objects := make([]model.Obj, 0, len(entries))
for _, entry := range entries {
obj := &model.Object{
ID: entry.Link.LinkID,
Name: entry.Name,
Size: entry.Link.Size,
Modified: time.Unix(entry.Link.ModifyTime, 0),
IsFolder: entry.IsFolder,
}
objects = append(objects, obj)
}
return objects, nil
}
func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
link, err := d.getLink(ctx, file.GetID())
if err != nil {
return nil, fmt.Errorf("failed get file link: %+v", err)
}
fileSystemAttrs, err := d.protonDrive.GetActiveRevisionAttrs(ctx, link)
if err != nil {
return nil, fmt.Errorf("failed get file revision: %+v", err)
}
// Size of the decrypted file
size := fileSystemAttrs.Size
rangeReaderFunc := func(rangeCtx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if length < 0 || httpRange.Start+length > size {
length = size - httpRange.Start
}
reader, _, _, err := d.protonDrive.DownloadFile(rangeCtx, link, httpRange.Start)
if err != nil {
return nil, fmt.Errorf("failed start download: %+v", err)
}
return utils.ReadCloser{
Reader: io.LimitReader(reader, length),
Closer: reader,
}, nil
}
expiration := time.Minute
return &model.Link{
RangeReader: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
ContentLength: size,
Expiration: &expiration,
}, nil
}
func (d *ProtonDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
id, err := d.protonDrive.CreateNewFolderByID(ctx, parentDir.GetID(), dirName)
if err != nil {
return nil, fmt.Errorf("failed to create directory: %w", err)
}
newDir := &model.Object{
ID: id,
Name: dirName,
IsFolder: true,
Modified: time.Now(),
}
return newDir, nil
}
func (d *ProtonDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.DirectMove(ctx, srcObj, dstDir)
}
func (d *ProtonDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if d.protonDrive == nil {
return nil, fmt.Errorf("protonDrive bridge is nil")
}
return d.DirectRename(ctx, srcObj, newName)
}
func (d *ProtonDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if srcObj.IsDir() {
return nil, fmt.Errorf("directory copy not supported")
}
srcLink, err := d.getLink(ctx, srcObj.GetID())
if err != nil {
return nil, err
}
reader, linkSize, fileSystemAttrs, err := d.protonDrive.DownloadFile(ctx, srcLink, 0)
if err != nil {
return nil, fmt.Errorf("failed to download source file: %w", err)
}
defer reader.Close()
actualSize := linkSize
if fileSystemAttrs != nil && fileSystemAttrs.Size > 0 {
actualSize = fileSystemAttrs.Size
}
file := &stream.FileStream{
Ctx: ctx,
Obj: &model.Object{
Name: srcObj.GetName(),
// Use the accurate and real size
Size: actualSize,
Modified: srcObj.ModTime(),
},
Reader: reader,
}
defer file.Close()
return d.Put(ctx, dstDir, file, func(percentage float64) {})
}
func (d *ProtonDrive) Remove(ctx context.Context, obj model.Obj) error {
if obj.IsDir() {
return d.protonDrive.MoveFolderToTrashByID(ctx, obj.GetID(), false)
} else {
return d.protonDrive.MoveFileToTrashByID(ctx, obj.GetID())
}
}
func (d *ProtonDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.uploadFile(ctx, dstDir.GetID(), file, up)
}
func (d *ProtonDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
about, err := d.protonDrive.About(ctx)
if err != nil {
return nil, err
}
total := uint64(about.MaxSpace)
free := total - uint64(about.UsedSpace)
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}
var _ driver.Driver = (*ProtonDrive)(nil)
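
Link above serves a byte range by asking the bridge to start the download at the range offset and then capping the stream with io.LimitReader. A runnable sketch of that wrapper; the local readCloser type approximates utils.ReadCloser, and the in-memory reader stands in for the ProtonDrive download:

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloser pairs the limited reader with the closer of the underlying
// stream, so closing the range also closes the download.
type readCloser struct {
	io.Reader
	io.Closer
}

// rangeOf clamps the requested length to the file size and returns a reader
// that yields exactly that many bytes starting at start.
func rangeOf(body io.ReadCloser, start, length, size int64) (io.ReadCloser, error) {
	if length < 0 || start+length > size {
		length = size - start
	}
	// The real driver asks the backend to begin at start; this demo just skips.
	if _, err := io.CopyN(io.Discard, body, start); err != nil {
		return nil, err
	}
	return readCloser{Reader: io.LimitReader(body, length), Closer: body}, nil
}

func main() {
	data := "0123456789"
	rc, err := rangeOf(io.NopCloser(strings.NewReader(data)), 2, 4, int64(len(data)))
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b)) // 2345
}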

View File

@@ -1,56 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/henrybear327/Proton-API-Bridge/common"
)
type Addition struct {
driver.RootID
Email string `json:"email" required:"true" type:"string"`
Password string `json:"password" required:"true" type:"string"`
TwoFACode string `json:"two_fa_code" type:"string"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"100"`
UseReusableLogin bool `json:"use_reusable_login" type:"bool" default:"true" help:"Use reusable login credentials instead of username/password"`
ReusableCredential common.ReusableCredentialData
}
var config = driver.Config{
Name: "ProtonDrive",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: "root",
NoLinkURL: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &ProtonDrive{
Addition: Addition{
UseReusableLogin: true,
},
apiBase: "https://drive.proton.me/api",
appVersion: "windows-drive@1.11.3+rclone+proton",
protonJson: "application/vnd.protonmail.v1+json",
sdkVersion: "js@0.3.0",
userAgent: "ProtonDrive/v1.70.0 (Windows NT 10.0.22000; Win64; x64)",
webDriveAV: "web-drive@5.2.0+0f69f7a8",
}
})
}

View File

@@ -1,38 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
type MoveRequest struct {
ParentLinkID string `json:"ParentLinkID"`
NodePassphrase string `json:"NodePassphrase"`
NodePassphraseSignature *string `json:"NodePassphraseSignature"`
Name string `json:"Name"`
NameSignatureEmail string `json:"NameSignatureEmail"`
Hash string `json:"Hash"`
OriginalHash string `json:"OriginalHash"`
ContentHash *string `json:"ContentHash"` // Maybe null
}
type RenameRequest struct {
Name string `json:"Name"` // PGP encrypted name
NameSignatureEmail string `json:"NameSignatureEmail"` // User's signature email
Hash string `json:"Hash"` // New name hash
OriginalHash string `json:"OriginalHash"` // Current name hash
}
type RenameResponse struct {
Code int `json:"Code"`
}

View File

@@ -1,670 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/henrybear327/go-proton-api"
)
func (d *ProtonDrive) uploadFile(ctx context.Context, parentLinkID string, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
_, err := d.getLink(ctx, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to get parent link: %w", err)
}
var reader io.Reader
// Use buffered reader with larger buffer for better performance
var bufferSize int
// File > 100MB (default)
if file.GetSize() > d.ChunkSize*1024*1024 {
// 256KB for large files
bufferSize = 256 * 1024
// File > 10MB
} else if file.GetSize() > 10*1024*1024 {
// 128KB for medium files
bufferSize = 128 * 1024
} else {
// 64KB for small files
bufferSize = 64 * 1024
}
// reader = bufio.NewReader(file)
reader = bufio.NewReaderSize(file, bufferSize)
reader = &driver.ReaderUpdatingProgress{
Reader: &stream.SimpleReaderWithSize{
Reader: reader,
Size: file.GetSize(),
},
UpdateProgress: up,
}
reader = driver.NewLimitedUploadStream(ctx, reader)
id, _, err := d.protonDrive.UploadFileByReader(ctx, parentLinkID, file.GetName(), file.ModTime(), reader, 0)
if err != nil {
return nil, fmt.Errorf("failed to upload file: %w", err)
}
return &model.Object{
ID: id,
Name: file.GetName(),
Size: file.GetSize(),
Modified: file.ModTime(),
IsFolder: false,
}, nil
}
func (d *ProtonDrive) encryptFileName(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
// Temporary file (request)
tempReq := proton.CreateFileReq{
SignatureAddress: d.MainShare.Creator,
}
// Encrypt the filename
err = tempReq.SetName(name, d.DefaultAddrKR, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to encrypt filename: %w", err)
}
return tempReq.Name, nil
}
func (d *ProtonDrive) generateFileNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
}
parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
if err != nil {
return "", fmt.Errorf("failed to get parent hash key: %w", err)
}
nameHash, err := proton.GetNameHash(name, parentHashKey)
if err != nil {
return "", fmt.Errorf("failed to generate name hash: %w", err)
}
return nameHash, nil
}
func (d *ProtonDrive) getOriginalNameHash(link *proton.Link) (string, error) {
if link == nil {
return "", fmt.Errorf("link cannot be nil")
}
if link.Hash == "" {
return "", fmt.Errorf("link hash is empty")
}
return link.Hash, nil
}
func (d *ProtonDrive) getLink(ctx context.Context, linkID string) (*proton.Link, error) {
if linkID == "" {
return nil, fmt.Errorf("linkID cannot be empty")
}
link, err := d.c.GetLink(ctx, d.MainShare.ShareID, linkID)
if err != nil {
return nil, err
}
return &link, nil
}
func (d *ProtonDrive) getLinkKR(ctx context.Context, link *proton.Link) (*crypto.KeyRing, error) {
if link == nil {
return nil, fmt.Errorf("link cannot be nil")
}
// Root Link or Root Dir
if link.ParentLinkID == "" {
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
if err != nil {
return nil, err
}
return link.GetKeyRing(d.MainShareKR, signatureVerificationKR)
}
// Get parent keyring recursively
parentLink, err := d.getLink(ctx, link.ParentLinkID)
if err != nil {
return nil, err
}
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return nil, err
}
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
if err != nil {
return nil, err
}
return link.GetKeyRing(parentNodeKR, signatureVerificationKR)
}
var (
ErrKeyPassOrSaltedKeyPassMustBeNotNil = errors.New("either keyPass or saltedKeyPass must be not nil")
ErrFailedToUnlockUserKeys = errors.New("failed to unlock user keys")
)
func getAccountKRs(ctx context.Context, c *proton.Client, keyPass, saltedKeyPass []byte) (*crypto.KeyRing, map[string]*crypto.KeyRing, map[string]proton.Address, []byte, error) {
user, err := c.GetUser(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("user %#v", user)
addrsArr, err := c.GetAddresses(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("addr %#v", addr)
if saltedKeyPass == nil {
if keyPass == nil {
return nil, nil, nil, nil, ErrKeyPassOrSaltedKeyPassMustBeNotNil
}
// Due to limitations, salts are stored using cacheCredentialToFile
salts, err := c.GetSalts(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("salts %#v", salts)
saltedKeyPass, err = salts.SaltForKey(keyPass, user.Keys.Primary().ID)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("saltedKeyPass ok")
}
userKR, addrKRs, err := proton.Unlock(user, addrsArr, saltedKeyPass, nil)
if err != nil {
return nil, nil, nil, nil, err
} else if userKR.CountDecryptionEntities() == 0 {
return nil, nil, nil, nil, ErrFailedToUnlockUserKeys
}
addrs := make(map[string]proton.Address)
for _, addr := range addrsArr {
addrs[addr.Email] = addr
}
return userKR, addrKRs, addrs, saltedKeyPass, nil
}
func (d *ProtonDrive) getSignatureVerificationKeyring(emailAddresses []string, verificationAddrKRs ...*crypto.KeyRing) (*crypto.KeyRing, error) {
ret, err := crypto.NewKeyRing(nil)
if err != nil {
return nil, err
}
for _, emailAddress := range emailAddresses {
if addr, ok := d.addrData[emailAddress]; ok {
if addrKR, exists := d.addrKRs[addr.ID]; exists {
err = d.addKeysFromKR(ret, addrKR)
if err != nil {
return nil, err
}
}
}
}
for _, kr := range verificationAddrKRs {
err = d.addKeysFromKR(ret, kr)
if err != nil {
return nil, err
}
}
if ret.CountEntities() == 0 {
return nil, fmt.Errorf("no keyring for signature verification")
}
return ret, nil
}
func (d *ProtonDrive) addKeysFromKR(kr *crypto.KeyRing, newKRs ...*crypto.KeyRing) error {
for i := range newKRs {
for _, key := range newKRs[i].GetKeys() {
err := kr.AddKey(key)
if err != nil {
return err
}
}
}
return nil
}
func (d *ProtonDrive) DirectRename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
// fmt.Printf("DEBUG DirectRename: path=%s, newName=%s", srcObj.GetPath(), newName)
if d.MainShare == nil || d.DefaultAddrKR == nil {
return nil, fmt.Errorf("missing required fields: MainShare=%v, DefaultAddrKR=%v",
d.MainShare != nil, d.DefaultAddrKR != nil)
}
if d.protonDrive == nil {
return nil, fmt.Errorf("protonDrive bridge is nil")
}
srcLink, err := d.getLink(ctx, srcObj.GetID())
if err != nil {
return nil, fmt.Errorf("failed to find source: %w", err)
}
parentLinkID := srcLink.ParentLinkID
if parentLinkID == "" {
return nil, fmt.Errorf("cannot rename root folder")
}
encryptedName, err := d.encryptFileName(ctx, newName, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to encrypt filename: %w", err)
}
newHash, err := d.generateFileNameHash(ctx, newName, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to generate new hash: %w", err)
}
originalHash, err := d.getOriginalNameHash(srcLink)
if err != nil {
return nil, fmt.Errorf("failed to get original hash: %w", err)
}
renameReq := RenameRequest{
Name: encryptedName,
NameSignatureEmail: d.MainShare.Creator,
Hash: newHash,
OriginalHash: originalHash,
}
err = d.executeRenameAPI(ctx, srcLink.LinkID, renameReq)
if err != nil {
return nil, fmt.Errorf("rename API call failed: %w", err)
}
return &model.Object{
ID: srcLink.LinkID,
Name: newName,
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
func (d *ProtonDrive) executeRenameAPI(ctx context.Context, linkID string, req RenameRequest) error {
renameURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/rename",
d.MainShare.VolumeID, linkID)
reqBody, err := json.Marshal(req)
if err != nil {
return fmt.Errorf("failed to marshal rename request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "PUT", renameURL, bytes.NewReader(reqBody))
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
}
httpReq.Header.Set("Content-Type", "application/json")
httpReq.Header.Set("Accept", d.protonJson)
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)
client := &http.Client{}
resp, err := client.Do(httpReq)
if err != nil {
return fmt.Errorf("failed to execute rename request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("rename failed with status %d", resp.StatusCode)
}
var renameResp RenameResponse
if err := json.NewDecoder(resp.Body).Decode(&renameResp); err != nil {
return fmt.Errorf("failed to decode rename response: %w", err)
}
if renameResp.Code != 1000 {
return fmt.Errorf("rename failed with code %d", renameResp.Code)
}
return nil
}
func (d *ProtonDrive) executeMoveAPI(ctx context.Context, linkID string, req MoveRequest) error {
// fmt.Printf("DEBUG Move Request - Name: %s\n", req.Name)
// fmt.Printf("DEBUG Move Request - Hash: %s\n", req.Hash)
// fmt.Printf("DEBUG Move Request - OriginalHash: %s\n", req.OriginalHash)
// fmt.Printf("DEBUG Move Request - ParentLinkID: %s\n", req.ParentLinkID)
// fmt.Printf("DEBUG Move Request - Name length: %d\n", len(req.Name))
// fmt.Printf("DEBUG Move Request - NameSignatureEmail: %s\n", req.NameSignatureEmail)
// fmt.Printf("DEBUG Move Request - ContentHash: %v\n", req.ContentHash)
// fmt.Printf("DEBUG Move Request - NodePassphrase length: %d\n", len(req.NodePassphrase))
// fmt.Printf("DEBUG Move Request - NodePassphraseSignature length: %d\n", len(req.NodePassphraseSignature))
// fmt.Printf("DEBUG Move Request - SrcLinkID: %s\n", linkID)
// fmt.Printf("DEBUG Move Request - DstParentLinkID: %s\n", req.ParentLinkID)
// fmt.Printf("DEBUG Move Request - ShareID: %s\n", d.MainShare.ShareID)
srcLink, _ := d.getLink(ctx, linkID)
if srcLink != nil && srcLink.ParentLinkID == req.ParentLinkID {
return fmt.Errorf("cannot move to same parent directory")
}
moveURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/move",
d.MainShare.VolumeID, linkID)
reqBody, err := json.Marshal(req)
if err != nil {
return fmt.Errorf("failed to marshal move request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "PUT", moveURL, bytes.NewReader(reqBody))
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
}
httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)
httpReq.Header.Set("Accept", d.protonJson)
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
httpReq.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(httpReq)
if err != nil {
return fmt.Errorf("failed to execute move request: %w", err)
}
defer resp.Body.Close()
var moveResp RenameResponse
if err := json.NewDecoder(resp.Body).Decode(&moveResp); err != nil {
return fmt.Errorf("failed to decode move response: %w", err)
}
if moveResp.Code != 1000 {
return fmt.Errorf("move operation failed with code: %d", moveResp.Code)
}
return nil
}
func (d *ProtonDrive) DirectMove(ctx context.Context, srcObj model.Obj, dstDir model.Obj) (model.Obj, error) {
// fmt.Printf("DEBUG DirectMove: srcPath=%s, dstPath=%s", srcObj.GetPath(), dstDir.GetPath())
srcLink, err := d.getLink(ctx, srcObj.GetID())
if err != nil {
return nil, fmt.Errorf("failed to find source: %w", err)
}
dstParentLinkID := dstDir.GetID()
if srcObj.IsDir() {
// Check if destination is a descendant of source
if err := d.checkCircularMove(ctx, srcLink.LinkID, dstParentLinkID); err != nil {
return nil, err
}
}
// Encrypt the filename for the new location
encryptedName, err := d.encryptFileName(ctx, srcObj.GetName(), dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to encrypt filename: %w", err)
}
newHash, err := d.generateNameHash(ctx, srcObj.GetName(), dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to generate new hash: %w", err)
}
originalHash, err := d.getOriginalNameHash(srcLink)
if err != nil {
return nil, fmt.Errorf("failed to get original hash: %w", err)
}
// Re-encrypt node passphrase for new parent context
reencryptedPassphrase, err := d.reencryptNodePassphrase(ctx, srcLink, dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to re-encrypt node passphrase: %w", err)
}
moveReq := MoveRequest{
ParentLinkID: dstParentLinkID,
NodePassphrase: reencryptedPassphrase,
Name: encryptedName,
NameSignatureEmail: d.MainShare.Creator,
Hash: newHash,
OriginalHash: originalHash,
ContentHash: nil,
// *** Causes rejection ***
/* NodePassphraseSignature: srcLink.NodePassphraseSignature, */
}
//fmt.Printf("DEBUG MoveRequest validation:\n")
//fmt.Printf(" Name length: %d\n", len(moveReq.Name))
//fmt.Printf(" Hash: %s\n", moveReq.Hash)
//fmt.Printf(" OriginalHash: %s\n", moveReq.OriginalHash)
//fmt.Printf(" NodePassphrase length: %d\n", len(moveReq.NodePassphrase))
/* fmt.Printf(" NodePassphraseSignature length: %d\n", len(moveReq.NodePassphraseSignature)) */
//fmt.Printf(" NameSignatureEmail: %s\n", moveReq.NameSignatureEmail)
err = d.executeMoveAPI(ctx, srcLink.LinkID, moveReq)
if err != nil {
return nil, fmt.Errorf("move API call failed: %w", err)
}
return &model.Object{
ID: srcLink.LinkID,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
func (d *ProtonDrive) reencryptNodePassphrase(ctx context.Context, srcLink *proton.Link, dstParentLinkID string) (string, error) {
// Get source parent link with metadata
srcParentLink, err := d.getLink(ctx, srcLink.ParentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get source parent link: %w", err)
}
// Get source parent keyring using link object
srcParentKR, err := d.getLinkKR(ctx, srcParentLink)
if err != nil {
return "", fmt.Errorf("failed to get source parent keyring: %w", err)
}
// Get destination parent link with metadata
dstParentLink, err := d.getLink(ctx, dstParentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get destination parent link: %w", err)
}
// Get destination parent keyring using link object
dstParentKR, err := d.getLinkKR(ctx, dstParentLink)
if err != nil {
return "", fmt.Errorf("failed to get destination parent keyring: %w", err)
}
// Re-encrypt the node passphrase from source parent context to destination parent context
reencryptedPassphrase, err := reencryptKeyPacket(srcParentKR, dstParentKR, d.DefaultAddrKR, srcLink.NodePassphrase)
if err != nil {
return "", fmt.Errorf("failed to re-encrypt key packet: %w", err)
}
return reencryptedPassphrase, nil
}
func (d *ProtonDrive) generateNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
// Get signature verification keyring
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
}
parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
if err != nil {
return "", fmt.Errorf("failed to get parent hash key: %w", err)
}
nameHash, err := proton.GetNameHash(name, parentHashKey)
if err != nil {
return "", fmt.Errorf("failed to generate name hash: %w", err)
}
return nameHash, nil
}
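// reencryptKeyPacket splits the armored passphrase into key and data packets, decrypts the session key
// with the source keyring, re-encrypts it with the destination keyring, and re-assembles the message.
// The address keyring parameter is currently unused.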
func reencryptKeyPacket(srcKR, dstKR, _ *crypto.KeyRing, passphrase string) (string, error) { // addrKR (3)
oldSplitMessage, err := crypto.NewPGPSplitMessageFromArmored(passphrase)
if err != nil {
return "", err
}
sessionKey, err := srcKR.DecryptSessionKey(oldSplitMessage.KeyPacket)
if err != nil {
return "", err
}
newKeyPacket, err := dstKR.EncryptSessionKey(sessionKey)
if err != nil {
return "", err
}
newSplitMessage := crypto.NewPGPSplitMessage(newKeyPacket, oldSplitMessage.DataPacket)
return newSplitMessage.GetArmored()
}
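// checkCircularMove walks up the destination's ancestor chain toward the root and rejects the move
// if the source link is encountered, which would place a folder inside itself or one of its subfolders.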
func (d *ProtonDrive) checkCircularMove(ctx context.Context, srcLinkID, dstParentLinkID string) error {
currentLinkID := dstParentLinkID
for currentLinkID != "" && currentLinkID != d.RootFolderID {
if currentLinkID == srcLinkID {
return fmt.Errorf("cannot move folder into itself or its subfolder")
}
currentLink, err := d.getLink(ctx, currentLinkID)
if err != nil {
return err
}
currentLinkID = currentLink.ParentLinkID
}
return nil
}
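// authHandler persists refreshed session tokens and rebuilds the API client so later requests use them.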
func (d *ProtonDrive) authHandler(auth proton.Auth) {
if auth.AccessToken != d.ReusableCredential.AccessToken || auth.RefreshToken != d.ReusableCredential.RefreshToken {
d.ReusableCredential.UID = auth.UID
d.ReusableCredential.AccessToken = auth.AccessToken
d.ReusableCredential.RefreshToken = auth.RefreshToken
if err := d.initClient(context.Background()); err != nil {
fmt.Printf("ProtonDrive: failed to reinitialize client after auth refresh: %v\n", err)
}
op.MustSaveDriverStorage(d)
}
}
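// initClient recreates the Proton client from the stored credentials and refreshes the cached
// address keyrings and address data.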
func (d *ProtonDrive) initClient(ctx context.Context) error {
clientOptions := []proton.Option{
proton.WithAppVersion(d.appVersion),
proton.WithUserAgent(d.userAgent),
}
manager := proton.New(clientOptions...)
d.c = manager.NewClient(d.ReusableCredential.UID, d.ReusableCredential.AccessToken, d.ReusableCredential.RefreshToken)
saltedKeyPassBytes, err := base64.StdEncoding.DecodeString(d.ReusableCredential.SaltedKeyPass)
if err != nil {
return fmt.Errorf("failed to decode salted key pass: %w", err)
}
_, addrKRs, addrs, _, err := getAccountKRs(ctx, d.c, nil, saltedKeyPassBytes)
if err != nil {
return fmt.Errorf("failed to get account keyrings: %w", err)
}
d.addrKRs = addrKRs
d.addrData = addrs
return nil
}

View File

@@ -217,10 +217,11 @@ func (d *QuarkOrUC) GetDetails(ctx context.Context) (*model.StorageDetails, erro
if err != nil {
return nil, err
}
used := memberInfo.Data.UseCapacity
total := memberInfo.Data.TotalCapacity
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: memberInfo.Data.TotalCapacity,
FreeSpace: memberInfo.Data.TotalCapacity - memberInfo.Data.UseCapacity,
},
}, nil
}

View File

@@ -10,7 +10,6 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
@@ -25,10 +24,9 @@ import (
type S3 struct {
model.Storage
Addition
Session *session.Session
client *s3.S3
linkClient *s3.S3
directUploadClient *s3.S3
Session *session.Session
client *s3.S3
linkClient *s3.S3
config driver.Config
cron *cron.Cron
@@ -54,18 +52,16 @@ func (d *S3) Init(ctx context.Context) error {
if err != nil {
log.Errorln("Doge init session error:", err)
}
d.client = d.getClient(ClientTypeNormal)
d.linkClient = d.getClient(ClientTypeLink)
d.directUploadClient = d.getClient(ClientTypeDirectUpload)
d.client = d.getClient(false)
d.linkClient = d.getClient(true)
})
}
err := d.initSession()
if err != nil {
return err
}
d.client = d.getClient(ClientTypeNormal)
d.linkClient = d.getClient(ClientTypeLink)
d.directUploadClient = d.getClient(ClientTypeDirectUpload)
d.client = d.getClient(false)
d.linkClient = d.getClient(true)
return nil
}
@@ -214,33 +210,4 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
return err
}
func (d *S3) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
func (d *S3) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
path := getKey(stdpath.Join(dstDir.GetPath(), fileName), false)
req, _ := d.directUploadClient.PutObjectRequest(&s3.PutObjectInput{
Bucket: &d.Bucket,
Key: &path,
})
if req == nil {
return nil, fmt.Errorf("failed to create PutObject request")
}
link, err := req.Presign(time.Hour * time.Duration(d.SignURLExpire))
if err != nil {
return nil, err
}
return &model.HttpDirectUploadInfo{
UploadURL: link,
Method: "PUT",
}, nil
}
var _ driver.Driver = (*S3)(nil)

View File

@@ -21,8 +21,6 @@ type Addition struct {
ListObjectVersion string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
RemoveBucket bool `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
AddFilenameToDisposition bool `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false"`
DirectUploadHost string `json:"direct_upload_host" required:"false"`
}
func init() {

View File

@@ -41,15 +41,9 @@ func (d *S3) initSession() error {
return err
}
const (
ClientTypeNormal = iota
ClientTypeLink
ClientTypeDirectUpload
)
func (d *S3) getClient(clientType int) *s3.S3 {
func (d *S3) getClient(link bool) *s3.S3 {
client := s3.New(d.Session)
if clientType == ClientTypeLink && d.CustomHost != "" {
if link && d.CustomHost != "" {
client.Handlers.Build.PushBack(func(r *request.Request) {
if r.HTTPRequest.Method != http.MethodGet {
return
@@ -64,20 +58,6 @@ func (d *S3) getClient(clientType int) *s3.S3 {
}
})
}
if clientType == ClientTypeDirectUpload && d.DirectUploadHost != "" {
client.Handlers.Build.PushBack(func(r *request.Request) {
if r.HTTPRequest.Method != http.MethodPut {
return
}
split := strings.SplitN(d.DirectUploadHost, "://", 2)
if utils.SliceContains([]string{"http", "https"}, split[0]) {
r.HTTPRequest.URL.Scheme = split[0]
r.HTTPRequest.URL.Host = split[1]
} else {
r.HTTPRequest.URL.Host = d.DirectUploadHost
}
})
}
return client
}

View File

@@ -69,10 +69,15 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
}
if !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
RequireReference: true,
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}

View File

@@ -16,12 +16,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "SFTP",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: "/",
CheckStatus: true,
NoLinkURL: true,
Name: "SFTP",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
CheckStatus: true,
NoLinkURL: true,
}
func init() {

View File

@@ -86,10 +86,15 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
Limiter: stream.ServerDownloadLimit,
Ctx: ctx,
}
if !d.Config().OnlyLinkMFile {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
SyncClosers: utils.NewSyncClosers(remoteFile),
RequireReference: true,
MFile: mFile,
SyncClosers: utils.NewSyncClosers(remoteFile),
}, nil
}

View File

@@ -14,12 +14,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "SMB",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: ".",
NoCache: true,
NoLinkURL: true,
Name: "SMB",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: ".",
NoCache: true,
NoLinkURL: true,
}
func init() {

View File

@@ -12,10 +12,8 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
log "github.com/sirupsen/logrus"
)
type Strm struct {
@@ -41,9 +39,6 @@ func (d *Strm) Init(ctx context.Context) error {
if d.Paths == "" {
return errors.New("paths is required")
}
if d.SaveStrmToLocal && len(d.SaveStrmLocalPath) <= 0 {
return errors.New("SaveStrmLocalPath is required")
}
d.pathMap = make(map[string][]string)
for _, path := range strings.Split(d.Paths, "\n") {
path = strings.TrimSpace(path)
@@ -52,13 +47,6 @@ func (d *Strm) Init(ctx context.Context) error {
}
k, v := getPair(path)
d.pathMap[k] = append(d.pathMap[k], v)
if d.SaveStrmToLocal {
err := InsertStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
if err != nil {
log.Errorf("insert strmTrie error: %v", err)
continue
}
}
}
if len(d.pathMap) == 1 {
for k := range d.pathMap {
@@ -70,51 +58,26 @@ func (d *Strm) Init(ctx context.Context) error {
d.autoFlatten = false
}
var supportTypes []string
if d.FilterFileTypes == "" {
d.FilterFileTypes = "mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac"
}
supportTypes = strings.Split(d.FilterFileTypes, ",")
d.supportSuffix = map[string]struct{}{}
for _, ext := range supportTypes {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.supportSuffix[ext] = struct{}{}
}
}
var downloadTypes []string
if d.DownloadFileTypes == "" {
d.DownloadFileTypes = "ass,srt,vtt,sub,strm"
}
downloadTypes = strings.Split(d.DownloadFileTypes, ",")
d.downloadSuffix = map[string]struct{}{}
for _, ext := range downloadTypes {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.downloadSuffix[ext] = struct{}{}
}
}
if d.Version != 3 {
types := strings.Split("mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac", ",")
d.supportSuffix = supportSuffix()
if d.FilterFileTypes != "" {
types := strings.Split(d.FilterFileTypes, ",")
for _, ext := range types {
if _, ok := d.supportSuffix[ext]; !ok {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.supportSuffix[ext] = struct{}{}
supportTypes = append(supportTypes, ext)
}
}
d.FilterFileTypes = strings.Join(supportTypes, ",")
}
types = strings.Split("ass,srt,vtt,sub,strm", ",")
for _, ext := range types {
if _, ok := d.downloadSuffix[ext]; !ok {
d.supportSuffix[ext] = struct{}{}
downloadTypes = append(downloadTypes, ext)
d.downloadSuffix = downloadSuffix()
if d.DownloadFileTypes != "" {
downloadTypes := strings.Split(d.DownloadFileTypes, ",")
for _, ext := range downloadTypes {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.downloadSuffix[ext] = struct{}{}
}
}
d.DownloadFileTypes = strings.Join(downloadTypes, ",")
d.Version = 3
}
return nil
}
@@ -123,9 +86,6 @@ func (d *Strm) Drop(ctx context.Context) error {
d.pathMap = nil
d.downloadSuffix = nil
d.supportSuffix = nil
for _, path := range strings.Split(d.Paths, "\n") {
RemoveStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
}
return nil
}
@@ -196,7 +156,7 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
if file.GetID() == "strm" {
link := d.getLink(ctx, file.GetPath())
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(int64(len(link)), strings.NewReader(link)),
MFile: strings.NewReader(link),
}, nil
}
// ftp,s3

View File

@@ -1,175 +0,0 @@
package strm
import (
"context"
"errors"
"os"
stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
log "github.com/sirupsen/logrus"
"github.com/tchap/go-patricia/v2/patricia"
)
var strmTrie = patricia.NewTrie()
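// UpdateLocalStrm regenerates local .strm files for every registered Strm driver whose configured
// path is a prefix of the updated path, and prunes local files that no longer have a matching object.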
func UpdateLocalStrm(ctx context.Context, path string, objs []model.Obj) {
path = utils.FixAndCleanPath(path)
updateLocal := func(driver *Strm, basePath string, objs []model.Obj) {
relParent := strings.TrimPrefix(basePath, driver.MountPath)
localParentPath := stdpath.Join(driver.SaveStrmLocalPath, relParent)
for _, obj := range objs {
localPath := stdpath.Join(localParentPath, obj.GetName())
generateStrm(ctx, driver, obj, localPath)
}
deleteExtraFiles(localParentPath, objs)
}
_ = strmTrie.VisitPrefixes(patricia.Prefix(path), func(needPathPrefix patricia.Prefix, item patricia.Item) error {
strmDrivers := item.([]*Strm)
needPath := string(needPathPrefix)
restPath := strings.TrimPrefix(path, needPath)
if len(restPath) > 0 && restPath[0] != '/' {
return nil
}
for _, strmDriver := range strmDrivers {
strmObjs := strmDriver.convert2strmObjs(ctx, path, objs)
updateLocal(strmDriver, stdpath.Join(stdpath.Base(needPath), restPath), strmObjs)
}
return nil
})
}
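// InsertStrm registers a Strm driver under dstPath in the trie so that object updates below that path
// trigger local .strm generation.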
func InsertStrm(dstPath string, d *Strm) error {
prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
existing := strmTrie.Get(prefix)
if existing == nil {
if !strmTrie.Insert(prefix, []*Strm{d}) {
return errors.New("failed to insert strm")
}
return nil
}
if lst, ok := existing.([]*Strm); ok {
strmTrie.Set(prefix, append(lst, d))
} else {
return errors.New("invalid trie item type")
}
return nil
}
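// RemoveStrm unregisters a Strm driver from dstPath, deleting the trie entry when no drivers remain.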
func RemoveStrm(dstPath string, d *Strm) {
prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
existing := strmTrie.Get(prefix)
if existing == nil {
return
}
lst, ok := existing.([]*Strm)
if !ok {
return
}
if len(lst) == 1 && lst[0] == d {
strmTrie.Delete(prefix)
return
}
for i, di := range lst {
if di == d {
newList := append(lst[:i], lst[i+1:]...)
strmTrie.Set(prefix, newList)
return
}
}
}
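// generateStrm mirrors obj to localPath: directories are created directly, files are fetched through the
// driver's Link and copied into a local file.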
func generateStrm(ctx context.Context, driver *Strm, obj model.Obj, localPath string) {
if obj.IsDir() {
err := utils.CreateNestedDirectory(localPath)
if err != nil {
log.Warnf("failed to generate strm dir %s: failed to create dir: %v", localPath, err)
return
}
} else {
link, err := driver.Link(ctx, obj, model.LinkArgs{})
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to link: %v", localPath, err)
return
}
defer link.Close()
size := link.ContentLength
if size <= 0 {
size = obj.GetSize()
}
rrf, err := stream.GetRangeReaderFromLink(size, link)
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to get range reader: %v", localPath, err)
return
}
rc, err := rrf.RangeRead(ctx, http_range.Range{Length: -1})
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to read range: %v", localPath, err)
return
}
defer rc.Close()
file, err := utils.CreateNestedFile(localPath)
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to create local file: %v", localPath, err)
return
}
defer file.Close()
if _, err := utils.CopyWithBuffer(file, rc); err != nil {
log.Warnf("failed to generate strm of obj %s: copy failed: %v", localPath, err)
}
}
}
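// deleteExtraFiles removes local files under localPath that no longer correspond to any object in objs.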
func deleteExtraFiles(localPath string, objs []model.Obj) {
localFiles, err := getLocalFiles(localPath)
if err != nil {
log.Errorf("Failed to read local files from %s: %v", localPath, err)
return
}
objsSet := make(map[string]struct{})
for _, obj := range objs {
if obj.IsDir() {
continue
}
objsSet[stdpath.Join(localPath, obj.GetName())] = struct{}{}
}
for _, localFile := range localFiles {
if _, exists := objsSet[localFile]; !exists {
err := os.Remove(localFile)
if err != nil {
log.Errorf("Failed to delete file: %s, error: %v\n", localFile, err)
} else {
log.Infof("Deleted file %s", localFile)
}
}
}
}
func getLocalFiles(localPath string) ([]string, error) {
var files []string
entries, err := os.ReadDir(localPath)
if err != nil {
return nil, err
}
for _, entry := range entries {
if !entry.IsDir() {
files = append(files, stdpath.Join(localPath, entry.Name()))
}
}
return files, nil
}
func init() {
op.RegisterObjsUpdateHook(UpdateLocalStrm)
}

View File

@@ -8,23 +8,21 @@ import (
type Addition struct {
Paths string `json:"paths" required:"true" type:"text"`
SiteUrl string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass,srt,vtt,sub,strm" required:"false" help:"Files that need to be downloaded alongside strm files (usually subtitles)"`
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac" required:"false" help:"Supported suffixes for strm files"`
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supported suffixes for strm files"`
DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass" required:"false" help:"Files that need to be downloaded alongside strm files (usually subtitles)"`
EncodePath bool `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
WithoutUrl bool `json:"withoutUrl" default:"false" help:"strm file content without URL prefix"`
SaveStrmToLocal bool `json:"SaveStrmToLocal" default:"false" help:"save strm file locally"`
SaveStrmLocalPath string `json:"SaveStrmLocalPath" type:"text" help:"save strm file local path"`
Version int
LocalModel bool `json:"localModel" default:"false" help:"enable local mode"`
}
var config = driver.Config{
Name: "Strm",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
NoUpload: true,
DefaultRoot: "/",
NoLinkURL: true,
Name: "Strm",
LocalSort: true,
NoCache: true,
NoUpload: true,
DefaultRoot: "/",
OnlyLinkMFile: true,
OnlyProxy: true,
NoLinkURL: true,
}
func init() {

drivers/strm/types.go Normal file
View File

@@ -0,0 +1,36 @@
package strm
func supportSuffix() map[string]struct{} {
return map[string]struct{}{
// video
"mp4": {},
"mkv": {},
"flv": {},
"avi": {},
"wmv": {},
"ts": {},
"rmvb": {},
"webm": {},
// audio
"mp3": {},
"flac": {},
"aac": {},
"wav": {},
"ogg": {},
"m4a": {},
"wma": {},
"alac": {},
}
}
func downloadSuffix() map[string]struct{} {
return map[string]struct{}{
// strm
"strm": {},
// subtitles
"ass": {},
"srt": {},
"vtt": {},
"sub": {},
}
}

View File

@@ -58,10 +58,7 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
if err != nil {
return nil, err
}
return d.convert2strmObjs(ctx, reqPath, objs), nil
}
func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []model.Obj) []model.Obj {
var validObjs []model.Obj
for _, obj := range objs {
id, name, path := "", obj.GetName(), ""
@@ -69,12 +66,12 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
if !obj.IsDir() {
path = stdpath.Join(reqPath, obj.GetName())
ext := strings.ToLower(utils.Ext(name))
if _, ok := d.downloadSuffix[ext]; ok {
size = obj.GetSize()
} else if _, ok := d.supportSuffix[ext]; ok {
if _, ok := d.supportSuffix[ext]; ok {
id = "strm"
name = strings.TrimSuffix(name, ext) + "strm"
size = int64(len(d.getLink(ctx, path)))
} else if _, ok := d.downloadSuffix[ext]; ok {
size = obj.GetSize()
} else {
continue
}
@@ -87,11 +84,13 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
thumb, ok := model.GetThumb(obj)
if !ok {
validObjs = append(validObjs, &objRes)
continue
}
validObjs = append(validObjs, &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
@@ -99,7 +98,7 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
},
})
}
return validObjs
return validObjs, nil
}
func (d *Strm) getLink(ctx context.Context, path string) string {
@@ -111,7 +110,7 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
signPath := sign.Sign(path)
finalPath = fmt.Sprintf("%s?sign=%s", finalPath, signPath)
}
if d.WithoutUrl {
if d.LocalModel {
return finalPath
}
apiUrl := d.SiteUrl
@@ -120,9 +119,7 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
} else {
apiUrl = common.GetApiUrl(ctx)
}
if !strings.HasPrefix(finalPath, "/") {
finalPath = "/" + finalPath
}
return fmt.Sprintf("%s/d%s",
apiUrl,
finalPath)

View File

@@ -164,7 +164,7 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
if err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{
"partName": func() string {
digits := len(strconv.Itoa(totalParts))
digits := len(fmt.Sprintf("%d", totalParts))
return file.GetName() + fmt.Sprintf(".%0*d", digits, 1)
}(),
"partNo": strconv.Itoa(1),
@@ -333,7 +333,7 @@ func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task ch
err := d.singleUploadRequest(fileId, func(req *resty.Request) {
uploadParams := map[string]string{
"partName": func() string {
digits := len(strconv.Itoa(totalParts))
digits := len(fmt.Sprintf("%d", totalParts))
return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx)
}(),
"partNo": strconv.Itoa(task.chunkIdx),

View File

@@ -16,6 +16,7 @@ type Addition struct {
var config = driver.Config{
Name: "Template",
LocalSort: false,
OnlyLinkMFile: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,

View File

@@ -88,7 +88,7 @@ func (d *Terabox) request(rurl string, method string, callback base.ReqCallback,
return nil, err
}
errno := utils.Json.Get(res.Body(), "errno").ToInt()
if errno == 4000023 || errno == 450016 {
if errno == 4000023 || errno == 4500016 {
// reget jsToken
err = d.resetJsToken()
if err != nil {

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
@@ -69,7 +68,6 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
PackageName: "com.xunlei.downloadprovider",
UserAgent: "ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)",
DownloadUserAgent: "Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)",
Space: x.Space,
refreshCTokenCk: func(token string) {
x.CaptchaToken = token
op.MustSaveDriverStorage(x)
@@ -169,7 +167,6 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {
UserAgent: x.UserAgent,
DownloadUserAgent: x.DownloadUserAgent,
UseVideoUrl: x.UseVideoUrl,
Space: x.Space,
refreshCTokenCk: func(token string) {
x.CaptchaToken = token
@@ -284,7 +281,7 @@ func (xc *XunLeiCommon) Link(ctx context.Context, file model.Obj, args model.Lin
_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodGet, func(r *resty.Request) {
r.SetContext(ctx)
r.SetPathParam("fileID", file.GetID())
r.SetQueryParam("space", xc.Space)
//r.SetQueryParam("space", "")
}, &lFile)
if err != nil {
return nil, err
@@ -325,7 +322,6 @@ func (xc *XunLeiCommon) MakeDir(ctx context.Context, parentDir model.Obj, dirNam
"kind": FOLDER,
"name": dirName,
"parent_id": parentDir.GetID(),
"space": xc.Space,
})
}, nil)
return err
@@ -335,9 +331,8 @@ func (xc *XunLeiCommon) Move(ctx context.Context, srcObj, dstDir model.Obj) erro
_, err := xc.Request(FILE_API_URL+":batchMove", http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&base.Json{
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
"space": xc.Space,
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
})
}, nil)
return err
@@ -347,10 +342,7 @@ func (xc *XunLeiCommon) Rename(ctx context.Context, srcObj model.Obj, newName st
_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodPatch, func(r *resty.Request) {
r.SetContext(ctx)
r.SetPathParam("fileID", srcObj.GetID())
r.SetBody(&base.Json{
"name": newName,
"space": xc.Space,
})
r.SetBody(&base.Json{"name": newName})
}, nil)
return err
}
@@ -359,9 +351,8 @@ func (xc *XunLeiCommon) Copy(ctx context.Context, srcObj, dstDir model.Obj) erro
_, err := xc.Request(FILE_API_URL+":batchCopy", http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&base.Json{
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
"space": xc.Space,
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
})
}, nil)
return err
@@ -371,7 +362,6 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
_, err := xc.Request(FILE_API_URL+"/{fileID}/trash", http.MethodPatch, func(r *resty.Request) {
r.SetContext(ctx)
r.SetPathParam("fileID", obj.GetID())
r.SetQueryParam("space", xc.Space)
r.SetBody("{}")
}, nil)
return err
@@ -397,7 +387,6 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
"size": file.GetSize(),
"hash": gcid,
"upload_type": UPLOAD_TYPE_RESUMABLE,
"space": xc.Space,
})
}, &resp)
if err != nil {
@@ -441,7 +430,7 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
_, err := xc.Request(FILE_API_URL, http.MethodGet, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"space": xc.Space,
"space": "",
"__type": "drive",
"refresh": "true",
"__sync": "true",
@@ -451,17 +440,6 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
"limit": "100",
"filters": `{"phase":{"eq":"PHASE_TYPE_COMPLETE"},"trashed":{"eq":false}}`,
})
// Fetch disk mount directories and related paths
if xc.Space != "" {
r.SetQueryParamsFromValues(url.Values{
"with": []string{
"withCategoryDiskMountPath",
"withCategoryDriveCachePath",
"withCategoryHistoryDownloadPath",
"withReadOnlyFS",
},
})
}
}, &fileList)
if err != nil {
return nil, err
@@ -598,7 +576,6 @@ func (xc *XunLeiCommon) OfflineDownload(ctx context.Context, fileUrl string, par
"name": fileName,
"parent_id": parentDir.GetID(),
"upload_type": UPLOAD_TYPE_URL,
"space": xc.Space,
"url": base.Json{
"url": fileUrl,
},
@@ -625,7 +602,6 @@ func (xc *XunLeiCommon) OfflineList(ctx context.Context, nextPageToken string) (
"type": "offline",
"limit": "10000",
"page_token": nextPageToken,
"space": xc.Space,
})
}, &resp)
@@ -642,7 +618,6 @@ func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string
SetQueryParams(map[string]string{
"task_ids": strings.Join(taskIDs, ","),
"delete_files": strconv.FormatBool(deleteFiles),
"space": xc.Space,
})
}, nil)
if err != nil {

View File

@@ -46,8 +46,6 @@ type ExpertAddition struct {
// Prefer the video URL over the download URL
UseVideoUrl bool `json:"use_video_url"`
Space string `json:"space" default:"" help:"device id for remote device"`
}
// Login fingerprint, used to decide whether to log in again
@@ -82,8 +80,6 @@ type Addition struct {
CreditKey string `json:"credit_key" help:"credit key,used for login"`
// Login device ID
DeviceID string `json:"device_id" default:""`
Space string `json:"space" default:"" help:"device id for remote device"`
}
// Login fingerprint, used to decide whether to log in again
@@ -94,6 +90,7 @@ func (i *Addition) GetIdentity() string {
var config = driver.Config{
Name: "Thunder",
LocalSort: true,
OnlyProxy: true,
}
var configExpert = driver.Config{

View File

@@ -68,7 +68,6 @@ type Common struct {
UserAgent string
DownloadUserAgent string
UseVideoUrl bool
Space string
// Callback invoked after the captcha token is refreshed successfully
refreshCTokenCk func(token string)

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
@@ -841,7 +840,7 @@ func (xc *XunLeiBrowserCommon) OfflineList(ctx context.Context, nextPageToken st
func (xc *XunLeiBrowserCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string) error {
queryParams := map[string]string{
"task_ids": strings.Join(taskIDs, ","),
"_t": strconv.FormatInt(time.Now().UnixMilli(), 10),
"_t": fmt.Sprintf("%d", time.Now().UnixMilli()),
}
if xc.UseFluentPlay {
queryParams["space"] = ThunderBrowserDriveFluentPlayFolderType

View File

@@ -2,11 +2,11 @@ package virtual
import (
"context"
"io"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
)
@@ -42,14 +42,16 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return res, nil
}
type DummyMFile struct{}
type DummyMFile struct {
io.Reader
}
func (f DummyMFile) Read(p []byte) (n int, err error) {
return random.Rand.Read(p)
return f.Reader.Read(p)
}
func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
return random.Rand.Read(p)
return f.Reader.Read(p)
}
func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
@@ -58,7 +60,7 @@ func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{
RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), DummyMFile{}),
MFile: DummyMFile{Reader: random.Rand},
}, nil
}

View File

@@ -14,11 +14,11 @@ type Addition struct {
}
var config = driver.Config{
Name: "Virtual",
LocalSort: true,
OnlyProxy: true,
NeedMs: true,
NoLinkURL: true,
Name: "Virtual",
OnlyLinkMFile: true,
LocalSort: true,
NeedMs: true,
NoLinkURL: true,
}
func init() {

go.mod
View File

@@ -5,14 +5,12 @@ go 1.23.4
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
github.com/KirCute/zip v1.0.1
github.com/OpenListTeam/go-cache v0.1.0
github.com/OpenListTeam/sftpd-openlist v1.0.1
github.com/OpenListTeam/tache v0.2.1
github.com/OpenListTeam/tache v0.2.0
github.com/OpenListTeam/times v0.1.0
github.com/OpenListTeam/wopan-sdk-go v0.1.5
github.com/ProtonMail/go-crypto v1.3.0
github.com/ProtonMail/gopenpgp/v2 v2.9.0
github.com/SheltonZhu/115driver v1.1.1
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/avast/retry-go v3.0.0+incompatible
@@ -40,9 +38,7 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499
github.com/hekmon/transmissionrpc/v3 v3.0.0
github.com/henrybear327/go-proton-api v1.0.0
github.com/ipfs/go-ipfs-api v0.7.0
github.com/itsHenry35/gofakes3 v0.0.8
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
@@ -56,8 +52,8 @@ require (
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.9
github.com/pquerna/otp v1.5.0
github.com/quic-go/quic-go v0.54.1
github.com/rclone/rclone v1.70.3
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
github.com/shirou/gopsutil/v4 v4.25.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/afero v1.14.0
@@ -68,6 +64,7 @@ require (
github.com/u2takey/ffmpeg-go v0.5.0
github.com/upyun/go-sdk/v3 v3.0.4
github.com/winfsp/cgofuse v1.6.0
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
github.com/zzzhr1990/go-common-entity v0.0.0-20250202070650-1a200048f0d3
golang.org/x/crypto v0.40.0
golang.org/x/image v0.29.0
@@ -77,7 +74,7 @@ require (
google.golang.org/appengine v1.6.8
gopkg.in/ldap.v3 v3.1.0
gorm.io/driver/mysql v1.5.7
gorm.io/driver/postgres v1.5.9
gorm.io/driver/postgres v1.6.0
gorm.io/driver/sqlite v1.5.6
gorm.io/gorm v1.25.11
)
@@ -85,22 +82,12 @@ require (
require (
cloud.google.com/go/compute/metadata v0.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
github.com/ProtonMail/go-srp v0.0.7 // indirect
github.com/PuerkitoBio/goquery v1.10.3 // indirect
github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/bradenaw/juniper v0.15.3 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/emersion/go-message v0.18.2 // indirect
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
github.com/geoffgarside/ber v1.2.0 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -115,12 +102,7 @@ require (
github.com/minio/xxml v0.0.3 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/otiai10/mint v1.6.3 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/relvacode/iso8601 v1.6.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.uber.org/mock v0.5.0 // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
golang.org/x/mod v0.27.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)
@@ -146,7 +128,7 @@ require (
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hekmon/cunits/v2 v2.1.0 // indirect
github.com/ipfs/boxo v0.12.0 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
github.com/microcosm-cc/bluemonday v1.0.27
@@ -212,12 +194,11 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/henrybear327/Proton-API-Bridge v1.0.0
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/go-cid v0.5.0
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.5.5 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.6.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -286,9 +267,9 @@ require (
golang.org/x/sys v0.34.0
golang.org/x/term v0.33.0 // indirect
golang.org/x/text v0.27.0
golang.org/x/tools v0.35.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.77.0
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
@@ -296,8 +277,4 @@ require (
lukechampine.com/blake3 v1.1.7 // indirect
)
replace github.com/ProtonMail/go-proton-api => github.com/henrybear327/go-proton-api v1.0.0
replace github.com/cronokirby/saferith => github.com/Da3zKi7/saferith v0.33.0-fixed
// replace github.com/OpenListTeam/115-sdk-go => ../../OpenListTeam/115-sdk-go

go.sum
View File

@@ -37,12 +37,6 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
github.com/KirCute/zip v1.0.1 h1:L/tVZglOiDVKDi9Ud+fN49htgKdQ3Z0H80iX8OZk13c=
github.com/KirCute/zip v1.0.1/go.mod h1:xhF7dCB+Bjvy+5a56lenYCKBsH+gxDNPZSy5Cp+nlXk=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
@@ -53,28 +47,14 @@ github.com/OpenListTeam/gsync v0.1.0 h1:ywzGybOvA3lW8K1BUjKZ2IUlT2FSlzPO4DOazfYX
github.com/OpenListTeam/gsync v0.1.0/go.mod h1:h/Rvv9aX/6CdW/7B8di3xK3xNV8dUg45Fehrd/ksZ9s=
github.com/OpenListTeam/sftpd-openlist v1.0.1 h1:j4S3iPFOpnXCUKRPS7uCT4mF2VCl34GyqvH6lqwnkUU=
github.com/OpenListTeam/sftpd-openlist v1.0.1/go.mod h1:uO/wKnbvbdq3rBLmClMTZXuCnw7XW4wlAq4dZe91a40=
github.com/OpenListTeam/tache v0.2.1 h1:Uy/xAr05clHuMrr9+5fXAhv0Z5PGJivp4P5DnRez6cw=
github.com/OpenListTeam/tache v0.2.1/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
github.com/OpenListTeam/tache v0.2.0 h1:Q4MjuyECn0CZCf1ZF91JaVaZTaps1mOTAm8bFj8sr9Q=
github.com/OpenListTeam/tache v0.2.0/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
github.com/OpenListTeam/times v0.1.0 h1:qknxw+qj5CYKgXAwydA102UEpPcpU8TYNGRmwRyPYpg=
github.com/OpenListTeam/times v0.1.0/go.mod h1:Jx7qen5NCYzKk2w14YuvU48YYMcPa1P9a+EJePC15Pc=
github.com/OpenListTeam/wopan-sdk-go v0.1.5 h1:iKKcVzIqBgtGDbn0QbdWrCazSGxXFmYFyrnFBG+U8dI=
github.com/OpenListTeam/wopan-sdk-go v0.1.5/go.mod h1:otynv0CgSNUClPpUgZ44qCZGcMRe0dc83Pkk65xAunI=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
github.com/ProtonMail/gopenpgp/v2 v2.9.0 h1:ruLzBmwe4dR1hdnrsEJ/S7psSBmV15gFttFUPP/+/kE=
github.com/ProtonMail/gopenpgp/v2 v2.9.0/go.mod h1:IldDyh9Hv1ZCCYatTuuEt1XZJ0OPjxLpTarDfglih7s=
github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo=
github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y=
github.com/RoaringBitmap/roaring/v2 v2.4.5 h1:uGrrMreGjvAtTBobc0g5IrW1D5ldxDQYe2JW2gggRdg=
github.com/RoaringBitmap/roaring/v2 v2.4.5/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
@@ -92,8 +72,6 @@ github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAP
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -186,9 +164,6 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo=
github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0=
github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
@@ -223,7 +198,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg=
@@ -265,10 +239,6 @@ github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJL
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg=
github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
@@ -394,8 +364,6 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499 h1:4ovnBdiGDFi8putQGxhipuuhXItAgh4/YnzufPYkZkQ=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -417,10 +385,6 @@ github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -436,10 +400,16 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -621,14 +591,8 @@ github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQP
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg=
github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/rclone/rclone v1.70.3 h1:rg/WNh4DmSVZyKP2tHZ4lAaWEyMi7h/F0r7smOMA3IE=
github.com/rclone/rclone v1.70.3/go.mod h1:nLyN+hpxAsQn9Rgt5kM774lcRDad82x/KqQeBZ83cMo=
github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -641,6 +605,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
@@ -713,6 +679,8 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M=
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
@@ -740,10 +708,6 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
@@ -757,7 +721,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
@@ -799,8 +762,6 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -820,12 +781,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -868,7 +827,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -887,7 +845,6 @@ golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXct
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
@@ -903,7 +860,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -945,8 +901,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1021,6 +977,8 @@ gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/driver/sqlite v1.5.6 h1:fO/X46qn5NUEEOZtnjJRWRzZMe8nqJiQ9E+0hi+hKQE=
gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDah4=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=

View File

@@ -4,7 +4,6 @@ import (
"io"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -22,7 +21,7 @@ func (RarDecoder) AcceptedExtensions() []string {
func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".part1.rar": {regexp.MustCompile("^.*\\.part(\\d+)\\.rar$"), 2},
".part1.rar": {".part%d.rar", 2},
}
}

View File

@@ -2,7 +2,6 @@ package sevenzip
import (
"io"
"regexp"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -19,7 +18,7 @@ func (SevenZip) AcceptedExtensions() []string {
func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".7z.001": {regexp.MustCompile("^.*\\.7z\\.(\\d+)$"), 2},
".7z.001": {".7z.%.3d", 2},
}
}

View File

@@ -2,14 +2,13 @@ package tool
import (
"io"
"regexp"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
)
type MultipartExtension struct {
PartFileFormat *regexp.Regexp
PartFileFormat string
SecondPartIndex int
}
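
The `MultipartExtension` entries above drop the regular expressions in favour of a printf-style suffix plus the index of the second volume. The consumer of this struct is not part of this hunk, so the helper below is only a sketch of how such a pair could be used to synthesize the names of the remaining volumes; `partName` is a hypothetical name, not an OpenList API.

```go
package main

import "fmt"

// MultipartExtension mirrors the struct from internal/archive/tool:
// a printf-style suffix format plus the index of the second volume.
type MultipartExtension struct {
	PartFileFormat  string
	SecondPartIndex int
}

// partName is a hypothetical helper: it renders the suffix of the n-th
// additional volume for a given base name. The real lookup logic lives
// elsewhere in the tool package and is not shown in this hunk.
func partName(base string, ext MultipartExtension, n int) string {
	return base + fmt.Sprintf(ext.PartFileFormat, ext.SecondPartIndex+n-1)
}

func main() {
	zipSplit := MultipartExtension{PartFileFormat: ".zip.%.3d", SecondPartIndex: 2}
	rarSplit := MultipartExtension{PartFileFormat: ".part%d.rar", SecondPartIndex: 2}
	fmt.Println(partName("backup", zipSplit, 1)) // backup.zip.002
	fmt.Println(partName("backup", rarSplit, 1)) // backup.part2.rar
	fmt.Println(partName("backup", rarSplit, 2)) // backup.part3.rar
}
```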

View File

@@ -4,15 +4,22 @@ import (
"bytes"
"io"
"io/fs"
stdpath "path"
"strings"
"github.com/KirCute/zip"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"golang.org/x/text/encoding/ianaindex"
"github.com/saintfish/chardet"
"github.com/yeka/zip"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/encoding/traditionalchinese"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/encoding/unicode/utf32"
"golang.org/x/text/transform"
)
@@ -30,11 +37,10 @@ func (r *WrapReader) Files() []tool.SubFile {
type WrapFileInfo struct {
fs.FileInfo
efs bool
}
func (f *WrapFileInfo) Name() string {
return decodeName(f.FileInfo.Name(), f.efs)
return decodeName(f.FileInfo.Name())
}
type WrapFile struct {
@@ -42,11 +48,11 @@ type WrapFile struct {
}
func (f *WrapFile) Name() string {
return decodeName(f.f.Name, isEFS(f.f.Flags))
return decodeName(f.f.Name)
}
func (f *WrapFile) FileInfo() fs.FileInfo {
return &WrapFileInfo{FileInfo: f.f.FileInfo(), efs: isEFS(f.f.Flags)}
return &WrapFileInfo{FileInfo: f.f.FileInfo()}
}
func (f *WrapFile) Open() (io.ReadCloser, error) {
@@ -61,33 +67,16 @@ func (f *WrapFile) SetPassword(password string) {
f.f.SetPassword(password)
}
func makePart(ss *stream.SeekableStream) (zip.SizeReaderAt, error) {
ra, err := stream.NewReadAtSeeker(ss, 0)
func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
// FIXME: Incorrect parsing method for standard multipart zip format
ss = append(ss[1:], ss[0])
}
reader, err := stream.NewMultiReaderAt(ss)
if err != nil {
return nil, err
}
return &inlineSizeReaderAt{ReaderAt: ra, size: ss.GetSize()}, nil
}
func (z *Zip) getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
if len(ss) > 1 && z.traditionalSecondPartRegExp.MatchString(ss[1].GetName()) {
ss = append(ss[1:], ss[0])
ras := make([]zip.SizeReaderAt, 0, len(ss))
for _, s := range ss {
ra, err := makePart(s)
if err != nil {
return nil, err
}
ras = append(ras, ra)
}
return zip.NewMultipartReader(ras)
} else {
reader, err := stream.NewMultiReaderAt(ss)
if err != nil {
return nil, err
}
return zip.NewReader(reader, reader.Size())
}
return zip.NewReader(reader, reader.Size())
}
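
The rewritten `getReader` concatenates every volume behind a single `io.ReaderAt` via `stream.NewMultiReaderAt` and hands it to `zip.NewReader`; for traditional `.z01` splits it rotates the `.zip` volume (which carries the central directory) to the end of the list, and the FIXME notes this is not a strictly correct parse of spanned archives. The standalone sketch below shows why the approach works for plain byte-splits such as `.zip.001/.002`: it uses only the standard library and joins the parts in memory for brevity, where the real code streams them through `NewMultiReaderAt`.

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"log"
)

func main() {
	// Build a small zip archive in memory.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	w, err := zw.Create("hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, "hello from a split archive"); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Simulate ".zip.001/.002" volumes: a plain byte split of one zip file.
	raw := buf.Bytes()
	mid := len(raw) / 2
	parts := [][]byte{raw[:mid], raw[mid:]}

	// Concatenate the volumes back and expose them as one io.ReaderAt,
	// which is what zip.NewReader expects.
	joined := bytes.Join(parts, nil)
	r := bytes.NewReader(joined)
	zr, err := zip.NewReader(r, int64(len(joined)))
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range zr.File {
		rc, err := f.Open()
		if err != nil {
			log.Fatal(err)
		}
		data, _ := io.ReadAll(rc)
		rc.Close()
		fmt.Printf("%s: %s\n", f.Name, data)
	}
}
```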
func filterPassword(err error) error {
@@ -97,29 +86,110 @@ func filterPassword(err error) error {
return err
}
func decodeName(name string, efs bool) string {
if efs {
return name
}
enc, err := ianaindex.IANA.Encoding(setting.GetStr(conf.NonEFSZipEncoding))
func decodeName(name string) string {
b := []byte(name)
detector := chardet.NewTextDetector()
results, err := detector.DetectAll(b)
if err != nil {
return name
}
i := bytes.NewReader([]byte(name))
var ce, re, enc encoding.Encoding
for _, r := range results {
if r.Confidence > 30 {
ce = getCommonEncoding(r.Charset)
if ce != nil {
break
}
}
if re == nil {
re = getEncoding(r.Charset)
}
}
if ce != nil {
enc = ce
} else if re != nil {
enc = re
} else {
return name
}
i := bytes.NewReader(b)
decoder := transform.NewReader(i, enc.NewDecoder())
content, _ := io.ReadAll(decoder)
return string(content)
}
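
With the EFS flag handling gone, `decodeName` now guesses the filename's charset with `github.com/saintfish/chardet` and converts it through the matching `golang.org/x/text` decoder. The sketch below walks the same detect-then-transform path under that assumption; it pins GB18030 as the decoder because the input is known here, whereas the patched code maps the detected charset name through `getCommonEncoding`/`getEncoding`. Detection on very short names can misfire, which is presumably why the patch walks `DetectAll` results with a confidence cut-off.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/saintfish/chardet"
	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"
)

func main() {
	// Produce a GB18030-encoded filename, as a legacy zip tool might store it.
	gbk, err := io.ReadAll(transform.NewReader(
		bytes.NewReader([]byte("测试文件.txt")),
		simplifiedchinese.GB18030.NewEncoder(),
	))
	if err != nil {
		log.Fatal(err)
	}

	// Guess the charset. On very short inputs the guess can be wrong, which is
	// why the patched decodeName filters DetectAll results by confidence.
	best, err := chardet.NewTextDetector().DetectBest(gbk)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("detected:", best.Charset, "confidence:", best.Confidence)

	// Decode back to UTF-8 with the matching x/text decoder.
	utf8Name, err := io.ReadAll(transform.NewReader(
		bytes.NewReader(gbk),
		simplifiedchinese.GB18030.NewDecoder(),
	))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded:", string(utf8Name))
}
```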
func isEFS(flags uint16) bool {
return (flags & 0x800) > 0
func getCommonEncoding(name string) (enc encoding.Encoding) {
switch name {
case "UTF-8":
enc = unicode.UTF8
case "UTF-16LE":
enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
case "Shift_JIS":
enc = japanese.ShiftJIS
case "GB-18030":
enc = simplifiedchinese.GB18030
case "EUC-KR":
enc = korean.EUCKR
case "Big5":
enc = traditionalchinese.Big5
default:
enc = nil
}
return
}
type inlineSizeReaderAt struct {
io.ReaderAt
size int64
}
func (i *inlineSizeReaderAt) Size() int64 {
return i.size
func getEncoding(name string) (enc encoding.Encoding) {
switch name {
case "UTF-8":
enc = unicode.UTF8
case "UTF-16BE":
enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
case "UTF-16LE":
enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
case "UTF-32BE":
enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM)
case "UTF-32LE":
enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM)
case "ISO-8859-1":
enc = charmap.ISO8859_1
case "ISO-8859-2":
enc = charmap.ISO8859_2
case "ISO-8859-3":
enc = charmap.ISO8859_3
case "ISO-8859-4":
enc = charmap.ISO8859_4
case "ISO-8859-5":
enc = charmap.ISO8859_5
case "ISO-8859-6":
enc = charmap.ISO8859_6
case "ISO-8859-7":
enc = charmap.ISO8859_7
case "ISO-8859-8":
enc = charmap.ISO8859_8
case "ISO-8859-8-I":
enc = charmap.ISO8859_8I
case "ISO-8859-9":
enc = charmap.ISO8859_9
case "windows-1251":
enc = charmap.Windows1251
case "windows-1256":
enc = charmap.Windows1256
case "KOI8-R":
enc = charmap.KOI8R
case "Shift_JIS":
enc = japanese.ShiftJIS
case "GB-18030":
enc = simplifiedchinese.GB18030
case "EUC-JP":
enc = japanese.EUCJP
case "EUC-KR":
enc = korean.EUCKR
case "Big5":
enc = traditionalchinese.Big5
case "ISO-2022-JP":
enc = japanese.ISO2022JP
default:
enc = nil
}
return
}
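
These switch tables replace the removed `ianaindex.IANA.Encoding(...)` lookup with an explicit mapping over the charset names chardet reports. A generic lookup is still possible where the spellings match IANA labels, but names such as `GB-18030` or `ISO-8859-8-I` do not, so a normalization step (or a switch like the one above) remains necessary. The snippet below only illustrates that caveat and is not taken from the repository.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/ianaindex"
)

func main() {
	// Works for names that match IANA labels...
	enc, err := ianaindex.IANA.Encoding("Shift_JIS")
	if err != nil || enc == nil {
		log.Fatal("lookup failed")
	}
	fmt.Printf("resolved Shift_JIS to %T\n", enc)

	// ...but chardet's "GB-18030" spelling is not an IANA label, so a
	// name-normalization step (or an explicit switch, as in the patch) is needed.
	if e, err := ianaindex.IANA.Encoding("GB-18030"); err != nil || e == nil {
		fmt.Println("GB-18030 not resolved directly; map it to GB18030 first")
	}
}
```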

View File

@@ -3,7 +3,6 @@ package zip
import (
"io"
stdpath "path"
"regexp"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -13,39 +12,34 @@ import (
)
type Zip struct {
traditionalSecondPartRegExp *regexp.Regexp
}
func (z *Zip) AcceptedExtensions() []string {
func (Zip) AcceptedExtensions() []string {
return []string{}
}
func (z *Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".zip": {regexp.MustCompile("^.*\\.z(\\d+)$"), 1},
".zip.001": {regexp.MustCompile("^.*\\.zip\\.(\\d+)$"), 2},
".zip": {".z%.2d", 1},
".zip.001": {".zip.%.3d", 2},
}
}
func (z *Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
zipReader, err := z.getReader(ss)
func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, err
}
efs := true
if len(zipReader.File) > 0 {
efs = isEFS(zipReader.File[0].Flags)
}
encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
return &model.ArchiveMetaInfo{
Comment: decodeName(zipReader.Comment, efs),
Comment: zipReader.Comment,
Encrypted: encrypted,
Tree: tree,
}, nil
}
func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
zipReader, err := z.getReader(ss)
func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, err
}
@@ -63,7 +57,7 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
_ = rc.Close()
passVerified = true
}
name := strings.TrimSuffix(decodeName(file.Name, isEFS(file.Flags)), "/")
name := strings.TrimSuffix(decodeName(file.Name), "/")
if strings.Contains(name, "/") {
// some archives do not compress the top-level folder
strs := strings.Split(name, "/")
@@ -76,7 +70,7 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
}
continue
}
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo(), efs: isEFS(file.Flags)}))
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
}
if len(ret) == 0 && dir != nil {
ret = append(ret, dir)
@@ -87,13 +81,13 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
ret := make([]model.Obj, 0)
exist := false
for _, file := range zipReader.File {
name := decodeName(file.Name, isEFS(file.Flags))
name := decodeName(file.Name)
dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
if dir != innerPath {
continue
}
exist = true
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo(), isEFS(file.Flags)}))
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
}
if !exist {
return nil, errs.ObjectNotFound
@@ -102,14 +96,14 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
}
}
func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
zipReader, err := z.getReader(ss)
func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, 0, err
}
innerPath := strings.TrimPrefix(args.InnerPath, "/")
for _, file := range zipReader.File {
if decodeName(file.Name, isEFS(file.Flags)) == innerPath {
if decodeName(file.Name) == innerPath {
if file.IsEncrypted() {
file.SetPassword(args.Password)
}
@@ -123,8 +117,8 @@ func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs)
return nil, 0, errs.ObjectNotFound
}
func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
zipReader, err := z.getReader(ss)
func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
zipReader, err := getReader(ss)
if err != nil {
return err
}
@@ -134,7 +128,5 @@ func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args mo
var _ tool.Tool = (*Zip)(nil)
func init() {
tool.RegisterTool(&Zip{
traditionalSecondPartRegExp: regexp.MustCompile("^.*\\.z0*1$"),
})
tool.RegisterTool(Zip{})
}
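
Switching the methods to value receivers keeps the `var _ tool.Tool = (*Zip)(nil)` guard valid, since both `Zip` and `*Zip` now carry the full method set, and lets `init` register a plain `Zip{}` value. The self-contained example below illustrates that compile-time assertion idiom with stand-in types; `Tool` here is illustrative, not the repository's interface.

```go
package main

import "fmt"

// Tool is a stand-in for the repository's tool.Tool interface.
type Tool interface {
	AcceptedExtensions() []string
}

// Zip uses value receivers, mirroring the patched decoder.
type Zip struct{}

func (Zip) AcceptedExtensions() []string { return []string{} }

// Compile-time guard: with value receivers both Zip and *Zip satisfy Tool,
// so this line fails to build if the method set ever drifts from the interface.
var _ Tool = (*Zip)(nil)

func main() {
	var t Tool = Zip{} // a plain value is enough, as in tool.RegisterTool(Zip{})
	fmt.Println(t.AcceptedExtensions())
}
```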

Some files were not shown because too many files have changed in this diff.