Compare commits


3 Commits

Author         SHA1        Message                                                            Date
Suyunmeng      85e007c302  chore(deps): tidy go.mod                                           2025-10-19 13:24:31 +08:00
Suyunmeng      282aa97c57  Merge branch 'main' into renovate/github.com-jlaffaye-ftp-digest   2025-10-19 13:22:33 +08:00
renovate[bot]  ac69cc5b61  fix(deps): update github.com/jlaffaye/ftp digest to 3f092e0        2025-10-06 17:15:19 +00:00
122 changed files with 903 additions and 2548 deletions

View File

@@ -13,7 +13,7 @@ body:
attributes:
label: 请确认以下事项
description: |
您必须确认、同意并勾选以下内容,否则您的问题一定会被直接关闭。
您必须勾选以下内容,否则您的问题可能会被直接关闭。
或者您可以去[讨论区](https://github.com/OpenListTeam/OpenList/discussions)。
options:
- label: |
@@ -59,14 +59,6 @@ body:
label: 问题描述(必填)
validations:
required: true
- type: textarea
id: logs
attributes:
label: 日志(必填)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
validations:
required: true
- type: textarea
id: config
attributes:
@@ -75,6 +67,12 @@ body:
请提供您的`OpenList`应用的配置文件,并截图相关存储配置。(可隐藏隐私字段)
validations:
required: true
- type: textarea
id: logs
attributes:
label: 日志(可选)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
- type: textarea
id: reproduction
attributes:

View File

@@ -13,7 +13,7 @@ body:
attributes:
label: Please confirm the following
description: |
You must confirm, agree, and check all the following, otherwise your issue will definitely be closed directly.
You must check all the following, otherwise your issue may be closed directly.
Or you can go to the [discussions](https://github.com/OpenListTeam/OpenList/discussions).
options:
- label: |
@@ -59,14 +59,6 @@ body:
label: Bug Description (required)
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs (required)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
validations:
required: true
- type: textarea
id: config
attributes:
@@ -75,6 +67,12 @@ body:
Please provide your `OpenList` application's configuration file and a screenshot of the relevant storage configuration. (You may mask sensitive fields)
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs (optional)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
- type: textarea
id: reproduction
attributes:

View File

@@ -2,7 +2,6 @@ package flags
var (
DataDir string
ConfigPath string
Debug bool
NoPrefix bool
Dev bool

View File

@@ -27,8 +27,7 @@ func Execute() {
}
func init() {
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
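
The --config flag described above defaults to [data directory]/config.json when unset. A minimal sketch of that resolution, assuming it happens during config load (resolveConfigPath is illustrative, not from this diff; requires path/filepath):

func resolveConfigPath(dataDir, configPath string) string {
	// An empty --config falls back to <data dir>/config.json.
	if configPath == "" {
		configPath = filepath.Join(dataDir, "config.json")
	}
	// Relative paths are resolved against the current working directory.
	if abs, err := filepath.Abs(configPath); err == nil {
		return abs
	}
	return configPath
}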

View File

@@ -17,7 +17,7 @@ type Addition struct {
var config = driver.Config{
Name: "115 Cloud",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
LinkCacheType: 2,
}
func init() {
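
The LinkCacheMode/LinkCacheType swap above recurs throughout this compare. Judging by later hunks (modes are OR-ed together in ResolveLinkCacheMode, and -1 is treated as auto), the type is plausibly a small bitmask; a sketch under that assumption, with the exact underlying type unconfirmed:

type LinkCacheMode int8

// LinkCacheAuto (-1) defers the decision to the driver's per-path resolver.
const LinkCacheAuto LinkCacheMode = -1

const (
	LinkCacheIP LinkCacheMode = 1 << iota // cache key includes the client IP
	LinkCacheUA                           // cache key includes the User-Agent
)

Under this reading, the numeric LinkCacheType values line up with the named constants: 1 for IP-keyed caching (FebBox) and 2 for User-Agent-keyed caching (115 Cloud, 115 Open, BaiduPhoto).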

View File

@@ -19,7 +19,7 @@ type Addition struct {
var config = driver.Config{
Name: "115 Open",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
LinkCacheType: 2,
}
func init() {

View File

@@ -41,9 +41,7 @@ func (d *Pan123) GetAddition() driver.Additional {
}
func (d *Pan123) Init(ctx context.Context) error {
_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
req.SetHeader("platform", "web")
}, nil)
_, err := d.Request(UserInfo, http.MethodGet, nil, nil)
return err
}

View File

@@ -12,8 +12,7 @@ type Addition struct {
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
Platform string `json:"platform" type:"string" default:"web" help:"the platform header value, sent with API requests"`
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
}
var config = driver.Config{
@@ -28,7 +27,6 @@ func init() {
return &Pan123{
Addition: Addition{
UploadThread: 3,
Platform: "web",
},
}
})

View File

@@ -203,7 +203,7 @@ do:
"referer": "https://www.123pan.com/",
"authorization": "Bearer " + d.AccessToken,
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) openlist-client",
"platform": d.Platform,
"platform": "web",
"app-version": "3",
//"user-agent": base.UserAgent,
})

View File

@@ -200,7 +200,10 @@ func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error
return nil, err
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(capacityInfo.CloudCapacityInfo.UsedSize, capacityInfo.CloudCapacityInfo.TotalSize),
DiskUsage: model.DiskUsage{
TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
FreeSpace: capacityInfo.CloudCapacityInfo.FreeSize,
},
}, nil
}
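
Several hunks in this compare replace a hand-built model.DiskUsage with driver.DiskUsageFromUsedAndTotal. Its body is not shown here; based on the AliyundriveOpen hunk below, which computes free space as total minus used, a presumed sketch (the underflow guard is an assumption):

func DiskUsageFromUsedAndTotal(used, total uint64) model.DiskUsage {
	if used > total {
		used = total // avoid uint64 underflow on inconsistent API data
	}
	return model.DiskUsage{
		TotalSpace: total,
		FreeSpace:  total - used,
	}
}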

View File

@@ -72,13 +72,13 @@ type CapacityResp struct {
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`

View File

@@ -284,15 +284,18 @@ func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, err
if err != nil {
return nil, err
}
var total, used uint64
var total, free uint64
if y.isFamily() {
total = capacityInfo.FamilyCapacityInfo.TotalSize
used = capacityInfo.FamilyCapacityInfo.UsedSize
free = capacityInfo.FamilyCapacityInfo.FreeSize
} else {
total = capacityInfo.CloudCapacityInfo.TotalSize
used = capacityInfo.CloudCapacityInfo.UsedSize
free = capacityInfo.CloudCapacityInfo.FreeSize
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

View File

@@ -322,13 +322,13 @@ type CapacityResp struct {
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`

View File

@@ -416,15 +416,18 @@ func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, err
if err != nil {
return nil, err
}
var total, used uint64
var total, free uint64
if y.isFamily() {
total = capacityInfo.FamilyCapacityInfo.TotalSize
used = capacityInfo.FamilyCapacityInfo.UsedSize
free = capacityInfo.FamilyCapacityInfo.FreeSize
} else {
total = capacityInfo.CloudCapacityInfo.TotalSize
used = capacityInfo.CloudCapacityInfo.UsedSize
free = capacityInfo.CloudCapacityInfo.FreeSize
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

View File

@@ -415,13 +415,13 @@ type CapacityResp struct {
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
FreeSize uint64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`

View File

@@ -524,25 +524,4 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
}
}
func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root]
if !ok {
return 0
}
for _, dst := range dsts {
storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
if err != nil {
continue
}
mode := storage.Config().LinkCacheMode
if mode == -1 {
return storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(actualPath)
} else {
return mode
}
}
return 0
}
var _ driver.Driver = (*Alias)(nil)

View File

@@ -26,7 +26,6 @@ var config = driver.Config{
NoUpload: false,
DefaultRoot: "/",
ProxyRangeOption: true,
LinkCacheMode: driver.LinkCacheAuto,
}
func init() {

View File

@@ -5,6 +5,7 @@ import (
"errors"
stdpath "path"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
@@ -16,15 +17,9 @@ import (
log "github.com/sirupsen/logrus"
)
type detailWithIndex struct {
idx int
val *model.StorageDetails
}
func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
var objs []model.Obj
detailsChan := make(chan detailWithIndex, len(d.pathMap))
workerCount := 0
var wg sync.WaitGroup
for _, k := range d.rootOrder {
obj := model.Object{
Name: k,
@@ -52,26 +47,22 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model
DriverName: remoteDriver.Config().Name,
},
}
workerCount++
go func(dri driver.Driver, i int) {
details, e := op.GetStorageDetails(ctx, dri, refresh)
wg.Add(1)
go func() {
defer wg.Done()
c, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
details, e := op.GetStorageDetails(c, remoteDriver, refresh)
if e != nil {
if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, e)
log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
}
return
}
detailsChan <- detailWithIndex{idx: i, val: details}
}(remoteDriver, idx)
}
for workerCount > 0 {
select {
case r := <-detailsChan:
objs[r.idx].(*model.ObjStorageDetails).StorageDetails = r.val
workerCount--
case <-time.After(time.Second):
workerCount = 0
}
objs[idx].(*model.ObjStorageDetails).StorageDetails = details
}()
}
wg.Wait()
return objs
}
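
Both variants of listRoot fan the details lookup out across mounts; the WaitGroup form bounds each lookup with a one-second per-goroutine context instead of draining a channel under a global timer. A condensed sketch of that pattern with the OpenList-specific types elided (fetch stands in for op.GetStorageDetails; needs context, sync, and time):

func fanOut(ctx context.Context, n int, fetch func(ctx context.Context, i int)) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			c, cancel := context.WithTimeout(ctx, time.Second)
			defer cancel()
			fetch(c, i) // each goroutine writes only its own slot, so no race
		}(i)
	}
	wg.Wait()
}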

View File

@@ -299,7 +299,10 @@ func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: total - used,
},
}, nil
}

View File

@@ -5,15 +5,11 @@ import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
stdpath "path"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -22,10 +18,8 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@@ -35,20 +29,8 @@ type BaiduNetdisk struct {
uploadThread int
vipType int // membership type: 0 regular user (4G/4M), 1 regular member (10G/16M), 2 super member (20G/32M)
upClient *resty.Client // http client used for file uploads
uploadUrlG singleflight.Group[string]
uploadUrlMu sync.RWMutex
uploadUrlCache map[string]uploadURLCacheEntry
}
type uploadURLCacheEntry struct {
url string
updateTime time.Time
}
var ErrUploadIDExpired = errors.New("uploadid expired")
func (d *BaiduNetdisk) Config() driver.Config {
return config
}
@@ -58,32 +40,19 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
}
func (d *BaiduNetdisk) Init(ctx context.Context) error {
timeout := DEFAULT_UPLOAD_SLICE_TIMEOUT
if d.UploadSliceTimeout > 0 {
timeout = time.Second * time.Duration(d.UploadSliceTimeout)
}
d.upClient = base.NewRestyClient().
SetTimeout(timeout).
SetRetryCount(UPLOAD_RETRY_COUNT).
SetRetryWaitTime(UPLOAD_RETRY_WAIT_TIME).
SetRetryMaxWaitTime(UPLOAD_RETRY_MAX_WAIT_TIME)
d.uploadUrlCache = make(map[string]uploadURLCacheEntry)
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
if d.uploadThread < 1 {
d.uploadThread, d.UploadThread = 1, "1"
} else if d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 32, "32"
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, "3"
}
if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
d.UploadAPI = UPLOAD_FALLBACK_API
d.UploadAPI = "https://d.pcs.baidu.com"
}
res, err := d.get("/xpan/nas", map[string]string{
"method": "uinfo",
}, nil)
log.Debugf("[baidu_netdisk] get uinfo: %s", string(res))
log.Debugf("[baidu] get uinfo: %s", string(res))
if err != nil {
return err
}
@@ -210,11 +179,6 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
// **Note**: As of 2024/04/20, the Baidu Netdisk API always returns the current time rather than the file time.
// The cloud actually stores the file time, so the time is overridden here to keep the cache consistent with the cloud.
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// Baidu Netdisk does not allow uploading empty files
if stream.GetSize() < 1 {
return nil, ErrBaiduEmptyFilesNotAllowed
}
// rapid upload
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
return newObj, nil
@@ -250,6 +214,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
// cal md5 for first 256k data
const SliceSize int64 = 256 * utils.KB
// cal md5
blockList := make([]string, 0, count)
byteSize := sliceSize
fileMd5H := md5.New()
@@ -279,7 +244,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
if tmpF != nil {
if written != streamSize {
return nil, errs.NewErr(err, "CreateTempFile failed, size mismatch: %d != %d ", written, streamSize)
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
@@ -293,14 +258,31 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
mtime := stream.ModTime().Unix()
ctime := stream.CreateTime().Unix()
// step.1 try to read previously saved progress
// step.1 precreate
// try to fetch previous progress
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
// no saved progress, run precreate
precreateResp, err = d.precreate(ctx, path, streamSize, blockListStr, contentMd5, sliceMd5, ctime, mtime)
params := map[string]string{
"method": "precreate",
}
form := map[string]string{
"path": path,
"size": strconv.FormatInt(streamSize, 10),
"isdir": "0",
"autoinit": "1",
"rtype": "3",
"block_list": blockListStr,
"content-md5": contentMd5,
"slice-md5": sliceMd5,
}
joinTime(form, ctime, mtime)
log.Debugf("[baidu_netdisk] precreate data: %s", form)
_, err = d.postForm("/xpan/file", params, form, &precreateResp)
if err != nil {
return nil, err
}
log.Debugf("%+v", precreateResp)
if precreateResp.ReturnType == 2 {
// rapid upload, since got md5 match from baidu server
// fix the times; see the **Note** in the Put method's comment for the reason
@@ -309,96 +291,45 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
return fileToObj(precreateResp.File), nil
}
}
ensureUploadURL := func() {
if precreateResp.UploadURL != "" {
return
}
precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
}
ensureUploadURL()
// step.2 upload the slices
uploadLoop:
for attempt := 0; attempt < 2; attempt++ {
// get the upload domain
if precreateResp.UploadURL == "" {
ensureUploadURL()
}
uploadUrl := precreateResp.UploadURL
// concurrent upload
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
cacheReaderAt, okReaderAt := cache.(io.ReaderAt)
if !okReaderAt {
return nil, fmt.Errorf("cache object must implement io.ReaderAt interface for upload operations")
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
}
totalParts := len(precreateResp.BlockList)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) || partseq < 0 {
continue
i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
"type": "tmpfile",
"path": path,
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
i, partseq := i, partseq
offset, size := int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
size = lastBlockSize
err := d.uploadSlice(ctx, params, stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
if err != nil {
return err
}
threadG.Go(func(ctx context.Context) error {
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
"type": "tmpfile",
"path": path,
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
section := io.NewSectionReader(cacheReaderAt, offset, size)
err := d.uploadSlice(ctx, uploadUrl, params, stream.GetName(), driver.NewLimitedUploadStream(ctx, section))
if err != nil {
return err
}
precreateResp.BlockList[i] = -1
// the current goroutine has not exited yet, so +1 gives the true success count
success := threadG.Success() + 1
progress := float64(success) * 100 / float64(totalParts)
up(progress)
return nil
})
}
err = threadG.Wait()
if err == nil {
break uploadLoop
}
// save progress (saved on any error)
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
precreateResp.BlockList[i] = -1
return nil
})
}
if err = threadG.Wait(); err != nil {
// if the cancellation was user-initiated, keep the saved upload progress
if errors.Is(err, context.Canceled) {
return nil, err
}
if errors.Is(err, ErrUploadIDExpired) {
log.Warn("[baidu_netdisk] uploadid expired, will restart from scratch")
d.clearUploadUrlCache(precreateResp.Uploadid)
// re-run precreate; all slices must be re-uploaded
newPre, err2 := d.precreate(ctx, path, streamSize, blockListStr, "", "", ctime, mtime)
if err2 != nil {
return nil, err2
}
if newPre.ReturnType == 2 {
return fileToObj(newPre.File), nil
}
precreateResp = newPre
precreateResp.UploadURL = ""
ensureUploadURL()
// overwrite the old progress
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
continue uploadLoop
}
return nil, err
}
@@ -412,72 +343,23 @@ uploadLoop:
// fix the times; see the **Note** in the Put method's comment for the reason
newFile.Ctime = ctime
newFile.Mtime = mtime
// clean up the progress after a successful upload
base.SaveUploadProgress(d, nil, d.AccessToken, contentMd5)
d.clearUploadUrlCache(precreateResp.Uploadid)
return fileToObj(newFile), nil
}
// precreate performs the pre-upload call, supporting both first uploads and uploadid-expiry retries
func (d *BaiduNetdisk) precreate(ctx context.Context, path string, streamSize int64, blockListStr, contentMd5, sliceMd5 string, ctime, mtime int64) (*PrecreateResp, error) {
params := map[string]string{"method": "precreate"}
form := map[string]string{
"path": path,
"size": strconv.FormatInt(streamSize, 10),
"isdir": "0",
"autoinit": "1",
"rtype": "3",
"block_list": blockListStr,
}
// content-md5 and slice-md5 are only included on the first upload
if contentMd5 != "" && sliceMd5 != "" {
form["content-md5"] = contentMd5
form["slice-md5"] = sliceMd5
}
joinTime(form, ctime, mtime)
var precreateResp PrecreateResp
_, err := d.postForm("/xpan/file", params, form, &precreateResp)
if err != nil {
return nil, err
}
// fix the times; see the **Note** in the Put method's comment for the reason
if precreateResp.ReturnType == 2 {
precreateResp.File.Ctime = ctime
precreateResp.File.Mtime = mtime
}
return &precreateResp, nil
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, uploadUrl string, params map[string]string, fileName string, file io.Reader) error {
res, err := d.upClient.R().
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
res, err := base.RestyClient.R().
SetContext(ctx).
SetQueryParams(params).
SetFileReader("file", fileName, file).
Post(uploadUrl + "/rest/2.0/pcs/superfile2")
Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
if err != nil {
return err
}
log.Debugln(res.RawResponse.Status + res.String())
if res.StatusCode() != http.StatusOK {
return errs.NewErr(errs.StreamIncomplete, "baidu upload failed, status=%d, body=%s", res.StatusCode(), res.String())
}
errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
errNo := utils.Json.Get(res.Body(), "errno").ToInt()
respStr := res.String()
lower := strings.ToLower(respStr)
// unified uploadid-expiry detection
if strings.Contains(lower, "uploadid") &&
(strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
return ErrUploadIDExpired
}
if errCode != 0 || errNo != 0 {
return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", res.String())
return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
}
return nil
}
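
The upload loop above relies on the usual slice accounting: partseq indexes fixed-size slices and only the final slice is shorter, per the "if partseq+1 == count { byteSize = lastBlockSize }" branch. An illustrative helper for that arithmetic (names are hypothetical):

func sliceLayout(streamSize, sliceSize int64) (count, lastBlockSize int64) {
	count = (streamSize + sliceSize - 1) / sliceSize // ceiling division
	if count == 0 {
		return 0, 0
	}
	lastBlockSize = streamSize - (count-1)*sliceSize
	return count, lastBlockSize
}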

View File

@@ -3,7 +3,6 @@ package baidu_netdisk
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"time"
)
type Addition struct {
@@ -19,23 +18,12 @@ type Addition struct {
AccessToken string
RefreshToken string `json:"refresh_token" required:"true"`
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadSliceTimeout int `json:"upload_timeout" type:"number" default:"60" help:"per-slice upload timeout in seconds"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
UseDynamicUploadAPI bool `json:"use_dynamic_upload_api" default:"true" help:"dynamically get upload api domain, when enabled, the 'Upload API' setting will be used as a fallback if failed to get"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
}
const (
UPLOAD_FALLBACK_API = "https://d.pcs.baidu.com" // fallback upload endpoint
UPLOAD_URL_EXPIRE_TIME = time.Minute * 60 // validity period of the upload URL
DEFAULT_UPLOAD_SLICE_TIMEOUT = time.Second * 60 // default timeout for a slice upload request
UPLOAD_RETRY_COUNT = 3
UPLOAD_RETRY_WAIT_TIME = time.Second * 1
UPLOAD_RETRY_MAX_WAIT_TIME = time.Second * 5
)
var config = driver.Config{
Name: "BaiduNetdisk",
DefaultRoot: "/",

View File

@@ -1,7 +1,6 @@
package baidu_netdisk
import (
"errors"
"path"
"strconv"
"time"
@@ -10,10 +9,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
var (
ErrBaiduEmptyFilesNotAllowed = errors.New("empty files are not allowed by baidu netdisk")
)
type TokenErrResp struct {
ErrorDescription string `json:"error_description"`
Error string `json:"error"`
@@ -193,32 +188,6 @@ type PrecreateResp struct {
// return_type=2
File File `json:"info"`
UploadURL string `json:"-"` // upload domain saved for resumable uploads
}
type UploadServerResp struct {
BakServer []any `json:"bak_server"`
BakServers []struct {
Server string `json:"server"`
} `json:"bak_servers"`
ClientIP string `json:"client_ip"`
ErrorCode int `json:"error_code"`
ErrorMsg string `json:"error_msg"`
Expire int `json:"expire"`
Host string `json:"host"`
Newno string `json:"newno"`
QuicServer []any `json:"quic_server"`
QuicServers []struct {
Server string `json:"server"`
} `json:"quic_servers"`
RequestID int64 `json:"request_id"`
Server []any `json:"server"`
ServerTime int `json:"server_time"`
Servers []struct {
Server string `json:"server"`
} `json:"servers"`
Sl int `json:"sl"`
}
type QuotaResp struct {

View File

@@ -115,7 +115,7 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
errno := utils.Json.Get(res.Body(), "errno").ToInt()
if errno != 0 {
if utils.SliceContains([]int{111, -6}, errno) {
log.Info("[baidu_netdisk] refreshing baidu_netdisk token.")
log.Info("refreshing baidu_netdisk token.")
err2 := d.refreshToken()
if err2 != nil {
return retry.Unrecoverable(err2)
@@ -326,10 +326,10 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
// fixed at 4MB for non-members
if d.vipType == 0 {
if d.CustomUploadPartSize != 0 {
log.Warnf("[baidu_netdisk] CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
}
if filesize > MaxSliceNum*DefaultSliceSize {
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return DefaultSliceSize
@@ -337,17 +337,17 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
if d.CustomUploadPartSize != 0 {
if d.CustomUploadPartSize < DefaultSliceSize {
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
return DefaultSliceSize
}
if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
return VipSliceSize
}
if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
return SVipSliceSize
}
@@ -377,7 +377,7 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
}
if filesize > MaxSliceNum*maxSliceSize {
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return maxSliceSize
@@ -394,97 +394,6 @@ func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
}
// getUploadUrl fetches the upload domain/URL from the open platform; concurrent requests are merged, and the result is reused for the lifetime of the uploadid.
// If fetching fails, the Upload API setting is returned instead.
func (d *BaiduNetdisk) getUploadUrl(path, uploadId string) string {
if !d.UseDynamicUploadAPI || uploadId == "" {
return d.UploadAPI
}
getCachedUrlFunc := func() (string, bool) {
d.uploadUrlMu.RLock()
defer d.uploadUrlMu.RUnlock()
if entry, ok := d.uploadUrlCache[uploadId]; ok {
return entry.url, true
}
return "", false
}
// check the URL cache
if uploadUrl, ok := getCachedUrlFunc(); ok {
return uploadUrl
}
uploadUrlGetFunc := func() (string, error) {
// double-check the cache
if uploadUrl, ok := getCachedUrlFunc(); ok {
return uploadUrl, nil
}
uploadUrl, err := d.requestForUploadUrl(path, uploadId)
if err != nil {
return "", err
}
d.uploadUrlMu.Lock()
d.uploadUrlCache[uploadId] = uploadURLCacheEntry{
url: uploadUrl,
updateTime: time.Now(),
}
d.uploadUrlMu.Unlock()
return uploadUrl, nil
}
uploadUrl, err, _ := d.uploadUrlG.Do(uploadId, uploadUrlGetFunc)
if err != nil {
fallback := d.UploadAPI
log.Warnf("[baidu_netdisk] get upload URL failed (%v), will use fallback URL: %s", err, fallback)
return fallback
}
return uploadUrl
}
func (d *BaiduNetdisk) clearUploadUrlCache(uploadId string) {
if uploadId == "" {
return
}
d.uploadUrlMu.Lock()
if _, ok := d.uploadUrlCache[uploadId]; ok {
delete(d.uploadUrlCache, uploadId)
}
d.uploadUrlMu.Unlock()
}
// requestForUploadUrl requests the upload URL.
// In practice this endpoint needs no auth; passing method and upload_version is enough, but it is still called per the documented convention.
// https://pan.baidu.com/union/doc/Mlvw5hfnr
func (d *BaiduNetdisk) requestForUploadUrl(path, uploadId string) (string, error) {
params := map[string]string{
"method": "locateupload",
"appid": "250528",
"path": path,
"uploadid": uploadId,
"upload_version": "2.0",
}
apiUrl := "https://d.pcs.baidu.com/rest/2.0/pcs/file"
var resp UploadServerResp
_, err := d.request(apiUrl, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(params)
}, &resp)
if err != nil {
return "", err
}
// should be an address starting with https
var uploadUrl string
if len(resp.Servers) > 0 {
uploadUrl = resp.Servers[0].Server
} else if len(resp.BakServers) > 0 {
uploadUrl = resp.BakServers[0].Server
}
if uploadUrl == "" {
return "", errors.New("upload URL is empty")
}
return uploadUrl, nil
}
// func encodeURIComponent(str string) string {
// r := url.QueryEscape(str)
// r = strings.ReplaceAll(r, "+", "%20")

View File

@@ -20,7 +20,7 @@ type Addition struct {
var config = driver.Config{
Name: "BaiduPhoto",
LocalSort: true,
LinkCacheMode: driver.LinkCacheUA,
LinkCacheType: 2,
}
func init() {

View File

@@ -25,7 +25,6 @@ func InitClient() {
}),
).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
NoRedirectClient.SetHeader("user-agent", UserAgent)
net.SetRestyProxyIfConfigured(NoRedirectClient)
RestyClient = NewRestyClient()
HttpClient = net.NewHttpClient()
@@ -38,7 +37,5 @@ func NewRestyClient() *resty.Client {
SetRetryResetReaders(true).
SetTimeout(DefaultTimeout).
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
net.SetRestyProxyIfConfigured(client)
return client
}
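
One side of this hunk routes every Resty client through net.SetRestyProxyIfConfigured, whose body is not part of this compare. A plausible sketch, assuming a conf-level proxy setting (the field name below is hypothetical):

// Hypothetical sketch: apply a globally configured proxy to a resty client.
func SetRestyProxyIfConfigured(client *resty.Client) {
	if addr := conf.Conf.ProxyAddress; addr != "" { // assumed field name
		client.SetProxy(addr) // resty accepts a proxy URL string
	}
}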

View File

@@ -15,7 +15,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
"golang.org/x/time/rate"
)
type Doubao struct {
@@ -24,7 +23,6 @@ type Doubao struct {
*UploadToken
UserId string
uploadThread int
limiter *rate.Limiter
}
func (d *Doubao) Config() driver.Config {
@@ -63,17 +61,6 @@ func (d *Doubao) Init(ctx context.Context) error {
d.UploadToken = uploadToken
}
if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
}
return nil
}
func (d *Doubao) WaitLimit(ctx context.Context) error {
if d.limiter != nil {
return d.limiter.Wait(ctx)
}
return nil
}
@@ -82,10 +69,6 @@ func (d *Doubao) Drop(ctx context.Context) error {
}
func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var files []model.Obj
fileList, err := d.getFiles(dir.GetID(), "")
if err != nil {
@@ -112,10 +95,6 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
}
func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var downloadUrl string
if u, ok := file.(*Object); ok {
@@ -181,10 +160,6 @@ func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r UploadNodeResp
_, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -202,10 +177,6 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
}
func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r UploadNodeResp
_, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -220,10 +191,6 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r BaseResp
_, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -240,10 +207,6 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
}
func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r BaseResp
_, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
@@ -252,10 +215,6 @@ func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
// determine the data type from the MIME type
mimetype := file.GetMimetype()
dataType := FileDataType
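
The WaitLimit calls threaded through these methods gate every API request behind a golang.org/x/time/rate token bucket (LimitRate requests per second, burst 1). A standalone illustration of how such a limiter paces calls:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	limiter := rate.NewLimiter(rate.Limit(2), 1) // ~2 requests/second, burst 1
	start := time.Now()
	for i := 0; i < 5; i++ {
		// Wait blocks until a token is available or the context is canceled.
		if err := limiter.Wait(context.Background()); err != nil {
			return
		}
		fmt.Printf("request %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
}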

View File

@@ -10,10 +10,9 @@ type Addition struct {
// driver.RootPath
driver.RootID
// define other
Cookie string `json:"cookie" type:"text"`
UploadThread string `json:"upload_thread" default:"3"`
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
Cookie string `json:"cookie" type:"text"`
UploadThread string `json:"upload_thread" default:"3"`
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
}
var config = driver.Config{
@@ -24,10 +23,6 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Doubao{
Addition: Addition{
LimitRate: 2,
},
}
return &Doubao{}
})
}

View File

@@ -19,7 +19,7 @@ var config = driver.Config{
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheIP,
LinkCacheType: 1,
}
func init() {

View File

@@ -113,7 +113,9 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
}
return &model.Link{
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
},
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}
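
Here (and in the ProtonDrive hunk later) one side wraps the rate-limited range reader in model.FileRangeReader before assigning it to the link. A sketch of the adapter shape this usage implies; the interface signature is an assumption inferred from the call sites:

type RangeReaderIF interface {
	RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
}

// FileRangeReader embeds the interface unchanged, so wrapping only tags the
// reader (e.g. as file-backed) without altering the read path.
type FileRangeReader struct {
	RangeReaderIF
}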

View File

@@ -51,9 +51,6 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
if d.Addition.ShowReadme {
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
if d.Addition.ShowSourceCode{
files = append(files, point.GetSourceCode()...)
}
} else if strings.HasPrefix(point.Point, path) { // parent directory of the repo directory
nextDir := GetNextDir(point.Point, path)
if nextDir == "" {
@@ -120,10 +117,6 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
}
files = append(files, point.GetReleaseByTagName(tagName)...)
if d.Addition.ShowSourceCode{
files = append(files, point.GetSourceCodeByTagName(tagName)...)
}
}
}
}

View File

@@ -10,7 +10,6 @@ type Addition struct {
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"OpenListTeam/OpenList" help:"structure:[path:]org/repo"`
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
ShowSourceCode bool `json:"show_source_code" type:"bool" default:"false" help:"show Source code (zip/tar.gz)"`
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
}

View File

@@ -143,60 +143,6 @@ func (m *MountPoint) GetAllVersionSize() int64 {
return size
}
func (m *MountPoint) GetSourceCode() []File {
files := make([]File, 0)
// the file size cannot be obtained, so it is set to 1 here
files = append(files, File{
Path: m.Point + "/" + "Source code (zip)",
FileName: "Source code (zip)",
Size: 1,
Type: "file",
UpdateAt: m.Release.CreatedAt,
CreateAt: m.Release.CreatedAt,
Url: m.Release.ZipballUrl,
})
files = append(files, File{
Path: m.Point + "/" + "Source code (tar.gz)",
FileName: "Source code (tar.gz)",
Size: 1,
Type: "file",
UpdateAt: m.Release.CreatedAt,
CreateAt: m.Release.CreatedAt,
Url: m.Release.TarballUrl,
})
return files
}
func (m *MountPoint) GetSourceCodeByTagName(tagName string) []File {
for _, item := range *m.Releases {
if item.TagName == tagName {
files := make([]File, 0)
files = append(files, File{
Path: m.Point + "/" + "Source code (zip)",
FileName: "Source code (zip)",
Size: 1,
Type: "file",
UpdateAt: item.CreatedAt,
CreateAt: item.CreatedAt,
Url: item.ZipballUrl,
})
files = append(files, File{
Path: m.Point + "/" + "Source code (tar.gz)",
FileName: "Source code (tar.gz)",
Size: 1,
Type: "file",
UpdateAt: item.CreatedAt,
CreateAt: item.CreatedAt,
Url: item.TarballUrl,
})
return files
}
}
return nil
}
func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
if m.OtherFile == nil || refresh {
resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")

View File

@@ -27,14 +27,6 @@ import (
// do others that not defined in Driver interface
// Google Drive API field constants
const (
// File list query fields
FilesListFields = "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken"
// Single file query fields
FileInfoFields = "id,name,mimeType,size,md5Checksum,sha1Checksum,sha256Checksum"
)
type googleDriveServiceAccount struct {
// Type string `json:"type"`
// ProjectID string `json:"project_id"`
@@ -243,7 +235,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
}
query := map[string]string{
"orderBy": orderBy,
"fields": FilesListFields,
"fields": "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
"pageSize": "1000",
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
//"includeItemsFromAllDrives": "true",
@@ -257,82 +249,11 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
return nil, err
}
pageToken = resp.NextPageToken
// Batch process shortcuts, API calls only for file shortcuts
shortcutTargetIds := make([]string, 0)
shortcutIndices := make([]int, 0)
// Collect target IDs of all file shortcuts (skip folder shortcuts)
for i := range resp.Files {
if resp.Files[i].MimeType == "application/vnd.google-apps.shortcut" &&
resp.Files[i].ShortcutDetails.TargetId != "" &&
resp.Files[i].ShortcutDetails.TargetMimeType != "application/vnd.google-apps.folder" {
shortcutTargetIds = append(shortcutTargetIds, resp.Files[i].ShortcutDetails.TargetId)
shortcutIndices = append(shortcutIndices, i)
}
}
// Batch get target file info (only for file shortcuts)
if len(shortcutTargetIds) > 0 {
targetFiles := d.batchGetTargetFilesInfo(shortcutTargetIds)
// Update shortcut file info
for j, targetId := range shortcutTargetIds {
if targetFile, exists := targetFiles[targetId]; exists {
fileIndex := shortcutIndices[j]
if targetFile.Size != "" {
resp.Files[fileIndex].Size = targetFile.Size
}
if targetFile.MD5Checksum != "" {
resp.Files[fileIndex].MD5Checksum = targetFile.MD5Checksum
}
if targetFile.SHA1Checksum != "" {
resp.Files[fileIndex].SHA1Checksum = targetFile.SHA1Checksum
}
if targetFile.SHA256Checksum != "" {
resp.Files[fileIndex].SHA256Checksum = targetFile.SHA256Checksum
}
}
}
}
res = append(res, resp.Files...)
}
return res, nil
}
// getTargetFileInfo gets target file details for shortcuts
func (d *GoogleDrive) getTargetFileInfo(targetId string) (File, error) {
var targetFile File
url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s", targetId)
query := map[string]string{
"fields": FileInfoFields,
}
_, err := d.request(url, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &targetFile)
if err != nil {
return File{}, err
}
return targetFile, nil
}
// batchGetTargetFilesInfo batch gets target file info, sequential processing to avoid concurrency complexity
func (d *GoogleDrive) batchGetTargetFilesInfo(targetIds []string) map[string]File {
if len(targetIds) == 0 {
return make(map[string]File)
}
result := make(map[string]File)
// Sequential processing to avoid concurrency complexity
for _, targetId := range targetIds {
file, err := d.getTargetFileInfo(targetId)
if err == nil {
result[targetId] = file
}
}
return result
}
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
defaultChunkSize := d.ChunkSize * 1024 * 1024
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)

View File

@@ -1,16 +0,0 @@
//go:build !windows && !plan9 && !netbsd && !aix && !illumos && !solaris && !js
package local
import (
"os"
"path/filepath"
"syscall"
)
func copyNamedPipe(dstPath string, mode os.FileMode, dirMode os.FileMode) error {
if err := os.MkdirAll(filepath.Dir(dstPath), dirMode); err != nil {
return err
}
return syscall.Mkfifo(dstPath, uint32(mode))
}

View File

@@ -1,9 +0,0 @@
//go:build windows || plan9 || netbsd || aix || illumos || solaris || js
package local
import "os"
func copyNamedPipe(_ string, _, _ os.FileMode) error {
return nil
}

View File

@@ -23,6 +23,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/OpenListTeam/times"
cp "github.com/otiai10/copy"
log "github.com/sirupsen/logrus"
_ "golang.org/x/image/webp"
)
@@ -296,9 +297,16 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
err := os.Rename(srcPath, dstPath)
if isCrossDeviceError(err) {
// cross-device move; convert it into a move task
return errs.NotImplement
if err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
// cross-device move: copy first, then delete
if err := d.Copy(ctx, srcObj, dstDir); err != nil {
return err
}
// after the copy succeeds, delete the source file/folder directly
if srcObj.IsDir() {
return os.RemoveAll(srcObj.GetPath())
}
return os.Remove(srcObj.GetPath())
}
if err == nil {
srcParent := filepath.Dir(srcPath)
@@ -339,14 +347,15 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
if utils.IsSubPath(srcPath, dstPath) {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
info, err := os.Lstat(srcPath)
// Copy using otiai10/copy to perform more secure & efficient copy
err := cp.Copy(srcPath, dstPath, cp.Options{
Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
PreserveTimes: true,
PreserveOwner: true,
})
if err != nil {
return err
}
// copying a regular file returns errs.NotImplement, converting it into a copy task
if err = d.tryCopy(srcPath, dstPath, info); err != nil {
return err
}
if d.directoryMap.Has(filepath.Dir(dstPath)) {
d.directoryMap.UpdateDirSize(filepath.Dir(dstPath))

View File

@@ -3,7 +3,6 @@ package local
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
@@ -15,9 +14,7 @@ import (
"strings"
"sync"
"github.com/KarpelesLab/reflink"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/disintegration/imaging"
@@ -151,7 +148,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
return nil, nil, err
}
if d.ThumbCacheFolder != "" {
err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0o666)
err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0666)
if err != nil {
return nil, nil, err
}
@@ -408,79 +405,3 @@ func (m *DirectoryMap) DeleteDirNode(dirname string) error {
return nil
}
func (d *Local) tryCopy(srcPath, dstPath string, info os.FileInfo) error {
if info.Mode()&os.ModeDevice != 0 {
return errors.New("cannot copy a device")
} else if info.Mode()&os.ModeSymlink != 0 {
return d.copySymlink(srcPath, dstPath)
} else if info.Mode()&os.ModeNamedPipe != 0 {
return copyNamedPipe(dstPath, info.Mode(), os.FileMode(d.mkdirPerm))
} else if info.IsDir() {
return d.recurAndTryCopy(srcPath, dstPath)
} else {
return tryReflinkCopy(srcPath, dstPath)
}
}
func (d *Local) copySymlink(srcPath, dstPath string) error {
linkOrig, err := os.Readlink(srcPath)
if err != nil {
return err
}
dstDir := filepath.Dir(dstPath)
if !filepath.IsAbs(linkOrig) {
srcDir := filepath.Dir(srcPath)
rel, err := filepath.Rel(dstDir, srcDir)
if err != nil {
rel, err = filepath.Abs(srcDir)
}
if err != nil {
return err
}
linkOrig = filepath.Clean(filepath.Join(rel, linkOrig))
}
err = os.MkdirAll(dstDir, os.FileMode(d.mkdirPerm))
if err != nil {
return err
}
return os.Symlink(linkOrig, dstPath)
}
func (d *Local) recurAndTryCopy(srcPath, dstPath string) error {
err := os.MkdirAll(dstPath, os.FileMode(d.mkdirPerm))
if err != nil {
return err
}
files, err := readDir(srcPath)
if err != nil {
return err
}
for _, f := range files {
if !f.IsDir() {
sp := filepath.Join(srcPath, f.Name())
dp := filepath.Join(dstPath, f.Name())
if err = d.tryCopy(sp, dp, f); err != nil {
return err
}
}
}
for _, f := range files {
if f.IsDir() {
sp := filepath.Join(srcPath, f.Name())
dp := filepath.Join(dstPath, f.Name())
if err = d.recurAndTryCopy(sp, dp); err != nil {
return err
}
}
}
return nil
}
func tryReflinkCopy(srcPath, dstPath string) error {
err := reflink.Always(srcPath, dstPath)
if errors.Is(err, reflink.ErrReflinkUnsupported) || errors.Is(err, reflink.ErrReflinkFailed) || isCrossDeviceError(err) {
return errs.NotImplement
}
return err
}

View File

@@ -3,13 +3,11 @@
package local
import (
"errors"
"io/fs"
"strings"
"syscall"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"golang.org/x/sys/unix"
)
func isHidden(f fs.FileInfo, _ string) bool {
@@ -29,7 +27,3 @@ func getDiskUsage(path string) (model.DiskUsage, error) {
FreeSpace: free,
}, nil
}
func isCrossDeviceError(err error) bool {
return errors.Is(err, unix.EXDEV)
}

View File

@@ -49,7 +49,3 @@ func getDiskUsage(path string) (model.DiskUsage, error) {
FreeSpace: freeBytes,
}, nil
}
func isCrossDeviceError(err error) bool {
return errors.Is(err, windows.ERROR_NOT_SAME_DEVICE)
}

View File

@@ -57,22 +57,18 @@ func setBody(body interface{}) base.ReqCallback {
}
func handleFolderId(dir model.Obj) interface{} {
if isRootFolder(dir) {
return nil // Root folder doesn't need folderId
if dir.GetID() == "" {
return nil
}
return dir.GetID()
}
func isRootFolder(dir model.Obj) bool {
return dir.GetID() == ""
}
// API layer methods
func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
var files []MFile
var body map[string]string
if !isRootFolder(dir) {
if dir.GetPath() != "/" {
body = map[string]string{"folderId": dir.GetID()}
} else {
body = map[string]string{}
@@ -89,7 +85,7 @@ func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) {
var folders []MFolder
var body map[string]string
if !isRootFolder(dir) {
if dir.GetPath() != "/" {
body = map[string]string{"folderId": dir.GetID()}
} else {
body = map[string]string{}
@@ -201,24 +197,16 @@ func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileSt
Reader: stream,
UpdateProgress: up,
})
// Build form data, only add folderId if not root folder
formData := map[string]string{
"name": stream.GetName(),
"comment": "",
"isSensitive": "false",
"force": "false",
}
folderId := handleFolderId(dstDir)
if folderId != nil {
formData["folderId"] = folderId.(string)
}
req := base.RestyClient.R().
SetContext(ctx).
SetFileReader("file", stream.GetName(), reader).
SetFormData(formData).
SetFormData(map[string]string{
"folderId": handleFolderId(dstDir).(string),
"name": stream.GetName(),
"comment": "",
"isSensitive": "false",
"force": "false",
}).
SetResult(&file).
SetAuthToken(d.AccessToken)
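
The guarded form matters here because handleFolderId returns nil for the root folder, so the variant that calls handleFolderId(dstDir).(string) unconditionally panics whenever the destination is root. A two-line illustration of that failure mode:

var folderId interface{} // nil for the root folder
_ = folderId.(string)    // panics: interface conversion: interface {} is nil, not string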

View File

@@ -236,19 +236,4 @@ func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error
}, nil
}
func (d *Onedrive) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
// GetDirectUploadInfo returns the direct upload info for OneDrive
func (d *Onedrive) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
}
var _ driver.Driver = (*Onedrive)(nil)

View File

@@ -7,19 +7,18 @@ import (
type Addition struct {
driver.RootPath
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
IsSharepoint bool `json:"is_sharepoint"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
IsSharepoint bool `json:"is_sharepoint"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
}
var config = driver.Config{

View File

@@ -133,7 +133,7 @@ func (d *Onedrive) _refreshToken() error {
return nil
}
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
if d.ref != nil {
return d.ref.Request(url, method, callback, resp)
}
@@ -152,7 +152,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback,
return nil, err
}
if e.Error.Code != "" {
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
if e.Error.Code == "InvalidAuthenticationToken" {
err = d.refreshToken()
if err != nil {
return nil, err
@@ -310,36 +310,9 @@ func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
var resp DriveResp
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp, true)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Onedrive) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
// Create upload session
url := d.GetMetaUrl(false, path) + "/createUploadSession"
metadata := map[string]any{
"item": map[string]any{
"@microsoft.graph.conflictBehavior": "rename",
},
}
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
if err != nil {
return nil, err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
if uploadUrl == "" {
return nil, fmt.Errorf("failed to get upload URL from response")
}
return &model.HttpDirectUploadInfo{
UploadURL: uploadUrl,
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
Method: "PUT",
}, nil
}
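
getDirectUploadInfo creates a Microsoft Graph upload session and returns its uploadUrl to the client. Graph's resumable-upload contract then expects each chunk to be PUT with a Content-Range header; a minimal sketch of one chunk upload against that URL (helper name is illustrative):

func putChunk(ctx context.Context, uploadURL string, chunk []byte, start, total int64) error {
	end := start + int64(len(chunk)) - 1
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadURL, bytes.NewReader(chunk))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, total))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 { // 202 for intermediate chunks, 200/201 for the last
		return fmt.Errorf("chunk upload failed: %s", resp.Status)
	}
	return nil
}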

View File

@@ -222,18 +222,4 @@ func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, er
}, nil
}
func (d *OnedriveAPP) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
func (d *OnedriveAPP) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
}
var _ driver.Driver = (*OnedriveAPP)(nil)

View File

@@ -7,15 +7,14 @@ import (
type Addition struct {
driver.RootPath
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
TenantID string `json:"tenant_id"`
Email string `json:"email"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
TenantID string `json:"tenant_id"`
Email string `json:"email"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
}
var config = driver.Config{

View File

@@ -88,7 +88,7 @@ func (d *OnedriveAPP) _accessToken() error {
return nil
}
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if callback != nil {
@@ -104,7 +104,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallba
return nil, err
}
if e.Error.Code != "" {
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
if e.Error.Code == "InvalidAuthenticationToken" {
err = d.accessToken()
if err != nil {
return nil, err
@@ -216,36 +216,9 @@ func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
var resp DriveResp
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp, true)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *OnedriveAPP) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
// Create upload session
url := d.GetMetaUrl(false, path) + "/createUploadSession"
metadata := map[string]any{
"item": map[string]any{
"@microsoft.graph.conflictBehavior": "rename",
},
}
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
if err != nil {
return nil, err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
if uploadUrl == "" {
return nil, fmt.Errorf("failed to get upload URL from response")
}
return &model.HttpDirectUploadInfo{
UploadURL: uploadUrl,
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
Method: "PUT",
}, nil
}

View File

@@ -26,6 +26,11 @@ type OpenList struct {
}
func (d *OpenList) Config() driver.Config {
if d.PassUAToUpsteam {
c := config
c.LinkCacheType = 2 // add User-Agent to cache key
return c
}
return config
}
@@ -110,29 +115,19 @@ func (d *OpenList) List(ctx context.Context, dir model.Obj, args model.ListArgs)
func (d *OpenList) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var resp common.Resp[FsGetResp]
headers := map[string]string{
"User-Agent": base.UserAgent,
}
// if PassUAToUpsteam is true, then pass the user-agent to the upstream
userAgent := base.UserAgent
if d.PassUAToUpsteam {
userAgent := args.Header.Get("user-agent")
if userAgent != "" {
headers["User-Agent"] = userAgent
}
}
// if PassIPToUpsteam is true, then pass the ip address to the upstream
if d.PassIPToUpsteam {
ip := args.IP
if ip != "" {
headers["X-Forwarded-For"] = ip
headers["X-Real-Ip"] = ip
userAgent = args.Header.Get("user-agent")
if userAgent == "" {
userAgent = base.UserAgent
}
}
_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(FsGetReq{
Path: file.GetPath(),
Password: d.MetaPassword,
}).SetHeaders(headers)
}).SetHeader("user-agent", userAgent)
})
if err != nil {
return nil, err
@@ -360,21 +355,13 @@ func (d *OpenList) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
Name: []string{name},
PutIntoNewDir: args.PutIntoNewDir,
SrcDir: dir,
Overwrite: args.Overwrite,
})
})
return err
}
func (d *OpenList) ResolveLinkCacheMode(_ string) driver.LinkCacheMode {
var mode driver.LinkCacheMode
if d.PassIPToUpsteam {
mode |= driver.LinkCacheIP
}
if d.PassUAToUpsteam {
mode |= driver.LinkCacheUA
}
return mode
}
//func (d *OpenList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*OpenList)(nil)

View File

@@ -12,7 +12,6 @@ type Addition struct {
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
PassIPToUpsteam bool `json:"pass_ip_to_upsteam" default:"true"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}
@@ -23,7 +22,6 @@ var config = driver.Config{
DefaultRoot: "/",
CheckStatus: true,
ProxyRangeOption: true,
LinkCacheMode: driver.LinkCacheAuto,
}
func init() {

View File

@@ -167,5 +167,4 @@ type DecompressReq struct {
Name []string `json:"name"`
PutIntoNewDir bool `json:"put_into_new_dir"`
SrcDir string `json:"src_dir"`
Overwrite bool `json:"overwrite"`
}

View File

@@ -190,7 +190,9 @@ func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkA
expiration := time.Minute
return &model.Link{
RangeReader: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
},
ContentLength: size,
Expiration: &expiration,
}, nil
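
For context, model.FileRangeReader appears to be a marker wrapper signalling that the io.ReadCloser produced by the range reader also behaves like a full file (Read/ReadAt/Seek), per the comment at the end of this diff. A simplified, self-contained sketch of the shape; these are illustrative definitions, not the project's exact ones:

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// Range describes a byte window; Length == -1 means "to EOF".
type Range struct{ Start, Length int64 }

// RangeReaderIF serves arbitrary byte ranges of some remote object.
type RangeReaderIF interface {
	RangeRead(ctx context.Context, r Range) (io.ReadCloser, error)
}

// RangeReaderFunc adapts a plain function to RangeReaderIF.
type RangeReaderFunc func(ctx context.Context, r Range) (io.ReadCloser, error)

func (f RangeReaderFunc) RangeRead(ctx context.Context, r Range) (io.ReadCloser, error) {
	return f(ctx, r)
}

// FileRangeReader is a thin wrapper that promises the returned
// io.ReadCloser is also a full file view (supports ReadAt/Seek).
type FileRangeReader struct{ RangeReaderIF }

func main() {
	data := "hello, range reader"
	rr := FileRangeReader{RangeReaderIF: RangeReaderFunc(
		func(_ context.Context, r Range) (io.ReadCloser, error) {
			end := int64(len(data))
			if r.Length >= 0 && r.Start+r.Length < end {
				end = r.Start + r.Length
			}
			return io.NopCloser(strings.NewReader(data[r.Start:end])), nil
		})}
	rc, _ := rr.RangeRead(context.Background(), Range{Start: 7, Length: -1})
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b)) // "range reader"
}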

View File

@@ -217,10 +217,11 @@ func (d *QuarkOrUC) GetDetails(ctx context.Context) (*model.StorageDetails, erro
if err != nil {
return nil, err
}
used := memberInfo.Data.UseCapacity
total := memberInfo.Data.TotalCapacity
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
DiskUsage: model.DiskUsage{
TotalSpace: memberInfo.Data.TotalCapacity,
FreeSpace: memberInfo.Data.TotalCapacity - memberInfo.Data.UseCapacity,
},
}, nil
}
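
Both variants encode the same information; the removed driver.DiskUsageFromUsedAndTotal presumably derives free space as total minus used. A minimal sketch of such a helper, assuming unsigned byte counts:

package main

import "fmt"

type DiskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

// diskUsageFromUsedAndTotal mirrors the removed helper's apparent
// semantics: free = total - used, guarding against used > total.
func diskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
	free := uint64(0)
	if total > used {
		free = total - used
	}
	return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
	fmt.Printf("%+v\n", diskUsageFromUsedAndTotal(30<<30, 100<<30))
}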

View File

@@ -10,7 +10,6 @@ import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
@@ -25,10 +24,9 @@ import (
type S3 struct {
model.Storage
Addition
Session *session.Session
client *s3.S3
linkClient *s3.S3
directUploadClient *s3.S3
Session *session.Session
client *s3.S3
linkClient *s3.S3
config driver.Config
cron *cron.Cron
@@ -54,18 +52,16 @@ func (d *S3) Init(ctx context.Context) error {
if err != nil {
log.Errorln("Doge init session error:", err)
}
d.client = d.getClient(ClientTypeNormal)
d.linkClient = d.getClient(ClientTypeLink)
d.directUploadClient = d.getClient(ClientTypeDirectUpload)
d.client = d.getClient(false)
d.linkClient = d.getClient(true)
})
}
err := d.initSession()
if err != nil {
return err
}
d.client = d.getClient(ClientTypeNormal)
d.linkClient = d.getClient(ClientTypeLink)
d.directUploadClient = d.getClient(ClientTypeDirectUpload)
d.client = d.getClient(false)
d.linkClient = d.getClient(true)
return nil
}
@@ -214,33 +210,4 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
return err
}
func (d *S3) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
func (d *S3) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
path := getKey(stdpath.Join(dstDir.GetPath(), fileName), false)
req, _ := d.directUploadClient.PutObjectRequest(&s3.PutObjectInput{
Bucket: &d.Bucket,
Key: &path,
})
if req == nil {
return nil, fmt.Errorf("failed to create PutObject request")
}
link, err := req.Presign(time.Hour * time.Duration(d.SignURLExpire))
if err != nil {
return nil, err
}
return &model.HttpDirectUploadInfo{
UploadURL: link,
Method: "PUT",
}, nil
}
var _ driver.Driver = (*S3)(nil)
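
The removed direct-upload path presigns a plain S3 PutObject request so clients can upload without proxying bytes through the server. A sketch of a client consuming such an HttpDirectUploadInfo (the struct removed later in this diff; ChunkSize handling is omitted here and the URL is illustrative):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// HttpDirectUploadInfo mirrors the struct removed later in this diff.
type HttpDirectUploadInfo struct {
	UploadURL string
	Method    string
	Headers   map[string]string
}

// directUpload PUTs the payload straight to the presigned URL.
func directUpload(info HttpDirectUploadInfo, payload []byte) error {
	method := info.Method
	if method == "" {
		method = http.MethodPut
	}
	req, err := http.NewRequest(method, info.UploadURL, bytes.NewReader(payload))
	if err != nil {
		return err
	}
	for k, v := range info.Headers {
		req.Header.Set(k, v)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	info := HttpDirectUploadInfo{UploadURL: "https://example-bucket.s3.amazonaws.com/key?X-Amz-Signature=abc"}
	fmt.Println("would PUT to:", info.UploadURL)
	_ = directUpload // call with a real presigned URL to perform the upload
}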

View File

@@ -21,8 +21,6 @@ type Addition struct {
ListObjectVersion string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
RemoveBucket bool `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
AddFilenameToDisposition bool `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false"`
DirectUploadHost string `json:"direct_upload_host" required:"false"`
}
func init() {

View File

@@ -41,15 +41,9 @@ func (d *S3) initSession() error {
return err
}
const (
ClientTypeNormal = iota
ClientTypeLink
ClientTypeDirectUpload
)
func (d *S3) getClient(clientType int) *s3.S3 {
func (d *S3) getClient(link bool) *s3.S3 {
client := s3.New(d.Session)
if clientType == ClientTypeLink && d.CustomHost != "" {
if link && d.CustomHost != "" {
client.Handlers.Build.PushBack(func(r *request.Request) {
if r.HTTPRequest.Method != http.MethodGet {
return
@@ -64,20 +58,6 @@ func (d *S3) getClient(clientType int) *s3.S3 {
}
})
}
if clientType == ClientTypeDirectUpload && d.DirectUploadHost != "" {
client.Handlers.Build.PushBack(func(r *request.Request) {
if r.HTTPRequest.Method != http.MethodPut {
return
}
split := strings.SplitN(d.DirectUploadHost, "://", 2)
if utils.SliceContains([]string{"http", "https"}, split[0]) {
r.HTTPRequest.URL.Scheme = split[0]
r.HTTPRequest.URL.Host = split[1]
} else {
r.HTTPRequest.URL.Host = d.DirectUploadHost
}
})
}
return client
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
log "github.com/sirupsen/logrus"
)
type Strm struct {
@@ -41,9 +40,6 @@ func (d *Strm) Init(ctx context.Context) error {
if d.Paths == "" {
return errors.New("paths is required")
}
if d.SaveStrmToLocal && len(d.SaveStrmLocalPath) <= 0 {
return errors.New("SaveStrmLocalPath is required")
}
d.pathMap = make(map[string][]string)
for _, path := range strings.Split(d.Paths, "\n") {
path = strings.TrimSpace(path)
@@ -52,13 +48,6 @@ func (d *Strm) Init(ctx context.Context) error {
}
k, v := getPair(path)
d.pathMap[k] = append(d.pathMap[k], v)
if d.SaveStrmToLocal {
err := InsertStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
if err != nil {
log.Errorf("insert strmTrie error: %v", err)
continue
}
}
}
if len(d.pathMap) == 1 {
for k := range d.pathMap {
@@ -70,52 +59,26 @@ func (d *Strm) Init(ctx context.Context) error {
d.autoFlatten = false
}
var supportTypes []string
if d.FilterFileTypes == "" {
d.FilterFileTypes = "mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac"
}
supportTypes = strings.Split(d.FilterFileTypes, ",")
d.supportSuffix = map[string]struct{}{}
for _, ext := range supportTypes {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.supportSuffix[ext] = struct{}{}
}
}
var downloadTypes []string
if d.DownloadFileTypes == "" {
d.DownloadFileTypes = "ass,srt,vtt,sub,strm"
}
downloadTypes = strings.Split(d.DownloadFileTypes, ",")
d.downloadSuffix = map[string]struct{}{}
for _, ext := range downloadTypes {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.downloadSuffix[ext] = struct{}{}
}
}
if d.Version != 5 {
types := strings.Split("mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac", ",")
d.supportSuffix = supportSuffix()
if d.FilterFileTypes != "" {
types := strings.Split(d.FilterFileTypes, ",")
for _, ext := range types {
if _, ok := d.supportSuffix[ext]; !ok {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.supportSuffix[ext] = struct{}{}
supportTypes = append(supportTypes, ext)
}
}
d.FilterFileTypes = strings.Join(supportTypes, ",")
}
types = strings.Split("ass,srt,vtt,sub,strm", ",")
for _, ext := range types {
if _, ok := d.downloadSuffix[ext]; !ok {
d.downloadSuffix = downloadSuffix()
if d.DownloadFileTypes != "" {
downloadTypes := strings.Split(d.DownloadFileTypes, ",")
for _, ext := range downloadTypes {
ext = strings.ToLower(strings.TrimSpace(ext))
if ext != "" {
d.downloadSuffix[ext] = struct{}{}
downloadTypes = append(downloadTypes, ext)
}
}
d.DownloadFileTypes = strings.Join(downloadTypes, ",")
d.PathPrefix = "/d"
d.Version = 5
}
return nil
}
@@ -124,9 +87,6 @@ func (d *Strm) Drop(ctx context.Context) error {
d.pathMap = nil
d.downloadSuffix = nil
d.supportSuffix = nil
for _, path := range strings.Split(d.Paths, "\n") {
RemoveStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
}
return nil
}

View File

@@ -1,175 +0,0 @@
package strm
import (
"context"
"errors"
"os"
stdpath "path"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
log "github.com/sirupsen/logrus"
"github.com/tchap/go-patricia/v2/patricia"
)
var strmTrie = patricia.NewTrie()
func UpdateLocalStrm(ctx context.Context, path string, objs []model.Obj) {
path = utils.FixAndCleanPath(path)
updateLocal := func(driver *Strm, basePath string, objs []model.Obj) {
relParent := strings.TrimPrefix(basePath, driver.MountPath)
localParentPath := stdpath.Join(driver.SaveStrmLocalPath, relParent)
for _, obj := range objs {
localPath := stdpath.Join(localParentPath, obj.GetName())
generateStrm(ctx, driver, obj, localPath)
}
deleteExtraFiles(localParentPath, objs)
}
_ = strmTrie.VisitPrefixes(patricia.Prefix(path), func(needPathPrefix patricia.Prefix, item patricia.Item) error {
strmDrivers := item.([]*Strm)
needPath := string(needPathPrefix)
restPath := strings.TrimPrefix(path, needPath)
if len(restPath) > 0 && restPath[0] != '/' {
return nil
}
for _, strmDriver := range strmDrivers {
strmObjs := strmDriver.convert2strmObjs(ctx, path, objs)
updateLocal(strmDriver, stdpath.Join(stdpath.Base(needPath), restPath), strmObjs)
}
return nil
})
}
func InsertStrm(dstPath string, d *Strm) error {
prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
existing := strmTrie.Get(prefix)
if existing == nil {
if !strmTrie.Insert(prefix, []*Strm{d}) {
return errors.New("failed to insert strm")
}
return nil
}
if lst, ok := existing.([]*Strm); ok {
strmTrie.Set(prefix, append(lst, d))
} else {
return errors.New("invalid trie item type")
}
return nil
}
func RemoveStrm(dstPath string, d *Strm) {
prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
existing := strmTrie.Get(prefix)
if existing == nil {
return
}
lst, ok := existing.([]*Strm)
if !ok {
return
}
if len(lst) == 1 && lst[0] == d {
strmTrie.Delete(prefix)
return
}
for i, di := range lst {
if di == d {
newList := append(lst[:i], lst[i+1:]...)
strmTrie.Set(prefix, newList)
return
}
}
}
func generateStrm(ctx context.Context, driver *Strm, obj model.Obj, localPath string) {
if obj.IsDir() {
err := utils.CreateNestedDirectory(localPath)
if err != nil {
log.Warnf("failed to generate strm dir %s: failed to create dir: %v", localPath, err)
return
}
} else {
link, err := driver.Link(ctx, obj, model.LinkArgs{})
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to link: %v", localPath, err)
return
}
defer link.Close()
size := link.ContentLength
if size <= 0 {
size = obj.GetSize()
}
rrf, err := stream.GetRangeReaderFromLink(size, link)
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to get range reader: %v", localPath, err)
return
}
rc, err := rrf.RangeRead(ctx, http_range.Range{Length: -1})
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to read range: %v", localPath, err)
return
}
defer rc.Close()
file, err := utils.CreateNestedFile(localPath)
if err != nil {
log.Warnf("failed to generate strm of obj %s: failed to create local file: %v", localPath, err)
return
}
defer file.Close()
if _, err := utils.CopyWithBuffer(file, rc); err != nil {
log.Warnf("failed to generate strm of obj %s: copy failed: %v", localPath, err)
}
}
}
func deleteExtraFiles(localPath string, objs []model.Obj) {
localFiles, err := getLocalFiles(localPath)
if err != nil {
log.Errorf("Failed to read local files from %s: %v", localPath, err)
return
}
objsSet := make(map[string]struct{})
for _, obj := range objs {
if obj.IsDir() {
continue
}
objsSet[stdpath.Join(localPath, obj.GetName())] = struct{}{}
}
for _, localFile := range localFiles {
if _, exists := objsSet[localFile]; !exists {
err := os.Remove(localFile)
if err != nil {
log.Errorf("Failed to delete file: %s, error: %v\n", localFile, err)
} else {
log.Infof("Deleted file %s", localFile)
}
}
}
}
func getLocalFiles(localPath string) ([]string, error) {
var files []string
entries, err := os.ReadDir(localPath)
if err != nil {
return nil, err
}
for _, entry := range entries {
if !entry.IsDir() {
files = append(files, stdpath.Join(localPath, entry.Name()))
}
}
return files, nil
}
func init() {
op.RegisterObjsUpdateHook(UpdateLocalStrm)
}
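
The deleted file above leans on go-patricia's VisitPrefixes, which visits every stored prefix of the queried key; the restPath check then rejects matches that do not land on a path-segment boundary (e.g. /me as a string prefix of /media/movies). A condensed, runnable illustration of that boundary check using the same library:

package main

import (
	"fmt"
	"strings"

	"github.com/tchap/go-patricia/v2/patricia"
)

func main() {
	trie := patricia.NewTrie()
	trie.Insert(patricia.Prefix("/media"), "strm-driver-A")
	trie.Insert(patricia.Prefix("/me"), "strm-driver-B")

	path := "/media/movies"
	_ = trie.VisitPrefixes(patricia.Prefix(path), func(p patricia.Prefix, item patricia.Item) error {
		rest := strings.TrimPrefix(path, string(p))
		// Only accept matches on a path-segment boundary: "/me" is a
		// string prefix of "/media/movies" but not a directory prefix.
		if len(rest) > 0 && rest[0] != '/' {
			return nil
		}
		fmt.Printf("update %v for subpath %q\n", item, rest)
		return nil
	})
}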

View File

@@ -8,14 +8,10 @@ import (
type Addition struct {
Paths string `json:"paths" required:"true" type:"text"`
SiteUrl string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
PathPrefix string `json:"PathPrefix" type:"text" required:"false" default:"/d" help:"Path prefix"`
DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass,srt,vtt,sub,strm" required:"false" help:"Files need to download with strm (usually subtitles)"`
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac" required:"false" help:"Supports suffix name of strm file"`
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass" required:"false" help:"Files need to download with strm (usually subtitles)"`
EncodePath bool `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
WithoutUrl bool `json:"withoutUrl" default:"false" help:"strm file content without URL prefix"`
SaveStrmToLocal bool `json:"SaveStrmToLocal" default:"false" help:"save strm file locally"`
SaveStrmLocalPath string `json:"SaveStrmLocalPath" type:"text" help:"save strm file local path"`
Version int
LocalModel bool `json:"localModel" default:"false" help:"enable local mode"`
}
var config = driver.Config{

36
drivers/strm/types.go Normal file
View File

@@ -0,0 +1,36 @@
package strm
func supportSuffix() map[string]struct{} {
return map[string]struct{}{
// video
"mp4": {},
"mkv": {},
"flv": {},
"avi": {},
"wmv": {},
"ts": {},
"rmvb": {},
"webm": {},
// audio
"mp3": {},
"flac": {},
"aac": {},
"wav": {},
"ogg": {},
"m4a": {},
"wma": {},
"alac": {},
}
}
func downloadSuffix() map[string]struct{} {
return map[string]struct{}{
// strm
"strm": {},
// subtitles
"ass": {},
"srt": {},
"vtt": {},
"sub": {},
}
}

View File

@@ -3,6 +3,7 @@ package strm
import (
"context"
"fmt"
stdpath "path"
"strings"
@@ -57,10 +58,7 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
if err != nil {
return nil, err
}
return d.convert2strmObjs(ctx, reqPath, objs), nil
}
func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []model.Obj) []model.Obj {
var validObjs []model.Obj
for _, obj := range objs {
id, name, path := "", obj.GetName(), ""
@@ -68,13 +66,12 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
if !obj.IsDir() {
path = stdpath.Join(reqPath, obj.GetName())
ext := strings.ToLower(utils.Ext(name))
sourceExt := utils.SourceExt(name)
if _, ok := d.downloadSuffix[ext]; ok {
size = obj.GetSize()
} else if _, ok := d.supportSuffix[ext]; ok {
if _, ok := d.supportSuffix[ext]; ok {
id = "strm"
name = strings.TrimSuffix(name, sourceExt) + "strm"
name = strings.TrimSuffix(name, ext) + "strm"
size = int64(len(d.getLink(ctx, path)))
} else if _, ok := d.downloadSuffix[ext]; ok {
size = obj.GetSize()
} else {
continue
}
@@ -87,11 +84,13 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
thumb, ok := model.GetThumb(obj)
if !ok {
validObjs = append(validObjs, &objRes)
continue
}
validObjs = append(validObjs, &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
@@ -99,7 +98,7 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
},
})
}
return validObjs
return validObjs, nil
}
func (d *Strm) getLink(ctx context.Context, path string) string {
@@ -111,14 +110,7 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
signPath := sign.Sign(path)
finalPath = fmt.Sprintf("%s?sign=%s", finalPath, signPath)
}
pathPrefix := d.PathPrefix
if len(pathPrefix) > 0 {
finalPath = stdpath.Join(pathPrefix, finalPath)
}
if !strings.HasPrefix(finalPath, "/") {
finalPath = "/" + finalPath
}
if d.WithoutUrl {
if d.LocalModel {
return finalPath
}
apiUrl := d.SiteUrl
@@ -127,7 +119,8 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
} else {
apiUrl = common.GetApiUrl(ctx)
}
return fmt.Sprintf("%s%s",
return fmt.Sprintf("%s/d%s",
apiUrl,
finalPath)
}

View File

@@ -88,7 +88,7 @@ func (d *Terabox) request(rurl string, method string, callback base.ReqCallback,
return nil, err
}
errno := utils.Json.Get(res.Body(), "errno").ToInt()
if errno == 4000023 || errno == 450016 {
if errno == 4000023 || errno == 4500016 {
// reget jsToken
err = d.resetJsToken()
if err != nil {

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
@@ -69,7 +68,6 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
PackageName: "com.xunlei.downloadprovider",
UserAgent: "ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)",
DownloadUserAgent: "Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)",
Space: x.Space,
refreshCTokenCk: func(token string) {
x.CaptchaToken = token
op.MustSaveDriverStorage(x)
@@ -169,7 +167,6 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {
UserAgent: x.UserAgent,
DownloadUserAgent: x.DownloadUserAgent,
UseVideoUrl: x.UseVideoUrl,
Space: x.Space,
refreshCTokenCk: func(token string) {
x.CaptchaToken = token
@@ -284,7 +281,7 @@ func (xc *XunLeiCommon) Link(ctx context.Context, file model.Obj, args model.Lin
_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodGet, func(r *resty.Request) {
r.SetContext(ctx)
r.SetPathParam("fileID", file.GetID())
r.SetQueryParam("space", xc.Space)
//r.SetQueryParam("space", "")
}, &lFile)
if err != nil {
return nil, err
@@ -325,7 +322,6 @@ func (xc *XunLeiCommon) MakeDir(ctx context.Context, parentDir model.Obj, dirNam
"kind": FOLDER,
"name": dirName,
"parent_id": parentDir.GetID(),
"space": xc.Space,
})
}, nil)
return err
@@ -335,9 +331,8 @@ func (xc *XunLeiCommon) Move(ctx context.Context, srcObj, dstDir model.Obj) erro
_, err := xc.Request(FILE_API_URL+":batchMove", http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&base.Json{
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
"space": xc.Space,
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
})
}, nil)
return err
@@ -347,10 +342,7 @@ func (xc *XunLeiCommon) Rename(ctx context.Context, srcObj model.Obj, newName st
_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodPatch, func(r *resty.Request) {
r.SetContext(ctx)
r.SetPathParam("fileID", srcObj.GetID())
r.SetBody(&base.Json{
"name": newName,
"space": xc.Space,
})
r.SetBody(&base.Json{"name": newName})
}, nil)
return err
}
@@ -359,9 +351,8 @@ func (xc *XunLeiCommon) Copy(ctx context.Context, srcObj, dstDir model.Obj) erro
_, err := xc.Request(FILE_API_URL+":batchCopy", http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&base.Json{
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
"space": xc.Space,
"to": base.Json{"parent_id": dstDir.GetID()},
"ids": []string{srcObj.GetID()},
})
}, nil)
return err
@@ -371,7 +362,6 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
_, err := xc.Request(FILE_API_URL+"/{fileID}/trash", http.MethodPatch, func(r *resty.Request) {
r.SetContext(ctx)
r.SetPathParam("fileID", obj.GetID())
r.SetQueryParam("space", xc.Space)
r.SetBody("{}")
}, nil)
return err
@@ -397,7 +387,6 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
"size": file.GetSize(),
"hash": gcid,
"upload_type": UPLOAD_TYPE_RESUMABLE,
"space": xc.Space,
})
}, &resp)
if err != nil {
@@ -441,7 +430,7 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
_, err := xc.Request(FILE_API_URL, http.MethodGet, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"space": xc.Space,
"space": "",
"__type": "drive",
"refresh": "true",
"__sync": "true",
@@ -451,17 +440,6 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
"limit": "100",
"filters": `{"phase":{"eq":"PHASE_TYPE_COMPLETE"},"trashed":{"eq":false}}`,
})
// Fetch disk mount directories and similar paths
if xc.Space != "" {
r.SetQueryParamsFromValues(url.Values{
"with": []string{
"withCategoryDiskMountPath",
"withCategoryDriveCachePath",
"withCategoryHistoryDownloadPath",
"withReadOnlyFS",
},
})
}
}, &fileList)
if err != nil {
return nil, err
@@ -598,7 +576,6 @@ func (xc *XunLeiCommon) OfflineDownload(ctx context.Context, fileUrl string, par
"name": fileName,
"parent_id": parentDir.GetID(),
"upload_type": UPLOAD_TYPE_URL,
"space": xc.Space,
"url": base.Json{
"url": fileUrl,
},
@@ -625,7 +602,6 @@ func (xc *XunLeiCommon) OfflineList(ctx context.Context, nextPageToken string) (
"type": "offline",
"limit": "10000",
"page_token": nextPageToken,
"space": xc.Space,
})
}, &resp)
@@ -642,7 +618,6 @@ func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string
SetQueryParams(map[string]string{
"task_ids": strings.Join(taskIDs, ","),
"delete_files": strconv.FormatBool(deleteFiles),
"space": xc.Space,
})
}, nil)
if err != nil {

View File

@@ -46,8 +46,6 @@ type ExpertAddition struct {
// Prefer the video URL over the download URL
UseVideoUrl bool `json:"use_video_url"`
Space string `json:"space" default:"" help:"device id for remote device"`
}
// Login signature, used to decide whether to log in again
@@ -82,8 +80,6 @@ type Addition struct {
CreditKey string `json:"credit_key" help:"credit key,used for login"`
// Login device ID
DeviceID string `json:"device_id" default:""`
Space string `json:"space" default:"" help:"device id for remote device"`
}
// Login signature, used to decide whether to log in again
@@ -94,6 +90,7 @@ func (i *Addition) GetIdentity() string {
var config = driver.Config{
Name: "Thunder",
LocalSort: true,
OnlyProxy: true,
}
var configExpert = driver.Config{

View File

@@ -68,7 +68,6 @@ type Common struct {
UserAgent string
DownloadUserAgent string
UseVideoUrl bool
Space string
// Callback invoked when the captcha token is refreshed successfully
refreshCTokenCk func(token string)

View File

@@ -20,7 +20,6 @@ var config = driver.Config{
LocalSort: true,
NoCache: true,
CheckStatus: true,
OnlyIndices: true,
}
func init() {

12
go.mod
View File

@@ -5,11 +5,9 @@ go 1.23.4
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
github.com/KarpelesLab/reflink v1.0.2
github.com/KirCute/zip v1.0.1
github.com/OpenListTeam/go-cache v0.1.0
github.com/OpenListTeam/sftpd-openlist v1.0.1
github.com/OpenListTeam/tache v0.2.1
github.com/OpenListTeam/tache v0.2.0
github.com/OpenListTeam/times v0.1.0
github.com/OpenListTeam/wopan-sdk-go v0.1.5
github.com/ProtonMail/go-crypto v1.3.0
@@ -46,7 +44,7 @@ require (
github.com/henrybear327/go-proton-api v1.0.0
github.com/ipfs/go-ipfs-api v0.7.0
github.com/itsHenry35/gofakes3 v0.0.8
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
github.com/jlaffaye/ftp v0.2.1-0.20250831012827-3f092e051c94
github.com/json-iterator/go v1.1.12
github.com/kdomanski/iso9660 v0.4.0
github.com/maruel/natural v1.1.1
@@ -59,16 +57,18 @@ require (
github.com/pquerna/otp v1.5.0
github.com/quic-go/quic-go v0.54.1
github.com/rclone/rclone v1.70.3
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
github.com/shirou/gopsutil/v4 v4.25.5
github.com/sirupsen/logrus v1.9.3
github.com/spf13/afero v1.14.0
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
github.com/stretchr/testify v1.11.1
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5
github.com/tchap/go-patricia/v2 v2.3.3
github.com/u2takey/ffmpeg-go v0.5.0
github.com/upyun/go-sdk/v3 v3.0.4
github.com/winfsp/cgofuse v1.6.0
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
github.com/zzzhr1990/go-common-entity v0.0.0-20250202070650-1a200048f0d3
golang.org/x/crypto v0.40.0
golang.org/x/image v0.29.0
@@ -115,6 +115,7 @@ require (
github.com/minio/minlz v1.0.0 // indirect
github.com/minio/xxml v0.0.3 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/otiai10/mint v1.6.3 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/relvacode/iso8601 v1.6.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
@@ -256,6 +257,7 @@ require (
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/otiai10/copy v1.14.1
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect

24
go.sum
View File

@@ -39,10 +39,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
github.com/KarpelesLab/reflink v1.0.2 h1:hQ1aM3TmjU2kTNUx5p/HaobDoADYk+a6AuEinG4Cv88=
github.com/KarpelesLab/reflink v1.0.2/go.mod h1:WGkTOKNjd1FsJKBw3mu4JvrPEDJyJJ+JPtxBkbPoCok=
github.com/KirCute/zip v1.0.1 h1:L/tVZglOiDVKDi9Ud+fN49htgKdQ3Z0H80iX8OZk13c=
github.com/KirCute/zip v1.0.1/go.mod h1:xhF7dCB+Bjvy+5a56lenYCKBsH+gxDNPZSy5Cp+nlXk=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
@@ -55,8 +51,8 @@ github.com/OpenListTeam/gsync v0.1.0 h1:ywzGybOvA3lW8K1BUjKZ2IUlT2FSlzPO4DOazfYX
github.com/OpenListTeam/gsync v0.1.0/go.mod h1:h/Rvv9aX/6CdW/7B8di3xK3xNV8dUg45Fehrd/ksZ9s=
github.com/OpenListTeam/sftpd-openlist v1.0.1 h1:j4S3iPFOpnXCUKRPS7uCT4mF2VCl34GyqvH6lqwnkUU=
github.com/OpenListTeam/sftpd-openlist v1.0.1/go.mod h1:uO/wKnbvbdq3rBLmClMTZXuCnw7XW4wlAq4dZe91a40=
github.com/OpenListTeam/tache v0.2.1 h1:Uy/xAr05clHuMrr9+5fXAhv0Z5PGJivp4P5DnRez6cw=
github.com/OpenListTeam/tache v0.2.1/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
github.com/OpenListTeam/tache v0.2.0 h1:Q4MjuyECn0CZCf1ZF91JaVaZTaps1mOTAm8bFj8sr9Q=
github.com/OpenListTeam/tache v0.2.0/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
github.com/OpenListTeam/times v0.1.0 h1:qknxw+qj5CYKgXAwydA102UEpPcpU8TYNGRmwRyPYpg=
github.com/OpenListTeam/times v0.1.0/go.mod h1:Jx7qen5NCYzKk2w14YuvU48YYMcPa1P9a+EJePC15Pc=
github.com/OpenListTeam/wopan-sdk-go v0.1.5 h1:iKKcVzIqBgtGDbn0QbdWrCazSGxXFmYFyrnFBG+U8dI=
@@ -458,8 +454,8 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 h1:ZxO6Qr2GOXPdcW80Mcn3nemvilMPvpWqxrNfK2ZnNNs=
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3/go.mod h1:dvLUr/8Fs9a2OBrEnCC5duphbkz/k/mSy5OkXg3PAgI=
github.com/jlaffaye/ftp v0.2.1-0.20250831012827-3f092e051c94 h1:sBUrMD4Gx91zDgzTqPCr3FqFs2+3wWX7lyUYIP/isuA=
github.com/jlaffaye/ftp v0.2.1-0.20250831012827-3f092e051c94/go.mod h1:H1+whwD0Qe3YOunlXIWhh3rlvzW5cZfkMDYGQPg+KAM=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -587,6 +583,10 @@ github.com/ncw/swift/v2 v2.0.4 h1:hHWVFxn5/YaTWAASmn4qyq2p6OyP/Hm3vMLzkjEqR7w=
github.com/ncw/swift/v2 v2.0.4/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
github.com/nwaples/rardecode/v2 v2.1.1 h1:OJaYalXdliBUXPmC8CZGQ7oZDxzX1/5mQmgn0/GASew=
github.com/nwaples/rardecode/v2 v2.1.1/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw=
github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
@@ -639,6 +639,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
@@ -676,8 +678,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 h1:Sa+sR8aaAMFwxhXWENEnE6ZpqhZ9d7u1RT2722Rw6hc=
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5/go.mod h1:UdZiFUFu6e2WjjtjxivwXWcwc1N/8zgbkBR9QNucUOY=
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6KqyMNo//xk8B8o6zW9/RVmy1VamOs=
@@ -711,6 +713,8 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M=
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=

View File

@@ -4,7 +4,6 @@ import (
"io"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -22,7 +21,7 @@ func (RarDecoder) AcceptedExtensions() []string {
func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".part1.rar": {regexp.MustCompile("^.*\\.part(\\d+)\\.rar$"), 2},
".part1.rar": {".part%d.rar", 2},
}
}

View File

@@ -2,7 +2,6 @@ package sevenzip
import (
"io"
"regexp"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -19,7 +18,7 @@ func (SevenZip) AcceptedExtensions() []string {
func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".7z.001": {regexp.MustCompile("^.*\\.7z\\.(\\d+)$"), 2},
".7z.001": {".7z.%.3d", 2},
}
}

View File

@@ -2,14 +2,13 @@ package tool
import (
"io"
"regexp"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
)
type MultipartExtension struct {
PartFileFormat *regexp.Regexp
PartFileFormat string
SecondPartIndex int
}
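
After this change, part filenames are generated by fmt-style formatting instead of matched by regexp: PartFileFormat is a printf pattern for the numeric suffix and SecondPartIndex is the first number substituted. A small sketch of how the registered patterns would enumerate part names (inferred usage; the actual enumeration code is not shown in this diff):

package main

import "fmt"

// nextParts renders the first n part names after the initial volume,
// e.g. format ".zip.%.3d" starting at 2 -> .zip.002, .zip.003, ...
func nextParts(base, format string, start, n int) []string {
	parts := make([]string, 0, n)
	for i := start; i < start+n; i++ {
		parts = append(parts, base+fmt.Sprintf(format, i))
	}
	return parts
}

func main() {
	fmt.Println(nextParts("archive", ".z%.2d", 1, 3))    // [archive.z01 archive.z02 archive.z03]
	fmt.Println(nextParts("archive", ".zip.%.3d", 2, 2)) // [archive.zip.002 archive.zip.003]
	fmt.Println(nextParts("movie", ".part%d.rar", 2, 2)) // [movie.part2.rar movie.part3.rar]
}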

View File

@@ -4,15 +4,22 @@ import (
"bytes"
"io"
"io/fs"
stdpath "path"
"strings"
"github.com/KirCute/zip"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"golang.org/x/text/encoding/ianaindex"
"github.com/saintfish/chardet"
"github.com/yeka/zip"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/encoding/traditionalchinese"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/encoding/unicode/utf32"
"golang.org/x/text/transform"
)
@@ -30,11 +37,10 @@ func (r *WrapReader) Files() []tool.SubFile {
type WrapFileInfo struct {
fs.FileInfo
efs bool
}
func (f *WrapFileInfo) Name() string {
return decodeName(f.FileInfo.Name(), f.efs)
return decodeName(f.FileInfo.Name())
}
type WrapFile struct {
@@ -42,11 +48,11 @@ type WrapFile struct {
}
func (f *WrapFile) Name() string {
return decodeName(f.f.Name, isEFS(f.f.Flags))
return decodeName(f.f.Name)
}
func (f *WrapFile) FileInfo() fs.FileInfo {
return &WrapFileInfo{FileInfo: f.f.FileInfo(), efs: isEFS(f.f.Flags)}
return &WrapFileInfo{FileInfo: f.f.FileInfo()}
}
func (f *WrapFile) Open() (io.ReadCloser, error) {
@@ -61,33 +67,16 @@ func (f *WrapFile) SetPassword(password string) {
f.f.SetPassword(password)
}
func makePart(ss *stream.SeekableStream) (zip.SizeReaderAt, error) {
ra, err := stream.NewReadAtSeeker(ss, 0)
func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
// FIXME: Incorrect parsing method for standard multipart zip format
ss = append(ss[1:], ss[0])
}
reader, err := stream.NewMultiReaderAt(ss)
if err != nil {
return nil, err
}
return &inlineSizeReaderAt{ReaderAt: ra, size: ss.GetSize()}, nil
}
func (z *Zip) getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
if len(ss) > 1 && z.traditionalSecondPartRegExp.MatchString(ss[1].GetName()) {
ss = append(ss[1:], ss[0])
ras := make([]zip.SizeReaderAt, 0, len(ss))
for _, s := range ss {
ra, err := makePart(s)
if err != nil {
return nil, err
}
ras = append(ras, ra)
}
return zip.NewMultipartReader(ras)
} else {
reader, err := stream.NewMultiReaderAt(ss)
if err != nil {
return nil, err
}
return zip.NewReader(reader, reader.Size())
}
return zip.NewReader(reader, reader.Size())
}
func filterPassword(err error) error {
@@ -97,29 +86,110 @@ func filterPassword(err error) error {
return err
}
func decodeName(name string, efs bool) string {
if efs {
return name
}
enc, err := ianaindex.IANA.Encoding(setting.GetStr(conf.NonEFSZipEncoding))
func decodeName(name string) string {
b := []byte(name)
detector := chardet.NewTextDetector()
results, err := detector.DetectAll(b)
if err != nil {
return name
}
i := bytes.NewReader([]byte(name))
var ce, re, enc encoding.Encoding
for _, r := range results {
if r.Confidence > 30 {
ce = getCommonEncoding(r.Charset)
if ce != nil {
break
}
}
if re == nil {
re = getEncoding(r.Charset)
}
}
if ce != nil {
enc = ce
} else if re != nil {
enc = re
} else {
return name
}
i := bytes.NewReader(b)
decoder := transform.NewReader(i, enc.NewDecoder())
content, _ := io.ReadAll(decoder)
return string(content)
}
func isEFS(flags uint16) bool {
return (flags & 0x800) > 0
func getCommonEncoding(name string) (enc encoding.Encoding) {
switch name {
case "UTF-8":
enc = unicode.UTF8
case "UTF-16LE":
enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
case "Shift_JIS":
enc = japanese.ShiftJIS
case "GB-18030":
enc = simplifiedchinese.GB18030
case "EUC-KR":
enc = korean.EUCKR
case "Big5":
enc = traditionalchinese.Big5
default:
enc = nil
}
return
}
type inlineSizeReaderAt struct {
io.ReaderAt
size int64
}
func (i *inlineSizeReaderAt) Size() int64 {
return i.size
func getEncoding(name string) (enc encoding.Encoding) {
switch name {
case "UTF-8":
enc = unicode.UTF8
case "UTF-16BE":
enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
case "UTF-16LE":
enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
case "UTF-32BE":
enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM)
case "UTF-32LE":
enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM)
case "ISO-8859-1":
enc = charmap.ISO8859_1
case "ISO-8859-2":
enc = charmap.ISO8859_2
case "ISO-8859-3":
enc = charmap.ISO8859_3
case "ISO-8859-4":
enc = charmap.ISO8859_4
case "ISO-8859-5":
enc = charmap.ISO8859_5
case "ISO-8859-6":
enc = charmap.ISO8859_6
case "ISO-8859-7":
enc = charmap.ISO8859_7
case "ISO-8859-8":
enc = charmap.ISO8859_8
case "ISO-8859-8-I":
enc = charmap.ISO8859_8I
case "ISO-8859-9":
enc = charmap.ISO8859_9
case "windows-1251":
enc = charmap.Windows1251
case "windows-1256":
enc = charmap.Windows1256
case "KOI8-R":
enc = charmap.KOI8R
case "Shift_JIS":
enc = japanese.ShiftJIS
case "GB-18030":
enc = simplifiedchinese.GB18030
case "EUC-JP":
enc = japanese.EUCJP
case "EUC-KR":
enc = korean.EUCKR
case "Big5":
enc = traditionalchinese.Big5
case "ISO-2022-JP":
enc = japanese.ISO2022JP
default:
enc = nil
}
return
}
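
The new decodeName guesses the filename's charset with github.com/saintfish/chardet instead of honoring the zip EFS flag, trying high-confidence results against a short list of common encodings first and then any supported encoding. A standalone example of the detect-then-decode pipeline used here:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/saintfish/chardet"
	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"
)

func main() {
	// "你好.txt" encoded as GBK bytes, as a legacy zip might store it.
	raw := []byte{0xc4, 0xe3, 0xba, 0xc3, '.', 't', 'x', 't'}

	result, err := chardet.NewTextDetector().DetectBest(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println("detected:", result.Charset, "confidence:", result.Confidence)

	// Decode through the matching encoding (GB18030 is a superset of GBK).
	r := transform.NewReader(bytes.NewReader(raw), simplifiedchinese.GB18030.NewDecoder())
	decoded, _ := io.ReadAll(r)
	fmt.Println("decoded:", string(decoded)) // 你好.txt
}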

View File

@@ -3,7 +3,6 @@ package zip
import (
"io"
stdpath "path"
"regexp"
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -13,39 +12,34 @@ import (
)
type Zip struct {
traditionalSecondPartRegExp *regexp.Regexp
}
func (z *Zip) AcceptedExtensions() []string {
func (Zip) AcceptedExtensions() []string {
return []string{}
}
func (z *Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".zip": {regexp.MustCompile("^.*\\.z(\\d+)$"), 1},
".zip.001": {regexp.MustCompile("^.*\\.zip\\.(\\d+)$"), 2},
".zip": {".z%.2d", 1},
".zip.001": {".zip.%.3d", 2},
}
}
func (z *Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
zipReader, err := z.getReader(ss)
func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, err
}
efs := true
if len(zipReader.File) > 0 {
efs = isEFS(zipReader.File[0].Flags)
}
encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
return &model.ArchiveMetaInfo{
Comment: decodeName(zipReader.Comment, efs),
Comment: zipReader.Comment,
Encrypted: encrypted,
Tree: tree,
}, nil
}
func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
zipReader, err := z.getReader(ss)
func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, err
}
@@ -63,7 +57,7 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
_ = rc.Close()
passVerified = true
}
name := strings.TrimSuffix(decodeName(file.Name, isEFS(file.Flags)), "/")
name := strings.TrimSuffix(decodeName(file.Name), "/")
if strings.Contains(name, "/") {
// Some archives do not compress the first folder
strs := strings.Split(name, "/")
@@ -76,7 +70,7 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
}
continue
}
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo(), efs: isEFS(file.Flags)}))
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
}
if len(ret) == 0 && dir != nil {
ret = append(ret, dir)
@@ -87,13 +81,13 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
ret := make([]model.Obj, 0)
exist := false
for _, file := range zipReader.File {
name := decodeName(file.Name, isEFS(file.Flags))
name := decodeName(file.Name)
dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
if dir != innerPath {
continue
}
exist = true
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo(), isEFS(file.Flags)}))
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
}
if !exist {
return nil, errs.ObjectNotFound
@@ -102,14 +96,14 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
}
}
func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
zipReader, err := z.getReader(ss)
func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, 0, err
}
innerPath := strings.TrimPrefix(args.InnerPath, "/")
for _, file := range zipReader.File {
if decodeName(file.Name, isEFS(file.Flags)) == innerPath {
if decodeName(file.Name) == innerPath {
if file.IsEncrypted() {
file.SetPassword(args.Password)
}
@@ -123,8 +117,8 @@ func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs)
return nil, 0, errs.ObjectNotFound
}
func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
zipReader, err := z.getReader(ss)
func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
zipReader, err := getReader(ss)
if err != nil {
return err
}
@@ -134,7 +128,5 @@ func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args mo
var _ tool.Tool = (*Zip)(nil)
func init() {
tool.RegisterTool(&Zip{
traditionalSecondPartRegExp: regexp.MustCompile("^.*\\.z0*1$"),
})
tool.RegisterTool(Zip{})
}

View File

@@ -39,21 +39,7 @@ func InitConfig() {
if !filepath.IsAbs(dataDir) {
flags.DataDir = filepath.Join(pwd, flags.DataDir)
}
// Determine config file path: use flags.ConfigPath if provided, otherwise default to <dataDir>/config.json
configPath := flags.ConfigPath
if configPath == "" {
configPath = filepath.Join(flags.DataDir, "config.json")
} else {
// if relative, resolve relative to working directory
if !filepath.IsAbs(configPath) {
if absPath, err := filepath.Abs(configPath); err == nil {
configPath = absPath
} else {
configPath = filepath.Join(pwd, configPath)
}
}
}
configPath = filepath.Clean(configPath)
configPath := filepath.Join(flags.DataDir, "config.json")
log.Infof("reading config file: %s", configPath)
if !utils.Exists(configPath) {
log.Infof("config file not exists, creating default config file")
@@ -140,10 +126,6 @@ func InitConfig() {
log.Fatalf("create temp dir error: %+v", err)
}
log.Debugf("config: %+v", conf.Conf)
// Validate and display proxy configuration status
validateProxyConfig()
base.InitClient()
initURL()
}
@@ -183,14 +165,3 @@ func CleanTempDir() {
}
}
}
// validateProxyConfig validates proxy configuration and displays status at startup
func validateProxyConfig() {
if conf.Conf.ProxyAddress != "" {
if _, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
log.Infof("Proxy enabled: %s", conf.Conf.ProxyAddress)
} else {
log.Errorf("Invalid proxy address format: %s, error: %v", conf.Conf.ProxyAddress, err)
}
}
}

View File

@@ -154,7 +154,6 @@ func InitialSettings() []model.SettingItem {
{Key: conf.SharePreviewArchivesByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
{Key: conf.NonEFSZipEncoding, Value: "IBM437", Type: conf.TypeString, Group: model.PREVIEW},
// global settings
{Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL},
{Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL},
@@ -177,9 +176,6 @@ func InitialSettings() []model.SettingItem {
{Key: conf.ShareArchivePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.ShareForceProxy, Value: "true", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
{Key: conf.ShareSummaryContent, Value: "@{{creator}} shared {{#each files}}{{#if @first}}\"{{filename this}}\"{{/if}}{{#if @last}}{{#unless (eq @index 0)}} and {{@index}} more files{{/unless}}{{/if}}{{/each}} from {{site_title}}: {{base_url}}/@s/{{id}}{{#if pwd}} , the share code is {{pwd}}{{/if}}{{#if expires}}, please access before {{dateLocaleString expires}}.{{/if}}", Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.HandleHookAfterWriting, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
{Key: conf.HandleHookRateLimit, Value: "0", Type: conf.TypeNumber, Group: model.GLOBAL, Flag: model.PRIVATE},
{Key: conf.IgnoreSystemFiles, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE, Help: `When enabled, ignores common system files during upload (.DS_Store, desktop.ini, Thumbs.db, and files starting with ._)`},
// single settings
{Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE},

View File

@@ -43,14 +43,23 @@ func (c *TypedCache[T]) SetTypeWithExpirable(key, typeKey string, value T, exp E
}
}
func (c *TypedCache[T]) GetType(key, typeKey string) (T, bool) {
// Prefer typeKeys for the lookup; if none match, fall back to fallbackTypeKey
func (c *TypedCache[T]) GetType(key, fallbackTypeKey string, typeKeys ...string) (T, bool) {
c.mu.RLock()
cache, exists := c.entries[key]
if !exists {
c.mu.RUnlock()
return *new(T), false
}
entry, exists := cache[typeKey]
entry, exists := cache[fallbackTypeKey]
if len(typeKeys) > 0 {
for _, tk := range typeKeys {
if entry, exists = cache[tk]; exists {
fallbackTypeKey = tk
break
}
}
}
if !exists {
c.mu.RUnlock()
return *new(T), false
@@ -63,8 +72,8 @@ func (c *TypedCache[T]) GetType(key, typeKey string) (T, bool) {
}
c.mu.Lock()
if cache[typeKey] == entry {
delete(cache, typeKey)
if cache[fallbackTypeKey] == entry {
delete(cache, fallbackTypeKey)
if len(cache) == 0 {
delete(c.entries, key)
}
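
The widened signature stays backward compatible: with only two arguments it behaves as before, while extra typeKeys are probed in order ahead of the fallback. A self-contained mock showing the lookup order (the real TypedCache also handles locking and expiry, omitted here):

package main

import "fmt"

// miniCache demonstrates the GetType lookup order: probe the specific
// typeKeys first, then fall back to fallbackTypeKey.
type miniCache map[string]map[string]string

func (c miniCache) GetType(key, fallbackTypeKey string, typeKeys ...string) (string, bool) {
	cache, ok := c[key]
	if !ok {
		return "", false
	}
	for _, tk := range typeKeys {
		if v, ok := cache[tk]; ok {
			return v, true
		}
	}
	v, ok := cache[fallbackTypeKey]
	return v, ok
}

func main() {
	c := miniCache{
		"/movies/a.mkv": {
			"default":       "https://example.com/generic",
			"ua:VLC/3.0.20": "https://example.com/vlc-specific",
		},
	}
	fmt.Println(c.GetType("/movies/a.mkv", "default", "ua:VLC/3.0.20", "ip:1.2.3.4"))
	fmt.Println(c.GetType("/movies/a.mkv", "default", "ua:Chrome"))
}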

View File

@@ -131,7 +131,6 @@ type Config struct {
FTP FTP `json:"ftp" envPrefix:"FTP_"`
SFTP SFTP `json:"sftp" envPrefix:"SFTP_"`
LastLaunchedVersion string `json:"last_launched_version"`
ProxyAddress string `json:"proxy_address" env:"PROXY_ADDRESS"`
}
func DefaultConfig(dataDir string) *Config {
@@ -245,6 +244,5 @@ func DefaultConfig(dataDir string) *Config {
Listen: ":5222",
},
LastLaunchedVersion: "",
ProxyAddress: "",
}
}

View File

@@ -38,7 +38,6 @@ const (
SharePreviewArchivesByDefault = "share_preview_archives_by_default"
ReadMeAutoRender = "readme_autorender"
FilterReadMeScripts = "filter_readme_scripts"
NonEFSZipEncoding = "non_efs_zip_encoding"
// global
HideFiles = "hide_files"
@@ -56,9 +55,6 @@ const (
ShareArchivePreview = "share_archive_preview"
ShareForceProxy = "share_force_proxy"
ShareSummaryContent = "share_summary_content"
HandleHookAfterWriting = "handle_hook_after_writing"
HandleHookRateLimit = "handle_hook_rate_limit"
IgnoreSystemFiles = "ignore_system_files"
// index
SearchIndex = "search_index"

View File

@@ -38,26 +38,18 @@ func GetSharingsByCreatorId(creator uint, pageIndex, pageSize int) (sharings []m
}
func CreateSharing(s *model.SharingDB) (string, error) {
if s.ID == "" {
id := random.String(8)
for len(id) < 12 {
old := model.SharingDB{
ID: id,
}
if err := db.Where(old).First(&old).Error; err != nil {
s.ID = id
return id, errors.WithStack(db.Create(s).Error)
}
id += random.String(1)
id := random.String(8)
for len(id) < 12 {
old := model.SharingDB{
ID: id,
}
return "", errors.New("failed find valid id")
} else {
query := model.SharingDB{ID: s.ID}
if err := db.Where(query).First(&query).Error; err == nil {
return "", errors.New("sharing already exist")
if err := db.Where(old).First(&old).Error; err != nil {
s.ID = id
return id, errors.WithStack(db.Create(s).Error)
}
return s.ID, errors.WithStack(db.Create(s).Error)
id += random.String(1)
}
return "", errors.New("failed find valid id")
}
func UpdateSharing(s *model.SharingDB) error {
@@ -68,7 +60,3 @@ func DeleteSharingById(id string) error {
s := model.SharingDB{ID: id}
return errors.WithStack(db.Where(s).Delete(&s).Error)
}
func DeleteSharingsByCreatorId(creatorId uint) error {
return errors.WithStack(db.Where("creator_id = ?", creatorId).Delete(&model.SharingDB{}).Error)
}
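
On the ID loop above: it starts from an 8-character random ID and appends one random character per collision, giving up at length 12, so every retry probes a strictly larger keyspace. A compact sketch of the strategy against an in-memory set instead of the database:

package main

import (
	"fmt"
	"math/rand"
)

const letters = "abcdefghijklmnopqrstuvwxyz0123456789"

func randString(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}

// newID grows the candidate by one character per collision, 8 -> 12.
func newID(exists map[string]bool) (string, error) {
	id := randString(8)
	for len(id) < 12 {
		if !exists[id] {
			return id, nil
		}
		id += randString(1)
	}
	return "", fmt.Errorf("failed to find a free id")
}

func main() {
	taken := map[string]bool{}
	id, _ := newID(taken)
	fmt.Println("sharing id:", id)
}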

View File

@@ -17,25 +17,11 @@ type Config struct {
ProxyRangeOption bool `json:"-"`
// if the driver returns Link without URL, this should be set to true
NoLinkURL bool `json:"-"`
// Link cache behaviour:
// - LinkCacheAuto: let driver decide per-path (implement driver.LinkCacheModeResolver)
// - LinkCacheNone: no extra info added to cache key (default)
// - flags (OR-able) can add more attributes to cache key (IP, UA, ...)
LinkCacheMode `json:"-"`
// if the driver only store indices of files (e.g. UrlTree)
OnlyIndices bool `json:"only_indices"`
// LinkCacheType=1 adds the client IP to the cache key
//
// LinkCacheType=2 adds the User-Agent to the cache key
LinkCacheType uint8 `json:"-"`
}
type LinkCacheMode int8
const (
LinkCacheAuto LinkCacheMode = -1 // Let the driver decide per-path (use driver.LinkCacheModeResolver)
LinkCacheNone LinkCacheMode = 0 // No extra info added to cache key (default)
)
const (
LinkCacheIP LinkCacheMode = 1 << iota // include client IP in cache key
LinkCacheUA // include User-Agent in cache key
)
func (c Config) MustProxy() bool {
return c.OnlyProxy || c.NoLinkURL

View File

@@ -213,17 +213,3 @@ type WithDetails interface {
type Reference interface {
InitReference(storage Driver) error
}
type LinkCacheModeResolver interface {
// ResolveLinkCacheMode returns the LinkCacheMode for the given path.
ResolveLinkCacheMode(path string) LinkCacheMode
}
type DirectUploader interface {
// GetDirectUploadTools returns available frontend-direct upload tools
GetDirectUploadTools() []string
// GetDirectUploadInfo returns the information needed for direct upload from client to storage
// actualPath is the path relative to the storage root (after removing mount path prefix)
// return errs.NotImplement if the driver does not support the given direct upload tool
GetDirectUploadInfo(ctx context.Context, tool string, dstDir model.Obj, fileName string, fileSize int64) (any, error)
}
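
The two removals belong together: LinkCacheMode is an OR-able flag set and LinkCacheModeResolver lets a driver choose the mode per path, which is exactly what the OpenList driver's removed ResolveLinkCacheMode did. A sketch of how such flags could compose a link cache key (the constants mirror the removed ones; the real key layout is not shown in this diff):

package main

import "fmt"

type LinkCacheMode int8

const LinkCacheNone LinkCacheMode = 0 // no extra info added to the key

const (
	LinkCacheIP LinkCacheMode = 1 << iota // include client IP in the key
	LinkCacheUA                           // include User-Agent in the key
)

// cacheKey appends only the attributes the mode selects, so modes that
// differ produce disjoint key spaces for the same path.
func cacheKey(mode LinkCacheMode, path, ip, ua string) string {
	key := path
	if mode&LinkCacheIP != 0 {
		key += "|ip=" + ip
	}
	if mode&LinkCacheUA != 0 {
		key += "|ua=" + ua
	}
	return key
}

func main() {
	mode := LinkCacheIP | LinkCacheUA
	fmt.Println(cacheKey(mode, "/a.mkv", "1.2.3.4", "VLC/3.0.20"))
	// LinkCacheNone leaves the key as the bare path.
	fmt.Println(cacheKey(LinkCacheNone, "/a.mkv", "1.2.3.4", "VLC/3.0.20"))
}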

View File

@@ -7,11 +7,9 @@ import (
)
var (
ObjectNotFound = errors.New("object not found")
ObjectAlreadyExists = errors.New("object already exists")
NotFolder = errors.New("not a folder")
NotFile = errors.New("not a file")
IgnoredSystemFile = errors.New("system file upload ignored")
ObjectNotFound = errors.New("object not found")
NotFolder = errors.New("not a folder")
NotFile = errors.New("not a file")
)
func IsObjectNotFound(err error) bool {

View File

@@ -125,7 +125,6 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
DstActualPath: t.DstActualPath,
dstStorage: t.DstStorage,
DstStorageMp: t.DstStorageMp,
overwrite: t.Overwrite,
}
return uploadTask, nil
}
@@ -143,7 +142,6 @@ type ArchiveContentUploadTask struct {
DstStorageMp string
finalized bool
groupID string
overwrite bool
}
func (t *ArchiveContentUploadTask) GetName() string {
@@ -234,7 +232,6 @@ func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *Arch
dstStorage: t.dstStorage,
DstStorageMp: t.DstStorageMp,
groupID: t.groupID,
overwrite: t.overwrite,
})
if err != nil {
es = stderrors.Join(es, err)
@@ -244,12 +241,6 @@ func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *Arch
return es
}
} else {
if !t.overwrite {
dstPath := stdpath.Join(t.DstActualPath, t.ObjName)
if res, _ := op.Get(t.Ctx(), t.dstStorage, dstPath); res != nil {
return errs.ObjectAlreadyExists
}
}
file, err := os.Open(t.FilePath)
if err != nil {
return err

View File

@@ -24,17 +24,14 @@ type taskType uint8
func (t taskType) String() string {
if t == 0 {
return "copy"
} else if t == 1 {
return "move"
} else {
return "merge"
return "move"
}
}
const (
copy taskType = iota
move
merge
)
type FileTransferTask struct {
@@ -70,7 +67,7 @@ func (t *FileTransferTask) Run() error {
return t.RunWithNextTaskCallback(func(nextTask *FileTransferTask) error {
nextTask.groupID = t.groupID
task_group.TransferCoordinator.AddTask(t.groupID, nil)
if t.TaskType == copy || t.TaskType == merge {
if t.TaskType == copy {
CopyTaskManager.Add(nextTask)
} else {
MoveTaskManager.Add(nextTask)
@@ -112,7 +109,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
}
if srcStorage.GetStorage() == dstStorage.GetStorage() {
if taskType == copy || taskType == merge {
if taskType == copy {
err = op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...)
if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.NotSupport) {
return nil, err
@@ -164,7 +161,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
t.Creator, _ = ctx.Value(conf.UserKey).(*model.User)
t.ApiUrl = common.GetApiUrl(ctx)
t.groupID = dstDirPath
if taskType == copy || taskType == merge {
if taskType == copy {
task_group.TransferCoordinator.AddTask(dstDirPath, nil)
CopyTaskManager.Add(t)
} else {
@@ -180,7 +177,6 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
if err != nil {
return errors.WithMessagef(err, "failed get src [%s] file", t.SrcActualPath)
}
if srcObj.IsDir() {
t.Status = "src object is dir, listing objs"
objs, err := op.List(t.Ctx(), t.SrcStorage, t.SrcActualPath, model.ListArgs{})
@@ -188,34 +184,17 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
return errors.WithMessagef(err, "failed list src [%s] objs", t.SrcActualPath)
}
dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
if t.TaskType == copy || t.TaskType == merge {
if t.TaskType == copy {
if t.Ctx().Value(conf.NoTaskKey) != nil {
defer op.Cache.DeleteDirectory(t.DstStorage, dstActualPath)
} else {
task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath))
}
}
existedObjs := make(map[string]bool)
if t.TaskType == merge {
dstObjs, _ := op.List(t.Ctx(), t.DstStorage, dstActualPath, model.ListArgs{})
for _, obj := range dstObjs {
if !obj.IsDir() {
existedObjs[obj.GetName()] = true
}
}
}
for _, obj := range objs {
if utils.IsCanceled(t.Ctx()) {
return nil
}
if t.TaskType == merge && !obj.IsDir() && existedObjs[obj.GetName()] {
// skip existed file
continue
}
err = f(&FileTransferTask{
TaskType: t.TaskType,
TaskData: TaskData{

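For reference, the deleted merge type copied a directory tree while skipping files already present at the destination, by listing the destination once into a set. A self-contained sketch of that skip logic, assuming plain name lists in place of model.Obj:

```go
package main

import "fmt"

// mergeNames illustrates the removed merge semantics: files already present
// at the destination are skipped instead of overwritten.
func mergeNames(src, dst []string) []string {
	existed := make(map[string]bool, len(dst))
	for _, name := range dst {
		existed[name] = true
	}
	var toCopy []string
	for _, name := range src {
		if existed[name] {
			continue // skip existing file, as the merge task type did
		}
		toCopy = append(toCopy, name)
	}
	return toCopy
}

func main() {
	fmt.Println(mergeNames([]string{"a.txt", "b.txt"}, []string{"b.txt"}))
	// Output: [a.txt]
}
```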
View File

@@ -84,14 +84,6 @@ func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool)
return res, err
}
func Merge(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) {
res, err := transfer(ctx, merge, srcObjPath, dstDirPath, lazyCache...)
if err != nil {
log.Errorf("failed merge %s to %s: %+v", srcObjPath, dstDirPath, err)
}
return res, err
}
func Rename(ctx context.Context, srcPath, dstName string, lazyCache ...bool) error {
err := rename(ctx, srcPath, dstName, lazyCache...)
if err != nil {
@@ -175,14 +167,6 @@ func GetStorage(path string, args *GetStoragesArgs) (driver.Driver, error) {
return storageDriver, nil
}
func GetStorageAndActualPath(path string) (driver.Driver, string, error) {
return op.GetStorageAndActualPath(path)
}
func GetByActualPath(ctx context.Context, storage driver.Driver, actualPath string) (model.Obj, error) {
return op.Get(ctx, storage, actualPath)
}
func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
res, err := other(ctx, args)
if err != nil {
@@ -206,11 +190,3 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error {
}
return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
}
func GetDirectUploadInfo(ctx context.Context, tool, path, dstName string, fileSize int64) (any, error) {
info, err := getDirectUploadInfo(ctx, tool, path, dstName, fileSize)
if err != nil {
log.Errorf("failed get %s direct upload info for %s(%d bytes): %+v", path, dstName, fileSize, err)
}
return info, err
}

View File

@@ -105,11 +105,3 @@ func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer
}
return op.Put(ctx, storage, dstDirActualPath, file, nil, lazyCache...)
}
func getDirectUploadInfo(ctx context.Context, tool, dstDirPath, dstName string, fileSize int64) (any, error) {
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
if err != nil {
return nil, errors.WithMessage(err, "failed get storage")
}
return op.GetDirectUploadInfo(ctx, tool, storage, dstDirActualPath, dstName, fileSize)
}

View File

@@ -34,7 +34,7 @@ type Link struct {
//for accelerating request, use multi-thread downloading
Concurrency int `json:"concurrency"`
PartSize int `json:"part_size"`
-	ContentLength int64 `json:"content_length"` // transcoded videos, thumbnails
+	ContentLength int64 `json:"-"` // transcoded videos, thumbnails
	utils.SyncClosers `json:"-"`
	// Should be true if the Link becomes unusable once the resources in SyncClosers are closed
@@ -77,7 +77,6 @@ type ArchiveDecompressArgs struct {
ArchiveInnerArgs
CacheFull bool
PutIntoNewDir bool
Overwrite bool
}
type SharingListArgs struct {

View File

@@ -1,8 +0,0 @@
package model
type HttpDirectUploadInfo struct {
UploadURL string `json:"upload_url"` // The URL to upload the file
ChunkSize int64 `json:"chunk_size"` // The chunk size for uploading, 0 means no chunking required
Headers map[string]string `json:"headers,omitempty"` // Optional headers to include in the upload request
Method string `json:"method,omitempty"` // HTTP method, default is PUT
}
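
The deleted model described everything a client needs to upload straight to the backing storage. A hedged sketch of a consumer, assuming the JSON fields above (the endpoint URL in main is a placeholder, not a real driver URL):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// Mirrors the deleted model; JSON tags omitted for brevity.
type HttpDirectUploadInfo struct {
	UploadURL string
	ChunkSize int64 // 0 means no chunking required
	Headers   map[string]string
	Method    string // default is PUT
}

func upload(info HttpDirectUploadInfo, data []byte) error {
	method := info.Method
	if method == "" {
		method = http.MethodPut // the struct documents PUT as the default
	}
	req, err := http.NewRequest(method, info.UploadURL, bytes.NewReader(data))
	if err != nil {
		return err
	}
	for k, v := range info.Headers {
		req.Header.Set(k, v)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	// example.invalid is a placeholder endpoint; real URLs come from the driver.
	info := HttpDirectUploadInfo{UploadURL: "https://example.invalid/u", Method: "PUT"}
	fmt.Println(upload(info, []byte("payload")))
}
```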

View File

@@ -27,9 +27,6 @@ func (f *FileCloser) Close() error {
return errors.Join(errs...)
}
// FileRangeReader is a thin wrapper around RangeReaderIF, indicating that the
// io.ReadCloser returned by RangeReaderIF.RangeRead also implements model.File
// (i.e. supports Read/ReadAt/Seek). Use FileRangeReader only when that holds;
// otherwise use RangeReaderIF directly.
type FileRangeReader struct {
RangeReaderIF
}

View File

@@ -48,6 +48,7 @@ type FileStreamer interface {
// for a non-seekable Stream, if Read is called, this function won't work.
// caches the full Stream and writes it to writer (if provided, even if the stream is already cached).
CacheFullAndWriter(up *UpdateProgress, writer io.Writer) (File, error)
SetTmpFile(file File)
// if the Stream is not a File and is not cached, returns nil.
GetFile() File
}

View File

@@ -33,7 +33,7 @@ func (s *Sharing) Valid() bool {
if len(s.Files) == 0 {
return false
}
-	if s.Creator == nil || !s.Creator.CanShare() {
+	if !s.Creator.CanShare() {
return false
}
if s.Expires != nil && !s.Expires.IsZero() && s.Expires.Before(time.Now()) {

View File

@@ -283,15 +283,11 @@ func HttpClient() *http.Client {
}
func NewHttpClient() *http.Client {
-	transport := &http.Transport{
-		Proxy:           http.ProxyFromEnvironment,
-		TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
-	}
-	SetProxyIfConfigured(transport)
	return &http.Client{
-		Timeout:   time.Hour * 48,
-		Transport: transport,
+		Timeout: time.Hour * 48,
+		Transport: &http.Transport{
+			Proxy:           http.ProxyFromEnvironment,
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
+		},
	}
}
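
Both sides of this hunk build the same transport; the removed SetProxyIfConfigured simply let an explicitly configured proxy address take precedence over the environment. A standalone sketch of that precedence rule, with the proxy address passed in rather than read from conf:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

// newHTTPClient: environment proxy by default, optionally overridden
// by an explicit proxy address (an assumed input, not conf.Conf).
func newHTTPClient(proxyAddr string, insecure bool) *http.Client {
	transport := &http.Transport{
		Proxy:           http.ProxyFromEnvironment,
		TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
	}
	if proxyAddr != "" {
		if proxyURL, err := url.Parse(proxyAddr); err == nil {
			transport.Proxy = http.ProxyURL(proxyURL) // fixed proxy wins over env
		}
	}
	return &http.Client{Timeout: 48 * time.Hour, Transport: transport}
}

func main() {
	fmt.Println(newHTTPClient("http://127.0.0.1:7890", false) != nil)
}
```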

View File

@@ -1,20 +1,18 @@
package net
import (
	"fmt"
	"io"
	"math"
	"mime/multipart"
	"net/http"
	"net/textproto"
-	"net/url"
	"strings"
	"time"

-	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
-	"github.com/rclone/rclone/lib/readers"
-	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)
@@ -307,9 +305,39 @@ func rangesMIMESize(ranges []http_range.Range, contentType string, contentSize i
return encSize, nil
}
// LimitedReadCloser wraps a io.ReadCloser and limits the number of bytes that can be read from it.
type LimitedReadCloser struct {
rc io.ReadCloser
remaining int
}
func (l *LimitedReadCloser) Read(buf []byte) (int, error) {
if l.remaining <= 0 {
return 0, io.EOF
}
if len(buf) > l.remaining {
buf = buf[0:l.remaining]
}
n, err := l.rc.Read(buf)
l.remaining -= n
return n, err
}
func (l *LimitedReadCloser) Close() error {
return l.rc.Close()
}
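
The added LimitedReadCloser is essentially io.LimitReader with Close forwarding, which the stdlib reader lacks. A runnable demo (nopCloser is local scaffolding, not part of the diff):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

type nopCloser struct{ io.Reader }

func (nopCloser) Close() error { return nil }

// LimitedReadCloser as in the diff: stop after remaining bytes, keep Close.
type LimitedReadCloser struct {
	rc        io.ReadCloser
	remaining int
}

func (l *LimitedReadCloser) Read(buf []byte) (int, error) {
	if l.remaining <= 0 {
		return 0, io.EOF
	}
	if len(buf) > l.remaining {
		buf = buf[:l.remaining]
	}
	n, err := l.rc.Read(buf)
	l.remaining -= n
	return n, err
}

func (l *LimitedReadCloser) Close() error { return l.rc.Close() }

func main() {
	lrc := &LimitedReadCloser{rc: nopCloser{strings.NewReader("hello world")}, remaining: 5}
	b, _ := io.ReadAll(lrc)
	fmt.Println(string(b)) // hello
	_ = lrc.Close()
}
```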
// GetRangedHttpReader some http server doesn't support "Range" header,
// so this function read readCloser with whole data, skip offset, then return ReaderCloser.
func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.ReadCloser, error) {
var length_int int
if length > math.MaxInt {
return nil, fmt.Errorf("doesnot support length bigger than int32 max ")
}
length_int = int(length)
if offset > 100*1024*1024 {
log.Warnf("offset is more than 100MB, if loading data from internet, high-latency and wasting of bandwidth is expected")
@@ -320,25 +348,5 @@ func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.Rea
	}
	// return an io.ReadCloser that is limited to `length` bytes.
-	return readers.NewLimitedReadCloser(readCloser, length), nil
-}
-
-// SetProxyIfConfigured sets proxy for HTTP Transport if configured
-func SetProxyIfConfigured(transport *http.Transport) {
-	// If proxy address is configured, override environment variable settings
-	if conf.Conf.ProxyAddress != "" {
-		if proxyURL, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
-			transport.Proxy = http.ProxyURL(proxyURL)
-		}
-	}
-}
-
-// SetRestyProxyIfConfigured sets proxy for Resty client if configured
-func SetRestyProxyIfConfigured(client *resty.Client) {
-	// If proxy address is configured, override environment variable settings
-	if conf.Conf.ProxyAddress != "" {
-		if proxyURL, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
-			client.SetProxy(proxyURL.String())
-		}
-	}
+	return &LimitedReadCloser{readCloser, length_int}, nil
}

View File

@@ -3,31 +3,28 @@ package op
import (
"context"
stderrors "errors"
"fmt"
"io"
stdpath "path"
"strconv"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
"github.com/OpenListTeam/OpenList/v4/internal/cache"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
gocache "github.com/OpenListTeam/go-cache"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"golang.org/x/time/rate"
)
-var (
-	archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
-	archiveMetaG     singleflight.Group[*model.ArchiveMetaProvider]
-)
+var archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
+var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -64,25 +61,20 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
if err != nil {
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
}
	// Get archive tool
-	var partExt *tool.MultipartExtension
-	var t tool.Tool
-	ext := obj.GetName()
-	for {
-		var found bool
-		_, ext, found = strings.Cut(ext, ".")
-		if !found {
-			_ = l.Close()
-			return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
-		}
-		partExt, t, err = tool.GetArchiveTool("." + ext)
-		if err == nil {
-			break
-		}
-	}
+	baseName, ext, found := strings.Cut(obj.GetName(), ".")
+	if !found {
+		_ = l.Close()
+		return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
+	}
+	partExt, t, err := tool.GetArchiveTool("." + ext)
+	if err != nil {
+		var e error
+		partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
+		if e != nil {
+			_ = l.Close()
+			return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
+		}
+	}
// Get first part stream
ss, err := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: obj}, l)
if err != nil {
_ = l.Close()
@@ -91,62 +83,29 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
	ret := []*stream.SeekableStream{ss}
	if partExt == nil {
		return obj, t, ret, nil
-	}
-	// Merge multi-part archive
-	dir := stdpath.Dir(path)
-	objs, err := List(ctx, storage, dir, model.ListArgs{})
-	if err != nil {
-		return obj, t, ret, nil
-	}
-	for _, o := range objs {
-		submatch := partExt.PartFileFormat.FindStringSubmatch(o.GetName())
-		if submatch == nil {
-			continue
-		}
-		partIdx, e := strconv.Atoi(submatch[1])
-		if e != nil {
-			continue
-		}
-		partIdx = partIdx - partExt.SecondPartIndex + 1
-		if partIdx < 1 {
-			continue
-		}
-		p := stdpath.Join(dir, o.GetName())
-		l1, o1, e := Link(ctx, storage, p, args)
-		if e != nil {
-			err = errors.WithMessagef(e, "failed get [%s] link", p)
-			break
-		}
-		ss1, e := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o1}, l1)
-		if e != nil {
-			_ = l1.Close()
-			err = errors.WithMessagef(e, "failed get [%s] stream", p)
-			break
-		}
-		for partIdx >= len(ret) {
-			ret = append(ret, nil)
-		}
-		ret[partIdx] = ss1
-	}
-	closeAll := func(r []*stream.SeekableStream) {
-		for _, s := range r {
-			if s != nil {
-				_ = s.Close()
-			}
-		}
-	}
-	if err != nil {
-		closeAll(ret)
-		return nil, nil, nil, err
-	}
-	for i, ss1 := range ret {
-		if ss1 == nil {
-			closeAll(ret)
-			return nil, nil, nil, errors.Errorf("failed merge [%s] parts, missing part %d", path, i)
-		}
-	}
-	return obj, t, ret, nil
+	} else {
+		index := partExt.SecondPartIndex
+		dir := stdpath.Dir(path)
+		for {
+			p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
+			var o model.Obj
+			l, o, err = Link(ctx, storage, p, args)
+			if err != nil {
+				break
+			}
+			ss, err = stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o}, l)
+			if err != nil {
+				_ = l.Close()
+				for _, s := range ret {
+					_ = s.Close()
+				}
+				return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
+			}
+			ret = append(ret, ss)
+			index++
+		}
+		return obj, t, ret, nil
+	}
}
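
The regex-based variant above matches sibling file names against a part-number pattern, converts each match to a 1-based slot, and fills a slice so a missing part is detectable before extraction starts. A self-contained sketch under an assumed `.zNN` naming scheme (the pattern and names are illustrative, not the archive tool's real format):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// partPattern is an assumed example; the real format comes from the archive tool.
var partPattern = regexp.MustCompile(`^archive\.z(\d+)$`)

const secondPartIndex = 1 // e.g. archive.z01 is the second part overall

func slots(names []string) ([]string, error) {
	ret := make([]string, 1) // slot 0 is the first part, resolved elsewhere
	ret[0] = "archive.zip"
	for _, name := range names {
		m := partPattern.FindStringSubmatch(name)
		if m == nil {
			continue
		}
		idx, err := strconv.Atoi(m[1])
		if err != nil {
			continue
		}
		slot := idx - secondPartIndex + 1
		if slot < 1 {
			continue
		}
		// grow the slice so part numbers can arrive out of order
		for slot >= len(ret) {
			ret = append(ret, "")
		}
		ret[slot] = name
	}
	for i, n := range ret {
		if n == "" {
			return nil, fmt.Errorf("missing part %d", i)
		}
	}
	return ret, nil
}

func main() {
	fmt.Println(slots([]string{"archive.z02", "archive.z01", "notes.txt"}))
}
```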
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
@@ -200,10 +159,8 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return obj, archiveMetaProvider, err
}
-var (
-	archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
-	archiveListG     singleflight.Group[[]model.Obj]
-)
+var archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
+var archiveListG singleflight.Group[[]model.Obj]
func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -403,10 +360,8 @@ type objWithLink struct {
obj model.Obj
}
-var (
-	extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
-	extractG     = singleflight.Group[*objWithLink]{}
-)
+var extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
+var extractG = singleflight.Group[*objWithLink]{}
func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -514,9 +469,9 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
return errors.WithMessage(err, "failed to get dst dir")
}
-	var newObjs []model.Obj
	switch s := storage.(type) {
	case driver.ArchiveDecompressResult:
+		var newObjs []model.Obj
newObjs, err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
if err == nil {
if len(newObjs) > 0 {
@@ -535,31 +490,5 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
default:
return errs.NotImplement
}
if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
onlyList := false
targetPath := dstDirPath
if newObjs != nil && len(newObjs) == 1 && newObjs[0].IsDir() {
targetPath = stdpath.Join(dstDirPath, newObjs[0].GetName())
} else if newObjs != nil && len(newObjs) == 1 && !newObjs[0].IsDir() {
onlyList = true
} else if args.PutIntoNewDir {
targetPath = stdpath.Join(dstDirPath, strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName())))
} else if innerBase := stdpath.Base(args.InnerPath); innerBase != "." && innerBase != "/" {
targetPath = stdpath.Join(dstDirPath, innerBase)
dstObj, e := GetUnwrap(ctx, storage, targetPath)
onlyList = e != nil || !dstObj.IsDir()
}
if onlyList {
go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
} else {
var limiter *rate.Limiter
if l, _ := GetSettingItemByKey(conf.HandleHookRateLimit); l != nil {
if f, e := strconv.ParseFloat(l.Value, 64); e == nil && f > .0 {
limiter = rate.NewLimiter(rate.Limit(f), 1)
}
}
go RecursivelyListStorage(context.Background(), storage, targetPath, limiter, nil)
}
}
return errors.WithStack(err)
}

View File

@@ -2,11 +2,10 @@ package op
import (
"context"
stderrors "errors"
stdpath "path"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -15,7 +14,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"golang.org/x/time/rate"
)
var listG singleflight.Group[[]model.Obj]
@@ -59,7 +57,7 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
model.WrapObjsName(files)
// call hooks
go func(reqPath string, files []model.Obj) {
-		HandleObjsUpdateHook(context.WithoutCancel(ctx), reqPath, files)
+		HandleObjsUpdateHook(reqPath, files)
}(utils.GetFullPath(storage.GetStorage().MountPath, path), files)
// sort objs
@@ -170,19 +168,23 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
-	mode := storage.Config().LinkCacheMode
-	if mode == -1 {
-		mode = storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(path)
-	}
	typeKey := args.Type
-	if mode&driver.LinkCacheIP == driver.LinkCacheIP {
-		typeKey += "/" + args.IP
-	}
-	if mode&driver.LinkCacheUA == driver.LinkCacheUA {
-		typeKey += "/" + args.Header.Get("User-Agent")
+	var typeKeys []string
+	switch storage.Config().LinkCacheType {
+	case 1:
+		if args.IP != "" {
+			typeKey += "/" + args.IP
+			typeKeys = []string{typeKey}
+		}
+	case 2:
+		if ua := args.Header.Get("User-Agent"); ua != "" {
+			typeKey += "/" + ua
+			typeKeys = []string{typeKey}
+		}
	}
	key := Key(storage, path)
-	if ol, exists := Cache.linkCache.GetType(key, typeKey); exists {
+	if ol, exists := Cache.linkCache.GetType(key, args.Type, typeKeys...); exists {
if ol.link.Expiration != nil ||
ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
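
Both variants derive a secondary cache key from the request so that links which vary per client IP or User-Agent are cached separately. A sketch of the mode-based composition from the bitmask side of the hunk, with assumed flag names:

```go
package main

import "fmt"

const (
	linkCacheIP = 1 << iota
	linkCacheUA
)

// typeKey mimics the mode-based key composition (inputs are assumptions).
func typeKey(base, ip, ua string, mode int) string {
	key := base
	if mode&linkCacheIP != 0 {
		key += "/" + ip
	}
	if mode&linkCacheUA != 0 {
		key += "/" + ua
	}
	return key
}

func main() {
	fmt.Println(typeKey("video", "1.2.3.4", "curl/8", linkCacheIP|linkCacheUA))
	// video/1.2.3.4/curl/8 — clients with different IP/UA get distinct cache entries
}
```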
@@ -312,7 +314,7 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
srcDirPath := stdpath.Dir(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath)
if dstDirPath == srcDirPath {
return errors.New("move in place")
return stderrors.New("move in place")
}
srcRawObj, err := Get(ctx, storage, srcPath)
if err != nil {
@@ -345,24 +347,8 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
}
}
default:
-		err = errs.NotImplement
+		return errs.NotImplement
}
if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
if !srcObj.IsDir() {
go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
} else {
targetPath := stdpath.Join(dstDirPath, srcObj.GetName())
var limiter *rate.Limiter
if l, _ := GetSettingItemByKey(conf.HandleHookRateLimit); l != nil {
if f, e := strconv.ParseFloat(l.Value, 64); e == nil && f > .0 {
limiter = rate.NewLimiter(rate.Limit(f), 1)
}
}
go RecursivelyListStorage(context.Background(), storage, targetPath, limiter, nil)
}
}
return errors.WithStack(err)
}
@@ -415,7 +401,7 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
srcPath = utils.FixAndCleanPath(srcPath)
dstDirPath = utils.FixAndCleanPath(dstDirPath)
if dstDirPath == stdpath.Dir(srcPath) {
return errors.New("copy in place")
return stderrors.New("copy in place")
}
srcRawObj, err := Get(ctx, storage, srcPath)
if err != nil {
@@ -446,24 +432,8 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
}
}
default:
-		err = errs.NotImplement
+		return errs.NotImplement
}
if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
if !srcObj.IsDir() {
go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
} else {
targetPath := stdpath.Join(dstDirPath, srcObj.GetName())
var limiter *rate.Limiter
if l, _ := GetSettingItemByKey(conf.HandleHookRateLimit); l != nil {
if f, e := strconv.ParseFloat(l.Value, 64); e == nil && f > .0 {
limiter = rate.NewLimiter(rate.Limit(f), 1)
}
}
go RecursivelyListStorage(context.Background(), storage, targetPath, limiter, nil)
}
}
return errors.WithStack(err)
}
@@ -509,7 +479,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
// UrlTree PUT
-	if storage.Config().OnlyIndices {
+	if storage.GetStorage().Driver == "UrlTree" {
var link string
dstDirPath, link = urlTreeSplitLineFormPath(stdpath.Join(dstDirPath, file.GetName()))
file = &stream.FileStream{Obj: &model.Object{Name: link}}
@@ -591,9 +561,6 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
err = Remove(ctx, storage, tempPath)
}
}
if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
}
return errors.WithStack(err)
}
@@ -605,15 +572,15 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
dstPath := stdpath.Join(dstDirPath, dstName)
_, err := GetUnwrap(ctx, storage, dstPath)
if err == nil {
-		return errors.WithStack(errs.ObjectAlreadyExists)
+		return errors.New("obj already exists")
}
err = MakeDir(ctx, storage, dstDirPath)
if err != nil {
return errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
return errors.WithMessagef(err, "failed to put url")
}
dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
if err != nil {
return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
return errors.WithMessagef(err, "failed to put url")
}
switch s := storage.(type) {
case driver.PutURLResult:
@@ -636,56 +603,8 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
}
}
default:
-		return errors.WithStack(errs.NotImplement)
-	}
-	if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
-		go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
+		return errs.NotImplement
	}
log.Debugf("put url [%s](%s) done", dstName, url)
return errors.WithStack(err)
}
func GetDirectUploadTools(storage driver.Driver) []string {
du, ok := storage.(driver.DirectUploader)
if !ok {
return nil
}
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil
}
return du.GetDirectUploadTools()
}
func GetDirectUploadInfo(ctx context.Context, tool string, storage driver.Driver, dstDirPath, dstName string, fileSize int64) (any, error) {
du, ok := storage.(driver.DirectUploader)
if !ok {
return nil, errors.WithStack(errs.NotImplement)
}
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
}
dstDirPath = utils.FixAndCleanPath(dstDirPath)
dstPath := stdpath.Join(dstDirPath, dstName)
_, err := GetUnwrap(ctx, storage, dstPath)
if err == nil {
return nil, errors.WithStack(errs.ObjectAlreadyExists)
}
err = MakeDir(ctx, storage, dstDirPath)
if err != nil {
return nil, errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
}
dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
if err != nil {
return nil, errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
}
info, err := du.GetDirectUploadInfo(ctx, tool, dstDir, dstName, fileSize)
if err != nil {
return nil, errors.WithStack(err)
}
return info, nil
}
func needHandleObjsUpdateHook() bool {
needHandle, _ := GetSettingItemByKey(conf.HandleHookAfterWriting)
return needHandle != nil && (needHandle.Value == "true" || needHandle.Value == "1")
}

View File

@@ -1,7 +1,6 @@
package op
import (
"context"
"regexp"
"strings"
@@ -14,7 +13,7 @@ import (
)
// Obj
-type ObjsUpdateHook = func(ctx context.Context, parent string, objs []model.Obj)
+type ObjsUpdateHook = func(parent string, objs []model.Obj)
var (
objsUpdateHooks = make([]ObjsUpdateHook, 0)
@@ -24,9 +23,9 @@ func RegisterObjsUpdateHook(hook ObjsUpdateHook) {
objsUpdateHooks = append(objsUpdateHooks, hook)
}
-func HandleObjsUpdateHook(ctx context.Context, parent string, objs []model.Obj) {
+func HandleObjsUpdateHook(parent string, objs []model.Obj) {
for _, hook := range objsUpdateHooks {
-		hook(ctx, parent, objs)
+		hook(parent, objs)
}
}
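
This file implements a plain observer list: hooks register once and every listing fans out to all of them. The removed ctx parameter (main passes context.WithoutCancel(ctx) from List) lets hooks outlive the request while keeping its values. A minimal sketch of the pattern, with string names standing in for model.Obj:

```go
package main

import (
	"context"
	"fmt"
)

// The hook-list pattern from the diff: observers register once,
// every update fans out to all of them.
type objsUpdateHook func(ctx context.Context, parent string, names []string)

var hooks []objsUpdateHook

func registerHook(h objsUpdateHook) { hooks = append(hooks, h) }

func handleUpdate(ctx context.Context, parent string, names []string) {
	for _, h := range hooks {
		h(ctx, parent, names)
	}
}

func main() {
	registerHook(func(_ context.Context, parent string, names []string) {
		fmt.Println("index update:", parent, names) // e.g. the search indexer
	})
	handleUpdate(context.Background(), "/movies", []string{"a.mkv"})
}
```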

View File

@@ -1,125 +0,0 @@
package op
import (
"context"
stdpath "path"
"sync"
"sync/atomic"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"golang.org/x/time/rate"
)
var (
ManualScanCancel = atomic.Pointer[context.CancelFunc]{}
ScannedCount = atomic.Uint64{}
)
func ManualScanRunning() bool {
return ManualScanCancel.Load() != nil
}
func BeginManualScan(rawPath string, limit float64) error {
rawPath = utils.FixAndCleanPath(rawPath)
ctx, cancel := context.WithCancel(context.Background())
if !ManualScanCancel.CompareAndSwap(nil, &cancel) {
cancel()
return errors.New("manual scan is running, please try later")
}
ScannedCount.Store(0)
go func() {
defer func() { (*ManualScanCancel.Swap(nil))() }()
err := RecursivelyList(ctx, rawPath, rate.Limit(limit), &ScannedCount)
if err != nil {
log.Errorf("failed recursively list: %v", err)
}
}()
return nil
}
func StopManualScan() {
c := ManualScanCancel.Load()
if c != nil {
(*c)()
}
}
func RecursivelyList(ctx context.Context, rawPath string, limit rate.Limit, counter *atomic.Uint64) error {
storage, actualPath, err := GetStorageAndActualPath(rawPath)
if err != nil && !errors.Is(err, errs.StorageNotFound) {
return err
} else if err == nil {
var limiter *rate.Limiter
if limit > .0 {
limiter = rate.NewLimiter(limit, 1)
}
RecursivelyListStorage(ctx, storage, actualPath, limiter, counter)
} else {
var wg sync.WaitGroup
recursivelyListVirtual(ctx, rawPath, limit, counter, &wg)
wg.Wait()
}
return nil
}
func recursivelyListVirtual(ctx context.Context, rawPath string, limit rate.Limit, counter *atomic.Uint64, wg *sync.WaitGroup) {
objs := GetStorageVirtualFilesByPath(rawPath)
if counter != nil {
counter.Add(uint64(len(objs)))
}
for _, obj := range objs {
if utils.IsCanceled(ctx) {
return
}
nextPath := stdpath.Join(rawPath, obj.GetName())
storage, actualPath, err := GetStorageAndActualPath(nextPath)
if err != nil && !errors.Is(err, errs.StorageNotFound) {
log.Errorf("error recursively list: failed get storage [%s]: %v", nextPath, err)
} else if err == nil {
var limiter *rate.Limiter
if limit > .0 {
limiter = rate.NewLimiter(limit, 1)
}
wg.Add(1)
go func() {
defer wg.Done()
RecursivelyListStorage(ctx, storage, actualPath, limiter, counter)
}()
} else {
recursivelyListVirtual(ctx, nextPath, limit, counter, wg)
}
}
}
func RecursivelyListStorage(ctx context.Context, storage driver.Driver, actualPath string, limiter *rate.Limiter, counter *atomic.Uint64) {
objs, err := List(ctx, storage, actualPath, model.ListArgs{Refresh: true})
if err != nil {
if !errors.Is(err, context.Canceled) {
log.Errorf("error recursively list: failed list (%s)[%s]: %v", storage.GetStorage().MountPath, actualPath, err)
}
return
}
if counter != nil {
counter.Add(uint64(len(objs)))
}
for _, obj := range objs {
if utils.IsCanceled(ctx) {
return
}
if !obj.IsDir() {
continue
}
if limiter != nil {
if err = limiter.Wait(ctx); err != nil {
return
}
}
nextPath := stdpath.Join(actualPath, obj.GetName())
RecursivelyListStorage(ctx, storage, nextPath, limiter, counter)
}
}
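
The deleted scanner uses atomic.Pointer[context.CancelFunc] as a combined running-flag and stop-handle: CompareAndSwap guarantees at most one scan, and an optional rate.Limiter throttles directory listings. A condensed, runnable sketch of both pieces (the walk callback is a stand-in for RecursivelyList):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"

	"golang.org/x/time/rate"
)

var scanCancel atomic.Pointer[context.CancelFunc]

// beginScan reproduces the CompareAndSwap guard: the stored cancel func
// doubles as the "running" flag, so only one scan may be in flight.
func beginScan(walk func(ctx context.Context, limiter *rate.Limiter)) error {
	ctx, cancel := context.WithCancel(context.Background())
	if !scanCancel.CompareAndSwap(nil, &cancel) {
		cancel()
		return errors.New("scan already running")
	}
	go func() {
		defer func() { (*scanCancel.Swap(nil))() }() // clear flag, release ctx
		walk(ctx, rate.NewLimiter(rate.Limit(10), 1)) // at most 10 listings/second
	}()
	return nil
}

func main() {
	_ = beginScan(func(ctx context.Context, limiter *rate.Limiter) {
		for i := 0; i < 3; i++ {
			if err := limiter.Wait(ctx); err != nil {
				return
			}
			fmt.Println("list directory", i)
		}
	})
	time.Sleep(500 * time.Millisecond)
	fmt.Println("running:", scanCancel.Load() != nil)
}
```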

View File

@@ -137,7 +137,3 @@ func DeleteSharing(sid string) error {
sharingCache.Del(sid)
return db.DeleteSharingById(sid)
}
func DeleteSharingsByCreatorId(creatorId uint) error {
return db.DeleteSharingsByCreatorId(creatorId)
}

View File

@@ -358,21 +358,16 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
DriverName: d.Config().Name,
},
}
-			resultChan := make(chan *model.StorageDetails, 1)
-			go func(dri driver.Driver) {
-				details, err := GetStorageDetails(ctx, dri, refresh)
-				if err != nil {
-					if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
-						log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, err)
-					}
-				}
-				resultChan <- details
-			}(d)
-			select {
-			case r := <-resultChan:
-				ret.StorageDetails = r
-			case <-time.After(time.Second):
-				return ret
-			}
+			timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
+			defer cancel()
+			details, err := GetStorageDetails(timeoutCtx, d, refresh)
+			if err != nil {
+				if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
+					log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
+				}
+			}
+			ret.StorageDetails = details
			return ret
})
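
The two shapes differ in where the deadline lives: the goroutine-plus-select form never blocks the caller past one second even if the callee ignores cancellation, at the cost of an abandoned worker goroutine, while context.WithTimeout pushes the deadline into the callee and relies on it honoring ctx. A side-by-side sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func slowDetails(ctx context.Context) (string, error) {
	select {
	case <-time.After(2 * time.Second):
		return "details", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	// Shape 1: goroutine + select. The caller is never blocked past the
	// deadline, but the worker lingers until it finishes on its own.
	ch := make(chan string, 1)
	go func() {
		if d, err := slowDetails(context.Background()); err == nil {
			ch <- d
		}
	}()
	select {
	case d := <-ch:
		fmt.Println("got", d)
	case <-time.After(time.Second):
		fmt.Println("timed out (worker goroutine still running)")
	}

	// Shape 2: context.WithTimeout. A cooperative callee observes ctx and
	// stops work instead of being abandoned.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if _, err := slowDetails(ctx); err != nil {
		fmt.Println("ctx:", err)
	}
}
```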
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/pkg/errors"
)
var userG singleflight.Group[*model.User]
@@ -79,9 +78,6 @@ func DeleteUserById(id uint) error {
return errs.DeleteAdminOrGuest
}
Cache.DeleteUser(old.Username)
if err := DeleteSharingsByCreatorId(id); err != nil {
return errors.WithMessage(err, "failed to delete user's sharings")
}
return db.DeleteUserById(id)
}

View File

@@ -199,13 +199,14 @@ func Config(ctx context.Context) searcher.Config {
return instance.Config()
}
func Update(ctx context.Context, parent string, objs []model.Obj) {
func Update(parent string, objs []model.Obj) {
if instance == nil || !instance.Config().AutoUpdate || !setting.GetBool(conf.AutoUpdateIndex) || Running() {
return
}
if isIgnorePath(parent) {
return
}
ctx := context.Background()
// only update when index have built
progress, err := Progress()
if err != nil {

View File

@@ -28,11 +28,3 @@ func GetInt(key string, defaultVal int) int {
func GetBool(key string) bool {
return GetStr(key) == "true" || GetStr(key) == "1"
}
func GetFloat(key string, defaultVal float64) float64 {
f, err := strconv.ParseFloat(GetStr(key), 64)
if err != nil {
return defaultVal
}
return f
}
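
The removed helper is a parse-with-fallback wrapper over strconv.ParseFloat, matching the GetInt/GetBool helpers above it. A tiny sketch with usage:

```go
package main

import (
	"fmt"
	"strconv"
)

// getFloat mirrors the removed helper: parse a setting string,
// fall back to the default on any parse error.
func getFloat(raw string, defaultVal float64) float64 {
	f, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		return defaultVal
	}
	return f
}

func main() {
	fmt.Println(getFloat("2.5", 0), getFloat("", 1)) // 2.5 1
}
```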

Some files were not shown because too many files have changed in this diff.