Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-11-25 03:15:19 +08:00)

Compare commits: v4.1.5...mark-stale (28 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 13310817d0 | |
| | b45c260468 | |
| | 91076058ac | |
| | 854415160c | |
| | 8f4f7d1291 | |
| | ee2c77acd8 | |
| | fc90ec1b53 | |
| | 7d78944d14 | |
| | f2e0fe8589 | |
| | 39dcf9bd19 | |
| | 25f38df4ca | |
| | a1f1f98f94 | |
| | affc499913 | |
| | c7574b545c | |
| | 9e852ba12d | |
| | 174eae802a | |
| | b9f058fcc9 | |
| | 6de15b6310 | |
| | 2844797684 | |
| | 9f4e439478 | |
| | 9d09ee133d | |
| | d88f0e8f3c | |
| | 0857478516 | |
| | 66d9809057 | |
| | db8a7e8caf | |
| | 8f18e34da0 | |
| | 525f26dc23 | |
| | a0fcfa3ed2 | |

.github/ISSUE_TEMPLATE/00-bug_report_zh.yml (vendored): 16 changes

@@ -13,7 +13,7 @@ body:
     attributes:
       label: 请确认以下事项
       description: |
-        您必须勾选以下内容,否则您的问题可能会被直接关闭。
+        您必须确认、同意并勾选以下内容,否则您的问题一定会被直接关闭。
         或者您可以去[讨论区](https://github.com/OpenListTeam/OpenList/discussions)。
       options:
         - label: |
@@ -59,6 +59,14 @@ body:
       label: 问题描述(必填)
     validations:
       required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: 日志(必填)
+      description: |
+        请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
+    validations:
+      required: true
   - type: textarea
     id: config
     attributes:
@@ -67,12 +75,6 @@ body:
         请提供您的`OpenList`应用的配置文件,并截图相关存储配置。(可隐藏隐私字段)
     validations:
       required: true
-  - type: textarea
-    id: logs
-    attributes:
-      label: 日志(可选)
-      description: |
-        请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
   - type: textarea
     id: reproduction
     attributes:

.github/ISSUE_TEMPLATE/01-bug_report_en.yml (vendored): 16 changes

@@ -13,7 +13,7 @@ body:
     attributes:
      label: Please confirm the following
      description: |
-        You must check all the following, otherwise your issue may be closed directly.
+        You must confirm, agree, and check all the following, otherwise your issue will definitely be closed directly.
         Or you can go to the [discussions](https://github.com/OpenListTeam/OpenList/discussions).
      options:
        - label: |
@@ -59,6 +59,14 @@ body:
       label: Bug Description (required)
     validations:
       required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: Logs (required)
+      description: |
+        Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
+    validations:
+      required: true
   - type: textarea
     id: config
     attributes:
@@ -67,12 +75,6 @@ body:
         Please provide your `OpenList` application's configuration file and a screenshot of the relevant storage configuration. (You may mask sensitive fields)
     validations:
       required: true
-  - type: textarea
-    id: logs
-    attributes:
-      label: Logs (optional)
-      description: |
-        Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
   - type: textarea
     id: reproduction
     attributes:

.github/workflows/mark_stale_issues.yml (vendored, new file): 27 changes

@@ -0,0 +1,27 @@
+name: Mark stale issues
+
+on:
+  schedule:
+    - cron: "0 0/2 * * *"
+  workflow_dispatch:
+
+permissions:
+  issues: write
+
+jobs:
+  mark-stale:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Mark issues stale using actions/stale
+        uses: actions/stale@v10
+        with:
+          days-before-stale: 90
+          days-before-close: 90
+          days-before-pr-stale: -1
+          days-before-pr-close: -1
+          stale-issue-message: "This issue has been automatically marked as stale because it hasn't had recent activity for 90 days. If you'd like to keep it open, please reply and the label will be removed."
+          stale-issue-label: "stale"
+          close-issue-message: "This issue has been automatically closed due to inactivity for 90 days after being marked as stale. If you believe this was done in error, please feel free to reopen the issue or contact the maintainers."
+          any-of-labels: "bug"
+          exempt-labels: "WIP,has-parent,collection,Announcement"
+          operations-per-run: 30

@@ -2,6 +2,7 @@ package flags

 var (
     DataDir    string
+    ConfigPath string
     Debug      bool
     NoPrefix   bool
     Dev        bool

@@ -27,7 +27,8 @@ func Execute() {
 }

 func init() {
-    RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
+    RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
+    RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
     RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
     RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
     RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")

@@ -41,7 +41,9 @@ func (d *Pan123) GetAddition() driver.Additional {
 }

 func (d *Pan123) Init(ctx context.Context) error {
-    _, err := d.Request(UserInfo, http.MethodGet, nil, nil)
+    _, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
+        req.SetHeader("platform", "web")
+    }, nil)
     return err
 }

@@ -12,7 +12,8 @@ type Addition struct {
     //OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
     //OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
     AccessToken  string
-    UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
+    UploadThread int    `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
+    Platform     string `json:"platform" type:"string" default:"web" help:"the platform header value, sent with API requests"`
 }

 var config = driver.Config{

@@ -203,7 +203,7 @@ do:
         "referer":       "https://www.123pan.com/",
         "authorization": "Bearer " + d.AccessToken,
         "user-agent":    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) openlist-client",
-        "platform":      "web",
+        "platform":      d.Platform,
         "app-version":   "3",
         //"user-agent":   base.UserAgent,
     })

@@ -200,10 +200,7 @@ func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
         return nil, err
     }
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
-            FreeSpace:  capacityInfo.CloudCapacityInfo.FreeSize,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(capacityInfo.CloudCapacityInfo.UsedSize, capacityInfo.CloudCapacityInfo.TotalSize),
     }, nil
 }

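Several hunks in this comparison replace hand-built model.DiskUsage values (total plus API-reported free space) with a shared driver.DiskUsageFromUsedAndTotal(used, total) helper; the matching types.go hunks below loosen FreeSize to int64, which suggests the 189 API can report stale or even negative free space, so deriving free space from used and total is the safer route. A minimal sketch of what such a helper presumably computes; the real implementation lives in internal/driver and may differ:

```go
package driver

// DiskUsage mirrors the shape of model.DiskUsage in this repo (an assumption,
// inferred from the fields assigned in the hunks above).
type DiskUsage struct {
    TotalSpace uint64
    FreeSpace  uint64
}

// DiskUsageFromUsedAndTotal derives free space from used/total, guarding
// against underflow when a cloud API reports used > total.
func DiskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
    var free uint64
    if total > used {
        free = total - used
    }
    return DiskUsage{TotalSpace: total, FreeSpace: free}
}
```
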
@@ -72,13 +72,13 @@ type CapacityResp struct {
     ResMessage string `json:"res_message"`
     Account    string `json:"account"`
     CloudCapacityInfo struct {
-        FreeSize     uint64 `json:"freeSize"`
+        FreeSize     int64  `json:"freeSize"`
         MailUsedSize uint64 `json:"mail189UsedSize"`
         TotalSize    uint64 `json:"totalSize"`
         UsedSize     uint64 `json:"usedSize"`
     } `json:"cloudCapacityInfo"`
     FamilyCapacityInfo struct {
-        FreeSize  uint64 `json:"freeSize"`
+        FreeSize  int64  `json:"freeSize"`
         TotalSize uint64 `json:"totalSize"`
         UsedSize  uint64 `json:"usedSize"`
     } `json:"familyCapacityInfo"`

@@ -284,18 +284,15 @@ func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
     if err != nil {
         return nil, err
     }
-    var total, free uint64
+    var total, used uint64
     if y.isFamily() {
         total = capacityInfo.FamilyCapacityInfo.TotalSize
-        free = capacityInfo.FamilyCapacityInfo.FreeSize
+        used = capacityInfo.FamilyCapacityInfo.UsedSize
     } else {
         total = capacityInfo.CloudCapacityInfo.TotalSize
-        free = capacityInfo.CloudCapacityInfo.FreeSize
+        used = capacityInfo.CloudCapacityInfo.UsedSize
     }
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: total,
-            FreeSpace:  free,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
     }, nil
 }

@@ -322,13 +322,13 @@ type CapacityResp struct {
     ResMessage string `json:"res_message"`
     Account    string `json:"account"`
     CloudCapacityInfo struct {
-        FreeSize     uint64 `json:"freeSize"`
+        FreeSize     int64  `json:"freeSize"`
         MailUsedSize uint64 `json:"mail189UsedSize"`
         TotalSize    uint64 `json:"totalSize"`
         UsedSize     uint64 `json:"usedSize"`
     } `json:"cloudCapacityInfo"`
     FamilyCapacityInfo struct {
-        FreeSize  uint64 `json:"freeSize"`
+        FreeSize  int64  `json:"freeSize"`
         TotalSize uint64 `json:"totalSize"`
         UsedSize  uint64 `json:"usedSize"`
     } `json:"familyCapacityInfo"`

@@ -416,18 +416,15 @@ func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
     if err != nil {
         return nil, err
     }
-    var total, free uint64
+    var total, used uint64
     if y.isFamily() {
         total = capacityInfo.FamilyCapacityInfo.TotalSize
-        free = capacityInfo.FamilyCapacityInfo.FreeSize
+        used = capacityInfo.FamilyCapacityInfo.UsedSize
     } else {
         total = capacityInfo.CloudCapacityInfo.TotalSize
-        free = capacityInfo.CloudCapacityInfo.FreeSize
+        used = capacityInfo.CloudCapacityInfo.UsedSize
     }
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: total,
-            FreeSpace:  free,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
     }, nil
 }

@@ -415,13 +415,13 @@ type CapacityResp struct {
     ResMessage string `json:"res_message"`
     Account    string `json:"account"`
     CloudCapacityInfo struct {
-        FreeSize     uint64 `json:"freeSize"`
+        FreeSize     int64  `json:"freeSize"`
         MailUsedSize uint64 `json:"mail189UsedSize"`
         TotalSize    uint64 `json:"totalSize"`
         UsedSize     uint64 `json:"usedSize"`
     } `json:"cloudCapacityInfo"`
     FamilyCapacityInfo struct {
-        FreeSize  uint64 `json:"freeSize"`
+        FreeSize  int64  `json:"freeSize"`
         TotalSize uint64 `json:"totalSize"`
         UsedSize  uint64 `json:"usedSize"`
     } `json:"familyCapacityInfo"`

@@ -532,7 +532,9 @@ func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
     }
     for _, dst := range dsts {
         storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
-        if err == nil {
+        if err != nil {
+            continue
+        }
         mode := storage.Config().LinkCacheMode

@@ -299,10 +299,7 @@ func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
     total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
     used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: total,
-            FreeSpace:  total - used,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
     }, nil
 }

@@ -5,11 +5,15 @@ import (
     "crypto/md5"
     "encoding/hex"
+    "errors"
+    "fmt"
     "io"
     "net/http"
+    "net/url"
     "os"
     stdpath "path"
     "strconv"
     "strings"
+    "sync"
     "time"

     "github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -18,8 +22,10 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
+    "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/avast/retry-go"
+    "github.com/go-resty/resty/v2"
     log "github.com/sirupsen/logrus"
 )

@@ -29,8 +35,20 @@ type BaiduNetdisk struct {

     uploadThread int
     vipType      int // 会员类型,0普通用户(4G/4M)、1普通会员(10G/16M)、2超级会员(20G/32M)
+
+    upClient       *resty.Client // 上传文件使用的http客户端
+    uploadUrlG     singleflight.Group[string]
+    uploadUrlMu    sync.RWMutex
+    uploadUrlCache map[string]uploadURLCacheEntry
+}
+
+type uploadURLCacheEntry struct {
+    url        string
+    updateTime time.Time
 }

+var ErrUploadIDExpired = errors.New("uploadid expired")
+
 func (d *BaiduNetdisk) Config() driver.Config {
     return config
 }

@@ -40,19 +58,27 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
 }

 func (d *BaiduNetdisk) Init(ctx context.Context) error {
+    d.upClient = base.NewRestyClient().
+        SetTimeout(UPLOAD_TIMEOUT).
+        SetRetryCount(UPLOAD_RETRY_COUNT).
+        SetRetryWaitTime(UPLOAD_RETRY_WAIT_TIME).
+        SetRetryMaxWaitTime(UPLOAD_RETRY_MAX_WAIT_TIME)
+    d.uploadUrlCache = make(map[string]uploadURLCacheEntry)
     d.uploadThread, _ = strconv.Atoi(d.UploadThread)
-    if d.uploadThread < 1 || d.uploadThread > 32 {
-        d.uploadThread, d.UploadThread = 3, "3"
+    if d.uploadThread < 1 {
+        d.uploadThread, d.UploadThread = 1, "1"
+    } else if d.uploadThread > 32 {
+        d.uploadThread, d.UploadThread = 32, "32"
     }

     if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
-        d.UploadAPI = "https://d.pcs.baidu.com"
+        d.UploadAPI = UPLOAD_FALLBACK_API
     }

     res, err := d.get("/xpan/nas", map[string]string{
         "method": "uinfo",
     }, nil)
-    log.Debugf("[baidu] get uinfo: %s", string(res))
+    log.Debugf("[baidu_netdisk] get uinfo: %s", string(res))
     if err != nil {
         return err
     }

@@ -179,6 +205,11 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
 // **注意**: 截至 2024/04/20 百度云盘 api 接口返回的时间永远是当前时间,而不是文件时间。
 // 而实际上云盘存储的时间是文件时间,所以此处需要覆盖时间,保证缓存与云盘的数据一致
 func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+    // 百度网盘不允许上传空文件
+    if stream.GetSize() < 1 {
+        return nil, ErrBaiduEmptyFilesNotAllowed
+    }
+
     // rapid upload
     if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
         return newObj, nil

@@ -214,7 +245,6 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {

     // cal md5 for first 256k data
-    const SliceSize int64 = 256 * utils.KB
     // cal md5
     blockList := make([]string, 0, count)
     byteSize := sliceSize
     fileMd5H := md5.New()

@@ -244,7 +274,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
     }
     if tmpF != nil {
         if written != streamSize {
-            return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
+            return nil, errs.NewErr(err, "CreateTempFile failed, size mismatch: %d != %d ", written, streamSize)
         }
         _, err = tmpF.Seek(0, io.SeekStart)
         if err != nil {

@@ -258,31 +288,14 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
     mtime := stream.ModTime().Unix()
     ctime := stream.CreateTime().Unix()

-    // step.1 预上传
-    // 尝试获取之前的进度
+    // step.1 尝试读取已保存进度
     precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
     if !ok {
-        params := map[string]string{
-            "method": "precreate",
-        }
-        form := map[string]string{
-            "path":        path,
-            "size":        strconv.FormatInt(streamSize, 10),
-            "isdir":       "0",
-            "autoinit":    "1",
-            "rtype":       "3",
-            "block_list":  blockListStr,
-            "content-md5": contentMd5,
-            "slice-md5":   sliceMd5,
-        }
-        joinTime(form, ctime, mtime)
-
-        log.Debugf("[baidu_netdisk] precreate data: %s", form)
-        _, err = d.postForm("/xpan/file", params, form, &precreateResp)
+        // 没有进度,走预上传
+        precreateResp, err = d.precreate(ctx, path, streamSize, blockListStr, contentMd5, sliceMd5, ctime, mtime)
         if err != nil {
             return nil, err
         }
-        log.Debugf("%+v", precreateResp)
         if precreateResp.ReturnType == 2 {
             // rapid upload, since got md5 match from baidu server
             // 修复时间,具体原因见 Put 方法注释的 **注意**

@@ -291,45 +304,96 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
             return fileToObj(precreateResp.File), nil
         }
     }
-    // step.2 上传分片
-    threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
-        retry.Attempts(1),
-        retry.Delay(time.Second),
-        retry.DelayType(retry.BackOffDelay))
-
-    for i, partseq := range precreateResp.BlockList {
-        if utils.IsCanceled(upCtx) {
-            break
+    ensureUploadURL := func() {
+        if precreateResp.UploadURL != "" {
+            return
         }
-        i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
-        if partseq+1 == count {
-            byteSize = lastBlockSize
-        }
-        threadG.Go(func(ctx context.Context) error {
-            params := map[string]string{
-                "method":       "upload",
-                "access_token": d.AccessToken,
-                "type":         "tmpfile",
-                "path":         path,
-                "uploadid":     precreateResp.Uploadid,
-                "partseq":      strconv.Itoa(partseq),
-            }
-            err := d.uploadSlice(ctx, params, stream.GetName(),
-                driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
-            if err != nil {
-                return err
-            }
-            up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
-            precreateResp.BlockList[i] = -1
-            return nil
-        })
+        precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
     }
-    if err = threadG.Wait(); err != nil {
-        // 如果属于用户主动取消,则保存上传进度
+    ensureUploadURL()
+
+    // step.2 上传分片
+uploadLoop:
+    for attempt := 0; attempt < 2; attempt++ {
+        // 获取上传域名
+        if precreateResp.UploadURL == "" {
+            ensureUploadURL()
+        }
+        uploadUrl := precreateResp.UploadURL
+        // 并发上传
+        threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+            retry.Attempts(1),
+            retry.Delay(time.Second),
+            retry.DelayType(retry.BackOffDelay))
+
+        cacheReaderAt, okReaderAt := cache.(io.ReaderAt)
+        if !okReaderAt {
+            return nil, fmt.Errorf("cache object must implement io.ReaderAt interface for upload operations")
+        }
+
+        totalParts := len(precreateResp.BlockList)
+
+        for i, partseq := range precreateResp.BlockList {
+            if utils.IsCanceled(upCtx) || partseq < 0 {
+                continue
+            }
+            i, partseq := i, partseq
+            offset, size := int64(partseq)*sliceSize, sliceSize
+            if partseq+1 == count {
+                size = lastBlockSize
+            }
+            threadG.Go(func(ctx context.Context) error {
+                params := map[string]string{
+                    "method":       "upload",
+                    "access_token": d.AccessToken,
+                    "type":         "tmpfile",
+                    "path":         path,
+                    "uploadid":     precreateResp.Uploadid,
+                    "partseq":      strconv.Itoa(partseq),
+                }
+                section := io.NewSectionReader(cacheReaderAt, offset, size)
+                err := d.uploadSlice(ctx, uploadUrl, params, stream.GetName(), driver.NewLimitedUploadStream(ctx, section))
+                if err != nil {
+                    return err
+                }
+                precreateResp.BlockList[i] = -1
+                // 当前goroutine还没退出,+1才是真正成功的数量
+                success := threadG.Success() + 1
+                progress := float64(success) * 100 / float64(totalParts)
+                up(progress)
+                return nil
+            })
+        }
+
+        err = threadG.Wait()
+        if err == nil {
+            break uploadLoop
+        }
+
+        // 保存进度(所有错误都会保存)
+        precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
+        base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
+
         if errors.Is(err, context.Canceled) {
-            precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
             return nil, err
         }
+        if errors.Is(err, ErrUploadIDExpired) {
+            log.Warn("[baidu_netdisk] uploadid expired, will restart from scratch")
+            d.clearUploadUrlCache(precreateResp.Uploadid)
+            // 重新 precreate(所有分片都要重传)
+            newPre, err2 := d.precreate(ctx, path, streamSize, blockListStr, "", "", ctime, mtime)
+            if err2 != nil {
+                return nil, err2
+            }
+            if newPre.ReturnType == 2 {
+                return fileToObj(newPre.File), nil
+            }
+            precreateResp = newPre
+            precreateResp.UploadURL = ""
+            ensureUploadURL()
+            // 覆盖掉旧的进度
+            base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
+            continue uploadLoop
+        }
+        return nil, err
+    }

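The rewritten upload loop above caps per-part goroutines at d.uploadThread via the repo's pkg/errgroup and marks each finished part with -1 so a retry pass skips it. A rough stand-in for that pattern using the stock golang.org/x/sync/errgroup (not the repo's own errgroup, which also carries retry options):

```go
package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

// uploadParts uploads the parts whose state is >= 0, at most `threads` at a
// time, and marks each success with -1 so a caller can retry only the rest.
func uploadParts(ctx context.Context, parts []int, threads int, send func(context.Context, int) error) error {
    g, ctx := errgroup.WithContext(ctx)
    g.SetLimit(threads) // bounded concurrency, like d.uploadThread

    for i, partseq := range parts {
        if partseq < 0 {
            continue // already uploaded in a previous attempt
        }
        i, partseq := i, partseq
        g.Go(func() error {
            if err := send(ctx, partseq); err != nil {
                return err
            }
            parts[i] = -1 // mark done; survivors (>= 0) get retried
            return nil
        })
    }
    return g.Wait()
}

func main() {
    parts := []int{0, 1, 2, 3}
    err := uploadParts(context.Background(), parts, 2, func(ctx context.Context, p int) error {
        fmt.Println("uploading part", p)
        return nil
    })
    fmt.Println(parts, err)
}
```
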
@@ -343,23 +407,72 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
     // 修复时间,具体原因见 Put 方法注释的 **注意**
     newFile.Ctime = ctime
     newFile.Mtime = mtime
+    // 上传成功清理进度
     base.SaveUploadProgress(d, nil, d.AccessToken, contentMd5)
+    d.clearUploadUrlCache(precreateResp.Uploadid)
     return fileToObj(newFile), nil
 }

-func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
-    res, err := base.RestyClient.R().
+// precreate 执行预上传操作,支持首次上传和 uploadid 过期重试
+func (d *BaiduNetdisk) precreate(ctx context.Context, path string, streamSize int64, blockListStr, contentMd5, sliceMd5 string, ctime, mtime int64) (*PrecreateResp, error) {
+    params := map[string]string{"method": "precreate"}
+    form := map[string]string{
+        "path":       path,
+        "size":       strconv.FormatInt(streamSize, 10),
+        "isdir":      "0",
+        "autoinit":   "1",
+        "rtype":      "3",
+        "block_list": blockListStr,
+    }
+
+    // 只有在首次上传时才包含 content-md5 和 slice-md5
+    if contentMd5 != "" && sliceMd5 != "" {
+        form["content-md5"] = contentMd5
+        form["slice-md5"] = sliceMd5
+    }
+
+    joinTime(form, ctime, mtime)
+
+    var precreateResp PrecreateResp
+    _, err := d.postForm("/xpan/file", params, form, &precreateResp)
+    if err != nil {
+        return nil, err
+    }
+
+    // 修复时间,具体原因见 Put 方法注释的 **注意**
+    if precreateResp.ReturnType == 2 {
+        precreateResp.File.Ctime = ctime
+        precreateResp.File.Mtime = mtime
+    }
+
+    return &precreateResp, nil
+}
+
+func (d *BaiduNetdisk) uploadSlice(ctx context.Context, uploadUrl string, params map[string]string, fileName string, file io.Reader) error {
+    res, err := d.upClient.R().
         SetContext(ctx).
         SetQueryParams(params).
         SetFileReader("file", fileName, file).
-        Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
+        Post(uploadUrl + "/rest/2.0/pcs/superfile2")
     if err != nil {
         return err
     }
     log.Debugln(res.RawResponse.Status + res.String())
+    if res.StatusCode() != http.StatusOK {
+        return errs.NewErr(errs.StreamIncomplete, "baidu upload failed, status=%d, body=%s", res.StatusCode(), res.String())
+    }
     errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
     errNo := utils.Json.Get(res.Body(), "errno").ToInt()
+    respStr := res.String()
+    lower := strings.ToLower(respStr)
+    // 合并 uploadid 过期检测逻辑
+    if strings.Contains(lower, "uploadid") &&
+        (strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
+        return ErrUploadIDExpired
+    }
+
     if errCode != 0 || errNo != 0 {
-        return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
+        return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", res.String())
     }
     return nil
 }

@@ -3,6 +3,7 @@ package baidu_netdisk
 import (
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/op"
+    "time"
 )

 type Addition struct {
@@ -19,11 +20,21 @@ type Addition struct {
     RefreshToken string `json:"refresh_token" required:"true"`
     UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
     UploadAPI    string `json:"upload_api" default:"https://d.pcs.baidu.com"`
+    UseDynamicUploadAPI   bool  `json:"use_dynamic_upload_api" default:"true" help:"dynamically get upload api domain, when enabled, the 'Upload API' setting will be used as a fallback if failed to get"`
     CustomUploadPartSize  int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
     LowBandwithUploadMode bool  `json:"low_bandwith_upload_mode" default:"false"`
     OnlyListVideoFile     bool  `json:"only_list_video_file" default:"false"`
 }

+const (
+    UPLOAD_FALLBACK_API        = "https://d.pcs.baidu.com" // 备用上传地址
+    UPLOAD_URL_EXPIRE_TIME     = time.Minute * 60          // 上传地址有效期(分钟)
+    UPLOAD_TIMEOUT             = time.Minute * 30          // 上传请求超时时间
+    UPLOAD_RETRY_COUNT         = 3
+    UPLOAD_RETRY_WAIT_TIME     = time.Second * 1
+    UPLOAD_RETRY_MAX_WAIT_TIME = time.Second * 5
+)
+
 var config = driver.Config{
     Name:        "BaiduNetdisk",
     DefaultRoot: "/",

@@ -1,6 +1,7 @@
 package baidu_netdisk

 import (
+    "errors"
     "path"
     "strconv"
     "time"
@@ -9,6 +10,10 @@ import (
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
 )

+var (
+    ErrBaiduEmptyFilesNotAllowed = errors.New("empty files are not allowed by baidu netdisk")
+)
+
 type TokenErrResp struct {
     ErrorDescription string `json:"error_description"`
     Error            string `json:"error"`
@@ -188,6 +193,32 @@ type PrecreateResp struct {

     // return_type=2
     File File `json:"info"`
+
+    UploadURL string `json:"-"` // 保存断点续传对应的上传域名
 }

+type UploadServerResp struct {
+    BakServer  []any `json:"bak_server"`
+    BakServers []struct {
+        Server string `json:"server"`
+    } `json:"bak_servers"`
+    ClientIP   string `json:"client_ip"`
+    ErrorCode  int    `json:"error_code"`
+    ErrorMsg   string `json:"error_msg"`
+    Expire     int    `json:"expire"`
+    Host       string `json:"host"`
+    Newno      string `json:"newno"`
+    QuicServer []any  `json:"quic_server"`
+    QuicServers []struct {
+        Server string `json:"server"`
+    } `json:"quic_servers"`
+    RequestID  int64 `json:"request_id"`
+    Server     []any `json:"server"`
+    ServerTime int   `json:"server_time"`
+    Servers    []struct {
+        Server string `json:"server"`
+    } `json:"servers"`
+    Sl int `json:"sl"`
+}
+
 type QuotaResp struct {

@@ -115,7 +115,7 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
     errno := utils.Json.Get(res.Body(), "errno").ToInt()
     if errno != 0 {
         if utils.SliceContains([]int{111, -6}, errno) {
-            log.Info("refreshing baidu_netdisk token.")
+            log.Info("[baidu_netdisk] refreshing baidu_netdisk token.")
             err2 := d.refreshToken()
             if err2 != nil {
                 return retry.Unrecoverable(err2)

@@ -326,10 +326,10 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
     // 非会员固定为 4MB
     if d.vipType == 0 {
         if d.CustomUploadPartSize != 0 {
-            log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
         }
         if filesize > MaxSliceNum*DefaultSliceSize {
-            log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
+            log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
         }

         return DefaultSliceSize

@@ -337,17 +337,17 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {

     if d.CustomUploadPartSize != 0 {
         if d.CustomUploadPartSize < DefaultSliceSize {
-            log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
             return DefaultSliceSize
         }

         if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
-            log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
             return VipSliceSize
         }

         if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
-            log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
             return SVipSliceSize
         }

@@ -377,7 +377,7 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
     }

     if filesize > MaxSliceNum*maxSliceSize {
-        log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
+        log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
     }

     return maxSliceSize

@@ -394,6 +394,97 @@ func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
     return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
 }

+// getUploadUrl 从开放平台获取上传域名/地址,并发请求会被合并,结果会在 uploadid 生命周期内复用。
+// 如果获取失败,则返回 Upload API 设置项。
+func (d *BaiduNetdisk) getUploadUrl(path, uploadId string) string {
+    if !d.UseDynamicUploadAPI || uploadId == "" {
+        return d.UploadAPI
+    }
+    getCachedUrlFunc := func() (string, bool) {
+        d.uploadUrlMu.RLock()
+        defer d.uploadUrlMu.RUnlock()
+        if entry, ok := d.uploadUrlCache[uploadId]; ok {
+            return entry.url, true
+        }
+        return "", false
+    }
+    // 检查地址缓存
+    if uploadUrl, ok := getCachedUrlFunc(); ok {
+        return uploadUrl
+    }
+
+    uploadUrlGetFunc := func() (string, error) {
+        // 双重检查缓存
+        if uploadUrl, ok := getCachedUrlFunc(); ok {
+            return uploadUrl, nil
+        }
+
+        uploadUrl, err := d.requestForUploadUrl(path, uploadId)
+        if err != nil {
+            return "", err
+        }
+
+        d.uploadUrlMu.Lock()
+        d.uploadUrlCache[uploadId] = uploadURLCacheEntry{
+            url:        uploadUrl,
+            updateTime: time.Now(),
+        }
+        d.uploadUrlMu.Unlock()
+        return uploadUrl, nil
+    }
+
+    uploadUrl, err, _ := d.uploadUrlG.Do(uploadId, uploadUrlGetFunc)
+    if err != nil {
+        fallback := d.UploadAPI
+        log.Warnf("[baidu_netdisk] get upload URL failed (%v), will use fallback URL: %s", err, fallback)
+        return fallback
+    }
+    return uploadUrl
+}
+
+func (d *BaiduNetdisk) clearUploadUrlCache(uploadId string) {
+    if uploadId == "" {
+        return
+    }
+    d.uploadUrlMu.Lock()
+    if _, ok := d.uploadUrlCache[uploadId]; ok {
+        delete(d.uploadUrlCache, uploadId)
+    }
+    d.uploadUrlMu.Unlock()
+}
+
+// requestForUploadUrl 请求获取上传地址。
+// 实测此接口不需要认证,传method和upload_version就行,不过还是按文档规范调用。
+// https://pan.baidu.com/union/doc/Mlvw5hfnr
+func (d *BaiduNetdisk) requestForUploadUrl(path, uploadId string) (string, error) {
+    params := map[string]string{
+        "method":         "locateupload",
+        "appid":          "250528",
+        "path":           path,
+        "uploadid":       uploadId,
+        "upload_version": "2.0",
+    }
+    apiUrl := "https://d.pcs.baidu.com/rest/2.0/pcs/file"
+    var resp UploadServerResp
+    _, err := d.request(apiUrl, http.MethodGet, func(req *resty.Request) {
+        req.SetQueryParams(params)
+    }, &resp)
+    if err != nil {
+        return "", err
+    }
+    // 应该是https开头的一个地址
+    var uploadUrl string
+    if len(resp.Servers) > 0 {
+        uploadUrl = resp.Servers[0].Server
+    } else if len(resp.BakServers) > 0 {
+        uploadUrl = resp.BakServers[0].Server
+    }
+    if uploadUrl == "" {
+        return "", errors.New("upload URL is empty")
+    }
+    return uploadUrl, nil
+}
+
 // func encodeURIComponent(str string) string {
 // 	r := url.QueryEscape(str)
 // 	r = strings.ReplaceAll(r, "+", "%20")

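getUploadUrl above combines three layers: an RWMutex-guarded cache read, a singleflight group so concurrent misses for the same uploadid trigger a single request, and a double-check inside the flight. The repo uses its own generic pkg/singleflight; a rough equivalent of the pattern with the stock golang.org/x/sync/singleflight, shown only as an illustration:

```go
package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/singleflight"
)

// cachedResolver memoizes an expensive lookup. Reads take an RLock; a miss
// funnels through singleflight so concurrent callers for the same key share
// one lookup, and the winner re-checks the cache before doing real work.
type cachedResolver struct {
    mu    sync.RWMutex
    cache map[string]string
    g     singleflight.Group
}

func (r *cachedResolver) get(key string, lookup func() (string, error)) (string, error) {
    r.mu.RLock()
    v, ok := r.cache[key]
    r.mu.RUnlock()
    if ok {
        return v, nil
    }
    out, err, _ := r.g.Do(key, func() (any, error) {
        // Double-check: another goroutine may have filled the cache
        // between our RUnlock and entering the flight.
        r.mu.RLock()
        v, ok := r.cache[key]
        r.mu.RUnlock()
        if ok {
            return v, nil
        }
        v2, err := lookup()
        if err != nil {
            return "", err
        }
        r.mu.Lock()
        r.cache[key] = v2
        r.mu.Unlock()
        return v2, nil
    })
    if err != nil {
        return "", err
    }
    return out.(string), nil
}

func main() {
    r := &cachedResolver{cache: map[string]string{}}
    v, _ := r.get("uploadid-1", func() (string, error) { return "https://example-upload-host", nil })
    fmt.Println(v)
}
```
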
@@ -25,6 +25,7 @@ func InitClient() {
         }),
     ).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
     NoRedirectClient.SetHeader("user-agent", UserAgent)
+    net.SetRestyProxyIfConfigured(NoRedirectClient)

     RestyClient = NewRestyClient()
     HttpClient = net.NewHttpClient()
@@ -37,5 +38,7 @@ func NewRestyClient() *resty.Client {
         SetRetryResetReaders(true).
         SetTimeout(DefaultTimeout).
         SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
+
+    net.SetRestyProxyIfConfigured(client)
     return client
 }

@@ -15,6 +15,7 @@ import (
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/go-resty/resty/v2"
     "github.com/google/uuid"
+    "golang.org/x/time/rate"
 )

 type Doubao struct {

@@ -23,6 +24,7 @@ type Doubao struct {
     *UploadToken
     UserId       string
     uploadThread int
+    limiter      *rate.Limiter
 }

 func (d *Doubao) Config() driver.Config {

@@ -61,6 +63,17 @@ func (d *Doubao) Init(ctx context.Context) error {
         d.UploadToken = uploadToken
     }

+    if d.LimitRate > 0 {
+        d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
+    }
+
     return nil
 }

+func (d *Doubao) WaitLimit(ctx context.Context) error {
+    if d.limiter != nil {
+        return d.limiter.Wait(ctx)
+    }
+    return nil
+}
+

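The Doubao changes gate every driver operation behind WaitLimit, backed by golang.org/x/time/rate. With rate.NewLimiter(rate.Limit(2), 1), the limiter admits roughly two events per second with a burst of one, and Wait blocks until a token is available or the context is done. A small self-contained illustration:

```go
package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    // 2 requests per second, burst of 1, matching the Doubao default LimitRate=2.
    limiter := rate.NewLimiter(rate.Limit(2), 1)

    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    start := time.Now()
    for i := 0; i < 5; i++ {
        if err := limiter.Wait(ctx); err != nil {
            fmt.Println("canceled while waiting:", err) // context expired
            return
        }
        fmt.Printf("request %d at %v\n", i, time.Since(start).Round(10*time.Millisecond))
    }
}
```
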
@@ -69,6 +82,10 @@ func (d *Doubao) Drop(ctx context.Context) error {
 }

 func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
+
     var files []model.Obj
     fileList, err := d.getFiles(dir.GetID(), "")
     if err != nil {
@@ -95,6 +112,10 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
 }

 func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
+
     var downloadUrl string

     if u, ok := file.(*Object); ok {
@@ -160,6 +181,10 @@ func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 }

 func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r UploadNodeResp
     _, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{
@@ -177,6 +202,10 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
 }

 func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r UploadNodeResp
     _, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{
@@ -191,6 +220,10 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 }

 func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r BaseResp
     _, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{
@@ -207,6 +240,10 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 }

 func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r BaseResp
     _, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
@@ -215,6 +252,10 @@ func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
+
     // 根据MIME类型确定数据类型
     mimetype := file.GetMimetype()
     dataType := FileDataType

@@ -10,9 +10,10 @@ type Addition struct {
     // driver.RootPath
     driver.RootID
     // define other
-    Cookie       string `json:"cookie" type:"text"`
-    UploadThread string `json:"upload_thread" default:"3"`
-    DownloadApi  string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
+    Cookie       string  `json:"cookie" type:"text"`
+    UploadThread string  `json:"upload_thread" default:"3"`
+    DownloadApi  string  `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
+    LimitRate    float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
 }

 var config = driver.Config{
@@ -23,6 +24,10 @@ var config = driver.Config{

 func init() {
     op.RegisterDriver(func() driver.Driver {
-        return &Doubao{}
+        return &Doubao{
+            Addition: Addition{
+                LimitRate: 2,
+            },
+        }
     })
 }

@@ -113,9 +113,7 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
     }

     return &model.Link{
-        RangeReader: &model.FileRangeReader{
-            RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
-        },
+        RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
         SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
     }, nil
 }

@@ -51,6 +51,9 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
             if d.Addition.ShowReadme {
                 files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
             }
+            if d.Addition.ShowSourceCode {
+                files = append(files, point.GetSourceCode()...)
+            }
         } else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录
             nextDir := GetNextDir(point.Point, path)
             if nextDir == "" {
@@ -117,6 +120,10 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
                 }

                 files = append(files, point.GetReleaseByTagName(tagName)...)
+
+                if d.Addition.ShowSourceCode {
+                    files = append(files, point.GetSourceCodeByTagName(tagName)...)
+                }
             }
         }
     }

@@ -10,6 +10,7 @@ type Addition struct {
     RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"OpenListTeam/OpenList" help:"structure:[path:]org/repo"`
     ShowReadme    bool   `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
     Token         string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
+    ShowSourceCode bool  `json:"show_source_code" type:"bool" default:"false" help:"show Source code (zip/tar.gz)"`
     ShowAllVersion bool  `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
     GitHubProxy    string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
 }

@@ -143,6 +143,60 @@ func (m *MountPoint) GetAllVersionSize() int64 {
     return size
 }

+func (m *MountPoint) GetSourceCode() []File {
+    files := make([]File, 0)
+
+    // 无法获取文件大小,此处设为 1
+    files = append(files, File{
+        Path:     m.Point + "/" + "Source code (zip)",
+        FileName: "Source code (zip)",
+        Size:     1,
+        Type:     "file",
+        UpdateAt: m.Release.CreatedAt,
+        CreateAt: m.Release.CreatedAt,
+        Url:      m.Release.ZipballUrl,
+    })
+    files = append(files, File{
+        Path:     m.Point + "/" + "Source code (tar.gz)",
+        FileName: "Source code (tar.gz)",
+        Size:     1,
+        Type:     "file",
+        UpdateAt: m.Release.CreatedAt,
+        CreateAt: m.Release.CreatedAt,
+        Url:      m.Release.TarballUrl,
+    })
+
+    return files
+}
+
+func (m *MountPoint) GetSourceCodeByTagName(tagName string) []File {
+    for _, item := range *m.Releases {
+        if item.TagName == tagName {
+            files := make([]File, 0)
+            files = append(files, File{
+                Path:     m.Point + "/" + "Source code (zip)",
+                FileName: "Source code (zip)",
+                Size:     1,
+                Type:     "file",
+                UpdateAt: item.CreatedAt,
+                CreateAt: item.CreatedAt,
+                Url:      item.ZipballUrl,
+            })
+            files = append(files, File{
+                Path:     m.Point + "/" + "Source code (tar.gz)",
+                FileName: "Source code (tar.gz)",
+                Size:     1,
+                Type:     "file",
+                UpdateAt: item.CreatedAt,
+                CreateAt: item.CreatedAt,
+                Url:      item.TarballUrl,
+            })
+            return files
+        }
+    }
+    return nil
+}
+
 func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
     if m.OtherFile == nil || refresh {
         resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")

@@ -27,6 +27,14 @@ import (

 // do others that not defined in Driver interface

+// Google Drive API field constants
+const (
+    // File list query fields
+    FilesListFields = "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken"
+    // Single file query fields
+    FileInfoFields = "id,name,mimeType,size,md5Checksum,sha1Checksum,sha256Checksum"
+)
+
 type googleDriveServiceAccount struct {
     // Type      string `json:"type"`
     // ProjectID string `json:"project_id"`
@@ -235,7 +243,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
     }
     query := map[string]string{
         "orderBy":  orderBy,
-        "fields":   "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
+        "fields":   FilesListFields,
         "pageSize": "1000",
         "q":        fmt.Sprintf("'%s' in parents and trashed = false", id),
         //"includeItemsFromAllDrives": "true",
@@ -249,11 +257,82 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
             return nil, err
         }
         pageToken = resp.NextPageToken

+        // Batch process shortcuts, API calls only for file shortcuts
+        shortcutTargetIds := make([]string, 0)
+        shortcutIndices := make([]int, 0)
+
+        // Collect target IDs of all file shortcuts (skip folder shortcuts)
+        for i := range resp.Files {
+            if resp.Files[i].MimeType == "application/vnd.google-apps.shortcut" &&
+                resp.Files[i].ShortcutDetails.TargetId != "" &&
+                resp.Files[i].ShortcutDetails.TargetMimeType != "application/vnd.google-apps.folder" {
+                shortcutTargetIds = append(shortcutTargetIds, resp.Files[i].ShortcutDetails.TargetId)
+                shortcutIndices = append(shortcutIndices, i)
+            }
+        }
+
+        // Batch get target file info (only for file shortcuts)
+        if len(shortcutTargetIds) > 0 {
+            targetFiles := d.batchGetTargetFilesInfo(shortcutTargetIds)
+            // Update shortcut file info
+            for j, targetId := range shortcutTargetIds {
+                if targetFile, exists := targetFiles[targetId]; exists {
+                    fileIndex := shortcutIndices[j]
+                    if targetFile.Size != "" {
+                        resp.Files[fileIndex].Size = targetFile.Size
+                    }
+                    if targetFile.MD5Checksum != "" {
+                        resp.Files[fileIndex].MD5Checksum = targetFile.MD5Checksum
+                    }
+                    if targetFile.SHA1Checksum != "" {
+                        resp.Files[fileIndex].SHA1Checksum = targetFile.SHA1Checksum
+                    }
+                    if targetFile.SHA256Checksum != "" {
+                        resp.Files[fileIndex].SHA256Checksum = targetFile.SHA256Checksum
+                    }
+                }
+            }
+        }
+
         res = append(res, resp.Files...)
     }
     return res, nil
 }

+// getTargetFileInfo gets target file details for shortcuts
+func (d *GoogleDrive) getTargetFileInfo(targetId string) (File, error) {
+    var targetFile File
+    url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s", targetId)
+    query := map[string]string{
+        "fields": FileInfoFields,
+    }
+    _, err := d.request(url, http.MethodGet, func(req *resty.Request) {
+        req.SetQueryParams(query)
+    }, &targetFile)
+    if err != nil {
+        return File{}, err
+    }
+    return targetFile, nil
+}
+
+// batchGetTargetFilesInfo batch gets target file info, sequential processing to avoid concurrency complexity
+func (d *GoogleDrive) batchGetTargetFilesInfo(targetIds []string) map[string]File {
+    if len(targetIds) == 0 {
+        return make(map[string]File)
+    }
+
+    result := make(map[string]File)
+    // Sequential processing to avoid concurrency complexity
+    for _, targetId := range targetIds {
+        file, err := d.getTargetFileInfo(targetId)
+        if err == nil {
+            result[targetId] = file
+        }
+    }
+    return result
+}
+
 func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
     defaultChunkSize := d.ChunkSize * 1024 * 1024
     ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)

@@ -236,4 +236,19 @@ func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
     }, nil
 }

+func (d *Onedrive) GetDirectUploadTools() []string {
+    if !d.EnableDirectUpload {
+        return nil
+    }
+    return []string{"HttpDirect"}
+}
+
+// GetDirectUploadInfo returns the direct upload info for OneDrive
+func (d *Onedrive) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
+    if !d.EnableDirectUpload {
+        return nil, errs.NotImplement
+    }
+    return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
+}
+
 var _ driver.Driver = (*Onedrive)(nil)

@@ -19,6 +19,7 @@ type Addition struct {
     ChunkSize          int64  `json:"chunk_size" type:"number" default:"5"`
     CustomHost         string `json:"custom_host" help:"Custom host for onedrive download link"`
     DisableDiskUsage   bool   `json:"disable_disk_usage" default:"false"`
+    EnableDirectUpload bool   `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
 }

 var config = driver.Config{

@@ -133,7 +133,7 @@ func (d *Onedrive) _refreshToken() error {
     return nil
 }

-func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
     if d.ref != nil {
         return d.ref.Request(url, method, callback, resp)
     }
@@ -152,7 +152,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
         return nil, err
     }
     if e.Error.Code != "" {
-        if e.Error.Code == "InvalidAuthenticationToken" {
+        if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
             err = d.refreshToken()
             if err != nil {
                 return nil, err
@@ -310,9 +310,36 @@ func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
     var resp DriveResp
     _, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
         req.SetContext(ctx)
-    }, &resp)
+    }, &resp, true)
     if err != nil {
         return nil, err
     }
     return &resp, nil
 }

+func (d *Onedrive) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
+    // Create upload session
+    url := d.GetMetaUrl(false, path) + "/createUploadSession"
+    metadata := map[string]any{
+        "item": map[string]any{
+            "@microsoft.graph.conflictBehavior": "rename",
+        },
+    }
+
+    res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
+        req.SetBody(metadata).SetContext(ctx)
+    }, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
+    if uploadUrl == "" {
+        return nil, fmt.Errorf("failed to get upload URL from response")
+    }
+    return &model.HttpDirectUploadInfo{
+        UploadURL: uploadUrl,
+        ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
+        Method:    "PUT",
+    }, nil
+}

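getDirectUploadInfo returns a Graph upload-session URL plus a chunk size, and the HttpDirect tool presumably has the client PUT byte ranges straight to that URL. Microsoft Graph upload sessions expect each chunk as a PUT with a Content-Range header. A hedged sketch of what a client-side consumer could look like, using the HttpDirectUploadInfo fields declared above (the actual OpenList client code is not part of this diff):

```go
package main

import (
    "bytes"
    "fmt"
    "net/http"
)

// putChunks uploads data to a Graph-style upload session URL in fixed-size
// chunks, labelling each PUT with the Content-Range it covers.
func putChunks(uploadURL string, data []byte, chunkSize int64) error {
    total := int64(len(data))
    for off := int64(0); off < total; off += chunkSize {
        end := off + chunkSize
        if end > total {
            end = total
        }
        req, err := http.NewRequest(http.MethodPut, uploadURL, bytes.NewReader(data[off:end]))
        if err != nil {
            return err
        }
        req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", off, end-1, total))
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return err
        }
        resp.Body.Close()
        // Graph returns 202 Accepted for intermediate chunks, 200/201 for the last.
        if resp.StatusCode >= 300 {
            return fmt.Errorf("chunk %d-%d failed: %s", off, end-1, resp.Status)
        }
    }
    return nil
}

func main() {
    _ = putChunks // wire this to the UploadURL/ChunkSize from HttpDirectUploadInfo
}
```
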
@@ -222,4 +222,18 @@ func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
     }, nil
 }

+func (d *OnedriveAPP) GetDirectUploadTools() []string {
+    if !d.EnableDirectUpload {
+        return nil
+    }
+    return []string{"HttpDirect"}
+}
+
+func (d *OnedriveAPP) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
+    if !d.EnableDirectUpload {
+        return nil, errs.NotImplement
+    }
+    return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
+}
+
 var _ driver.Driver = (*OnedriveAPP)(nil)

@@ -7,14 +7,15 @@ import (

 type Addition struct {
     driver.RootPath
-    Region           string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
-    ClientID         string `json:"client_id" required:"true"`
-    ClientSecret     string `json:"client_secret" required:"true"`
-    TenantID         string `json:"tenant_id"`
-    Email            string `json:"email"`
-    ChunkSize        int64  `json:"chunk_size" type:"number" default:"5"`
-    CustomHost       string `json:"custom_host" help:"Custom host for onedrive download link"`
-    DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
+    Region             string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
+    ClientID           string `json:"client_id" required:"true"`
+    ClientSecret       string `json:"client_secret" required:"true"`
+    TenantID           string `json:"tenant_id"`
+    Email              string `json:"email"`
+    ChunkSize          int64  `json:"chunk_size" type:"number" default:"5"`
+    CustomHost         string `json:"custom_host" help:"Custom host for onedrive download link"`
+    DisableDiskUsage   bool   `json:"disable_disk_usage" default:"false"`
+    EnableDirectUpload bool   `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
 }

 var config = driver.Config{

@@ -88,7 +88,7 @@ func (d *OnedriveAPP) _accessToken() error {
     return nil
 }

-func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
     req := base.RestyClient.R()
     req.SetHeader("Authorization", "Bearer "+d.AccessToken)
     if callback != nil {
@@ -104,7 +104,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
         return nil, err
     }
     if e.Error.Code != "" {
-        if e.Error.Code == "InvalidAuthenticationToken" {
+        if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
             err = d.accessToken()
             if err != nil {
                 return nil, err
@@ -216,9 +216,36 @@ func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
     var resp DriveResp
     _, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
         req.SetContext(ctx)
-    }, &resp)
+    }, &resp, true)
     if err != nil {
         return nil, err
     }
     return &resp, nil
 }

+func (d *OnedriveAPP) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
+    // Create upload session
+    url := d.GetMetaUrl(false, path) + "/createUploadSession"
+    metadata := map[string]any{
+        "item": map[string]any{
+            "@microsoft.graph.conflictBehavior": "rename",
+        },
+    }
+
+    res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
+        req.SetBody(metadata).SetContext(ctx)
+    }, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
+    if uploadUrl == "" {
+        return nil, fmt.Errorf("failed to get upload URL from response")
+    }
+    return &model.HttpDirectUploadInfo{
+        UploadURL: uploadUrl,
+        ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
+        Method:    "PUT",
+    }, nil
+}

@@ -190,9 +190,7 @@ func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {

     expiration := time.Minute
     return &model.Link{
-        RangeReader: &model.FileRangeReader{
-            RangeReaderIF: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
-        },
+        RangeReader:   stream.RateLimitRangeReaderFunc(rangeReaderFunc),
         ContentLength: size,
         Expiration:    &expiration,
     }, nil

@@ -15,6 +15,7 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Strm struct {
|
||||
@@ -40,6 +41,9 @@ func (d *Strm) Init(ctx context.Context) error {
|
||||
if d.Paths == "" {
|
||||
return errors.New("paths is required")
|
||||
}
|
||||
if d.SaveStrmToLocal && len(d.SaveStrmLocalPath) <= 0 {
|
||||
return errors.New("SaveStrmLocalPath is required")
|
||||
}
|
||||
d.pathMap = make(map[string][]string)
|
||||
for _, path := range strings.Split(d.Paths, "\n") {
|
||||
path = strings.TrimSpace(path)
|
||||
@@ -48,6 +52,13 @@ func (d *Strm) Init(ctx context.Context) error {
|
||||
}
|
||||
k, v := getPair(path)
|
||||
d.pathMap[k] = append(d.pathMap[k], v)
|
||||
if d.SaveStrmToLocal {
|
||||
err := InsertStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
|
||||
if err != nil {
|
||||
log.Errorf("insert strmTrie error: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(d.pathMap) == 1 {
|
||||
for k := range d.pathMap {
|
||||
@@ -59,26 +70,51 @@ func (d *Strm) Init(ctx context.Context) error {
|
||||
d.autoFlatten = false
|
||||
}
|
||||
|
||||
d.supportSuffix = supportSuffix()
|
||||
if d.FilterFileTypes != "" {
|
||||
types := strings.Split(d.FilterFileTypes, ",")
|
||||
for _, ext := range types {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.supportSuffix[ext] = struct{}{}
|
||||
}
|
||||
var supportTypes []string
|
||||
if d.FilterFileTypes == "" {
|
||||
d.FilterFileTypes = "mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac"
|
||||
}
|
||||
supportTypes = strings.Split(d.FilterFileTypes, ",")
|
||||
d.supportSuffix = map[string]struct{}{}
|
||||
for _, ext := range supportTypes {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.supportSuffix[ext] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
d.downloadSuffix = downloadSuffix()
|
||||
if d.DownloadFileTypes != "" {
|
||||
downloadTypes := strings.Split(d.DownloadFileTypes, ",")
|
||||
for _, ext := range downloadTypes {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.downloadSuffix[ext] = struct{}{}
|
||||
var downloadTypes []string
|
||||
if d.DownloadFileTypes == "" {
|
||||
d.DownloadFileTypes = "ass,srt,vtt,sub,strm"
|
||||
}
|
||||
downloadTypes = strings.Split(d.DownloadFileTypes, ",")
|
||||
d.downloadSuffix = map[string]struct{}{}
|
||||
for _, ext := range downloadTypes {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.downloadSuffix[ext] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if d.Version != 3 {
|
||||
types := strings.Split("mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac", ",")
|
||||
for _, ext := range types {
|
||||
if _, ok := d.supportSuffix[ext]; !ok {
|
||||
d.supportSuffix[ext] = struct{}{}
|
||||
supportTypes = append(supportTypes, ext)
|
||||
}
|
||||
}
|
||||
d.FilterFileTypes = strings.Join(supportTypes, ",")
|
||||
|
||||
types = strings.Split("ass,srt,vtt,sub,strm", ",")
|
||||
for _, ext := range types {
|
||||
if _, ok := d.downloadSuffix[ext]; !ok {
|
||||
d.supportSuffix[ext] = struct{}{}
|
||||
downloadTypes = append(downloadTypes, ext)
|
||||
}
|
||||
}
|
||||
d.DownloadFileTypes = strings.Join(downloadTypes, ",")
|
||||
d.Version = 3
|
||||
}
|
||||
return nil
|
||||
}
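
The rewritten Init folds the old hardcoded suffix maps into user-visible defaults: empty fields are populated with the default lists, the sets are rebuilt from the now guaranteed non-empty fields, and pre-v3 configs get the legacy defaults merged in exactly once before Version is stamped to 3. The set-building step is equivalent to this small helper (a sketch, not code from the diff):

// suffixSet lowercases, trims and de-duplicates a comma-separated
// extension list into a set; empty entries are dropped.
func suffixSet(csv string) map[string]struct{} {
    set := make(map[string]struct{})
    for _, ext := range strings.Split(csv, ",") {
        ext = strings.ToLower(strings.TrimSpace(ext))
        if ext != "" {
            set[ext] = struct{}{}
        }
    }
    return set
}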

@@ -87,6 +123,9 @@ func (d *Strm) Drop(ctx context.Context) error {
	d.pathMap = nil
	d.downloadSuffix = nil
	d.supportSuffix = nil
	for _, path := range strings.Split(d.Paths, "\n") {
		RemoveStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
	}
	return nil
}

175	drivers/strm/hook.go	Normal file
@@ -0,0 +1,175 @@
package strm

import (
	"context"
	"errors"
	"os"
	stdpath "path"
	"strings"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	log "github.com/sirupsen/logrus"
	"github.com/tchap/go-patricia/v2/patricia"
)

var strmTrie = patricia.NewTrie()

func UpdateLocalStrm(ctx context.Context, path string, objs []model.Obj) {
	path = utils.FixAndCleanPath(path)
	updateLocal := func(driver *Strm, basePath string, objs []model.Obj) {
		relParent := strings.TrimPrefix(basePath, driver.MountPath)
		localParentPath := stdpath.Join(driver.SaveStrmLocalPath, relParent)
		for _, obj := range objs {
			localPath := stdpath.Join(localParentPath, obj.GetName())
			generateStrm(ctx, driver, obj, localPath)
		}
		deleteExtraFiles(localParentPath, objs)
	}

	_ = strmTrie.VisitPrefixes(patricia.Prefix(path), func(needPathPrefix patricia.Prefix, item patricia.Item) error {
		strmDrivers := item.([]*Strm)
		needPath := string(needPathPrefix)
		restPath := strings.TrimPrefix(path, needPath)
		if len(restPath) > 0 && restPath[0] != '/' {
			return nil
		}
		for _, strmDriver := range strmDrivers {
			strmObjs := strmDriver.convert2strmObjs(ctx, path, objs)
			updateLocal(strmDriver, stdpath.Join(stdpath.Base(needPath), restPath), strmObjs)
		}
		return nil
	})
}

func InsertStrm(dstPath string, d *Strm) error {
	prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
	existing := strmTrie.Get(prefix)

	if existing == nil {
		if !strmTrie.Insert(prefix, []*Strm{d}) {
			return errors.New("failed to insert strm")
		}
		return nil
	}
	if lst, ok := existing.([]*Strm); ok {
		strmTrie.Set(prefix, append(lst, d))
	} else {
		return errors.New("invalid trie item type")
	}

	return nil
}

func RemoveStrm(dstPath string, d *Strm) {
	prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
	existing := strmTrie.Get(prefix)
	if existing == nil {
		return
	}
	lst, ok := existing.([]*Strm)
	if !ok {
		return
	}
	if len(lst) == 1 && lst[0] == d {
		strmTrie.Delete(prefix)
		return
	}

	for i, di := range lst {
		if di == d {
			newList := append(lst[:i], lst[i+1:]...)
			strmTrie.Set(prefix, newList)
			return
		}
	}
}
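
UpdateLocalStrm depends on VisitPrefixes visiting every inserted path that is a prefix of the updated path, so a single storage update fans out to every strm driver mounted at or above it. A self-contained illustration of that lookup direction (assumed behavior of github.com/tchap/go-patricia, matching how it is used above):

package main

import (
    "fmt"

    "github.com/tchap/go-patricia/v2/patricia"
)

func main() {
    trie := patricia.NewTrie()
    trie.Insert(patricia.Prefix("/media"), "driver-A")
    trie.Insert(patricia.Prefix("/media/movies"), "driver-B")

    // Visits "/media" and "/media/movies": both are prefixes of the updated
    // path, so both drivers get a chance to refresh their .strm mirrors.
    _ = trie.VisitPrefixes(patricia.Prefix("/media/movies/2024"),
        func(p patricia.Prefix, item patricia.Item) error {
            fmt.Printf("%s -> %v\n", p, item)
            return nil
        })
}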

func generateStrm(ctx context.Context, driver *Strm, obj model.Obj, localPath string) {
	if obj.IsDir() {
		err := utils.CreateNestedDirectory(localPath)
		if err != nil {
			log.Warnf("failed to generate strm dir %s: failed to create dir: %v", localPath, err)
			return
		}
	} else {
		link, err := driver.Link(ctx, obj, model.LinkArgs{})
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to link: %v", localPath, err)
			return
		}
		defer link.Close()
		size := link.ContentLength
		if size <= 0 {
			size = obj.GetSize()
		}
		rrf, err := stream.GetRangeReaderFromLink(size, link)
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to get range reader: %v", localPath, err)
			return
		}
		rc, err := rrf.RangeRead(ctx, http_range.Range{Length: -1})
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to read range: %v", localPath, err)
			return
		}
		defer rc.Close()
		file, err := utils.CreateNestedFile(localPath)
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to create local file: %v", localPath, err)
			return
		}
		defer file.Close()
		if _, err := utils.CopyWithBuffer(file, rc); err != nil {
			log.Warnf("failed to generate strm of obj %s: copy failed: %v", localPath, err)
		}
	}
}

func deleteExtraFiles(localPath string, objs []model.Obj) {
	localFiles, err := getLocalFiles(localPath)
	if err != nil {
		log.Errorf("Failed to read local files from %s: %v", localPath, err)
		return
	}

	objsSet := make(map[string]struct{})
	for _, obj := range objs {
		if obj.IsDir() {
			continue
		}
		objsSet[stdpath.Join(localPath, obj.GetName())] = struct{}{}
	}

	for _, localFile := range localFiles {
		if _, exists := objsSet[localFile]; !exists {
			err := os.Remove(localFile)
			if err != nil {
				log.Errorf("Failed to delete file: %s, error: %v\n", localFile, err)
			} else {
				log.Infof("Deleted file %s", localFile)
			}
		}
	}
}

func getLocalFiles(localPath string) ([]string, error) {
	var files []string
	entries, err := os.ReadDir(localPath)
	if err != nil {
		return nil, err
	}
	for _, entry := range entries {
		if !entry.IsDir() {
			files = append(files, stdpath.Join(localPath, entry.Name()))
		}
	}
	return files, nil
}

func init() {
	op.RegisterObjsUpdateHook(UpdateLocalStrm)
}
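
The init hook wires the trie into the listing pipeline: whenever op refreshes a directory's object list it invokes every registered hook, and UpdateLocalStrm then mirrors the change into each watching driver's local path. The hook shape below is inferred from this file; the alias itself lives in op and is not shown in this diff:

// Compile-time check of the inferred hook signature.
var _ func(ctx context.Context, path string, objs []model.Obj) = UpdateLocalStrm

// A second, purely illustrative hook could be registered the same way:
op.RegisterObjsUpdateHook(func(ctx context.Context, path string, objs []model.Obj) {
    log.Debugf("objs updated under %s: %d entries", path, len(objs))
})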

@@ -8,10 +8,13 @@ import (
type Addition struct {
	Paths             string `json:"paths" required:"true" type:"text"`
	SiteUrl           string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
	FilterFileTypes   string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
	DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass" required:"false" help:"Files need to download with strm (usally subtitles)"`
	DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass,srt,vtt,sub,strm" required:"false" help:"Files need to download with strm (usally subtitles)"`
	FilterFileTypes   string `json:"filterFileTypes" type:"text" default:"mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac" required:"false" help:"Supports suffix name of strm file"`
	EncodePath        bool   `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
	LocalModel        bool   `json:"localModel" default:"false" help:"enable local mode"`
	WithoutUrl        bool   `json:"withoutUrl" default:"false" help:"strm file content without URL prefix"`
	SaveStrmToLocal   bool   `json:"SaveStrmToLocal" default:"false" help:"save strm file locally"`
	SaveStrmLocalPath string `json:"SaveStrmLocalPath" type:"text" help:"save strm file local path"`
	Version           int
}

var config = driver.Config{

@@ -1,36 +0,0 @@
package strm

func supportSuffix() map[string]struct{} {
	return map[string]struct{}{
		// video
		"mp4":  {},
		"mkv":  {},
		"flv":  {},
		"avi":  {},
		"wmv":  {},
		"ts":   {},
		"rmvb": {},
		"webm": {},
		// audio
		"mp3":  {},
		"flac": {},
		"aac":  {},
		"wav":  {},
		"ogg":  {},
		"m4a":  {},
		"wma":  {},
		"alac": {},
	}
}

func downloadSuffix() map[string]struct{} {
	return map[string]struct{}{
		// strm
		"strm": {},
		// subtitles
		"ass": {},
		"srt": {},
		"vtt": {},
		"sub": {},
	}
}

@@ -58,7 +58,10 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
	if err != nil {
		return nil, err
	}
	return d.convert2strmObjs(ctx, reqPath, objs), nil
}

func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []model.Obj) []model.Obj {
	var validObjs []model.Obj
	for _, obj := range objs {
		id, name, path := "", obj.GetName(), ""
@@ -66,12 +69,12 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
		if !obj.IsDir() {
			path = stdpath.Join(reqPath, obj.GetName())
			ext := strings.ToLower(utils.Ext(name))
			if _, ok := d.supportSuffix[ext]; ok {
			if _, ok := d.downloadSuffix[ext]; ok {
				size = obj.GetSize()
			} else if _, ok := d.supportSuffix[ext]; ok {
				id = "strm"
				name = strings.TrimSuffix(name, ext) + "strm"
				size = int64(len(d.getLink(ctx, path)))
			} else if _, ok := d.downloadSuffix[ext]; ok {
				size = obj.GetSize()
			} else {
				continue
			}
@@ -84,13 +87,11 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
			Modified: obj.ModTime(),
			IsFolder: obj.IsDir(),
		}

		thumb, ok := model.GetThumb(obj)
		if !ok {
			validObjs = append(validObjs, &objRes)
			continue
		}

		validObjs = append(validObjs, &model.ObjThumb{
			Object: objRes,
			Thumbnail: model.Thumbnail{
@@ -98,7 +99,7 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
			},
		})
	}
	return validObjs, nil
	return validObjs
}

func (d *Strm) getLink(ctx context.Context, path string) string {
@@ -110,7 +111,7 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
		signPath := sign.Sign(path)
		finalPath = fmt.Sprintf("%s?sign=%s", finalPath, signPath)
	}
	if d.LocalModel {
	if d.WithoutUrl {
		return finalPath
	}
	apiUrl := d.SiteUrl
@@ -119,7 +120,9 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
	} else {
		apiUrl = common.GetApiUrl(ctx)
	}

	if !strings.HasPrefix(finalPath, "/") {
		finalPath = "/" + finalPath
	}
	return fmt.Sprintf("%s/d%s",
		apiUrl,
		finalPath)
@@ -88,7 +88,7 @@ func (d *Terabox) request(rurl string, method string, callback base.ReqCallback,
		return nil, err
	}
	errno := utils.Json.Get(res.Body(), "errno").ToInt()
	if errno == 4000023 || errno == 4500016 {
	if errno == 4000023 || errno == 450016 {
		// reget jsToken
		err = d.resetJsToken()
		if err != nil {
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"

@@ -68,6 +69,7 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
	PackageName:       "com.xunlei.downloadprovider",
	UserAgent:         "ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)",
	DownloadUserAgent: "Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)",
	Space:             x.Space,
	refreshCTokenCk: func(token string) {
		x.CaptchaToken = token
		op.MustSaveDriverStorage(x)
@@ -167,6 +169,7 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {
	UserAgent:         x.UserAgent,
	DownloadUserAgent: x.DownloadUserAgent,
	UseVideoUrl:       x.UseVideoUrl,
	Space:             x.Space,

	refreshCTokenCk: func(token string) {
		x.CaptchaToken = token
@@ -281,7 +284,7 @@ func (xc *XunLeiCommon) Link(ctx context.Context, file model.Obj, args model.Lin
	_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodGet, func(r *resty.Request) {
		r.SetContext(ctx)
		r.SetPathParam("fileID", file.GetID())
		//r.SetQueryParam("space", "")
		r.SetQueryParam("space", xc.Space)
	}, &lFile)
	if err != nil {
		return nil, err
@@ -322,6 +325,7 @@ func (xc *XunLeiCommon) MakeDir(ctx context.Context, parentDir model.Obj, dirNam
			"kind":      FOLDER,
			"name":      dirName,
			"parent_id": parentDir.GetID(),
			"space":     xc.Space,
		})
	}, nil)
	return err
@@ -331,8 +335,9 @@ func (xc *XunLeiCommon) Move(ctx context.Context, srcObj, dstDir model.Obj) erro
	_, err := xc.Request(FILE_API_URL+":batchMove", http.MethodPost, func(r *resty.Request) {
		r.SetContext(ctx)
		r.SetBody(&base.Json{
			"to":  base.Json{"parent_id": dstDir.GetID()},
			"ids": []string{srcObj.GetID()},
			"to":    base.Json{"parent_id": dstDir.GetID()},
			"ids":   []string{srcObj.GetID()},
			"space": xc.Space,
		})
	}, nil)
	return err
@@ -342,7 +347,10 @@ func (xc *XunLeiCommon) Rename(ctx context.Context, srcObj model.Obj, newName st
	_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodPatch, func(r *resty.Request) {
		r.SetContext(ctx)
		r.SetPathParam("fileID", srcObj.GetID())
		r.SetBody(&base.Json{"name": newName})
		r.SetBody(&base.Json{
			"name":  newName,
			"space": xc.Space,
		})
	}, nil)
	return err
}
@@ -351,8 +359,9 @@ func (xc *XunLeiCommon) Copy(ctx context.Context, srcObj, dstDir model.Obj) erro
	_, err := xc.Request(FILE_API_URL+":batchCopy", http.MethodPost, func(r *resty.Request) {
		r.SetContext(ctx)
		r.SetBody(&base.Json{
			"to":  base.Json{"parent_id": dstDir.GetID()},
			"ids": []string{srcObj.GetID()},
			"to":    base.Json{"parent_id": dstDir.GetID()},
			"ids":   []string{srcObj.GetID()},
			"space": xc.Space,
		})
	}, nil)
	return err
@@ -362,6 +371,7 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
	_, err := xc.Request(FILE_API_URL+"/{fileID}/trash", http.MethodPatch, func(r *resty.Request) {
		r.SetContext(ctx)
		r.SetPathParam("fileID", obj.GetID())
		r.SetQueryParam("space", xc.Space)
		r.SetBody("{}")
	}, nil)
	return err
@@ -387,6 +397,7 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
			"size":        file.GetSize(),
			"hash":        gcid,
			"upload_type": UPLOAD_TYPE_RESUMABLE,
			"space":       xc.Space,
		})
	}, &resp)
	if err != nil {
@@ -430,7 +441,7 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
	_, err := xc.Request(FILE_API_URL, http.MethodGet, func(r *resty.Request) {
		r.SetContext(ctx)
		r.SetQueryParams(map[string]string{
			"space":   "",
			"space":   xc.Space,
			"__type":  "drive",
			"refresh": "true",
			"__sync":  "true",
@@ -440,6 +451,17 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
			"limit":   "100",
			"filters": `{"phase":{"eq":"PHASE_TYPE_COMPLETE"},"trashed":{"eq":false}}`,
		})
		// Fetch disk mount directories and similar device paths
		if xc.Space != "" {
			r.SetQueryParamsFromValues(url.Values{
				"with": []string{
					"withCategoryDiskMountPath",
					"withCategoryDriveCachePath",
					"withCategoryHistoryDownloadPath",
					"withReadOnlyFS",
				},
			})
		}
	}, &fileList)
	if err != nil {
		return nil, err
@@ -576,6 +598,7 @@ func (xc *XunLeiCommon) OfflineDownload(ctx context.Context, fileUrl string, par
		"name":        fileName,
		"parent_id":   parentDir.GetID(),
		"upload_type": UPLOAD_TYPE_URL,
		"space":       xc.Space,
		"url": base.Json{
			"url": fileUrl,
		},
@@ -602,6 +625,7 @@ func (xc *XunLeiCommon) OfflineList(ctx context.Context, nextPageToken string) (
			"type":       "offline",
			"limit":      "10000",
			"page_token": nextPageToken,
			"space":      xc.Space,
		})
	}, &resp)

@@ -618,6 +642,7 @@ func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string
		SetQueryParams(map[string]string{
			"task_ids":     strings.Join(taskIDs, ","),
			"delete_files": strconv.FormatBool(deleteFiles),
			"space":        xc.Space,
		})
	}, nil)
	if err != nil {

@@ -46,6 +46,8 @@ type ExpertAddition struct {

	// Prefer the video link over the download link
	UseVideoUrl bool `json:"use_video_url"`

	Space string `json:"space" default:"" help:"device id for remote device"`
}

// Login fingerprint, used to decide whether a re-login is needed
@@ -80,6 +82,8 @@ type Addition struct {
	CreditKey string `json:"credit_key" help:"credit key,used for login"`
	// Login device ID
	DeviceID string `json:"device_id" default:""`

	Space string `json:"space" default:"" help:"device id for remote device"`
}

// Login fingerprint, used to decide whether a re-login is needed
@@ -90,7 +94,6 @@ func (i *Addition) GetIdentity() string {
var config = driver.Config{
	Name:      "Thunder",
	LocalSort: true,
	OnlyProxy: true,
}

var configExpert = driver.Config{

@@ -68,6 +68,7 @@ type Common struct {
	UserAgent         string
	DownloadUserAgent string
	UseVideoUrl       bool
	Space             string

	// Callback invoked after the captcha token is refreshed successfully
	refreshCTokenCk func(token string)
14	go.mod
@@ -5,12 +5,14 @@ go 1.23.4
require (
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
	github.com/KirCute/zip v1.0.1
	github.com/OpenListTeam/go-cache v0.1.0
	github.com/OpenListTeam/sftpd-openlist v1.0.1
	github.com/OpenListTeam/tache v0.2.0
	github.com/OpenListTeam/tache v0.2.1
	github.com/OpenListTeam/times v0.1.0
	github.com/OpenListTeam/wopan-sdk-go v0.1.5
	github.com/ProtonMail/go-crypto v1.3.0
	github.com/ProtonMail/gopenpgp/v2 v2.9.0
	github.com/SheltonZhu/115driver v1.1.1
	github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
	github.com/avast/retry-go v3.0.0+incompatible
@@ -40,6 +42,7 @@ require (
	github.com/gorilla/websocket v1.5.3
	github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499
	github.com/hekmon/transmissionrpc/v3 v3.0.0
	github.com/henrybear327/go-proton-api v1.0.0
	github.com/ipfs/go-ipfs-api v0.7.0
	github.com/itsHenry35/gofakes3 v0.0.8
	github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
@@ -55,7 +58,6 @@ require (
	github.com/pquerna/otp v1.5.0
	github.com/quic-go/quic-go v0.54.1
	github.com/rclone/rclone v1.70.3
	github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
	github.com/shirou/gopsutil/v4 v4.25.5
	github.com/sirupsen/logrus v1.9.3
	github.com/spf13/afero v1.14.0
@@ -66,7 +68,6 @@ require (
	github.com/u2takey/ffmpeg-go v0.5.0
	github.com/upyun/go-sdk/v3 v3.0.4
	github.com/winfsp/cgofuse v1.6.0
	github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
	github.com/zzzhr1990/go-common-entity v0.0.0-20250202070650-1a200048f0d3
	golang.org/x/crypto v0.40.0
	golang.org/x/image v0.29.0
@@ -88,7 +89,6 @@ require (
	github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
	github.com/ProtonMail/go-srp v0.0.7 // indirect
	github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
	github.com/PuerkitoBio/goquery v1.10.3 // indirect
	github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
	github.com/andybalholm/cascadia v1.3.3 // indirect
@@ -101,7 +101,6 @@ require (
	github.com/ebitengine/purego v0.8.4 // indirect
	github.com/emersion/go-message v0.18.2 // indirect
	github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
	github.com/henrybear327/go-proton-api v1.0.0 // indirect
	github.com/geoffgarside/ber v1.2.0 // indirect
	github.com/hashicorp/go-uuid v1.0.3 // indirect
	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -116,12 +115,11 @@ require (
	github.com/minio/xxml v0.0.3 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/otiai10/mint v1.6.3 // indirect
	github.com/quic-go/qpack v0.5.1 // indirect
	github.com/relvacode/iso8601 v1.6.0 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
	github.com/quic-go/qpack v0.5.1 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	go.uber.org/mock v0.5.0 // indirect
	golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
	golang.org/x/mod v0.27.0 // indirect
	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)
20	go.sum
@@ -39,6 +39,10 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
github.com/KirCute/zip v1.0.1 h1:L/tVZglOiDVKDi9Ud+fN49htgKdQ3Z0H80iX8OZk13c=
github.com/KirCute/zip v1.0.1/go.mod h1:xhF7dCB+Bjvy+5a56lenYCKBsH+gxDNPZSy5Cp+nlXk=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
@@ -49,8 +53,8 @@ github.com/OpenListTeam/gsync v0.1.0 h1:ywzGybOvA3lW8K1BUjKZ2IUlT2FSlzPO4DOazfYX
github.com/OpenListTeam/gsync v0.1.0/go.mod h1:h/Rvv9aX/6CdW/7B8di3xK3xNV8dUg45Fehrd/ksZ9s=
github.com/OpenListTeam/sftpd-openlist v1.0.1 h1:j4S3iPFOpnXCUKRPS7uCT4mF2VCl34GyqvH6lqwnkUU=
github.com/OpenListTeam/sftpd-openlist v1.0.1/go.mod h1:uO/wKnbvbdq3rBLmClMTZXuCnw7XW4wlAq4dZe91a40=
github.com/OpenListTeam/tache v0.2.0 h1:Q4MjuyECn0CZCf1ZF91JaVaZTaps1mOTAm8bFj8sr9Q=
github.com/OpenListTeam/tache v0.2.0/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
github.com/OpenListTeam/tache v0.2.1 h1:Uy/xAr05clHuMrr9+5fXAhv0Z5PGJivp4P5DnRez6cw=
github.com/OpenListTeam/tache v0.2.1/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
github.com/OpenListTeam/times v0.1.0 h1:qknxw+qj5CYKgXAwydA102UEpPcpU8TYNGRmwRyPYpg=
github.com/OpenListTeam/times v0.1.0/go.mod h1:Jx7qen5NCYzKk2w14YuvU48YYMcPa1P9a+EJePC15Pc=
github.com/OpenListTeam/wopan-sdk-go v0.1.5 h1:iKKcVzIqBgtGDbn0QbdWrCazSGxXFmYFyrnFBG+U8dI=
@@ -390,8 +394,6 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261 h1:47L8SHM80cXszQydLrpp9MhVkFLLWCvrU9XmJ6XtRu0=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499 h1:4ovnBdiGDFi8putQGxhipuuhXItAgh4/YnzufPYkZkQ=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -419,8 +421,6 @@ github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -641,8 +641,6 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
@@ -715,8 +713,6 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M=
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
@@ -744,6 +740,8 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
@@ -758,8 +756,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
@@ -4,6 +4,7 @@ import (
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -21,7 +22,7 @@ func (RarDecoder) AcceptedExtensions() []string {

func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
	return map[string]tool.MultipartExtension{
		".part1.rar": {".part%d.rar", 2},
		".part1.rar": {regexp.MustCompile("^.*\\.part(\\d+)\\.rar$"), 2},
	}
}
@@ -2,6 +2,7 @@ package sevenzip

import (
	"io"
	"regexp"
	"strings"

	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -18,7 +19,7 @@ func (SevenZip) AcceptedExtensions() []string {

func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
	return map[string]tool.MultipartExtension{
		".7z.001": {".7z.%.3d", 2},
		".7z.001": {regexp.MustCompile("^.*\\.7z\\.(\\d+)$"), 2},
	}
}
@@ -2,13 +2,14 @@ package tool

import (
	"io"
	"regexp"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
)

type MultipartExtension struct {
	PartFileFormat  string
	PartFileFormat  *regexp.Regexp
	SecondPartIndex int
}
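
Replacing the printf-style PartFileFormat with a regexp lets a single pattern match any zero-padding width (.z01, .part2.rar, .7z.002, and so on) instead of one fixed format string. A sketch of extracting the part index with it (hypothetical helper, not from the diff):

import (
    "regexp"
    "strconv"
)

// partIndex extracts the numeric part index from a volume name, e.g.
// "movie.part12.rar" against ^.*\.part(\d+)\.rar$ yields 12.
func partIndex(re *regexp.Regexp, name string) (int, bool) {
    m := re.FindStringSubmatch(name)
    if len(m) != 2 {
        return 0, false
    }
    n, err := strconv.Atoi(m[1])
    if err != nil {
        return 0, false
    }
    return n, true
}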

@@ -4,22 +4,15 @@ import (
	"bytes"
	"io"
	"io/fs"
	stdpath "path"
	"strings"

	"github.com/KirCute/zip"
	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/setting"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/saintfish/chardet"
	"github.com/yeka/zip"
	"golang.org/x/text/encoding"
	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/encoding/japanese"
	"golang.org/x/text/encoding/korean"
	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/encoding/traditionalchinese"
	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/encoding/unicode/utf32"
	"golang.org/x/text/encoding/ianaindex"
	"golang.org/x/text/transform"
)

@@ -37,10 +30,11 @@ func (r *WrapReader) Files() []tool.SubFile {

type WrapFileInfo struct {
	fs.FileInfo
	efs bool
}

func (f *WrapFileInfo) Name() string {
	return decodeName(f.FileInfo.Name())
	return decodeName(f.FileInfo.Name(), f.efs)
}

type WrapFile struct {
@@ -48,11 +42,11 @@ type WrapFile struct {
}

func (f *WrapFile) Name() string {
	return decodeName(f.f.Name)
	return decodeName(f.f.Name, isEFS(f.f.Flags))
}

func (f *WrapFile) FileInfo() fs.FileInfo {
	return &WrapFileInfo{FileInfo: f.f.FileInfo()}
	return &WrapFileInfo{FileInfo: f.f.FileInfo(), efs: isEFS(f.f.Flags)}
}

func (f *WrapFile) Open() (io.ReadCloser, error) {
@@ -67,16 +61,33 @@ func (f *WrapFile) SetPassword(password string) {
	f.f.SetPassword(password)
}

func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
	if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
		// FIXME: Incorrect parsing method for standard multipart zip format
		ss = append(ss[1:], ss[0])
	}
	reader, err := stream.NewMultiReaderAt(ss)
func makePart(ss *stream.SeekableStream) (zip.SizeReaderAt, error) {
	ra, err := stream.NewReadAtSeeker(ss, 0)
	if err != nil {
		return nil, err
	}
	return zip.NewReader(reader, reader.Size())
	return &inlineSizeReaderAt{ReaderAt: ra, size: ss.GetSize()}, nil
}

func (z *Zip) getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
	if len(ss) > 1 && z.traditionalSecondPartRegExp.MatchString(ss[1].GetName()) {
		ss = append(ss[1:], ss[0])
		ras := make([]zip.SizeReaderAt, 0, len(ss))
		for _, s := range ss {
			ra, err := makePart(s)
			if err != nil {
				return nil, err
			}
			ras = append(ras, ra)
		}
		return zip.NewMultipartReader(ras)
	} else {
		reader, err := stream.NewMultiReaderAt(ss)
		if err != nil {
			return nil, err
		}
		return zip.NewReader(reader, reader.Size())
	}
}

func filterPassword(err error) error {
@@ -86,110 +97,29 @@ func filterPassword(err error) error {
	return err
}

func decodeName(name string) string {
	b := []byte(name)
	detector := chardet.NewTextDetector()
	results, err := detector.DetectAll(b)
func decodeName(name string, efs bool) string {
	if efs {
		return name
	}
	enc, err := ianaindex.IANA.Encoding(setting.GetStr(conf.NonEFSZipEncoding))
	if err != nil {
		return name
	}
	var ce, re, enc encoding.Encoding
	for _, r := range results {
		if r.Confidence > 30 {
			ce = getCommonEncoding(r.Charset)
			if ce != nil {
				break
			}
		}
		if re == nil {
			re = getEncoding(r.Charset)
		}
	}
	if ce != nil {
		enc = ce
	} else if re != nil {
		enc = re
	} else {
		return name
	}
	i := bytes.NewReader(b)
	i := bytes.NewReader([]byte(name))
	decoder := transform.NewReader(i, enc.NewDecoder())
	content, _ := io.ReadAll(decoder)
	return string(content)
}

func getCommonEncoding(name string) (enc encoding.Encoding) {
	switch name {
	case "UTF-8":
		enc = unicode.UTF8
	case "UTF-16LE":
		enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
	case "Shift_JIS":
		enc = japanese.ShiftJIS
	case "GB-18030":
		enc = simplifiedchinese.GB18030
	case "EUC-KR":
		enc = korean.EUCKR
	case "Big5":
		enc = traditionalchinese.Big5
	default:
		enc = nil
	}
	return
func isEFS(flags uint16) bool {
	return (flags & 0x800) > 0
}

func getEncoding(name string) (enc encoding.Encoding) {
	switch name {
	case "UTF-8":
		enc = unicode.UTF8
	case "UTF-16BE":
		enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
	case "UTF-16LE":
		enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
	case "UTF-32BE":
		enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM)
	case "UTF-32LE":
		enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM)
	case "ISO-8859-1":
		enc = charmap.ISO8859_1
	case "ISO-8859-2":
		enc = charmap.ISO8859_2
	case "ISO-8859-3":
		enc = charmap.ISO8859_3
	case "ISO-8859-4":
		enc = charmap.ISO8859_4
	case "ISO-8859-5":
		enc = charmap.ISO8859_5
	case "ISO-8859-6":
		enc = charmap.ISO8859_6
	case "ISO-8859-7":
		enc = charmap.ISO8859_7
	case "ISO-8859-8":
		enc = charmap.ISO8859_8
	case "ISO-8859-8-I":
		enc = charmap.ISO8859_8I
	case "ISO-8859-9":
		enc = charmap.ISO8859_9
	case "windows-1251":
		enc = charmap.Windows1251
	case "windows-1256":
		enc = charmap.Windows1256
	case "KOI8-R":
		enc = charmap.KOI8R
	case "Shift_JIS":
		enc = japanese.ShiftJIS
	case "GB-18030":
		enc = simplifiedchinese.GB18030
	case "EUC-JP":
		enc = japanese.EUCJP
	case "EUC-KR":
		enc = korean.EUCKR
	case "Big5":
		enc = traditionalchinese.Big5
	case "ISO-2022-JP":
		enc = japanese.ISO2022JP
	default:
		enc = nil
	}
	return
type inlineSizeReaderAt struct {
	io.ReaderAt
	size int64
}

func (i *inlineSizeReaderAt) Size() int64 {
	return i.size
}
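
Two points in this rewrite are easy to miss. Multipart archives are now stitched together with zip.NewMultipartReader over per-part SizeReaderAt adapters rather than one flat concatenated reader. And name decoding no longer guesses with chardet: bit 11 (0x800) of a zip entry's general-purpose flags is the EFS (language encoding) bit from the zip spec, which declares the name and comment to be UTF-8; only when it is clear does decodeName reinterpret the raw bytes with the encoding configured under non_efs_zip_encoding (IBM437 by default, the historical zip code page). A minimal sketch of that decode step under the same assumptions:

// Decode a non-EFS zip entry name using a configured IANA encoding name.
func decodeLegacyName(raw []byte, ianaName string) string {
    enc, err := ianaindex.IANA.Encoding(ianaName) // e.g. "IBM437"
    if err != nil || enc == nil {
        return string(raw) // unknown encoding: fall back to the raw bytes
    }
    out, _, err := transform.Bytes(enc.NewDecoder(), raw)
    if err != nil {
        return string(raw)
    }
    return string(out)
}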

@@ -3,6 +3,7 @@ package zip
import (
	"io"
	stdpath "path"
	"regexp"
	"strings"

	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -12,34 +13,39 @@ import (
)

type Zip struct {
	traditionalSecondPartRegExp *regexp.Regexp
}

func (Zip) AcceptedExtensions() []string {
func (z *Zip) AcceptedExtensions() []string {
	return []string{}
}

func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
func (z *Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
	return map[string]tool.MultipartExtension{
		".zip":     {".z%.2d", 1},
		".zip.001": {".zip.%.3d", 2},
		".zip":     {regexp.MustCompile("^.*\\.z(\\d+)$"), 1},
		".zip.001": {regexp.MustCompile("^.*\\.zip\\.(\\d+)$"), 2},
	}
}

func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	zipReader, err := getReader(ss)
func (z *Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	zipReader, err := z.getReader(ss)
	if err != nil {
		return nil, err
	}
	efs := true
	if len(zipReader.File) > 0 {
		efs = isEFS(zipReader.File[0].Flags)
	}
	encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
	return &model.ArchiveMetaInfo{
		Comment:   zipReader.Comment,
		Comment:   decodeName(zipReader.Comment, efs),
		Encrypted: encrypted,
		Tree:      tree,
	}, nil
}

func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	zipReader, err := getReader(ss)
func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	zipReader, err := z.getReader(ss)
	if err != nil {
		return nil, err
	}
@@ -57,7 +63,7 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
			_ = rc.Close()
			passVerified = true
		}
		name := strings.TrimSuffix(decodeName(file.Name), "/")
		name := strings.TrimSuffix(decodeName(file.Name, isEFS(file.Flags)), "/")
		if strings.Contains(name, "/") {
			// Some archives don't store the first folder as its own entry
			strs := strings.Split(name, "/")
@@ -70,7 +76,7 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
			}
			continue
		}
		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo(), efs: isEFS(file.Flags)}))
	}
	if len(ret) == 0 && dir != nil {
		ret = append(ret, dir)
@@ -81,13 +87,13 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
	ret := make([]model.Obj, 0)
	exist := false
	for _, file := range zipReader.File {
		name := decodeName(file.Name)
		name := decodeName(file.Name, isEFS(file.Flags))
		dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
		if dir != innerPath {
			continue
		}
		exist = true
		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo(), isEFS(file.Flags)}))
	}
	if !exist {
		return nil, errs.ObjectNotFound
@@ -96,14 +102,14 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
	}
}

func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
	zipReader, err := getReader(ss)
func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
	zipReader, err := z.getReader(ss)
	if err != nil {
		return nil, 0, err
	}
	innerPath := strings.TrimPrefix(args.InnerPath, "/")
	for _, file := range zipReader.File {
		if decodeName(file.Name) == innerPath {
		if decodeName(file.Name, isEFS(file.Flags)) == innerPath {
			if file.IsEncrypted() {
				file.SetPassword(args.Password)
			}
@@ -117,8 +123,8 @@ func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io
	return nil, 0, errs.ObjectNotFound
}

func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
	zipReader, err := getReader(ss)
func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
	zipReader, err := z.getReader(ss)
	if err != nil {
		return err
	}
@@ -128,5 +134,7 @@ func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model
var _ tool.Tool = (*Zip)(nil)

func init() {
	tool.RegisterTool(Zip{})
	tool.RegisterTool(&Zip{
		traditionalSecondPartRegExp: regexp.MustCompile("^.*\\.z0*1$"),
	})
}
@@ -39,7 +39,21 @@ func InitConfig() {
	if !filepath.IsAbs(dataDir) {
		flags.DataDir = filepath.Join(pwd, flags.DataDir)
	}
	configPath := filepath.Join(flags.DataDir, "config.json")
	// Determine config file path: use flags.ConfigPath if provided, otherwise default to <dataDir>/config.json
	configPath := flags.ConfigPath
	if configPath == "" {
		configPath = filepath.Join(flags.DataDir, "config.json")
	} else {
		// if relative, resolve relative to working directory
		if !filepath.IsAbs(configPath) {
			if absPath, err := filepath.Abs(configPath); err == nil {
				configPath = absPath
			} else {
				configPath = filepath.Join(pwd, configPath)
			}
		}
	}
	configPath = filepath.Clean(configPath)
	log.Infof("reading config file: %s", configPath)
	if !utils.Exists(configPath) {
		log.Infof("config file not exists, creating default config file")
@@ -126,6 +140,10 @@ func InitConfig() {
		log.Fatalf("create temp dir error: %+v", err)
	}
	log.Debugf("config: %+v", conf.Conf)

	// Validate and display proxy configuration status
	validateProxyConfig()

	base.InitClient()
	initURL()
}
@@ -165,3 +183,14 @@ func CleanTempDir() {
		}
	}
}

// validateProxyConfig validates proxy configuration and displays status at startup
func validateProxyConfig() {
	if conf.Conf.ProxyAddress != "" {
		if _, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
			log.Infof("Proxy enabled: %s", conf.Conf.ProxyAddress)
		} else {
			log.Errorf("Invalid proxy address format: %s, error: %v", conf.Conf.ProxyAddress, err)
		}
	}
}
@@ -154,6 +154,7 @@ func InitialSettings() []model.SettingItem {
	{Key: conf.SharePreviewArchivesByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
	{Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
	{Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
	{Key: conf.NonEFSZipEncoding, Value: "IBM437", Type: conf.TypeString, Group: model.PREVIEW},
	// global settings
	{Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL},
	{Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL},
@@ -131,6 +131,7 @@ type Config struct {
	FTP                 FTP    `json:"ftp" envPrefix:"FTP_"`
	SFTP                SFTP   `json:"sftp" envPrefix:"SFTP_"`
	LastLaunchedVersion string `json:"last_launched_version"`
	ProxyAddress        string `json:"proxy_address" env:"PROXY_ADDRESS"`
}

func DefaultConfig(dataDir string) *Config {
@@ -244,5 +245,6 @@ func DefaultConfig(dataDir string) *Config {
			Listen: ":5222",
		},
		LastLaunchedVersion: "",
		ProxyAddress:        "",
	}
}
@@ -38,6 +38,7 @@ const (
	SharePreviewArchivesByDefault = "share_preview_archives_by_default"
	ReadMeAutoRender              = "readme_autorender"
	FilterReadMeScripts           = "filter_readme_scripts"
	NonEFSZipEncoding             = "non_efs_zip_encoding"

	// global
	HideFiles = "hide_files"
@@ -38,18 +38,26 @@ func GetSharingsByCreatorId(creator uint, pageIndex, pageSize int) (sharings []m
}

func CreateSharing(s *model.SharingDB) (string, error) {
	id := random.String(8)
	for len(id) < 12 {
		old := model.SharingDB{
			ID: id,
	if s.ID == "" {
		id := random.String(8)
		for len(id) < 12 {
			old := model.SharingDB{
				ID: id,
			}
			if err := db.Where(old).First(&old).Error; err != nil {
				s.ID = id
				return id, errors.WithStack(db.Create(s).Error)
			}
			id += random.String(1)
		}
		if err := db.Where(old).First(&old).Error; err != nil {
			s.ID = id
			return id, errors.WithStack(db.Create(s).Error)
		return "", errors.New("failed find valid id")
	} else {
		query := model.SharingDB{ID: s.ID}
		if err := db.Where(query).First(&query).Error; err == nil {
			return "", errors.New("sharing already exist")
		}
		id += random.String(1)
		return s.ID, errors.WithStack(db.Create(s).Error)
	}
	return "", errors.New("failed find valid id")
}
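
CreateSharing now has two entry modes: with an empty ID it keeps the original grow-until-unique random-ID loop, and with a caller-supplied ID it fails fast when that ID is already taken. Hypothetical call sites (package name assumed):

// Auto-generated ID (original behavior).
id, err := db.CreateSharing(&model.SharingDB{})

// Caller-chosen ID: returns "sharing already exist" if taken.
_, err = db.CreateSharing(&model.SharingDB{ID: "my-share"})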

func UpdateSharing(s *model.SharingDB) error {

@@ -218,3 +218,12 @@ type LinkCacheModeResolver interface {
	// ResolveLinkCacheMode returns the LinkCacheMode for the given path.
	ResolveLinkCacheMode(path string) LinkCacheMode
}

type DirectUploader interface {
	// GetDirectUploadTools returns available frontend-direct upload tools
	GetDirectUploadTools() []string
	// GetDirectUploadInfo returns the information needed for direct upload from client to storage
	// actualPath is the path relative to the storage root (after removing mount path prefix)
	// return errs.NotImplement if the driver does not support the given direct upload tool
	GetDirectUploadInfo(ctx context.Context, tool string, dstDir model.Obj, fileName string, fileSize int64) (any, error)
}
@@ -7,9 +7,10 @@ import (
)

var (
	ObjectNotFound = errors.New("object not found")
	NotFolder      = errors.New("not a folder")
	NotFile        = errors.New("not a file")
	ObjectNotFound      = errors.New("object not found")
	ObjectAlreadyExists = errors.New("object already exists")
	NotFolder           = errors.New("not a folder")
	NotFile             = errors.New("not a file")
)

func IsObjectNotFound(err error) bool {
@@ -167,6 +167,14 @@ func GetStorage(path string, args *GetStoragesArgs) (driver.Driver, error) {
	return storageDriver, nil
}

func GetStorageAndActualPath(path string) (driver.Driver, string, error) {
	return op.GetStorageAndActualPath(path)
}

func GetByActualPath(ctx context.Context, storage driver.Driver, actualPath string) (model.Obj, error) {
	return op.Get(ctx, storage, actualPath)
}

func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
	res, err := other(ctx, args)
	if err != nil {
@@ -190,3 +198,11 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error {
	}
	return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
}

func GetDirectUploadInfo(ctx context.Context, tool, path, dstName string, fileSize int64) (any, error) {
	info, err := getDirectUploadInfo(ctx, tool, path, dstName, fileSize)
	if err != nil {
		log.Errorf("failed get %s direct upload info for %s(%d bytes): %+v", path, dstName, fileSize, err)
	}
	return info, err
}
@@ -105,3 +105,11 @@ func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer
	}
	return op.Put(ctx, storage, dstDirActualPath, file, nil, lazyCache...)
}

func getDirectUploadInfo(ctx context.Context, tool, dstDirPath, dstName string, fileSize int64) (any, error) {
	storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
	if err != nil {
		return nil, errors.WithMessage(err, "failed get storage")
	}
	return op.GetDirectUploadInfo(ctx, tool, storage, dstDirActualPath, dstName, fileSize)
}
@@ -34,7 +34,7 @@ type Link struct {
	//for accelerating request, use multi-thread downloading
	Concurrency int `json:"concurrency"`
	PartSize    int `json:"part_size"`
	ContentLength int64 `json:"-"` // transcoded video, thumbnails
	ContentLength int64 `json:"content_length"` // transcoded video, thumbnails

	utils.SyncClosers `json:"-"`
	// Should be true if the Link becomes unusable once the resources in SyncClosers are closed

8	internal/model/direct_upload.go	Normal file
@@ -0,0 +1,8 @@
package model

type HttpDirectUploadInfo struct {
	UploadURL string            `json:"upload_url"`        // The URL to upload the file
	ChunkSize int64             `json:"chunk_size"`        // The chunk size for uploading, 0 means no chunking required
	Headers   map[string]string `json:"headers,omitempty"` // Optional headers to include in the upload request
	Method    string            `json:"method,omitempty"`  // HTTP method, default is PUT
}
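
Serialized for the frontend, the struct above yields a compact JSON payload; a nil Headers map is dropped by omitempty. Illustrative values only:

info := model.HttpDirectUploadInfo{
    UploadURL: "https://example.com/upload-session", // hypothetical URL
    ChunkSize: 5 << 20,                              // 5 MiB
    Method:    "PUT",
}
b, _ := json.Marshal(info)
fmt.Println(string(b))
// {"upload_url":"https://example.com/upload-session","chunk_size":5242880,"method":"PUT"}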

@@ -27,6 +27,9 @@ func (f *FileCloser) Close() error {
	return errors.Join(errs...)
}

// FileRangeReader is a thin wrapper around RangeReaderIF indicating that the
// io.ReadCloser returned by RangeReaderIF.RangeRead also implements model.File
// (i.e. supports Read/ReadAt/Seek). Only use FileRangeReader when that holds;
// otherwise use RangeReaderIF directly.
type FileRangeReader struct {
	RangeReaderIF
}
@@ -48,7 +48,6 @@ type FileStreamer interface {
	// for a non-seekable Stream, if Read is called, this function won't work.
	// caches the full Stream and writes it to writer (if provided, even if the stream is already cached).
	CacheFullAndWriter(up *UpdateProgress, writer io.Writer) (File, error)
	SetTmpFile(file File)
	// if the Stream is not a File and is not cached, returns nil.
	GetFile() File
}
@@ -283,11 +283,15 @@ func HttpClient() *http.Client {
}

func NewHttpClient() *http.Client {
	transport := &http.Transport{
		Proxy:           http.ProxyFromEnvironment,
		TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
	}

	SetProxyIfConfigured(transport)

	return &http.Client{
		Timeout: time.Hour * 48,
		Transport: &http.Transport{
			Proxy:           http.ProxyFromEnvironment,
			TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
		},
		Timeout:   time.Hour * 48,
		Transport: transport,
	}
}
@@ -1,18 +1,20 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -305,39 +307,9 @@ func rangesMIMESize(ranges []http_range.Range, contentType string, contentSize i
	return encSize, nil
}

// LimitedReadCloser wraps an io.ReadCloser and limits the number of bytes that can be read from it.
type LimitedReadCloser struct {
	rc        io.ReadCloser
	remaining int
}

func (l *LimitedReadCloser) Read(buf []byte) (int, error) {
	if l.remaining <= 0 {
		return 0, io.EOF
	}

	if len(buf) > l.remaining {
		buf = buf[0:l.remaining]
	}

	n, err := l.rc.Read(buf)
	l.remaining -= n

	return n, err
}

func (l *LimitedReadCloser) Close() error {
	return l.rc.Close()
}

// GetRangedHttpReader: some HTTP servers don't support the "Range" header,
// so this function reads the whole body from readCloser, skips offset bytes, then returns a ReadCloser.
func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.ReadCloser, error) {
	var length_int int
	if length > math.MaxInt {
		return nil, fmt.Errorf("does not support length bigger than int max")
	}
	length_int = int(length)

	if offset > 100*1024*1024 {
		log.Warnf("offset is more than 100MB; if loading data from the internet, high latency and wasted bandwidth are expected")
@@ -348,5 +320,25 @@ func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.Rea
	}

	// return an io.ReadCloser that is limited to `length` bytes.
	return &LimitedReadCloser{readCloser, length_int}, nil
	return readers.NewLimitedReadCloser(readCloser, length), nil
}

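A usage sketch for GetRangedHttpReader, assuming resp is an *http.Response from a server that ignored the Range header; the numbers are illustrative:

	// Skip the first 1 KiB of the full body, then expose at most 4 KiB.
	rc, err := net.GetRangedHttpReader(resp.Body, 1024, 4096)
	if err != nil {
		return err
	}
	defer rc.Close()
	part, err := io.ReadAll(rc)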
// SetProxyIfConfigured sets the proxy on an HTTP Transport if one is configured
func SetProxyIfConfigured(transport *http.Transport) {
	// If a proxy address is configured, it overrides the environment variable settings
	if conf.Conf.ProxyAddress != "" {
		if proxyURL, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
			transport.Proxy = http.ProxyURL(proxyURL)
		}
	}
}

// SetRestyProxyIfConfigured sets the proxy on a Resty client if one is configured
func SetRestyProxyIfConfigured(client *resty.Client) {
	// If a proxy address is configured, it overrides the environment variable settings
	if conf.Conf.ProxyAddress != "" {
		if proxyURL, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
			client.SetProxy(proxyURL.String())
		}
	}
}

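A minimal sketch of applying both helpers when building clients, mirroring NewHttpClient above; the variable names are illustrative:

	transport := &http.Transport{Proxy: http.ProxyFromEnvironment}
	net.SetProxyIfConfigured(transport) // conf.Conf.ProxyAddress, when set, overrides the environment
	httpClient := &http.Client{Transport: transport}

	restyClient := resty.New()
	net.SetRestyProxyIfConfigured(restyClient)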
@@ -3,19 +3,18 @@ package op

import (
	"context"
	stderrors "errors"
	"fmt"
	"io"
	stdpath "path"
	"strconv"
	"strings"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
	"github.com/OpenListTeam/OpenList/v4/internal/cache"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	gocache "github.com/OpenListTeam/go-cache"

@@ -61,20 +60,25 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
	if err != nil {
		return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
	}
	baseName, ext, found := strings.Cut(obj.GetName(), ".")
	if !found {
		_ = l.Close()
		return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
	}
	partExt, t, err := tool.GetArchiveTool("." + ext)
	if err != nil {
		var e error
		partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
		if e != nil {

	// Get archive tool
	var partExt *tool.MultipartExtension
	var t tool.Tool
	ext := obj.GetName()
	for {
		var found bool
		_, ext, found = strings.Cut(ext, ".")
		if !found {
			_ = l.Close()
			return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
			return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
		}
		partExt, t, err = tool.GetArchiveTool("." + ext)
		if err == nil {
			break
		}
	}

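The new loop peels one dot-separated segment per iteration and retries the lookup on each remaining suffix. A standalone sketch of that matching idea, using a hypothetical lookup table in place of tool.GetArchiveTool:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		known := map[string]bool{".rar": true} // hypothetical registry
		ext := "backup.part1.rar"
		for {
			var found bool
			_, ext, found = strings.Cut(ext, ".")
			if !found {
				fmt.Println("no archive tool matched")
				return
			}
			if known["."+ext] { // ".part1.rar" misses, ".rar" hits
				fmt.Println("matched suffix:", "."+ext)
				return
			}
		}
	}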
	// Get first part stream
	ss, err := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: obj}, l)
	if err != nil {
		_ = l.Close()
@@ -83,29 +87,62 @@
	ret := []*stream.SeekableStream{ss}
	if partExt == nil {
		return obj, t, ret, nil
	} else {
		index := partExt.SecondPartIndex
		dir := stdpath.Dir(path)
		for {
			p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
			var o model.Obj
			l, o, err = Link(ctx, storage, p, args)
			if err != nil {
				break
			}
			ss, err = stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o}, l)
			if err != nil {
				_ = l.Close()
				for _, s := range ret {
					_ = s.Close()
				}
				return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
			}
			ret = append(ret, ss)
			index++
		}
	}

	// Merge multi-part archive
	dir := stdpath.Dir(path)
	objs, err := List(ctx, storage, dir, model.ListArgs{})
	if err != nil {
		return obj, t, ret, nil
	}
	for _, o := range objs {
		submatch := partExt.PartFileFormat.FindStringSubmatch(o.GetName())
		if submatch == nil {
			continue
		}
		partIdx, e := strconv.Atoi(submatch[1])
		if e != nil {
			continue
		}
		partIdx = partIdx - partExt.SecondPartIndex + 1
		if partIdx < 1 {
			continue
		}
		p := stdpath.Join(dir, o.GetName())
		l1, o1, e := Link(ctx, storage, p, args)
		if e != nil {
			err = errors.WithMessagef(e, "failed get [%s] link", p)
			break
		}
		ss1, e := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o1}, l1)
		if e != nil {
			_ = l1.Close()
			err = errors.WithMessagef(e, "failed get [%s] stream", p)
			break
		}
		for partIdx >= len(ret) {
			ret = append(ret, nil)
		}
		ret[partIdx] = ss1
	}
	closeAll := func(r []*stream.SeekableStream) {
		for _, s := range r {
			if s != nil {
				_ = s.Close()
			}
		}
	}
	if err != nil {
		closeAll(ret)
		return nil, nil, nil, err
	}
	for i, ss1 := range ret {
		if ss1 == nil {
			closeAll(ret)
			return nil, nil, nil, errors.Errorf("failed merge [%s] parts, missing part %d", path, i)
		}
	}
	return obj, t, ret, nil
}

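The merge pass maps each matching sibling file to a slot in ret via its captured part number. A sketch of that index arithmetic, with a hypothetical multipart pattern (the real patterns live in the archive tool registry):

	package main

	import (
		"fmt"
		"regexp"
		"strconv"
	)

	func main() {
		partFileFormat := regexp.MustCompile(`\.z(\d{2})$`) // hypothetical: "backup.z01", "backup.z02", ...
		secondPartIndex := 1                                // ".z01" is the second stream overall

		m := partFileFormat.FindStringSubmatch("backup.z02")
		idx, _ := strconv.Atoi(m[1])         // 2
		partIdx := idx - secondPartIndex + 1 // slot 2 in ret (ret[0] is the first part)
		fmt.Println(partIdx)
	}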
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {

@@ -57,7 +57,7 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
	model.WrapObjsName(files)
	// call hooks
	go func(reqPath string, files []model.Obj) {
		HandleObjsUpdateHook(reqPath, files)
		HandleObjsUpdateHook(context.WithoutCancel(ctx), reqPath, files)
	}(utils.GetFullPath(storage.GetStorage().MountPath, path), files)

	// sort objs

@@ -568,15 +568,15 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
	dstPath := stdpath.Join(dstDirPath, dstName)
	_, err := GetUnwrap(ctx, storage, dstPath)
	if err == nil {
		return errors.New("obj already exists")
		return errors.WithStack(errs.ObjectAlreadyExists)
	}
	err = MakeDir(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessagef(err, "failed to put url")
		return errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
	}
	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessagef(err, "failed to put url")
		return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
	}
	switch s := storage.(type) {
	case driver.PutURLResult:
@@ -599,8 +599,48 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
		}
	default:
		return errs.NotImplement
		return errors.WithStack(errs.NotImplement)
	}
	log.Debugf("put url [%s](%s) done", dstName, url)
	return errors.WithStack(err)
}

func GetDirectUploadTools(storage driver.Driver) []string {
	du, ok := storage.(driver.DirectUploader)
	if !ok {
		return nil
	}
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil
	}
	return du.GetDirectUploadTools()
}

func GetDirectUploadInfo(ctx context.Context, tool string, storage driver.Driver, dstDirPath, dstName string, fileSize int64) (any, error) {
	du, ok := storage.(driver.DirectUploader)
	if !ok {
		return nil, errors.WithStack(errs.NotImplement)
	}
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
	}
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	dstPath := stdpath.Join(dstDirPath, dstName)
	_, err := GetUnwrap(ctx, storage, dstPath)
	if err == nil {
		return nil, errors.WithStack(errs.ObjectAlreadyExists)
	}
	err = MakeDir(ctx, storage, dstDirPath)
	if err != nil {
		return nil, errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
	}
	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
	if err != nil {
		return nil, errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
	}
	info, err := du.GetDirectUploadInfo(ctx, tool, dstDir, dstName, fileSize)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return info, nil
}

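A minimal sketch of a driver opting into direct upload; the driver.DirectUploader method set is inferred from the calls above and the struct in internal/model/direct_upload.go, so treat the signatures as assumptions:

	func (d *MyDriver) GetDirectUploadTools() []string {
		return []string{"HttpDirect"} // hypothetical tool name
	}

	func (d *MyDriver) GetDirectUploadInfo(ctx context.Context, tool string, dstDir model.Obj, dstName string, fileSize int64) (any, error) {
		return &model.HttpDirectUploadInfo{
			UploadURL: "https://storage.example.com/presigned", // hypothetical presigned URL
			Method:    "PUT",
		}, nil
	}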
@@ -1,6 +1,7 @@
package op

import (
	"context"
	"regexp"
	"strings"

@@ -13,7 +14,7 @@ import (
)

// Obj
type ObjsUpdateHook = func(parent string, objs []model.Obj)
type ObjsUpdateHook = func(ctx context.Context, parent string, objs []model.Obj)

var (
	objsUpdateHooks = make([]ObjsUpdateHook, 0)

@@ -23,9 +24,9 @@ func RegisterObjsUpdateHook(hook ObjsUpdateHook) {
	objsUpdateHooks = append(objsUpdateHooks, hook)
}

func HandleObjsUpdateHook(parent string, objs []model.Obj) {
func HandleObjsUpdateHook(ctx context.Context, parent string, objs []model.Obj) {
	for _, hook := range objsUpdateHooks {
		hook(parent, objs)
		hook(ctx, parent, objs)
	}
}

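A minimal sketch of registering a hook under the new context-aware signature:

	op.RegisterObjsUpdateHook(func(ctx context.Context, parent string, objs []model.Obj) {
		if ctx.Err() != nil {
			return // List passes context.WithoutCancel(ctx), but hooks should still be context-aware
		}
		log.Debugf("objs updated under %s: %d entries", parent, len(objs))
	})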
@@ -199,14 +199,13 @@ func Config(ctx context.Context) searcher.Config {
	return instance.Config()
}

func Update(parent string, objs []model.Obj) {
func Update(ctx context.Context, parent string, objs []model.Obj) {
	if instance == nil || !instance.Config().AutoUpdate || !setting.GetBool(conf.AutoUpdateIndex) || Running() {
		return
	}
	if isIgnorePath(parent) {
		return
	}
	ctx := context.Background()
	// only update when the index has been built
	progress, err := Progress()
	if err != nil {

@@ -7,7 +7,6 @@ import (

	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"golang.org/x/time/rate"
)

@@ -42,17 +41,14 @@ type RateLimitReader struct {
}

func (r *RateLimitReader) Read(p []byte) (n int, err error) {
	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
		return 0, r.Ctx.Err()
	if err = r.Ctx.Err(); err != nil {
		return 0, err
	}
	n, err = r.Reader.Read(p)
	if err != nil {
		return
	}
	if r.Limiter != nil {
		if r.Ctx == nil {
			r.Ctx = context.Background()
		}
		err = r.Limiter.WaitN(r.Ctx, n)
	}
	return
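Note that Ctx must now be non-nil: the nil check and the context.Background() fallback were removed. A minimal usage sketch, assuming this file's RateLimitReader:

	limiter := rate.NewLimiter(rate.Limit(1<<20), 1<<20) // ~1 MiB/s with a 1 MiB burst
	r := &RateLimitReader{
		Ctx:     ctx, // must be non-nil after this change
		Reader:  src,
		Limiter: limiter,
	}
	_, err := io.Copy(dst, r)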
@@ -72,17 +68,14 @@ type RateLimitWriter struct {
}

func (w *RateLimitWriter) Write(p []byte) (n int, err error) {
	if w.Ctx != nil && utils.IsCanceled(w.Ctx) {
		return 0, w.Ctx.Err()
	if err = w.Ctx.Err(); err != nil {
		return 0, err
	}
	n, err = w.Writer.Write(p)
	if err != nil {
		return
	}
	if w.Limiter != nil {
		if w.Ctx == nil {
			w.Ctx = context.Background()
		}
		err = w.Limiter.WaitN(w.Ctx, n)
	}
	return
@@ -102,34 +95,28 @@ type RateLimitFile struct {
}

func (r *RateLimitFile) Read(p []byte) (n int, err error) {
	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
		return 0, r.Ctx.Err()
	if err = r.Ctx.Err(); err != nil {
		return 0, err
	}
	n, err = r.File.Read(p)
	if err != nil {
		return
	}
	if r.Limiter != nil {
		if r.Ctx == nil {
			r.Ctx = context.Background()
		}
		err = r.Limiter.WaitN(r.Ctx, n)
	}
	return
}

func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) {
	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
		return 0, r.Ctx.Err()
	if err = r.Ctx.Err(); err != nil {
		return 0, err
	}
	n, err = r.File.ReadAt(p, off)
	if err != nil {
		return
	}
	if r.Limiter != nil {
		if r.Ctx == nil {
			r.Ctx = context.Background()
		}
		err = r.Limiter.WaitN(r.Ctx, n)
	}
	return
@@ -145,16 +132,16 @@ func (r *RateLimitFile) Close() error {
type RateLimitRangeReaderFunc RangeReaderFunc

func (f RateLimitRangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
	if ServerDownloadLimit == nil {
		return f(ctx, httpRange)
	}
	rc, err := f(ctx, httpRange)
	if err != nil {
		return nil, err
	}
	if ServerDownloadLimit != nil {
		rc = &RateLimitReader{
			Ctx:     ctx,
			Reader:  rc,
			Limiter: ServerDownloadLimit,
		}
	}
	return rc, nil
	return &RateLimitReader{
		Ctx:     ctx,
		Reader:  rc,
		Limiter: ServerDownloadLimit,
	}, nil
}

@@ -10,7 +10,6 @@ import (
	"sync"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -28,10 +27,8 @@ type FileStream struct {
	ForceStreamUpload bool
	Exist             model.Obj // the file that already exists at the destination; we can reuse some info since we will overwrite it
	utils.Closers

	tmpFile  model.File // if present, tmpFile has the full content and will be deleted at the end
	peekBuff *buffer.Reader
	size      int64
	peekBuff  *buffer.Reader
	oriReader io.Reader // the original reader, used for caching
}

@@ -39,12 +36,6 @@ func (f *FileStream) GetSize() int64 {
	if f.size > 0 {
		return f.size
	}
	if file, ok := f.tmpFile.(*os.File); ok {
		info, err := file.Stat()
		if err == nil {
			return info.Size()
		}
	}
	return f.Obj.GetSize()
}

@@ -63,24 +54,10 @@ func (f *FileStream) IsForceStreamUpload() bool {
func (f *FileStream) Close() error {
	if f.peekBuff != nil {
		f.peekBuff.Reset()
		f.oriReader = nil
		f.peekBuff = nil
	}

	var err1, err2 error
	err1 = f.Closers.Close()
	if errors.Is(err1, os.ErrClosed) {
		err1 = nil
	}
	if file, ok := f.tmpFile.(*os.File); ok {
		err2 = os.RemoveAll(file.Name())
		if err2 != nil {
			err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", file.Name())
		} else {
			f.tmpFile = nil
		}
	}

	return errors.Join(err1, err2)
	return f.Closers.Close()
}

func (f *FileStream) GetExist() model.Obj {

@@ -94,27 +71,28 @@ func (f *FileStream) SetExist(obj model.Obj) {
// It's not thread-safe!
func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
	if cache := f.GetFile(); cache != nil {
		_, err := cache.Seek(0, io.SeekStart)
		if err != nil {
			return nil, err
		}
		if writer == nil {
			return cache, nil
		}
		_, err := cache.Seek(0, io.SeekStart)
		reader := f.Reader
		if up != nil {
			cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
			*up = model.UpdateProgressWithRange(*up, 50, 100)
			reader = &ReaderUpdatingProgress{
				Reader: &SimpleReaderWithSize{
					Reader: reader,
					Size:   f.GetSize(),
				},
				UpdateProgress: cacheProgress,
			}
		}
		_, err = utils.CopyWithBuffer(writer, reader)
		if err == nil {
			reader := f.Reader
			if up != nil {
				cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
				*up = model.UpdateProgressWithRange(*up, 50, 100)
				reader = &ReaderUpdatingProgress{
					Reader: &SimpleReaderWithSize{
						Reader: reader,
						Size:   f.GetSize(),
					},
					UpdateProgress: cacheProgress,
				}
			}
			_, err = utils.CopyWithBuffer(writer, reader)
			if err == nil {
				_, err = cache.Seek(0, io.SeekStart)
			}
			_, err = cache.Seek(0, io.SeekStart)
		}
		if err != nil {
			return nil, err
@@ -123,21 +101,20 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
	}

	reader := f.Reader
	if up != nil {
		cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
		*up = model.UpdateProgressWithRange(*up, 50, 100)
		reader = &ReaderUpdatingProgress{
			Reader: &SimpleReaderWithSize{
				Reader: reader,
				Size:   f.GetSize(),
			},
			UpdateProgress: cacheProgress,
	if f.peekBuff != nil {
		f.peekBuff.Seek(0, io.SeekStart)
		if writer != nil {
			_, err := utils.CopyWithBuffer(writer, f.peekBuff)
			if err != nil {
				return nil, err
			}
			f.peekBuff.Seek(0, io.SeekStart)
		}
		reader = f.oriReader
	}
	if writer != nil {
		reader = io.TeeReader(reader, writer)
	}

	if f.GetSize() < 0 {
		if f.peekBuff == nil {
			f.peekBuff = &buffer.Reader{}
@@ -174,7 +151,6 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
		}
	}
}

	tmpF, err := utils.CreateTempFile(reader, 0)
	if err != nil {
		return nil, err
@@ -191,22 +167,42 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
		return peekF, nil
	}

	f.Reader = reader
	if up != nil {
		cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
		*up = model.UpdateProgressWithRange(*up, 50, 100)
		size := f.GetSize()
		if f.peekBuff != nil {
			peekSize := f.peekBuff.Size()
			cacheProgress(float64(peekSize) / float64(size) * 100)
			size -= peekSize
		}
		reader = &ReaderUpdatingProgress{
			Reader: &SimpleReaderWithSize{
				Reader: reader,
				Size:   size,
			},
			UpdateProgress: cacheProgress,
		}
	}

	if f.peekBuff != nil {
		f.oriReader = reader
	} else {
		f.Reader = reader
	}
	return f.cache(f.GetSize())
}

func (f *FileStream) GetFile() model.File {
	if f.tmpFile != nil {
		return f.tmpFile
	}
	if file, ok := f.Reader.(model.File); ok {
		return file
	}
	return nil
}

// RangeRead has to cache all data first, since only a Reader is provided.
// It's not thread-safe!
// Reads a chunk of the given range from the stream without consuming the stream.
// Once the requested boundary exceeds the internal size limit, the whole stream is cached.
// Not fully thread-safe while the stream is uncached.
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
	if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
		httpRange.Length = f.GetSize() - httpRange.Start
@@ -215,12 +211,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
		return io.NewSectionReader(f.GetFile(), httpRange.Start, httpRange.Length), nil
	}

	size := httpRange.Start + httpRange.Length
	if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) {
		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
	}

	cache, err := f.cache(size)
	cache, err := f.cache(httpRange.Start + httpRange.Length)
	if err != nil {
		return nil, err
	}
@@ -232,14 +223,30 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
// Use bytes.Buffer as the write target of io.CopyBuffer: CopyBuffer calls Buffer.ReadFrom,
// and the Buffer grows even when the amount written matches Buffer.Cap.

// Ensure the given amount of data is cached.
func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
	if maxCacheSize > int64(conf.MaxBufferLimit) {
		tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
		size := f.GetSize()
		reader := f.Reader
		if f.peekBuff != nil {
			size -= f.peekBuff.Size()
			reader = f.oriReader
		}
		tmpF, err := utils.CreateTempFile(reader, size)
		if err != nil {
			return nil, err
		}
		f.Add(tmpF)
		f.tmpFile = tmpF
		f.Add(utils.CloseFunc(func() error {
			return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name()))
		}))
		if f.peekBuff != nil {
			peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF)
			if err != nil {
				return nil, err
			}
			f.Reader = peekF
			return peekF, nil
		}
		f.Reader = tmpF
		return tmpF, nil
	}

@@ -247,8 +254,12 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
	if f.peekBuff == nil {
		f.peekBuff = &buffer.Reader{}
		f.oriReader = f.Reader
		f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
	}
	bufSize := maxCacheSize - f.peekBuff.Size()
	if bufSize <= 0 {
		return f.peekBuff, nil
	}
	bufSize := maxCacheSize - int64(f.peekBuff.Size())
	var buf []byte
	if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
		m, err := mmap.Alloc(int(bufSize))
@@ -267,37 +278,24 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
	}
	f.peekBuff.Append(buf)
	if int64(f.peekBuff.Size()) >= f.GetSize() {
	if f.peekBuff.Size() >= f.GetSize() {
		f.Reader = f.peekBuff
		f.oriReader = nil
	} else {
		f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
	}
	return f.peekBuff, nil
}

func (f *FileStream) SetTmpFile(file model.File) {
	f.AddIfCloser(file)
	f.tmpFile = file
	f.Reader = file
}

var _ model.FileStreamer = (*SeekableStream)(nil)
var _ model.FileStreamer = (*FileStream)(nil)

//var _ seekableStream = (*FileStream)(nil)

// for most internal streams, which are either RangeReadCloser or MFile.
// Any functionality implemented based on SeekableStream should implement a Close method,
// whose only purpose is to close the SeekableStream object. If such functionality has
// additional resources that need to be closed, they should be added to the Closer property of
// the SeekableStream object and be closed together when the SeekableStream object is closed.
type SeekableStream struct {
	*FileStream
	// should have one of the below to support rangeRead
	rangeReadCloser model.RangeReadCloserIF
	rangeReader model.RangeReaderIF
}

// NewSeekableStream creates a SeekableStream from a FileStream and a Link.
// If FileStream.Reader is not nil, it is used directly;
// otherwise a RangeReader is created from the Link.
func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
	if len(fs.Mimetype) == 0 {
		fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
@@ -317,30 +315,31 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
	if err != nil {
		return nil, err
	}
	rrc := &model.RangeReadCloser{
		RangeReader: rr,
	}
	if _, ok := rr.(*model.FileRangeReader); ok {
		fs.Reader, err = rrc.RangeRead(fs.Ctx, http_range.Range{Length: -1})
		var rc io.ReadCloser
		rc, err = rr.RangeRead(fs.Ctx, http_range.Range{Length: -1})
		if err != nil {
			return nil, err
		}
		fs.Reader = rc
		fs.Add(rc)
	}
	fs.size = size
	fs.Add(link)
	fs.Add(rrc)
	return &SeekableStream{FileStream: fs, rangeReadCloser: rrc}, nil
	return &SeekableStream{FileStream: fs, rangeReader: rr}, nil
	}
	return nil, fmt.Errorf("illegal seekableStream")
}

// RangeRead is not thread-safe; please use it in a single thread only.
// Reading a given range via the cache or the rangeReader is thread-safe;
// other behavior is inherited from FileStream.RangeRead.
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
	if ss.GetFile() == nil && ss.rangeReadCloser != nil {
		rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
	if ss.GetFile() == nil && ss.rangeReader != nil {
		rc, err := ss.rangeReader.RangeRead(ss.Ctx, httpRange)
		if err != nil {
			return nil, err
		}
		ss.Add(rc)
		return rc, nil
	}
	return ss.FileStream.RangeRead(httpRange)
@@ -356,13 +355,14 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {

func (ss *SeekableStream) generateReader() error {
	if ss.Reader == nil {
		if ss.rangeReadCloser == nil {
		if ss.rangeReader == nil {
			return fmt.Errorf("illegal seekableStream")
		}
		rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
		rc, err := ss.rangeReader.RangeRead(ss.Ctx, http_range.Range{Length: -1})
		if err != nil {
			return err
		}
		ss.Add(rc)
		ss.Reader = rc
	}
	return nil
@@ -456,7 +456,7 @@ func (r *headCache) Close() error {
}

func (r *RangeReadReadAtSeeker) InitHeadCache() {
	if r.ss.GetFile() == nil && r.masterOff == 0 {
	if r.masterOff == 0 {
		value, _ := r.readerMap.LoadAndDelete(int64(0))
		r.headCache = &headCache{reader: value.(io.Reader)}
		r.ss.Closers.Add(r.headCache)
@@ -464,12 +464,12 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() {
}

func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (model.File, error) {
	if ss.GetFile() != nil {
		_, err := ss.GetFile().Seek(offset, io.SeekStart)
	if cache := ss.GetFile(); cache != nil {
		_, err := cache.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
		return ss.GetFile(), nil
		return cache, nil
	}
	r := &RangeReadReadAtSeeker{
		ss: ss,
@@ -479,10 +479,11 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (mode
	if offset < 0 || offset > ss.GetSize() {
		return nil, errors.New("offset out of range")
	}
	_, err := r.getReaderAtOffset(offset)
	reader, err := r.getReaderAtOffset(offset)
	if err != nil {
		return nil, err
	}
	r.readerMap.Store(int64(offset), reader)
	} else {
		r.readerMap.Store(int64(offset), ss)
	}
@@ -502,39 +503,41 @@ func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
}

func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (io.Reader, error) {
	var rr io.Reader
	var cur int64 = -1
	r.readerMap.Range(func(key, value any) bool {
		k := key.(int64)
		if off == k {
			cur = k
			rr = value.(io.Reader)
			return false
	for {
		var cur int64 = -1
		r.readerMap.Range(func(key, value any) bool {
			k := key.(int64)
			if off == k {
				cur = k
				return false
			}
			if off > k && off-k <= 4*utils.MB && k > cur {
				cur = k
			}
			return true
		})
		if cur < 0 {
			break
		}
		if off > k && off-k <= 4*utils.MB && (rr == nil || k < cur) {
			rr = value.(io.Reader)
			cur = k
		v, ok := r.readerMap.LoadAndDelete(int64(cur))
		if !ok {
			continue
		}
		rr := v.(io.Reader)
		if off == int64(cur) {
			// logrus.Debugf("getReaderAtOffset match_%d", off)
			return rr, nil
		}
		return true
	})
	if cur >= 0 {
		r.readerMap.Delete(int64(cur))
	}
	if off == int64(cur) {
		// logrus.Debugf("getReaderAtOffset match_%d", off)
		return rr, nil
	}

	if rr != nil {
		n, _ := utils.CopyWithBufferN(io.Discard, rr, off-cur)
		cur += n
		if cur == off {
			// logrus.Debugf("getReaderAtOffset old_%d", off)
			return rr, nil
		}
		break
	}
	// logrus.Debugf("getReaderAtOffset new_%d", off)

	// logrus.Debugf("getReaderAtOffset new_%d", off)
	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1})
	if err != nil {
		return nil, err

@@ -7,13 +7,12 @@ import (
	"io"
	"testing"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)

func TestFileStream_RangeRead(t *testing.T) {
	conf.MaxBufferLimit = 16 * 1024 * 1024
	type args struct {
		httpRange http_range.Range
	}
@@ -73,16 +72,38 @@ func TestFileStream_RangeRead(t *testing.T) {
		}
		})
	}
	t.Run("after", func(t *testing.T) {
		if f.GetFile() == nil {
			t.Error("not cached")
		}
		buf2 := make([]byte, len(buf))
		if _, err := io.ReadFull(f, buf2); err != nil {
			t.Errorf("FileStream.Read() error = %v", err)
		}
		if !bytes.Equal(buf, buf2) {
			t.Errorf("FileStream.Read() = %s, want %s", buf2, buf)
		}
	})
	if f.GetFile() == nil {
		t.Error("not cached")
	}
	buf2 := make([]byte, len(buf))
	if _, err := io.ReadFull(f, buf2); err != nil {
		t.Errorf("FileStream.Read() error = %v", err)
	}
	if !bytes.Equal(buf, buf2) {
		t.Errorf("FileStream.Read() = %s, want %s", buf2, buf)
	}
}

func TestFileStream_With_PreHash(t *testing.T) {
	buf := []byte("github.com/OpenListTeam/OpenList")
	f := &FileStream{
		Obj: &model.Object{
			Size: int64(len(buf)),
		},
		Reader: io.NopCloser(bytes.NewReader(buf)),
	}

	const hashSize int64 = 20
	reader, _ := f.RangeRead(http_range.Range{Start: 0, Length: hashSize})
	preHash, _ := utils.HashReader(utils.SHA1, reader)
	if preHash == "" {
		t.Error("preHash is empty")
	}
	tmpF, fullHash, _ := CacheFullAndHash(f, nil, utils.SHA1)
	fmt.Println(fullHash)
	fileFullHash, _ := utils.HashFile(utils.SHA1, tmpF)
	fmt.Println(fileFullHash)
	if fullHash != fileFullHash {
		t.Errorf("fullHash and fileFullHash should match: fullHash=%s fileFullHash=%s", fullHash, fileFullHash)
	}
}

@@ -28,44 +28,61 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran
}

func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
	if link.Concurrency > 0 || link.PartSize > 0 {
	if link.RangeReader != nil {
		if link.Concurrency < 1 && link.PartSize < 1 {
			return link.RangeReader, nil
		}
		down := net.NewDownloader(func(d *net.Downloader) {
			d.Concurrency = link.Concurrency
			d.PartSize = link.PartSize
			d.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
		})
		var rangeReader RangeReaderFunc = func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			var req *net.HttpRequestParams
			if link.RangeReader != nil {
				req = &net.HttpRequestParams{
					Range: httpRange,
					Size:  size,
				}
			} else {
				requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
				header := net.ProcessHeader(requestHeader, link.Header)
				req = &net.HttpRequestParams{
					Range:     httpRange,
					Size:      size,
					URL:       link.URL,
					HeaderRef: header,
				}
			}
			return down.Download(ctx, req)
		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			return down.Download(ctx, &net.HttpRequestParams{
				Range: httpRange,
				Size:  size,
			})
		}
		if link.RangeReader != nil {
			down.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
			return rangeReader, nil
		}
		return RateLimitRangeReaderFunc(rangeReader), nil
	}

	if link.RangeReader != nil {
		return link.RangeReader, nil
		// A RangeReader can only be rate-limited at the driver level
		return RangeReaderFunc(rangeReader), nil
	}

	if len(link.URL) == 0 {
		return nil, errors.New("invalid link: must have at least one of URL or RangeReader")
	}

	if link.Concurrency > 0 || link.PartSize > 0 {
		down := net.NewDownloader(func(d *net.Downloader) {
			d.Concurrency = link.Concurrency
			d.PartSize = link.PartSize
			d.HttpClient = func(ctx context.Context, params *net.HttpRequestParams) (*http.Response, error) {
				if ServerDownloadLimit == nil {
					return net.DefaultHttpRequestFunc(ctx, params)
				}
				resp, err := net.DefaultHttpRequestFunc(ctx, params)
				if err == nil && resp.Body != nil {
					resp.Body = &RateLimitReader{
						Ctx:     ctx,
						Reader:  resp.Body,
						Limiter: ServerDownloadLimit,
					}
				}
				return resp, err
			}
		})
		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
			header := net.ProcessHeader(requestHeader, link.Header)
			return down.Download(ctx, &net.HttpRequestParams{
				Range:     httpRange,
				Size:      size,
				URL:       link.URL,
				HeaderRef: header,
			})
		}
		return RangeReaderFunc(rangeReader), nil
	}

	rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
		if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
			httpRange.Length = size - httpRange.Start
@@ -81,7 +98,15 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
		}
		return nil, fmt.Errorf("http request failure, err:%w", err)
	}
	if httpRange.Start == 0 && (httpRange.Length == -1 || httpRange.Length == size) || response.StatusCode == http.StatusPartialContent ||
	if ServerDownloadLimit != nil {
		response.Body = &RateLimitReader{
			Ctx:     ctx,
			Reader:  response.Body,
			Limiter: ServerDownloadLimit,
		}
	}
	if httpRange.Start == 0 && httpRange.Length == size ||
		response.StatusCode == http.StatusPartialContent ||
		checkContentRange(&response.Header, httpRange.Start) {
		return response.Body, nil
	} else if response.StatusCode == http.StatusOK {
@@ -94,11 +119,10 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
	}
	return response.Body, nil
	}
	return RateLimitRangeReaderFunc(rangeReader), nil
	return RangeReaderFunc(rangeReader), nil
}

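A minimal sketch of consuming the returned RangeReaderIF; the obj, link, and ctx values are assumed to come from the caller, and error handling is abbreviated:

	// Resolve a link into a range reader and fetch the first 4 KiB.
	rr, err := GetRangeReaderFromLink(obj.GetSize(), link)
	if err != nil {
		return err
	}
	rc, err := rr.RangeRead(ctx, http_range.Range{Start: 0, Length: 4096})
	if err != nil {
		return err
	}
	defer rc.Close()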
// The io.ReadCloser returned by RangeReaderIF.RangeRead keeps the file signature.
func GetRangeReaderFromMFile(size int64, file model.File) model.RangeReaderIF {
func GetRangeReaderFromMFile(size int64, file model.File) *model.FileRangeReader {
	return &model.FileRangeReader{
		RangeReaderIF: RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			length := httpRange.Length
@@ -104,7 +104,7 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve
		return nil, err
	}
	if (flags & os.O_EXCL) != 0 {
		return nil, errors.New("file already exists")
		return nil, errs.ObjectAlreadyExists
	}
	if (flags & os.O_WRONLY) != 0 {
		return nil, errors.New("cannot write to uploading file")
@@ -122,7 +122,7 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve
		return nil, errs.ObjectNotFound
	}
	if (flags&os.O_EXCL) != 0 && exists {
		return nil, errors.New("file already exists")
		return nil, errs.ObjectAlreadyExists
	}
	if (flags & os.O_WRONLY) != 0 {
		if offset != 0 {

54
server/handles/direct_upload.go
Normal file
@@ -0,0 +1,54 @@
package handles

import (
	"net/url"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/fs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/server/common"
	"github.com/gin-gonic/gin"
)

type FsGetDirectUploadInfoReq struct {
	Path     string `json:"path" form:"path"`
	FileName string `json:"file_name" form:"file_name"`
	FileSize int64  `json:"file_size" form:"file_size"`
	Tool     string `json:"tool" form:"tool"`
}

// FsGetDirectUploadInfo returns the direct upload info if supported by the driver.
// If the driver does not support direct upload, it returns null for upload_info.
func FsGetDirectUploadInfo(c *gin.Context) {
	var req FsGetDirectUploadInfoReq
	if err := c.ShouldBind(&req); err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	// Decode path
	path, err := url.PathUnescape(req.Path)
	if err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	// Get user and join path
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	path, err = user.JoinPath(path)
	if err != nil {
		common.ErrorResp(c, err, 403)
		return
	}
	overwrite := c.GetHeader("Overwrite") != "false"
	if !overwrite {
		if res, _ := fs.Get(c.Request.Context(), path, &fs.GetArgs{NoLog: true}); res != nil {
			common.ErrorStrResp(c, "file exists", 403)
			return
		}
	}
	directUploadInfo, err := fs.GetDirectUploadInfo(c, req.Tool, path, req.FileName, req.FileSize)
	if err != nil {
		common.ErrorResp(c, err, 500)
		return
	}
	common.SuccessResp(c, directUploadInfo)
}
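A minimal sketch of calling the new endpoint from Go; the base URL, token, and tool name are illustrative, and the response follows the usual OpenList JSON envelope:

	body, _ := json.Marshal(map[string]any{
		"path":      "/folder",
		"file_name": "a.bin",
		"file_size": int64(1 << 20),
		"tool":      "HttpDirect", // hypothetical tool name
	})
	req, _ := http.NewRequest(http.MethodPost, server+"/api/fs/get_direct_upload_info", bytes.NewReader(body))
	req.Header.Set("Authorization", token)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)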
@@ -49,12 +49,13 @@ type ObjResp struct {
}

type FsListResp struct {
	Content  []ObjResp `json:"content"`
	Total    int64     `json:"total"`
	Readme   string    `json:"readme"`
	Header   string    `json:"header"`
	Write    bool      `json:"write"`
	Provider string    `json:"provider"`
	Content           []ObjResp `json:"content"`
	Total             int64     `json:"total"`
	Readme            string    `json:"readme"`
	Header            string    `json:"header"`
	Write             bool      `json:"write"`
	Provider          string    `json:"provider"`
	DirectUploadTools []string  `json:"direct_upload_tools,omitempty"`
}

func FsListSplit(c *gin.Context) {

@@ -109,17 +110,20 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) {
	}
	total, objs := pagination(objs, &req.PageReq)
	provider := "unknown"
	storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
	if err == nil {
		provider = storage.GetStorage().Driver
	var directUploadTools []string
	if user.CanWrite() {
		if storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}); err == nil {
			directUploadTools = op.GetDirectUploadTools(storage)
		}
	}
	common.SuccessResp(c, FsListResp{
		Content:  toObjsResp(objs, reqPath, isEncrypt(meta, reqPath)),
		Total:    int64(total),
		Readme:   getReadme(meta, reqPath),
		Header:   getHeader(meta, reqPath),
		Write:    user.CanWrite() || common.CanWrite(meta, reqPath),
		Provider: provider,
		Content:           toObjsResp(objs, reqPath, isEncrypt(meta, reqPath)),
		Total:             int64(total),
		Readme:            getReadme(meta, reqPath),
		Header:            getHeader(meta, reqPath),
		Write:             user.CanWrite() || common.CanWrite(meta, reqPath),
		Provider:          provider,
		DirectUploadTools: directUploadTools,
	})
}

@@ -408,7 +408,7 @@ func ListSharings(c *gin.Context) {
	})
}

type CreateSharingReq struct {
type UpdateSharingReq struct {
	Files   []string   `json:"files"`
	Expires *time.Time `json:"expires"`
	Pwd     string     `json:"pwd"`
@@ -418,12 +418,9 @@ type CreateSharingReq struct {
	Readme string `json:"readme"`
	Header string `json:"header"`
	model.Sort
}

type UpdateSharingReq struct {
	ID       string `json:"id"`
	Accessed int    `json:"accessed"`
	CreateSharingReq
	CreatorName string `json:"creator"`
	Accessed    int    `json:"accessed"`
	ID          string `json:"id"`
}

func UpdateSharing(c *gin.Context) {

@@ -436,24 +433,38 @@ func UpdateSharing(c *gin.Context) {
		common.ErrorStrResp(c, "must add at least 1 object", 400)
		return
	}
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	if !user.CanShare() {
		common.ErrorStrResp(c, "permission denied", 403)
		return
	var user *model.User
	var err error
	reqUser := c.Request.Context().Value(conf.UserKey).(*model.User)
	if reqUser.IsAdmin() && req.CreatorName != "" {
		user, err = op.GetUserByName(req.CreatorName)
		if err != nil {
			common.ErrorStrResp(c, "no such a user", 400)
			return
		}
	} else {
		user = reqUser
		if !user.CanShare() {
			common.ErrorStrResp(c, "permission denied", 403)
			return
		}
	}
	for i, s := range req.Files {
		s = utils.FixAndCleanPath(s)
		req.Files[i] = s
		if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
		if !reqUser.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
			common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
			return
		}
	}
	s, err := op.GetSharingById(req.ID)
	if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
	if err != nil || (!reqUser.IsAdmin() && s.CreatorId != user.ID) {
		common.ErrorStrResp(c, "sharing not found", 404)
		return
	}
	if reqUser.IsAdmin() && req.CreatorName == "" {
		user = s.Creator
	}
	s.Files = req.Files
	s.Expires = req.Expires
	s.Pwd = req.Pwd
@@ -464,6 +475,7 @@ func UpdateSharing(c *gin.Context) {
	s.Header = req.Header
	s.Readme = req.Readme
	s.Remark = req.Remark
	s.Creator = user
	if err = op.UpdateSharing(s); err != nil {
		common.ErrorResp(c, err, 500)
	} else {
@@ -476,7 +488,7 @@ func UpdateSharing(c *gin.Context) {
}

func CreateSharing(c *gin.Context) {
	var req CreateSharingReq
	var req UpdateSharingReq
	var err error
	if err = c.ShouldBind(&req); err != nil {
		common.ErrorResp(c, err, 400)
@@ -486,24 +498,35 @@ func CreateSharing(c *gin.Context) {
		common.ErrorStrResp(c, "must add at least 1 object", 400)
		return
	}
	user := c.Request.Context().Value(conf.UserKey).(*model.User)
	if !user.CanShare() {
		common.ErrorStrResp(c, "permission denied", 403)
		return
	var user *model.User
	reqUser := c.Request.Context().Value(conf.UserKey).(*model.User)
	if reqUser.IsAdmin() && req.CreatorName != "" {
		user, err = op.GetUserByName(req.CreatorName)
		if err != nil {
			common.ErrorStrResp(c, "no such a user", 400)
			return
		}
	} else {
		user = reqUser
		if !user.CanShare() || (!user.IsAdmin() && req.ID != "") {
			common.ErrorStrResp(c, "permission denied", 403)
			return
		}
	}
	for i, s := range req.Files {
		s = utils.FixAndCleanPath(s)
		req.Files[i] = s
		if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
		if !reqUser.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
			common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
			return
		}
	}
	s := &model.Sharing{
		SharingDB: &model.SharingDB{
			ID:      req.ID,
			Expires: req.Expires,
			Pwd:     req.Pwd,
			Accessed: 0,
			Accessed:    req.Accessed,
			MaxAccessed: req.MaxAccessed,
			Disabled:    req.Disabled,
			Sort:        req.Sort,

@@ -211,6 +211,8 @@ func _fs(g *gin.RouterGroup) {
	// g.POST("/add_transmission", handles.SetTransmission)
	g.POST("/add_offline_download", handles.AddOfflineDownload)
	g.POST("/archive/decompress", handles.FsArchiveDecompress)
	// Direct upload (client-side upload to storage)
	g.POST("/get_direct_upload_info", middlewares.FsUp, handles.FsGetDirectUploadInfo)
}

func _task(g *gin.RouterGroup) {