Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-11-25 19:37:41 +08:00)

Compare commits: 36 commits, 9d09ee133d...main
Commits:

72206ac9f6  62dedb2a2e  7189c5b461  1a445f9d3f  aa22884079  316d4caf37
60a489eb68  b22e211044  ca401b9af9  addce8b691  42fc841dc1  4c0916b64b
3989d35abd  72e2ae1f14  3e37f575d8  c0d480366d  9de7561154  0866b9075f
055696f576  854415160c  8f4f7d1291  ee2c77acd8  fc90ec1b53  7d78944d14
f2e0fe8589  39dcf9bd19  25f38df4ca  a1f1f98f94  affc499913  c7574b545c
9e852ba12d  174eae802a  b9f058fcc9  6de15b6310  2844797684  9f4e439478
.github/ISSUE_TEMPLATE/00-bug_report_zh.yml (vendored, 16 changes)
@@ -13,7 +13,7 @@ body:
     attributes:
       label: 请确认以下事项
       description: |
-        您必须勾选以下内容,否则您的问题可能会被直接关闭。
+        您必须确认、同意并勾选以下内容,否则您的问题一定会被直接关闭。
        或者您可以去[讨论区](https://github.com/OpenListTeam/OpenList/discussions)。
       options:
         - label: |

@@ -59,6 +59,14 @@ body:
       label: 问题描述(必填)
     validations:
       required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: 日志(必填)
+      description: |
+        请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
+    validations:
+      required: true
  - type: textarea
    id: config
    attributes:

@@ -67,12 +75,6 @@ body:
        请提供您的`OpenList`应用的配置文件,并截图相关存储配置。(可隐藏隐私字段)
     validations:
       required: true
-  - type: textarea
-    id: logs
-    attributes:
-      label: 日志(可选)
-      description: |
-        请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
  - type: textarea
    id: reproduction
    attributes:
.github/ISSUE_TEMPLATE/01-bug_report_en.yml (vendored, 16 changes)
@@ -13,7 +13,7 @@ body:
     attributes:
       label: Please confirm the following
       description: |
-        You must check all the following, otherwise your issue may be closed directly.
+        You must confirm, agree, and check all the following, otherwise your issue will definitely be closed directly.
        Or you can go to the [discussions](https://github.com/OpenListTeam/OpenList/discussions).
       options:
         - label: |

@@ -59,6 +59,14 @@ body:
       label: Bug Description (required)
     validations:
       required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: Logs (required)
+      description: |
+        Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
+    validations:
+      required: true
  - type: textarea
    id: config
    attributes:

@@ -67,12 +75,6 @@ body:
        Please provide your `OpenList` application's configuration file and a screenshot of the relevant storage configuration. (You may mask sensitive fields)
     validations:
       required: true
-  - type: textarea
-    id: logs
-    attributes:
-      label: Logs (optional)
-      description: |
-        Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
  - type: textarea
    id: reproduction
    attributes:
@@ -28,6 +28,7 @@ func init() {
         return &Pan123{
             Addition: Addition{
                 UploadThread: 3,
+                Platform:     "web",
             },
         }
     })
@@ -200,10 +200,7 @@ func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error
         return nil, err
     }
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
-            FreeSpace:  capacityInfo.CloudCapacityInfo.FreeSize,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(capacityInfo.CloudCapacityInfo.UsedSize, capacityInfo.CloudCapacityInfo.TotalSize),
     }, nil
 }
@@ -72,13 +72,13 @@ type CapacityResp struct {
     ResMessage string `json:"res_message"`
     Account    string `json:"account"`
     CloudCapacityInfo struct {
-        FreeSize     uint64 `json:"freeSize"`
+        FreeSize     int64  `json:"freeSize"`
         MailUsedSize uint64 `json:"mail189UsedSize"`
         TotalSize    uint64 `json:"totalSize"`
         UsedSize     uint64 `json:"usedSize"`
     } `json:"cloudCapacityInfo"`
     FamilyCapacityInfo struct {
-        FreeSize  uint64 `json:"freeSize"`
+        FreeSize  int64  `json:"freeSize"`
         TotalSize uint64 `json:"totalSize"`
         UsedSize  uint64 `json:"usedSize"`
     } `json:"familyCapacityInfo"`
@@ -284,18 +284,15 @@ func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, err
     if err != nil {
         return nil, err
     }
-    var total, free uint64
+    var total, used uint64
     if y.isFamily() {
         total = capacityInfo.FamilyCapacityInfo.TotalSize
-        free = capacityInfo.FamilyCapacityInfo.FreeSize
+        used = capacityInfo.FamilyCapacityInfo.UsedSize
     } else {
         total = capacityInfo.CloudCapacityInfo.TotalSize
-        free = capacityInfo.CloudCapacityInfo.FreeSize
+        used = capacityInfo.CloudCapacityInfo.UsedSize
     }
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: total,
-            FreeSpace:  free,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
     }, nil
 }
@@ -322,13 +322,13 @@ type CapacityResp struct {
     ResMessage string `json:"res_message"`
     Account    string `json:"account"`
     CloudCapacityInfo struct {
-        FreeSize     uint64 `json:"freeSize"`
+        FreeSize     int64  `json:"freeSize"`
         MailUsedSize uint64 `json:"mail189UsedSize"`
         TotalSize    uint64 `json:"totalSize"`
         UsedSize     uint64 `json:"usedSize"`
     } `json:"cloudCapacityInfo"`
     FamilyCapacityInfo struct {
-        FreeSize  uint64 `json:"freeSize"`
+        FreeSize  int64  `json:"freeSize"`
         TotalSize uint64 `json:"totalSize"`
         UsedSize  uint64 `json:"usedSize"`
     } `json:"familyCapacityInfo"`
@@ -416,18 +416,15 @@ func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, err
     if err != nil {
         return nil, err
     }
-    var total, free uint64
+    var total, used uint64
     if y.isFamily() {
         total = capacityInfo.FamilyCapacityInfo.TotalSize
-        free = capacityInfo.FamilyCapacityInfo.FreeSize
+        used = capacityInfo.FamilyCapacityInfo.UsedSize
     } else {
         total = capacityInfo.CloudCapacityInfo.TotalSize
-        free = capacityInfo.CloudCapacityInfo.FreeSize
+        used = capacityInfo.CloudCapacityInfo.UsedSize
     }
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: total,
-            FreeSpace:  free,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
     }, nil
 }
@@ -415,13 +415,13 @@ type CapacityResp struct {
     ResMessage string `json:"res_message"`
     Account    string `json:"account"`
     CloudCapacityInfo struct {
-        FreeSize     uint64 `json:"freeSize"`
+        FreeSize     int64  `json:"freeSize"`
         MailUsedSize uint64 `json:"mail189UsedSize"`
         TotalSize    uint64 `json:"totalSize"`
         UsedSize     uint64 `json:"usedSize"`
     } `json:"cloudCapacityInfo"`
     FamilyCapacityInfo struct {
-        FreeSize  uint64 `json:"freeSize"`
+        FreeSize  int64  `json:"freeSize"`
         TotalSize uint64 `json:"totalSize"`
         UsedSize  uint64 `json:"usedSize"`
     } `json:"familyCapacityInfo"`
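All four 189 hunks apply one idea: stop trusting the server-reported freeSize (now parsed as int64, presumably because the API can report negative values) and derive usage from usedSize and totalSize instead. A minimal sketch of what a helper like driver.DiskUsageFromUsedAndTotal plausibly does; the struct fields and function shape here are assumptions, not the repository's actual definitions:

package main

import "fmt"

// DiskUsage mirrors the model.DiskUsage shape implied by the hunks above
// (only TotalSpace/FreeSpace appear in the old code; names are assumed).
type DiskUsage struct {
    TotalSpace uint64
    FreeSpace  uint64
}

// DiskUsageFromUsedAndTotal derives free space from used and total,
// clamping at zero so an over-reported usedSize cannot underflow uint64.
func DiskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
    free := uint64(0)
    if total > used {
        free = total - used
    }
    return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
    fmt.Println(DiskUsageFromUsedAndTotal(30, 100))  // {100 70}
    fmt.Println(DiskUsageFromUsedAndTotal(120, 100)) // {100 0}, clamped
}

The clamp is the point of the switch: computing free locally keeps one inconsistent server field from poisoning the whole usage report.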
@@ -5,7 +5,6 @@ import (
     "errors"
     stdpath "path"
     "strings"
-    "sync"
     "time"

     "github.com/OpenListTeam/OpenList/v4/internal/driver"

@@ -17,9 +16,15 @@ import (
     log "github.com/sirupsen/logrus"
 )

+type detailWithIndex struct {
+    idx int
+    val *model.StorageDetails
+}
+
 func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
     var objs []model.Obj
-    var wg sync.WaitGroup
+    detailsChan := make(chan detailWithIndex, len(d.pathMap))
+    workerCount := 0
     for _, k := range d.rootOrder {
         obj := model.Object{
             Name: k,

@@ -47,22 +52,26 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model
                 DriverName: remoteDriver.Config().Name,
             },
         }
-        wg.Add(1)
-        go func() {
-            defer wg.Done()
-            c, cancel := context.WithTimeout(ctx, time.Second)
-            defer cancel()
-            details, e := op.GetStorageDetails(c, remoteDriver, refresh)
+        workerCount++
+        go func(dri driver.Driver, i int) {
+            details, e := op.GetStorageDetails(ctx, dri, refresh)
             if e != nil {
                 if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
-                    log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
+                    log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, e)
                 }
-                return
             }
-            objs[idx].(*model.ObjStorageDetails).StorageDetails = details
-        }()
+            detailsChan <- detailWithIndex{idx: i, val: details}
+        }(remoteDriver, idx)
+    }
+    for workerCount > 0 {
+        select {
+        case r := <-detailsChan:
+            objs[r.idx].(*model.ObjStorageDetails).StorageDetails = r.val
+            workerCount--
+        case <-time.After(time.Second):
+            workerCount = 0
+        }
     }
-    wg.Wait()
     return objs
 }
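The listRoot rewrite replaces sync.WaitGroup with a buffered channel of indexed results drained under a one-second select timeout. The old per-worker context timeout only bounded the wait if the remote driver honored its context; wg.Wait could still hang forever on a driver that ignored it. The new shape bounds the wait on the receiving side, and the channel buffer sized to the worker count means stragglers can still send without leaking. A self-contained sketch of the pattern, with illustrative names rather than the driver's:

package main

import (
    "fmt"
    "time"
)

type result struct {
    idx int
    val string
}

func main() {
    jobs := []time.Duration{10 * time.Millisecond, 5 * time.Second, 20 * time.Millisecond}
    out := make([]string, len(jobs))

    // Buffered to len(jobs) so workers never block on send,
    // even after the reader has given up on them.
    results := make(chan result, len(jobs))
    pending := 0
    for i, d := range jobs {
        pending++
        go func(i int, d time.Duration) {
            time.Sleep(d) // stands in for a slow op.GetStorageDetails
            results <- result{idx: i, val: fmt.Sprintf("done in %v", d)}
        }(i, d)
    }

    // Drain with a per-receive timeout: one hung worker costs at most
    // a second instead of stalling the whole listing indefinitely.
    for pending > 0 {
        select {
        case r := <-results:
            out[r.idx] = r.val
            pending--
        case <-time.After(time.Second):
            pending = 0 // abandon stragglers; the buffer absorbs late sends
        }
    }
    fmt.Println(out)
}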
@@ -5,11 +5,15 @@ import (
     "crypto/md5"
     "encoding/hex"
     "errors"
+    "fmt"
     "io"
+    "net/http"
     "net/url"
     "os"
     stdpath "path"
     "strconv"
+    "strings"
+    "sync"
     "time"

     "github.com/OpenListTeam/OpenList/v4/drivers/base"

@@ -18,8 +22,10 @@ import (
     "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
+    "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/avast/retry-go"
+    "github.com/go-resty/resty/v2"
     log "github.com/sirupsen/logrus"
 )

@@ -29,8 +35,20 @@ type BaiduNetdisk struct {

     uploadThread int
     vipType      int // 会员类型,0普通用户(4G/4M)、1普通会员(10G/16M)、2超级会员(20G/32M)
+
+    upClient       *resty.Client // 上传文件使用的http客户端
+    uploadUrlG     singleflight.Group[string]
+    uploadUrlMu    sync.RWMutex
+    uploadUrlCache map[string]uploadURLCacheEntry
 }

+type uploadURLCacheEntry struct {
+    url        string
+    updateTime time.Time
+}
+
+var ErrUploadIDExpired = errors.New("uploadid expired")
+
 func (d *BaiduNetdisk) Config() driver.Config {
     return config
 }

@@ -40,19 +58,32 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
 }

 func (d *BaiduNetdisk) Init(ctx context.Context) error {
+    timeout := DEFAULT_UPLOAD_SLICE_TIMEOUT
+    if d.UploadSliceTimeout > 0 {
+        timeout = time.Second * time.Duration(d.UploadSliceTimeout)
+    }
+
+    d.upClient = base.NewRestyClient().
+        SetTimeout(timeout).
+        SetRetryCount(UPLOAD_RETRY_COUNT).
+        SetRetryWaitTime(UPLOAD_RETRY_WAIT_TIME).
+        SetRetryMaxWaitTime(UPLOAD_RETRY_MAX_WAIT_TIME)
+    d.uploadUrlCache = make(map[string]uploadURLCacheEntry)
     d.uploadThread, _ = strconv.Atoi(d.UploadThread)
-    if d.uploadThread < 1 || d.uploadThread > 32 {
-        d.uploadThread, d.UploadThread = 3, "3"
+    if d.uploadThread < 1 {
+        d.uploadThread, d.UploadThread = 1, "1"
+    } else if d.uploadThread > 32 {
+        d.uploadThread, d.UploadThread = 32, "32"
     }

     if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
-        d.UploadAPI = "https://d.pcs.baidu.com"
+        d.UploadAPI = UPLOAD_FALLBACK_API
     }

     res, err := d.get("/xpan/nas", map[string]string{
         "method": "uinfo",
     }, nil)
-    log.Debugf("[baidu] get uinfo: %s", string(res))
+    log.Debugf("[baidu_netdisk] get uinfo: %s", string(res))
     if err != nil {
         return err
     }
@@ -179,6 +210,11 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
 // **注意**: 截至 2024/04/20 百度云盘 api 接口返回的时间永远是当前时间,而不是文件时间。
 // 而实际上云盘存储的时间是文件时间,所以此处需要覆盖时间,保证缓存与云盘的数据一致
 func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+    // 百度网盘不允许上传空文件
+    if stream.GetSize() < 1 {
+        return nil, ErrBaiduEmptyFilesNotAllowed
+    }
+
     // rapid upload
     if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
         return newObj, nil

@@ -214,7 +250,6 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F

     // cal md5 for first 256k data
     const SliceSize int64 = 256 * utils.KB
-    // cal md5
     blockList := make([]string, 0, count)
     byteSize := sliceSize
     fileMd5H := md5.New()

@@ -244,7 +279,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
     }
     if tmpF != nil {
         if written != streamSize {
-            return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
+            return nil, errs.NewErr(err, "CreateTempFile failed, size mismatch: %d != %d ", written, streamSize)
         }
         _, err = tmpF.Seek(0, io.SeekStart)
         if err != nil {
@@ -258,31 +293,14 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
     mtime := stream.ModTime().Unix()
     ctime := stream.CreateTime().Unix()

-    // step.1 预上传
-    // 尝试获取之前的进度
+    // step.1 尝试读取已保存进度
     precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
     if !ok {
-        params := map[string]string{
-            "method": "precreate",
-        }
-        form := map[string]string{
-            "path":        path,
-            "size":        strconv.FormatInt(streamSize, 10),
-            "isdir":       "0",
-            "autoinit":    "1",
-            "rtype":       "3",
-            "block_list":  blockListStr,
-            "content-md5": contentMd5,
-            "slice-md5":   sliceMd5,
-        }
-        joinTime(form, ctime, mtime)
-
-        log.Debugf("[baidu_netdisk] precreate data: %s", form)
-        _, err = d.postForm("/xpan/file", params, form, &precreateResp)
+        // 没有进度,走预上传
+        precreateResp, err = d.precreate(ctx, path, streamSize, blockListStr, contentMd5, sliceMd5, ctime, mtime)
         if err != nil {
             return nil, err
         }
-        log.Debugf("%+v", precreateResp)
         if precreateResp.ReturnType == 2 {
             // rapid upload, since got md5 match from baidu server
             // 修复时间,具体原因见 Put 方法注释的 **注意**
@@ -291,20 +309,43 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
             return fileToObj(precreateResp.File), nil
         }
     }
+    ensureUploadURL := func() {
+        if precreateResp.UploadURL != "" {
+            return
+        }
+        precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
+    }
+    ensureUploadURL()

     // step.2 上传分片
+uploadLoop:
+    for attempt := 0; attempt < 2; attempt++ {
+        // 获取上传域名
+        if precreateResp.UploadURL == "" {
+            ensureUploadURL()
+        }
+        uploadUrl := precreateResp.UploadURL
+        // 并发上传
     threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
         retry.Attempts(1),
         retry.Delay(time.Second),
         retry.DelayType(retry.BackOffDelay))

-    for i, partseq := range precreateResp.BlockList {
-        if utils.IsCanceled(upCtx) {
-            break
-        }
+    cacheReaderAt, okReaderAt := cache.(io.ReaderAt)
+    if !okReaderAt {
+        return nil, fmt.Errorf("cache object must implement io.ReaderAt interface for upload operations")
+    }

-        i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
+    totalParts := len(precreateResp.BlockList)
+
+    for i, partseq := range precreateResp.BlockList {
+        if utils.IsCanceled(upCtx) || partseq < 0 {
+            continue
+        }
+        i, partseq := i, partseq
+        offset, size := int64(partseq)*sliceSize, sliceSize
         if partseq+1 == count {
-            byteSize = lastBlockSize
+            size = lastBlockSize
         }
         threadG.Go(func(ctx context.Context) error {
             params := map[string]string{
@@ -315,21 +356,49 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
                 "uploadid": precreateResp.Uploadid,
                 "partseq":  strconv.Itoa(partseq),
             }
-            err := d.uploadSlice(ctx, params, stream.GetName(),
-                driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
+            section := io.NewSectionReader(cacheReaderAt, offset, size)
+            err := d.uploadSlice(ctx, uploadUrl, params, stream.GetName(), driver.NewLimitedUploadStream(ctx, section))
             if err != nil {
                 return err
             }
-            up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
             precreateResp.BlockList[i] = -1
+            // 当前goroutine还没退出,+1才是真正成功的数量
+            success := threadG.Success() + 1
+            progress := float64(success) * 100 / float64(totalParts)
+            up(progress)
             return nil
         })
     }
-    if err = threadG.Wait(); err != nil {
-        // 如果属于用户主动取消,则保存上传进度
-        if errors.Is(err, context.Canceled) {
+
+    err = threadG.Wait()
+    if err == nil {
+        break uploadLoop
+    }
+
+    // 保存进度(所有错误都会保存)
     precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
     base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
+
+    if errors.Is(err, context.Canceled) {
+        return nil, err
+    }
+    if errors.Is(err, ErrUploadIDExpired) {
+        log.Warn("[baidu_netdisk] uploadid expired, will restart from scratch")
+        d.clearUploadUrlCache(precreateResp.Uploadid)
+        // 重新 precreate(所有分片都要重传)
+        newPre, err2 := d.precreate(ctx, path, streamSize, blockListStr, "", "", ctime, mtime)
+        if err2 != nil {
+            return nil, err2
+        }
+        if newPre.ReturnType == 2 {
+            return fileToObj(newPre.File), nil
+        }
+        precreateResp = newPre
+        precreateResp.UploadURL = ""
+        ensureUploadURL()
+        // 覆盖掉旧的进度
+        base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
+        continue uploadLoop
     }
     return nil, err
 }
@@ -343,23 +412,72 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
     // 修复时间,具体原因见 Put 方法注释的 **注意**
     newFile.Ctime = ctime
     newFile.Mtime = mtime
+    // 上传成功清理进度
+    base.SaveUploadProgress(d, nil, d.AccessToken, contentMd5)
+    d.clearUploadUrlCache(precreateResp.Uploadid)
     return fileToObj(newFile), nil
 }

-func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
-    res, err := base.RestyClient.R().
+// precreate 执行预上传操作,支持首次上传和 uploadid 过期重试
+func (d *BaiduNetdisk) precreate(ctx context.Context, path string, streamSize int64, blockListStr, contentMd5, sliceMd5 string, ctime, mtime int64) (*PrecreateResp, error) {
+    params := map[string]string{"method": "precreate"}
+    form := map[string]string{
+        "path":       path,
+        "size":       strconv.FormatInt(streamSize, 10),
+        "isdir":      "0",
+        "autoinit":   "1",
+        "rtype":      "3",
+        "block_list": blockListStr,
+    }
+
+    // 只有在首次上传时才包含 content-md5 和 slice-md5
+    if contentMd5 != "" && sliceMd5 != "" {
+        form["content-md5"] = contentMd5
+        form["slice-md5"] = sliceMd5
+    }
+
+    joinTime(form, ctime, mtime)
+
+    var precreateResp PrecreateResp
+    _, err := d.postForm("/xpan/file", params, form, &precreateResp)
+    if err != nil {
+        return nil, err
+    }
+
+    // 修复时间,具体原因见 Put 方法注释的 **注意**
+    if precreateResp.ReturnType == 2 {
+        precreateResp.File.Ctime = ctime
+        precreateResp.File.Mtime = mtime
+    }
+
+    return &precreateResp, nil
+}
+
+func (d *BaiduNetdisk) uploadSlice(ctx context.Context, uploadUrl string, params map[string]string, fileName string, file io.Reader) error {
+    res, err := d.upClient.R().
         SetContext(ctx).
         SetQueryParams(params).
         SetFileReader("file", fileName, file).
-        Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
+        Post(uploadUrl + "/rest/2.0/pcs/superfile2")
     if err != nil {
         return err
     }
     log.Debugln(res.RawResponse.Status + res.String())
+    if res.StatusCode() != http.StatusOK {
+        return errs.NewErr(errs.StreamIncomplete, "baidu upload failed, status=%d, body=%s", res.StatusCode(), res.String())
+    }
     errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
     errNo := utils.Json.Get(res.Body(), "errno").ToInt()
+    respStr := res.String()
+    lower := strings.ToLower(respStr)
+    // 合并 uploadid 过期检测逻辑
+    if strings.Contains(lower, "uploadid") &&
+        (strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
+        return ErrUploadIDExpired
+    }
+
     if errCode != 0 || errNo != 0 {
-        return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
+        return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", res.String())
     }
     return nil
 }
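The heart of the Put rewrite is the two-attempt uploadLoop: a clean threadG.Wait() breaks out, any failure persists progress first, and ErrUploadIDExpired specifically triggers a fresh precreate (without content-md5/slice-md5) followed by one full retry in which every slice is re-uploaded. A stripped-down sketch of that control flow under hypothetical names, not the driver's actual functions:

package main

import (
    "errors"
    "fmt"
)

var errSessionExpired = errors.New("upload session expired")

// uploadAll stands in for the errgroup slice-upload phase in the diff.
func uploadAll(session string) error {
    if session == "stale" {
        return errSessionExpired
    }
    fmt.Println("uploaded with session", session)
    return nil
}

// newSession stands in for d.precreate; the real call re-sends the
// block list and returns a fresh uploadid.
func newSession() (string, error) { return "fresh", nil }

func putWithRetry() error {
    session := "stale"
    for attempt := 0; attempt < 2; attempt++ {
        err := uploadAll(session)
        if err == nil {
            return nil
        }
        // Progress would be persisted here regardless of the error kind.
        if errors.Is(err, errSessionExpired) {
            s, err2 := newSession() // all slices must be re-uploaded
            if err2 != nil {
                return err2
            }
            session = s
            continue
        }
        return err
    }
    return errors.New("upload failed after session refresh")
}

func main() { fmt.Println(putWithRetry()) }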
@@ -3,6 +3,7 @@ package baidu_netdisk
 import (
     "github.com/OpenListTeam/OpenList/v4/internal/driver"
     "github.com/OpenListTeam/OpenList/v4/internal/op"
+    "time"
 )

 type Addition struct {

@@ -18,12 +19,23 @@ type Addition struct {
     AccessToken  string
     RefreshToken string `json:"refresh_token" required:"true"`
     UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
+    UploadSliceTimeout int `json:"upload_timeout" type:"number" default:"60" help:"per-slice upload timeout in seconds"`
     UploadAPI    string `json:"upload_api" default:"https://d.pcs.baidu.com"`
+    UseDynamicUploadAPI bool `json:"use_dynamic_upload_api" default:"true" help:"dynamically get upload api domain, when enabled, the 'Upload API' setting will be used as a fallback if failed to get"`
     CustomUploadPartSize  int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
     LowBandwithUploadMode bool  `json:"low_bandwith_upload_mode" default:"false"`
     OnlyListVideoFile     bool  `json:"only_list_video_file" default:"false"`
 }

+const (
+    UPLOAD_FALLBACK_API          = "https://d.pcs.baidu.com" // 备用上传地址
+    UPLOAD_URL_EXPIRE_TIME       = time.Minute * 60          // 上传地址有效期(分钟)
+    DEFAULT_UPLOAD_SLICE_TIMEOUT = time.Second * 60          // 上传分片请求默认超时时间
+    UPLOAD_RETRY_COUNT           = 3
+    UPLOAD_RETRY_WAIT_TIME       = time.Second * 1
+    UPLOAD_RETRY_MAX_WAIT_TIME   = time.Second * 5
+)
+
 var config = driver.Config{
     Name:        "BaiduNetdisk",
     DefaultRoot: "/",
@@ -1,6 +1,7 @@
 package baidu_netdisk

 import (
+    "errors"
     "path"
     "strconv"
     "time"

@@ -9,6 +10,10 @@ import (
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
 )

+var (
+    ErrBaiduEmptyFilesNotAllowed = errors.New("empty files are not allowed by baidu netdisk")
+)
+
 type TokenErrResp struct {
     ErrorDescription string `json:"error_description"`
     Error            string `json:"error"`

@@ -188,6 +193,32 @@ type PrecreateResp struct {

     // return_type=2
     File File `json:"info"`
+
+    UploadURL string `json:"-"` // 保存断点续传对应的上传域名
+}
+
+type UploadServerResp struct {
+    BakServer []any `json:"bak_server"`
+    BakServers []struct {
+        Server string `json:"server"`
+    } `json:"bak_servers"`
+    ClientIP   string `json:"client_ip"`
+    ErrorCode  int    `json:"error_code"`
+    ErrorMsg   string `json:"error_msg"`
+    Expire     int    `json:"expire"`
+    Host       string `json:"host"`
+    Newno      string `json:"newno"`
+    QuicServer []any  `json:"quic_server"`
+    QuicServers []struct {
+        Server string `json:"server"`
+    } `json:"quic_servers"`
+    RequestID  int64 `json:"request_id"`
+    Server     []any `json:"server"`
+    ServerTime int   `json:"server_time"`
+    Servers []struct {
+        Server string `json:"server"`
+    } `json:"servers"`
+    Sl int `json:"sl"`
 }

 type QuotaResp struct {
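Note the json:"-" tag on the new UploadURL field: the precreate response is persisted as upload progress, but the resolved upload domain is deliberately excluded so that a resumed upload re-resolves it (or re-reads the in-memory cache) instead of reusing a possibly expired address. A quick illustration of the tag's effect with a stand-in struct:

package main

import (
    "encoding/json"
    "fmt"
)

type progress struct {
    Uploadid  string `json:"uploadid"`
    UploadURL string `json:"-"` // never serialized: re-resolved per session
}

func main() {
    b, _ := json.Marshal(progress{Uploadid: "abc", UploadURL: "https://host"})
    fmt.Println(string(b)) // {"uploadid":"abc"} — UploadURL is dropped
}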
@@ -115,7 +115,7 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
     errno := utils.Json.Get(res.Body(), "errno").ToInt()
     if errno != 0 {
         if utils.SliceContains([]int{111, -6}, errno) {
-            log.Info("refreshing baidu_netdisk token.")
+            log.Info("[baidu_netdisk] refreshing baidu_netdisk token.")
             err2 := d.refreshToken()
             if err2 != nil {
                 return retry.Unrecoverable(err2)

@@ -326,10 +326,10 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
     // 非会员固定为 4MB
     if d.vipType == 0 {
         if d.CustomUploadPartSize != 0 {
-            log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
         }
         if filesize > MaxSliceNum*DefaultSliceSize {
-            log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
+            log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
         }

         return DefaultSliceSize

@@ -337,17 +337,17 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {

     if d.CustomUploadPartSize != 0 {
         if d.CustomUploadPartSize < DefaultSliceSize {
-            log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
             return DefaultSliceSize
         }

         if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
-            log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
             return VipSliceSize
         }

         if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
-            log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
+            log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
             return SVipSliceSize
         }

@@ -377,7 +377,7 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
     }

     if filesize > MaxSliceNum*maxSliceSize {
-        log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
+        log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
     }

     return maxSliceSize

@@ -394,6 +394,97 @@ func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
     return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
 }

+// getUploadUrl 从开放平台获取上传域名/地址,并发请求会被合并,结果会在 uploadid 生命周期内复用。
+// 如果获取失败,则返回 Upload API设置项。
+func (d *BaiduNetdisk) getUploadUrl(path, uploadId string) string {
+    if !d.UseDynamicUploadAPI || uploadId == "" {
+        return d.UploadAPI
+    }
+    getCachedUrlFunc := func() (string, bool) {
+        d.uploadUrlMu.RLock()
+        defer d.uploadUrlMu.RUnlock()
+        if entry, ok := d.uploadUrlCache[uploadId]; ok {
+            return entry.url, true
+        }
+        return "", false
+    }
+    // 检查地址缓存
+    if uploadUrl, ok := getCachedUrlFunc(); ok {
+        return uploadUrl
+    }
+
+    uploadUrlGetFunc := func() (string, error) {
+        // 双重检查缓存
+        if uploadUrl, ok := getCachedUrlFunc(); ok {
+            return uploadUrl, nil
+        }
+
+        uploadUrl, err := d.requestForUploadUrl(path, uploadId)
+        if err != nil {
+            return "", err
+        }
+
+        d.uploadUrlMu.Lock()
+        d.uploadUrlCache[uploadId] = uploadURLCacheEntry{
+            url:        uploadUrl,
+            updateTime: time.Now(),
+        }
+        d.uploadUrlMu.Unlock()
+        return uploadUrl, nil
+    }
+
+    uploadUrl, err, _ := d.uploadUrlG.Do(uploadId, uploadUrlGetFunc)
+    if err != nil {
+        fallback := d.UploadAPI
+        log.Warnf("[baidu_netdisk] get upload URL failed (%v), will use fallback URL: %s", err, fallback)
+        return fallback
+    }
+    return uploadUrl
+}
+
+func (d *BaiduNetdisk) clearUploadUrlCache(uploadId string) {
+    if uploadId == "" {
+        return
+    }
+    d.uploadUrlMu.Lock()
+    if _, ok := d.uploadUrlCache[uploadId]; ok {
+        delete(d.uploadUrlCache, uploadId)
+    }
+    d.uploadUrlMu.Unlock()
+}
+
+// requestForUploadUrl 请求获取上传地址。
+// 实测此接口不需要认证,传method和upload_version就行,不过还是按文档规范调用。
+// https://pan.baidu.com/union/doc/Mlvw5hfnr
+func (d *BaiduNetdisk) requestForUploadUrl(path, uploadId string) (string, error) {
+    params := map[string]string{
+        "method":         "locateupload",
+        "appid":          "250528",
+        "path":           path,
+        "uploadid":       uploadId,
+        "upload_version": "2.0",
+    }
+    apiUrl := "https://d.pcs.baidu.com/rest/2.0/pcs/file"
+    var resp UploadServerResp
+    _, err := d.request(apiUrl, http.MethodGet, func(req *resty.Request) {
+        req.SetQueryParams(params)
+    }, &resp)
+    if err != nil {
+        return "", err
+    }
+    // 应该是https开头的一个地址
+    var uploadUrl string
+    if len(resp.Servers) > 0 {
+        uploadUrl = resp.Servers[0].Server
+    } else if len(resp.BakServers) > 0 {
+        uploadUrl = resp.BakServers[0].Server
+    }
+    if uploadUrl == "" {
+        return "", errors.New("upload URL is empty")
+    }
+    return uploadUrl, nil
+}
+
 // func encodeURIComponent(str string) string {
 // 	r := url.QueryEscape(str)
 // 	r = strings.ReplaceAll(r, "+", "%20")
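getUploadUrl layers three defenses: an RWMutex-guarded cache keyed by uploadid, a double-check of that cache inside the fetch closure, and a singleflight group so concurrent slice uploads collapse into one locateupload request. The same pattern reduced to a standalone sketch; this version uses golang.org/x/sync/singleflight, whereas the repository ships its own generic pkg/singleflight, so the exact API is an assumption:

package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/singleflight"
)

type urlCache struct {
    mu    sync.RWMutex
    cache map[string]string
    g     singleflight.Group
}

func (c *urlCache) get(key string, fetch func() (string, error)) (string, error) {
    // Fast path: shared read lock.
    c.mu.RLock()
    if u, ok := c.cache[key]; ok {
        c.mu.RUnlock()
        return u, nil
    }
    c.mu.RUnlock()

    // Slow path: collapse concurrent fetches for the same key into one call.
    v, err, _ := c.g.Do(key, func() (any, error) {
        // Double-check: another caller may have filled the cache
        // between our RUnlock and the singleflight execution.
        c.mu.RLock()
        if u, ok := c.cache[key]; ok {
            c.mu.RUnlock()
            return u, nil
        }
        c.mu.RUnlock()

        u, err := fetch()
        if err != nil {
            return "", err
        }
        c.mu.Lock()
        c.cache[key] = u
        c.mu.Unlock()
        return u, nil
    })
    if err != nil {
        return "", err
    }
    return v.(string), nil
}

func main() {
    c := &urlCache{cache: map[string]string{}}
    u, _ := c.get("uploadid-1", func() (string, error) {
        fmt.Println("fetching once")
        return "https://upload.example.com", nil
    })
    fmt.Println(u)
}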
@@ -15,6 +15,7 @@ import (
     "github.com/OpenListTeam/OpenList/v4/pkg/utils"
     "github.com/go-resty/resty/v2"
     "github.com/google/uuid"
+    "golang.org/x/time/rate"
 )

 type Doubao struct {

@@ -23,6 +24,7 @@ type Doubao struct {
     *UploadToken
     UserId       string
     uploadThread int
+    limiter      *rate.Limiter
 }

 func (d *Doubao) Config() driver.Config {

@@ -61,6 +63,17 @@ func (d *Doubao) Init(ctx context.Context) error {
         d.UploadToken = uploadToken
     }

+    if d.LimitRate > 0 {
+        d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
+    }
+
+    return nil
+}
+
+func (d *Doubao) WaitLimit(ctx context.Context) error {
+    if d.limiter != nil {
+        return d.limiter.Wait(ctx)
+    }
     return nil
 }

@@ -69,6 +82,10 @@ func (d *Doubao) Drop(ctx context.Context) error {
 }

 func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
+
     var files []model.Obj
     fileList, err := d.getFiles(dir.GetID(), "")
     if err != nil {

@@ -95,6 +112,10 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
 }

 func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
+
     var downloadUrl string

     if u, ok := file.(*Object); ok {

@@ -160,6 +181,10 @@ func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }

 func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r UploadNodeResp
     _, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{

@@ -177,6 +202,10 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 }

 func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r UploadNodeResp
     _, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{

@@ -191,6 +220,10 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 }

 func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r BaseResp
     _, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{

@@ -207,6 +240,10 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
 }

 func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
     var r BaseResp
     _, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})

@@ -215,6 +252,10 @@ func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
+
     // 根据MIME类型确定数据类型
     mimetype := file.GetMimetype()
     dataType := FileDataType
@@ -13,6 +13,7 @@ type Addition struct {
     Cookie       string `json:"cookie" type:"text"`
     UploadThread string `json:"upload_thread" default:"3"`
     DownloadApi  string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
+    LimitRate    float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
 }

 var config = driver.Config{

@@ -23,6 +24,10 @@ var config = driver.Config{

 func init() {
     op.RegisterDriver(func() driver.Driver {
-        return &Doubao{}
+        return &Doubao{
+            Addition: Addition{
+                LimitRate: 2,
+            },
+        }
     })
 }
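Every public Doubao method now passes through WaitLimit before touching the API; with the default LimitRate of 2 and a burst of 1, calls end up spaced roughly 500ms apart. A runnable sketch of the same gate using golang.org/x/time/rate:

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

type client struct {
    limiter *rate.Limiter // nil means unlimited
}

func (c *client) waitLimit(ctx context.Context) error {
    if c.limiter != nil {
        return c.limiter.Wait(ctx) // blocks until a token is available
    }
    return nil
}

func main() {
    // 2 tokens/second, burst of 1 — matches the default LimitRate in the diff.
    c := &client{limiter: rate.NewLimiter(rate.Limit(2), 1)}
    start := time.Now()
    for i := 0; i < 4; i++ {
        if err := c.waitLimit(context.Background()); err != nil {
            fmt.Println("canceled:", err)
            return
        }
        fmt.Printf("request %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
    }
}

Wait (rather than Allow) is the right call here: it makes every driver method backpressure-friendly and still aborts promptly when the caller's context is canceled.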
@@ -113,9 +113,7 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
     }

     return &model.Link{
-        RangeReader: &model.FileRangeReader{
-            RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
-        },
+        RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
         SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
     }, nil
 }
@@ -51,6 +51,9 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
             if d.Addition.ShowReadme {
                 files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
             }
+            if d.Addition.ShowSourceCode {
+                files = append(files, point.GetSourceCode()...)
+            }
         } else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录
             nextDir := GetNextDir(point.Point, path)
             if nextDir == "" {

@@ -117,6 +120,10 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
             }

             files = append(files, point.GetReleaseByTagName(tagName)...)
+
+            if d.Addition.ShowSourceCode {
+                files = append(files, point.GetSourceCodeByTagName(tagName)...)
+            }
         }
     }
 }

@@ -10,6 +10,7 @@ type Addition struct {
     RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"OpenListTeam/OpenList" help:"structure:[path:]org/repo"`
     ShowReadme    bool   `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
     Token         string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
+    ShowSourceCode bool  `json:"show_source_code" type:"bool" default:"false" help:"show Source code (zip/tar.gz)"`
     ShowAllVersion bool  `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
     GitHubProxy    string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
 }
@@ -143,6 +143,60 @@ func (m *MountPoint) GetAllVersionSize() int64 {
     return size
 }

+func (m *MountPoint) GetSourceCode() []File {
+    files := make([]File, 0)
+
+    // 无法获取文件大小,此处设为 1
+    files = append(files, File{
+        Path:     m.Point + "/" + "Source code (zip)",
+        FileName: "Source code (zip)",
+        Size:     1,
+        Type:     "file",
+        UpdateAt: m.Release.CreatedAt,
+        CreateAt: m.Release.CreatedAt,
+        Url:      m.Release.ZipballUrl,
+    })
+    files = append(files, File{
+        Path:     m.Point + "/" + "Source code (tar.gz)",
+        FileName: "Source code (tar.gz)",
+        Size:     1,
+        Type:     "file",
+        UpdateAt: m.Release.CreatedAt,
+        CreateAt: m.Release.CreatedAt,
+        Url:      m.Release.TarballUrl,
+    })
+
+    return files
+}
+
+func (m *MountPoint) GetSourceCodeByTagName(tagName string) []File {
+    for _, item := range *m.Releases {
+        if item.TagName == tagName {
+            files := make([]File, 0)
+            files = append(files, File{
+                Path:     m.Point + "/" + "Source code (zip)",
+                FileName: "Source code (zip)",
+                Size:     1,
+                Type:     "file",
+                UpdateAt: item.CreatedAt,
+                CreateAt: item.CreatedAt,
+                Url:      item.ZipballUrl,
+            })
+            files = append(files, File{
+                Path:     m.Point + "/" + "Source code (tar.gz)",
+                FileName: "Source code (tar.gz)",
+                Size:     1,
+                Type:     "file",
+                UpdateAt: item.CreatedAt,
+                CreateAt: item.CreatedAt,
+                Url:      item.TarballUrl,
+            })
+            return files
+        }
+    }
+    return nil
+}
+
 func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
     if m.OtherFile == nil || refresh {
         resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")
drivers/local/copy_namedpipes.go (new file, 16 lines)
@@ -0,0 +1,16 @@
+//go:build !windows && !plan9 && !netbsd && !aix && !illumos && !solaris && !js
+
+package local
+
+import (
+    "os"
+    "path/filepath"
+    "syscall"
+)
+
+func copyNamedPipe(dstPath string, mode os.FileMode, dirMode os.FileMode) error {
+    if err := os.MkdirAll(filepath.Dir(dstPath), dirMode); err != nil {
+        return err
+    }
+    return syscall.Mkfifo(dstPath, uint32(mode))
+}
drivers/local/copy_namedpipes_x.go (new file, 9 lines)
@@ -0,0 +1,9 @@
+//go:build windows || plan9 || netbsd || aix || illumos || solaris || js
+
+package local
+
+import "os"
+
+func copyNamedPipe(_ string, _, _ os.FileMode) error {
+    return nil
+}
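The build-tag pair gives Mkfifo-capable platforms a real FIFO re-creation and everything else a no-op, so callers can dispatch on the file mode without per-platform conditionals. Roughly how a caller might route named pipes to it; the dispatch below is hypothetical, since the real one lives in the driver's tryCopy, shown truncated at the end of this compare:

package main

import (
    "fmt"
    "os"
)

// dispatchCopy sketches a per-file copy routine that recreates FIFOs at
// the destination instead of reading from them (reading a FIFO would
// block until a writer appears). Illustrative only.
func dispatchCopy(srcPath, dstPath string) error {
    info, err := os.Lstat(srcPath)
    if err != nil {
        return err // srcPath may not exist when run as-is
    }
    switch {
    case info.Mode()&os.ModeNamedPipe != 0:
        return copyNamedPipe(dstPath, info.Mode().Perm(), 0o755)
    default:
        return fmt.Errorf("regular-file copy elided in this sketch")
    }
}

// copyNamedPipe is a stub standing in for the build-tagged implementations above.
func copyNamedPipe(dstPath string, mode, dirMode os.FileMode) error {
    fmt.Printf("mkfifo %s (mode %v, parent dirs %v)\n", dstPath, mode, dirMode)
    return nil
}

func main() { _ = dispatchCopy("/tmp/src.fifo", "/tmp/dst.fifo") }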
@@ -23,7 +23,6 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/OpenListTeam/OpenList/v4/server/common"
 	"github.com/OpenListTeam/times"
-	cp "github.com/otiai10/copy"
 	log "github.com/sirupsen/logrus"
 	_ "golang.org/x/image/webp"
 )
@@ -297,16 +296,9 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 		return fmt.Errorf("the destination folder is a subfolder of the source folder")
 	}
 	err := os.Rename(srcPath, dstPath)
-	if err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
-		// Cross-device move: copy first, then delete
-		if err := d.Copy(ctx, srcObj, dstDir); err != nil {
-			return err
-		}
-		// After a successful copy, delete the source file/folder directly
-		if srcObj.IsDir() {
-			return os.RemoveAll(srcObj.GetPath())
-		}
-		return os.Remove(srcObj.GetPath())
+	if isCrossDeviceError(err) {
+		// Cross-device move: converted into a move task by the upper layer
+		return errs.NotImplement
 	}
 	if err == nil {
 		srcParent := filepath.Dir(srcPath)
@@ -347,15 +339,14 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
 	if utils.IsSubPath(srcPath, dstPath) {
 		return fmt.Errorf("the destination folder is a subfolder of the source folder")
 	}
-	// Copy using otiai10/copy to perform more secure & efficient copy
-	err := cp.Copy(srcPath, dstPath, cp.Options{
-		Sync:          true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
-		PreserveTimes: true,
-		PreserveOwner: true,
-	})
+	info, err := os.Lstat(srcPath)
 	if err != nil {
 		return err
 	}
+	// Copying a regular file returns errs.NotImplement, which converts it into a copy task
+	if err = d.tryCopy(srcPath, dstPath, info); err != nil {
+		return err
+	}
 	if d.directoryMap.Has(filepath.Dir(dstPath)) {
 		d.directoryMap.UpdateDirSize(filepath.Dir(dstPath))
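Note: Move no longer copies and deletes inline on EXDEV; it surfaces errs.NotImplement so the task layer schedules a tracked copy task instead. A hedged standalone sketch of the underlying flow, with the task handoff represented by a plain copy+delete fallback (unix-only demo, hypothetical paths):

package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"
)

func move(src, dst string) error {
	err := os.Rename(src, dst)
	if !errors.Is(err, unix.EXDEV) {
		return err // nil on success, or a genuine failure
	}
	// Cross-device: rename(2) cannot work, so copy the bytes and remove the
	// source. The driver now reports this case upward rather than blocking here.
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	if _, err = io.Copy(out, in); err != nil {
		out.Close()
		return err
	}
	if err = out.Close(); err != nil {
		return err
	}
	return os.Remove(src)
}

func main() {
	fmt.Println(move("/tmp/a.txt", "/tmp/b.txt")) // hypothetical paths
}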
@@ -3,6 +3,7 @@ package local
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/fs"
 	"os"
@@ -14,7 +15,9 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/KarpelesLab/reflink"
 	"github.com/OpenListTeam/OpenList/v4/internal/conf"
+	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/disintegration/imaging"
@@ -148,7 +151,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
 		return nil, nil, err
 	}
 	if d.ThumbCacheFolder != "" {
-		err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0666)
+		err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0o666)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -405,3 +408,79 @@ func (m *DirectoryMap) DeleteDirNode(dirname string) error {
 
 	return nil
 }
+
+func (d *Local) tryCopy(srcPath, dstPath string, info os.FileInfo) error {
+	if info.Mode()&os.ModeDevice != 0 {
+		return errors.New("cannot copy a device")
+	} else if info.Mode()&os.ModeSymlink != 0 {
+		return d.copySymlink(srcPath, dstPath)
+	} else if info.Mode()&os.ModeNamedPipe != 0 {
+		return copyNamedPipe(dstPath, info.Mode(), os.FileMode(d.mkdirPerm))
+	} else if info.IsDir() {
+		return d.recurAndTryCopy(srcPath, dstPath)
+	} else {
+		return tryReflinkCopy(srcPath, dstPath)
+	}
+}
+
+func (d *Local) copySymlink(srcPath, dstPath string) error {
+	linkOrig, err := os.Readlink(srcPath)
+	if err != nil {
+		return err
+	}
+	dstDir := filepath.Dir(dstPath)
+	if !filepath.IsAbs(linkOrig) {
+		srcDir := filepath.Dir(srcPath)
+		rel, err := filepath.Rel(dstDir, srcDir)
+		if err != nil {
+			rel, err = filepath.Abs(srcDir)
+		}
+		if err != nil {
+			return err
+		}
+		linkOrig = filepath.Clean(filepath.Join(rel, linkOrig))
+	}
+	err = os.MkdirAll(dstDir, os.FileMode(d.mkdirPerm))
+	if err != nil {
+		return err
+	}
+	return os.Symlink(linkOrig, dstPath)
+}
+
+func (d *Local) recurAndTryCopy(srcPath, dstPath string) error {
+	err := os.MkdirAll(dstPath, os.FileMode(d.mkdirPerm))
+	if err != nil {
+		return err
+	}
+	files, err := readDir(srcPath)
+	if err != nil {
+		return err
+	}
+	for _, f := range files {
+		if !f.IsDir() {
+			sp := filepath.Join(srcPath, f.Name())
+			dp := filepath.Join(dstPath, f.Name())
+			if err = d.tryCopy(sp, dp, f); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range files {
+		if f.IsDir() {
+			sp := filepath.Join(srcPath, f.Name())
+			dp := filepath.Join(dstPath, f.Name())
+			if err = d.recurAndTryCopy(sp, dp); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func tryReflinkCopy(srcPath, dstPath string) error {
+	err := reflink.Always(srcPath, dstPath)
+	if errors.Is(err, reflink.ErrReflinkUnsupported) || errors.Is(err, reflink.ErrReflinkFailed) || isCrossDeviceError(err) {
+		return errs.NotImplement
+	}
+	return err
+}
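Note: tryCopy dispatches on the file mode (device, symlink, FIFO, directory, regular file), and tryReflinkCopy maps reflink's "unsupported" errors to errs.NotImplement so callers can degrade to a streamed copy. A minimal sketch of that reflink-first strategy, using the same KarpelesLab/reflink calls the diff adds (standalone demo; the paths and the fallback body are illustrative):

package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/KarpelesLab/reflink"
)

func copyFile(src, dst string) error {
	// Clone via reflink (FICLONE etc.) where the filesystem supports it.
	err := reflink.Always(src, dst)
	if errors.Is(err, reflink.ErrReflinkUnsupported) || errors.Is(err, reflink.ErrReflinkFailed) {
		return streamCopy(src, dst) // degrade to a plain byte copy
	}
	return err
}

func streamCopy(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, in)
	return err
}

func main() {
	fmt.Println(copyFile("/tmp/src.bin", "/tmp/dst.bin")) // hypothetical paths
}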
@@ -3,11 +3,13 @@
 package local
 
 import (
+	"errors"
 	"io/fs"
 	"strings"
 	"syscall"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"golang.org/x/sys/unix"
 )
 
 func isHidden(f fs.FileInfo, _ string) bool {
@@ -27,3 +29,7 @@ func getDiskUsage(path string) (model.DiskUsage, error) {
 		FreeSpace: free,
 	}, nil
 }
+
+func isCrossDeviceError(err error) bool {
+	return errors.Is(err, unix.EXDEV)
+}
@@ -49,3 +49,7 @@ func getDiskUsage(path string) (model.DiskUsage, error) {
 		FreeSpace: freeBytes,
 	}, nil
 }
+
+func isCrossDeviceError(err error) bool {
+	return errors.Is(err, windows.ERROR_NOT_SAME_DEVICE)
+}
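Note: both helpers rely on errors.Is unwrapping *os.LinkError down to the raw errno, which is why a sentinel comparison works on each platform (unix.EXDEV here, windows.ERROR_NOT_SAME_DEVICE above). A quick self-contained check of that unwrapping (unix shown):

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// os.Rename returns *os.LinkError; LinkError.Unwrap() exposes the errno.
	err := &os.LinkError{Op: "rename", Old: "/tmp/a", New: "/mnt/b", Err: unix.EXDEV}
	fmt.Println(errors.Is(err, unix.EXDEV)) // true
}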
@@ -57,18 +57,22 @@ func setBody(body interface{}) base.ReqCallback {
 }
 
 func handleFolderId(dir model.Obj) interface{} {
-	if dir.GetID() == "" {
-		return nil
+	if isRootFolder(dir) {
+		return nil // Root folder doesn't need folderId
 	}
 	return dir.GetID()
 }
 
+func isRootFolder(dir model.Obj) bool {
+	return dir.GetID() == ""
+}
+
 // API layer methods
 
 func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
 	var files []MFile
 	var body map[string]string
-	if dir.GetPath() != "/" {
+	if !isRootFolder(dir) {
 		body = map[string]string{"folderId": dir.GetID()}
 	} else {
 		body = map[string]string{}
@@ -85,7 +89,7 @@ func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
 func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) {
 	var folders []MFolder
 	var body map[string]string
-	if dir.GetPath() != "/" {
+	if !isRootFolder(dir) {
 		body = map[string]string{"folderId": dir.GetID()}
 	} else {
 		body = map[string]string{}
@@ -197,16 +201,24 @@ func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		Reader:         stream,
 		UpdateProgress: up,
 	})
-	req := base.RestyClient.R().
-		SetContext(ctx).
-		SetFileReader("file", stream.GetName(), reader).
-		SetFormData(map[string]string{
-			"folderId":    handleFolderId(dstDir).(string),
-			"name":        stream.GetName(),
-			"comment":     "",
-			"isSensitive": "false",
-			"force":       "false",
-		}).
+	// Build form data, only add folderId if not root folder
+	formData := map[string]string{
+		"name":        stream.GetName(),
+		"comment":     "",
+		"isSensitive": "false",
+		"force":       "false",
+	}
+
+	folderId := handleFolderId(dstDir)
+	if folderId != nil {
+		formData["folderId"] = folderId.(string)
+	}
+
+	req := base.RestyClient.R().
+		SetContext(ctx).
+		SetFileReader("file", stream.GetName(), reader).
+		SetFormData(formData).
 		SetResult(&file).
 		SetAuthToken(d.AccessToken)
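Note: the old code unconditionally did handleFolderId(dstDir).(string), which panics for the root folder because handleFolderId returns nil there; the new code only attaches folderId when one exists. A tiny sketch of the corrected rule (standalone; names are illustrative, not the driver's API):

package main

import "fmt"

func buildFormData(name, folderID string) map[string]string {
	form := map[string]string{
		"name":        name,
		"comment":     "",
		"isSensitive": "false",
		"force":       "false",
	}
	if folderID != "" { // root folder has no ID, so the key is omitted entirely
		form["folderId"] = folderID
	}
	return form
}

func main() {
	fmt.Println(buildFormData("a.txt", ""))     // root: no folderId key
	fmt.Println(buildFormData("a.txt", "9abc")) // subfolder: folderId present
}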
@@ -236,4 +236,19 @@ func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error
 	}, nil
 }
 
+func (d *Onedrive) GetDirectUploadTools() []string {
+	if !d.EnableDirectUpload {
+		return nil
+	}
+	return []string{"HttpDirect"}
+}
+
+// GetDirectUploadInfo returns the direct upload info for OneDrive
+func (d *Onedrive) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
+	if !d.EnableDirectUpload {
+		return nil, errs.NotImplement
+	}
+	return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
+}
+
 var _ driver.Driver = (*Onedrive)(nil)
@@ -19,6 +19,7 @@ type Addition struct {
 	ChunkSize        int64  `json:"chunk_size" type:"number" default:"5"`
 	CustomHost       string `json:"custom_host" help:"Custom host for onedrive download link"`
 	DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
+	EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
 }
 
 var config = driver.Config{
@@ -133,7 +133,7 @@ func (d *Onedrive) _refreshToken() error {
 	return nil
 }
 
-func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
 	if d.ref != nil {
 		return d.ref.Request(url, method, callback, resp)
 	}
@@ -152,7 +152,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback,
 		return nil, err
 	}
 	if e.Error.Code != "" {
-		if e.Error.Code == "InvalidAuthenticationToken" {
+		if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
 			err = d.refreshToken()
 			if err != nil {
 				return nil, err
@@ -310,9 +310,36 @@ func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
 	var resp DriveResp
 	_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
 		req.SetContext(ctx)
-	}, &resp)
+	}, &resp, true)
 	if err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
+
+func (d *Onedrive) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
+	// Create upload session
+	url := d.GetMetaUrl(false, path) + "/createUploadSession"
+	metadata := map[string]any{
+		"item": map[string]any{
+			"@microsoft.graph.conflictBehavior": "rename",
+		},
+	}
+
+	res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
+		req.SetBody(metadata).SetContext(ctx)
+	}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
+	if uploadUrl == "" {
+		return nil, fmt.Errorf("failed to get upload URL from response")
+	}
+
+	return &model.HttpDirectUploadInfo{
+		UploadURL: uploadUrl,
+		ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
+		Method:    "PUT",
+	}, nil
+}
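Note: getDirectUploadInfo creates a Graph upload session and hands its uploadUrl to the client, which then uploads straight to OneDrive in ChunkSize pieces. A hedged sketch of the client side (the Graph upload-session protocol expects PUT requests with a Content-Range header; the URL below is a placeholder, and retry handling is omitted):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// uploadChunk PUTs one slice of the file to the session URL with the
// byte range it covers, as the upload-session protocol requires.
func uploadChunk(uploadUrl string, chunk []byte, start, total int64) error {
	req, err := http.NewRequest(http.MethodPut, uploadUrl, bytes.NewReader(chunk))
	if err != nil {
		return err
	}
	end := start + int64(len(chunk)) - 1
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, total))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("chunk upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	data := []byte("hello direct upload")
	// uploadUrl would come from GetDirectUploadInfo's UploadURL field.
	_ = uploadChunk("https://example.com/upload-session", data, 0, int64(len(data)))
}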
@@ -222,4 +222,18 @@ func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, er
 	}, nil
 }
 
+func (d *OnedriveAPP) GetDirectUploadTools() []string {
+	if !d.EnableDirectUpload {
+		return nil
+	}
+	return []string{"HttpDirect"}
+}
+
+func (d *OnedriveAPP) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
+	if !d.EnableDirectUpload {
+		return nil, errs.NotImplement
+	}
+	return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
+}
+
 var _ driver.Driver = (*OnedriveAPP)(nil)
@@ -15,6 +15,7 @@ type Addition struct {
 	ChunkSize        int64  `json:"chunk_size" type:"number" default:"5"`
 	CustomHost       string `json:"custom_host" help:"Custom host for onedrive download link"`
 	DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
+	EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
 }
 
 var config = driver.Config{
@@ -88,7 +88,7 @@ func (d *OnedriveAPP) _accessToken() error {
 	return nil
 }
 
-func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
 	req := base.RestyClient.R()
 	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
 	if callback != nil {
@@ -104,7 +104,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallba
 		return nil, err
 	}
 	if e.Error.Code != "" {
-		if e.Error.Code == "InvalidAuthenticationToken" {
+		if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
 			err = d.accessToken()
 			if err != nil {
 				return nil, err
@@ -216,9 +216,36 @@ func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
 	var resp DriveResp
 	_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
 		req.SetContext(ctx)
-	}, &resp)
+	}, &resp, true)
 	if err != nil {
 		return nil, err
 	}
 	return &resp, nil
 }
+
+func (d *OnedriveAPP) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
+	// Create upload session
+	url := d.GetMetaUrl(false, path) + "/createUploadSession"
+	metadata := map[string]any{
+		"item": map[string]any{
+			"@microsoft.graph.conflictBehavior": "rename",
+		},
+	}
+
+	res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
+		req.SetBody(metadata).SetContext(ctx)
+	}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
+	if uploadUrl == "" {
+		return nil, fmt.Errorf("failed to get upload URL from response")
+	}
+
+	return &model.HttpDirectUploadInfo{
+		UploadURL: uploadUrl,
+		ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
+		Method:    "PUT",
+	}, nil
+}
@@ -117,7 +117,7 @@ func (d *OpenList) Link(ctx context.Context, file model.Obj, args model.LinkArgs
 	if d.PassUAToUpsteam {
 		userAgent := args.Header.Get("user-agent")
 		if userAgent != "" {
-			headers["User-Agent"] = base.UserAgent
+			headers["User-Agent"] = userAgent
 		}
 	}
 	// if PassIPToUpsteam is true, then pass the ip address to the upstream
@@ -360,6 +360,7 @@ func (d *OpenList) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
 			Name:          []string{name},
 			PutIntoNewDir: args.PutIntoNewDir,
 			SrcDir:        dir,
+			Overwrite:     args.Overwrite,
 		})
 	})
 	return err
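Note: the first hunk fixes a passthrough bug: the client's own User-Agent was read but the constant base.UserAgent was forwarded instead. A minimal reproduction of the corrected behavior (standalone, illustrative names):

package main

import (
	"fmt"
	"net/http"
)

func upstreamHeaders(in http.Header) map[string]string {
	headers := map[string]string{}
	if ua := in.Get("user-agent"); ua != "" {
		headers["User-Agent"] = ua // forward the caller's UA, not the default
	}
	return headers
}

func main() {
	in := http.Header{}
	in.Set("User-Agent", "VLC/3.0.20")
	fmt.Println(upstreamHeaders(in)["User-Agent"]) // VLC/3.0.20
}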
@@ -167,4 +167,5 @@ type DecompressReq struct {
 	Name          []string `json:"name"`
 	PutIntoNewDir bool     `json:"put_into_new_dir"`
 	SrcDir        string   `json:"src_dir"`
+	Overwrite     bool     `json:"overwrite"`
 }
@@ -190,9 +190,7 @@ func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkA
 
 	expiration := time.Minute
 	return &model.Link{
-		RangeReader: &model.FileRangeReader{
-			RangeReaderIF: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
-		},
+		RangeReader:   stream.RateLimitRangeReaderFunc(rangeReaderFunc),
 		ContentLength: size,
 		Expiration:    &expiration,
 	}, nil
@@ -217,11 +217,10 @@ func (d *QuarkOrUC) GetDetails(ctx context.Context) (*model.StorageDetails, erro
 	if err != nil {
 		return nil, err
 	}
+	used := memberInfo.Data.UseCapacity
+	total := memberInfo.Data.TotalCapacity
 	return &model.StorageDetails{
-		DiskUsage: model.DiskUsage{
-			TotalSpace: memberInfo.Data.TotalCapacity,
-			FreeSpace:  memberInfo.Data.TotalCapacity - memberInfo.Data.UseCapacity,
-		},
+		DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
 	}, nil
 }
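Note: the inline subtraction is replaced by a shared helper, which also guards the underflow when used exceeds total. A sketch of what a DiskUsageFromUsedAndTotal-style helper computes (illustrative struct; the real one lives in the driver package):

package main

import "fmt"

type DiskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

func diskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
	free := uint64(0)
	if total > used { // avoid unsigned underflow on inconsistent quota data
		free = total - used
	}
	return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
	fmt.Println(diskUsageFromUsedAndTotal(30<<30, 100<<30)) // {107374182400 75161927680}
}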
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
+	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/cron"
@@ -27,6 +28,7 @@ type S3 struct {
 	Session    *session.Session
 	client     *s3.S3
 	linkClient *s3.S3
+	directUploadClient *s3.S3
 
 	config driver.Config
 	cron   *cron.Cron
@@ -52,16 +54,18 @@ func (d *S3) Init(ctx context.Context) error {
 			if err != nil {
 				log.Errorln("Doge init session error:", err)
 			}
-			d.client = d.getClient(false)
-			d.linkClient = d.getClient(true)
+			d.client = d.getClient(ClientTypeNormal)
+			d.linkClient = d.getClient(ClientTypeLink)
+			d.directUploadClient = d.getClient(ClientTypeDirectUpload)
 		})
 	}
 	err := d.initSession()
 	if err != nil {
 		return err
 	}
-	d.client = d.getClient(false)
-	d.linkClient = d.getClient(true)
+	d.client = d.getClient(ClientTypeNormal)
+	d.linkClient = d.getClient(ClientTypeLink)
+	d.directUploadClient = d.getClient(ClientTypeDirectUpload)
 	return nil
 }
@@ -210,4 +214,33 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
 	return err
 }
+
+func (d *S3) GetDirectUploadTools() []string {
+	if !d.EnableDirectUpload {
+		return nil
+	}
+	return []string{"HttpDirect"}
+}
+
+func (d *S3) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
+	if !d.EnableDirectUpload {
+		return nil, errs.NotImplement
+	}
+	path := getKey(stdpath.Join(dstDir.GetPath(), fileName), false)
+	req, _ := d.directUploadClient.PutObjectRequest(&s3.PutObjectInput{
+		Bucket: &d.Bucket,
+		Key:    &path,
+	})
+	if req == nil {
+		return nil, fmt.Errorf("failed to create PutObject request")
+	}
+	link, err := req.Presign(time.Hour * time.Duration(d.SignURLExpire))
+	if err != nil {
+		return nil, err
+	}
+	return &model.HttpDirectUploadInfo{
+		UploadURL: link,
+		Method:    "PUT",
+	}, nil
+}
+
 var _ driver.Driver = (*S3)(nil)
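Note: GetDirectUploadInfo presigns a PutObject request so the client uploads straight to the bucket without proxying through the server. A standalone sketch of that flow with the same aws-sdk-go v1 calls the diff uses (bucket, key, and region are made up):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := s3.New(sess)

	bucket, key := "my-bucket", "uploads/file.bin"
	req, _ := client.PutObjectRequest(&s3.PutObjectInput{
		Bucket: &bucket,
		Key:    &key,
	})
	url, err := req.Presign(4 * time.Hour) // the driver uses time.Hour * SignURLExpire
	if err != nil {
		fmt.Println("presign failed:", err)
		return
	}
	// A client then PUTs the file body to this URL directly.
	fmt.Println("PUT", url)
}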
@@ -21,6 +21,8 @@ type Addition struct {
 	ListObjectVersion        string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
 	RemoveBucket             bool   `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
 	AddFilenameToDisposition bool   `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
+	EnableDirectUpload       bool   `json:"enable_direct_upload" default:"false"`
+	DirectUploadHost         string `json:"direct_upload_host" required:"false"`
 }
 
 func init() {
@@ -41,9 +41,15 @@ func (d *S3) initSession() error {
 	return err
 }
 
-func (d *S3) getClient(link bool) *s3.S3 {
+const (
+	ClientTypeNormal = iota
+	ClientTypeLink
+	ClientTypeDirectUpload
+)
+
+func (d *S3) getClient(clientType int) *s3.S3 {
 	client := s3.New(d.Session)
-	if link && d.CustomHost != "" {
+	if clientType == ClientTypeLink && d.CustomHost != "" {
 		client.Handlers.Build.PushBack(func(r *request.Request) {
 			if r.HTTPRequest.Method != http.MethodGet {
 				return
@@ -58,6 +64,20 @@ func (d *S3) getClient(link bool) *s3.S3 {
 			}
 		})
 	}
+	if clientType == ClientTypeDirectUpload && d.DirectUploadHost != "" {
+		client.Handlers.Build.PushBack(func(r *request.Request) {
+			if r.HTTPRequest.Method != http.MethodPut {
+				return
+			}
+			split := strings.SplitN(d.DirectUploadHost, "://", 2)
+			if utils.SliceContains([]string{"http", "https"}, split[0]) {
+				r.HTTPRequest.URL.Scheme = split[0]
+				r.HTTPRequest.URL.Host = split[1]
+			} else {
+				r.HTTPRequest.URL.Host = d.DirectUploadHost
+			}
+		})
+	}
 	return client
 }
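Note: the direct-upload client rewrites only PUT requests. The configured DirectUploadHost may carry an optional "scheme://host" prefix that overrides both scheme and host; a bare value overrides the host alone. A small sketch of that parsing (standalone; the length check is an illustrative guard, the driver relies on its utils helper):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func applyDirectUploadHost(u *url.URL, directUploadHost string) {
	split := strings.SplitN(directUploadHost, "://", 2)
	if len(split) == 2 && (split[0] == "http" || split[0] == "https") {
		u.Scheme = split[0]
		u.Host = split[1]
	} else {
		u.Host = directUploadHost
	}
}

func main() {
	u, _ := url.Parse("https://s3.amazonaws.com/bucket/key")
	applyDirectUploadHost(u, "http://upload.example.internal:9000")
	fmt.Println(u.String()) // http://upload.example.internal:9000/bucket/key
}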
@@ -52,12 +52,14 @@ func (d *Strm) Init(ctx context.Context) error {
 		}
 		k, v := getPair(path)
 		d.pathMap[k] = append(d.pathMap[k], v)
+		if d.SaveStrmToLocal {
 			err := InsertStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
 			if err != nil {
 				log.Errorf("insert strmTrie error: %v", err)
 				continue
 			}
 		}
+	}
 	if len(d.pathMap) == 1 {
 		for k := range d.pathMap {
 			d.oneKey = k
@@ -68,26 +70,52 @@ func (d *Strm) Init(ctx context.Context) error {
 		d.autoFlatten = false
 	}
 
-	d.supportSuffix = supportSuffix()
-	if d.FilterFileTypes != "" {
-		types := strings.Split(d.FilterFileTypes, ",")
-		for _, ext := range types {
+	var supportTypes []string
+	if d.FilterFileTypes == "" {
+		d.FilterFileTypes = "mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac"
+	}
+	supportTypes = strings.Split(d.FilterFileTypes, ",")
+	d.supportSuffix = map[string]struct{}{}
+	for _, ext := range supportTypes {
 		ext = strings.ToLower(strings.TrimSpace(ext))
 		if ext != "" {
 			d.supportSuffix[ext] = struct{}{}
 		}
 	}
-	}
 
-	d.downloadSuffix = downloadSuffix()
-	if d.DownloadFileTypes != "" {
-		downloadTypes := strings.Split(d.DownloadFileTypes, ",")
+	var downloadTypes []string
+	if d.DownloadFileTypes == "" {
+		d.DownloadFileTypes = "ass,srt,vtt,sub,strm"
+	}
+	downloadTypes = strings.Split(d.DownloadFileTypes, ",")
+	d.downloadSuffix = map[string]struct{}{}
 	for _, ext := range downloadTypes {
 		ext = strings.ToLower(strings.TrimSpace(ext))
 		if ext != "" {
 			d.downloadSuffix[ext] = struct{}{}
 		}
 	}
+
+	if d.Version != 5 {
+		types := strings.Split("mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac", ",")
+		for _, ext := range types {
+			if _, ok := d.supportSuffix[ext]; !ok {
+				d.supportSuffix[ext] = struct{}{}
+				supportTypes = append(supportTypes, ext)
+			}
+		}
+		d.FilterFileTypes = strings.Join(supportTypes, ",")
+
+		types = strings.Split("ass,srt,vtt,sub,strm", ",")
+		for _, ext := range types {
+			if _, ok := d.downloadSuffix[ext]; !ok {
+				d.downloadSuffix[ext] = struct{}{}
+				downloadTypes = append(downloadTypes, ext)
+			}
+		}
+		d.DownloadFileTypes = strings.Join(downloadTypes, ",")
+		d.PathPrefix = "/d"
+		d.Version = 5
+	}
 	return nil
 }
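Note: Init now normalizes the user's comma-separated type lists itself (the old supportSuffix()/downloadSuffix() helpers are deleted below) and uses a Version gate to merge the built-in defaults into configs saved by older releases exactly once. The normalization step is simply split, trim, lower-case, drop empties; a self-contained sketch:

package main

import (
	"fmt"
	"strings"
)

func buildSuffixSet(csv string) map[string]struct{} {
	set := map[string]struct{}{}
	for _, ext := range strings.Split(csv, ",") {
		ext = strings.ToLower(strings.TrimSpace(ext))
		if ext != "" {
			set[ext] = struct{}{}
		}
	}
	return set
}

func main() {
	fmt.Println(buildSuffixSet(" MP4, mkv ,,srt")) // map[mkv:{} mp4:{} srt:{}]
}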
@@ -3,7 +3,6 @@ package strm
 import (
 	"context"
 	"errors"
-	"io"
 	"os"
 	stdpath "path"
 	"strings"
@@ -11,6 +10,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/op"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	log "github.com/sirupsen/logrus"
 	"github.com/tchap/go-patricia/v2/patricia"
@@ -27,7 +27,7 @@ func UpdateLocalStrm(ctx context.Context, path string, objs []model.Obj) {
 			localPath := stdpath.Join(localParentPath, obj.GetName())
 			generateStrm(ctx, driver, obj, localPath)
 		}
-		deleteExtraFiles(localParentPath, objs)
+		deleteExtraFiles(driver, localParentPath, objs)
 	}
 
 	_ = strmTrie.VisitPrefixes(patricia.Prefix(path), func(needPathPrefix patricia.Prefix, item patricia.Item) error {
@@ -38,10 +38,7 @@ func UpdateLocalStrm(ctx context.Context, path string, objs []model.Obj) {
 			return nil
 		}
 		for _, strmDriver := range strmDrivers {
-			strmObjs, _ := utils.SliceConvert(objs, func(obj model.Obj) (model.Obj, error) {
-				ret := strmDriver.convert2strmObj(ctx, path, obj)
-				return &ret, nil
-			})
+			strmObjs := strmDriver.convert2strmObjs(ctx, path, objs)
 			updateLocal(strmDriver, stdpath.Join(stdpath.Base(needPath), restPath), strmObjs)
 		}
 		return nil
@@ -92,33 +89,47 @@ func RemoveStrm(dstPath string, d *Strm) {
 }
 
 func generateStrm(ctx context.Context, driver *Strm, obj model.Obj, localPath string) {
+	if obj.IsDir() {
+		err := utils.CreateNestedDirectory(localPath)
+		if err != nil {
+			log.Warnf("failed to generate strm dir %s: failed to create dir: %v", localPath, err)
+			return
+		}
+	} else {
 		link, err := driver.Link(ctx, obj, model.LinkArgs{})
 		if err != nil {
 			log.Warnf("failed to generate strm of obj %s: failed to link: %v", localPath, err)
 			return
 		}
-	seekableStream, err := stream.NewSeekableStream(&stream.FileStream{
-		Obj: obj,
-		Ctx: ctx,
-	}, link)
+		defer link.Close()
+		size := link.ContentLength
+		if size <= 0 {
+			size = obj.GetSize()
+		}
+		rrf, err := stream.GetRangeReaderFromLink(size, link)
 		if err != nil {
-		_ = link.Close()
-		log.Warnf("failed to generate strm of obj %s: failed to get seekable stream: %v", localPath, err)
+			log.Warnf("failed to generate strm of obj %s: failed to get range reader: %v", localPath, err)
 			return
 		}
-	defer seekableStream.Close()
+		rc, err := rrf.RangeRead(ctx, http_range.Range{Length: -1})
+		if err != nil {
+			log.Warnf("failed to generate strm of obj %s: failed to read range: %v", localPath, err)
+			return
+		}
+		defer rc.Close()
 		file, err := utils.CreateNestedFile(localPath)
 		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to create local file: %v", localPath, err)
 			return
 		}
 		defer file.Close()
-	if _, err := io.Copy(file, seekableStream); err != nil {
+		if _, err := utils.CopyWithBuffer(file, rc); err != nil {
 			log.Warnf("failed to generate strm of obj %s: copy failed: %v", localPath, err)
 		}
 	}
+}
 
-func deleteExtraFiles(localPath string, objs []model.Obj) {
+func deleteExtraFiles(driver *Strm, localPath string, objs []model.Obj) {
 	localFiles, err := getLocalFiles(localPath)
 	if err != nil {
 		log.Errorf("Failed to read local files from %s: %v", localPath, err)
@@ -126,15 +137,29 @@ func deleteExtraFiles(localPath string, objs []model.Obj) {
 	}
 
 	objsSet := make(map[string]struct{})
+	objsBaseNameSet := make(map[string]struct{})
 	for _, obj := range objs {
 		if obj.IsDir() {
 			continue
 		}
-		objsSet[stdpath.Join(localPath, obj.GetName())] = struct{}{}
+		objName := obj.GetName()
+		objsSet[stdpath.Join(localPath, objName)] = struct{}{}
+
+		objBaseName := strings.TrimSuffix(objName, utils.SourceExt(objName))
+		objsBaseNameSet[stdpath.Join(localPath, objBaseName[:len(objBaseName)-1])] = struct{}{}
 	}
 
 	for _, localFile := range localFiles {
 		if _, exists := objsSet[localFile]; !exists {
+			ext := utils.Ext(localFile)
+			localFileName := stdpath.Base(localFile)
+			localFileBaseName := strings.TrimSuffix(localFile, utils.SourceExt(localFileName))
+			_, nameExists := objsBaseNameSet[localFileBaseName[:len(localFileBaseName)-1]]
+			_, downloadFile := driver.downloadSuffix[ext]
+			if driver.KeepLocalDownloadFile && nameExists && downloadFile {
+				continue
+			}
+
 			err := os.Remove(localFile)
 			if err != nil {
 				log.Errorf("Failed to delete file: %s, error: %v\n", localFile, err)
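Note: deleteExtraFiles now spares local companion files (subtitles and the like) when KeepLocalDownloadFile is set, the extension is in downloadSuffix, and some remote object shares the same base name. A rough sketch of that keep rule (standalone; SourceExt is approximated here with path.Ext, which the real helper may handle differently):

package main

import (
	"fmt"
	stdpath "path"
	"strings"
)

func stem(name string) string {
	return strings.TrimSuffix(name, stdpath.Ext(name))
}

func shouldKeep(localFile string, remoteStems, downloadSuffix map[string]struct{}, keep bool) bool {
	ext := strings.TrimPrefix(stdpath.Ext(localFile), ".")
	_, isDownload := downloadSuffix[ext]
	_, stemMatches := remoteStems[stem(localFile)]
	return keep && isDownload && stemMatches
}

func main() {
	remote := map[string]struct{}{"/lib/Movie": {}}
	dl := map[string]struct{}{"srt": {}, "ass": {}}
	fmt.Println(shouldKeep("/lib/Movie.srt", remote, dl, true)) // true: kept
	fmt.Println(shouldKeep("/lib/Other.srt", remote, dl, true)) // false: deleted
}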
@@ -8,12 +8,15 @@ import (
 type Addition struct {
 	Paths             string `json:"paths" required:"true" type:"text"`
 	SiteUrl           string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
-	FilterFileTypes   string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
-	DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass" required:"false" help:"Files need to download with strm (usually subtitles)"`
+	PathPrefix        string `json:"PathPrefix" type:"text" required:"false" default:"/d" help:"Path prefix"`
+	DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass,srt,vtt,sub,strm" required:"false" help:"Files need to download with strm (usually subtitles)"`
+	FilterFileTypes   string `json:"filterFileTypes" type:"text" default:"mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac" required:"false" help:"Supports suffix name of strm file"`
 	EncodePath        bool   `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
 	WithoutUrl        bool   `json:"withoutUrl" default:"false" help:"strm file content without URL prefix"`
 	SaveStrmToLocal   bool   `json:"SaveStrmToLocal" default:"false" help:"save strm file locally"`
 	SaveStrmLocalPath string `json:"SaveStrmLocalPath" type:"text" help:"save strm file local path"`
+	KeepLocalDownloadFile bool `json:"KeepLocalDownloadFile" default:"false" help:"keep local download files"`
+	Version           int
 }
 
 var config = driver.Config{
@@ -1,36 +0,0 @@
-package strm
-
-func supportSuffix() map[string]struct{} {
-	return map[string]struct{}{
-		// video
-		"mp4":  {},
-		"mkv":  {},
-		"flv":  {},
-		"avi":  {},
-		"wmv":  {},
-		"ts":   {},
-		"rmvb": {},
-		"webm": {},
-		// audio
-		"mp3":  {},
-		"flac": {},
-		"aac":  {},
-		"wav":  {},
-		"ogg":  {},
-		"m4a":  {},
-		"wma":  {},
-		"alac": {},
-	}
-}
-
-func downloadSuffix() map[string]struct{} {
-	return map[string]struct{}{
-		// strm
-		"strm": {},
-		// subtitles
-		"ass": {},
-		"srt": {},
-		"vtt": {},
-		"sub": {},
-	}
-}
@@ -3,7 +3,6 @@ package strm
 import (
 	"context"
 	"fmt"
-
 	stdpath "path"
 	"strings"
 
@@ -58,10 +57,36 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
 	if err != nil {
 		return nil, err
 	}
+	return d.convert2strmObjs(ctx, reqPath, objs), nil
+}
+
+func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []model.Obj) []model.Obj {
 	var validObjs []model.Obj
 	for _, obj := range objs {
-		objRes := d.convert2strmObj(ctx, reqPath, obj)
+		id, name, path := "", obj.GetName(), ""
+		size := int64(0)
+		if !obj.IsDir() {
+			path = stdpath.Join(reqPath, obj.GetName())
+			ext := strings.ToLower(utils.Ext(name))
+			sourceExt := utils.SourceExt(name)
+			if _, ok := d.downloadSuffix[ext]; ok {
+				size = obj.GetSize()
+			} else if _, ok := d.supportSuffix[ext]; ok {
+				id = "strm"
+				name = strings.TrimSuffix(name, sourceExt) + "strm"
+				size = int64(len(d.getLink(ctx, path)))
+			} else {
+				continue
+			}
+		}
+		objRes := model.Object{
+			ID:       id,
+			Path:     path,
+			Name:     name,
+			Size:     size,
+			Modified: obj.ModTime(),
+			IsFolder: obj.IsDir(),
+		}
 		thumb, ok := model.GetThumb(obj)
 		if !ok {
 			validObjs = append(validObjs, &objRes)
@@ -74,33 +99,7 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
 			},
 		})
 	}
-	return validObjs, nil
-}
-
-func (d *Strm) convert2strmObj(ctx context.Context, reqPath string, obj model.Obj) model.Object {
-	id, name, path := "", obj.GetName(), ""
-	size := int64(0)
-	if !obj.IsDir() {
-		path = stdpath.Join(reqPath, obj.GetName())
-		ext := strings.ToLower(utils.Ext(name))
-		if _, ok := d.supportSuffix[ext]; ok {
-			id = "strm"
-			name = strings.TrimSuffix(name, ext) + "strm"
-			size = int64(len(d.getLink(ctx, path)))
-		} else if _, ok := d.downloadSuffix[ext]; ok {
-			size = obj.GetSize()
-		} else {
-
-		}
-	}
-	return model.Object{
-		ID:       id,
-		Path:     path,
-		Name:     name,
-		Size:     size,
-		Modified: obj.ModTime(),
-		IsFolder: obj.IsDir(),
-	}
+	return validObjs
 }
 
 func (d *Strm) getLink(ctx context.Context, path string) string {
@@ -112,6 +111,13 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
 		signPath := sign.Sign(path)
 		finalPath = fmt.Sprintf("%s?sign=%s", finalPath, signPath)
 	}
+	pathPrefix := d.PathPrefix
+	if len(pathPrefix) > 0 {
+		finalPath = stdpath.Join(pathPrefix, finalPath)
+	}
+	if !strings.HasPrefix(finalPath, "/") {
+		finalPath = "/" + finalPath
+	}
 	if d.WithoutUrl {
 		return finalPath
 	}
@@ -121,10 +127,7 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
 	} else {
 		apiUrl = common.GetApiUrl(ctx)
 	}
-	if !strings.HasPrefix(finalPath, "/") {
-		finalPath = "/" + finalPath
-	}
-	return fmt.Sprintf("%s/d%s",
+	return fmt.Sprintf("%s%s",
 		apiUrl,
 		finalPath)
 }
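Note: getLink no longer hard-codes "/d" in the format string; the configurable PathPrefix (default "/d") is joined into the path before the URL is assembled, so the prefix also applies when WithoutUrl is set. A minimal sketch of the resulting assembly:

package main

import (
	"fmt"
	stdpath "path"
	"strings"
)

func buildLink(apiUrl, pathPrefix, finalPath string) string {
	if len(pathPrefix) > 0 {
		finalPath = stdpath.Join(pathPrefix, finalPath)
	}
	if !strings.HasPrefix(finalPath, "/") {
		finalPath = "/" + finalPath
	}
	return fmt.Sprintf("%s%s", apiUrl, finalPath)
}

func main() {
	fmt.Println(buildLink("https://pan.example.com", "/d", "movies/a.mp4"))
	// https://pan.example.com/d/movies/a.mp4
}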
@@ -88,7 +88,7 @@ func (d *Terabox) request(rurl string, method string, callback base.ReqCallback,
 		return nil, err
 	}
 	errno := utils.Json.Get(res.Body(), "errno").ToInt()
-	if errno == 4000023 || errno == 4500016 {
+	if errno == 4000023 || errno == 450016 {
 		// reget jsToken
 		err = d.resetJsToken()
 		if err != nil {
@@ -20,6 +20,7 @@ var config = driver.Config{
 	LocalSort:   true,
 	NoCache:     true,
 	CheckStatus: true,
+	OnlyIndices: true,
 }
 
 func init() {
17  go.mod
@@ -5,12 +5,15 @@ go 1.23.4
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
+	github.com/KarpelesLab/reflink v1.0.2
+	github.com/KirCute/zip v1.0.1
 	github.com/OpenListTeam/go-cache v0.1.0
 	github.com/OpenListTeam/sftpd-openlist v1.0.1
-	github.com/OpenListTeam/tache v0.2.0
+	github.com/OpenListTeam/tache v0.2.1
 	github.com/OpenListTeam/times v0.1.0
 	github.com/OpenListTeam/wopan-sdk-go v0.1.5
 	github.com/ProtonMail/go-crypto v1.3.0
+	github.com/ProtonMail/gopenpgp/v2 v2.9.0
 	github.com/SheltonZhu/115driver v1.1.1
 	github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
 	github.com/avast/retry-go v3.0.0+incompatible
@@ -40,6 +43,7 @@ require (
 	github.com/gorilla/websocket v1.5.3
 	github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499
 	github.com/hekmon/transmissionrpc/v3 v3.0.0
+	github.com/henrybear327/go-proton-api v1.0.0
 	github.com/ipfs/go-ipfs-api v0.7.0
 	github.com/itsHenry35/gofakes3 v0.0.8
 	github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
@@ -55,7 +59,6 @@ require (
 	github.com/pquerna/otp v1.5.0
 	github.com/quic-go/quic-go v0.54.1
 	github.com/rclone/rclone v1.70.3
-	github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
 	github.com/shirou/gopsutil/v4 v4.25.5
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/afero v1.14.0
@@ -66,7 +69,6 @@ require (
 	github.com/u2takey/ffmpeg-go v0.5.0
 	github.com/upyun/go-sdk/v3 v3.0.4
 	github.com/winfsp/cgofuse v1.6.0
-	github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
 	github.com/zzzhr1990/go-common-entity v0.0.0-20250202070650-1a200048f0d3
 	golang.org/x/crypto v0.40.0
 	golang.org/x/image v0.29.0
@@ -88,7 +90,6 @@ require (
 	github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
 	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
 	github.com/ProtonMail/go-srp v0.0.7 // indirect
-	github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
 	github.com/PuerkitoBio/goquery v1.10.3 // indirect
 	github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
 	github.com/andybalholm/cascadia v1.3.3 // indirect
@@ -101,7 +102,6 @@ require (
 	github.com/ebitengine/purego v0.8.4 // indirect
 	github.com/emersion/go-message v0.18.2 // indirect
 	github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
-	github.com/henrybear327/go-proton-api v1.0.0 // indirect
 	github.com/geoffgarside/ber v1.2.0 // indirect
 	github.com/hashicorp/go-uuid v1.0.3 // indirect
 	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -115,13 +115,11 @@ require (
 	github.com/minio/minlz v1.0.0 // indirect
 	github.com/minio/xxml v0.0.3 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/otiai10/mint v1.6.3 // indirect
-	github.com/quic-go/qpack v0.5.1 // indirect
-	github.com/relvacode/iso8601 v1.6.0 // indirect
-	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
-	golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
+	github.com/quic-go/qpack v0.5.1 // indirect
+	github.com/relvacode/iso8601 v1.6.0 // indirect
+	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
 	go.uber.org/mock v0.5.0 // indirect
+	golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
 	golang.org/x/mod v0.27.0 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 )
@@ -258,7 +256,6 @@ require (
 	github.com/multiformats/go-multihash v0.2.3 // indirect
 	github.com/multiformats/go-multistream v0.4.1 // indirect
 	github.com/multiformats/go-varint v0.0.7 // indirect
-	github.com/otiai10/copy v1.14.1
 	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
 	github.com/pierrec/lz4/v4 v4.1.22 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
26  go.sum
@@ -39,6 +39,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
 github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
+github.com/KarpelesLab/reflink v1.0.2 h1:hQ1aM3TmjU2kTNUx5p/HaobDoADYk+a6AuEinG4Cv88=
+github.com/KarpelesLab/reflink v1.0.2/go.mod h1:WGkTOKNjd1FsJKBw3mu4JvrPEDJyJJ+JPtxBkbPoCok=
+github.com/KirCute/zip v1.0.1 h1:L/tVZglOiDVKDi9Ud+fN49htgKdQ3Z0H80iX8OZk13c=
+github.com/KirCute/zip v1.0.1/go.mod h1:xhF7dCB+Bjvy+5a56lenYCKBsH+gxDNPZSy5Cp+nlXk=
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
 github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
@@ -49,8 +55,8 @@ github.com/OpenListTeam/gsync v0.1.0 h1:ywzGybOvA3lW8K1BUjKZ2IUlT2FSlzPO4DOazfYX
 github.com/OpenListTeam/gsync v0.1.0/go.mod h1:h/Rvv9aX/6CdW/7B8di3xK3xNV8dUg45Fehrd/ksZ9s=
 github.com/OpenListTeam/sftpd-openlist v1.0.1 h1:j4S3iPFOpnXCUKRPS7uCT4mF2VCl34GyqvH6lqwnkUU=
 github.com/OpenListTeam/sftpd-openlist v1.0.1/go.mod h1:uO/wKnbvbdq3rBLmClMTZXuCnw7XW4wlAq4dZe91a40=
-github.com/OpenListTeam/tache v0.2.0 h1:Q4MjuyECn0CZCf1ZF91JaVaZTaps1mOTAm8bFj8sr9Q=
-github.com/OpenListTeam/tache v0.2.0/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
+github.com/OpenListTeam/tache v0.2.1 h1:Uy/xAr05clHuMrr9+5fXAhv0Z5PGJivp4P5DnRez6cw=
+github.com/OpenListTeam/tache v0.2.1/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
 github.com/OpenListTeam/times v0.1.0 h1:qknxw+qj5CYKgXAwydA102UEpPcpU8TYNGRmwRyPYpg=
 github.com/OpenListTeam/times v0.1.0/go.mod h1:Jx7qen5NCYzKk2w14YuvU48YYMcPa1P9a+EJePC15Pc=
 github.com/OpenListTeam/wopan-sdk-go v0.1.5 h1:iKKcVzIqBgtGDbn0QbdWrCazSGxXFmYFyrnFBG+U8dI=
@@ -390,8 +396,6 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
 github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
 github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
 github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261 h1:47L8SHM80cXszQydLrpp9MhVkFLLWCvrU9XmJ6XtRu0=
-github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
 github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499 h1:4ovnBdiGDFi8putQGxhipuuhXItAgh4/YnzufPYkZkQ=
 github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -419,8 +423,6 @@ github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0
 github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
 github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
 github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
-github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
-github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -585,10 +587,6 @@ github.com/ncw/swift/v2 v2.0.4 h1:hHWVFxn5/YaTWAASmn4qyq2p6OyP/Hm3vMLzkjEqR7w=
 github.com/ncw/swift/v2 v2.0.4/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
 github.com/nwaples/rardecode/v2 v2.1.1 h1:OJaYalXdliBUXPmC8CZGQ7oZDxzX1/5mQmgn0/GASew=
 github.com/nwaples/rardecode/v2 v2.1.1/go.mod h1:7uz379lSxPe6j9nvzxUZ+n7mnJNgjsRNb6IbvGVHRmw=
github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
|
|
||||||
github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
|
|
||||||
github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
|
|
||||||
github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
|
|
||||||
github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
|
github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||||
@@ -641,8 +639,6 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
|||||||
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
|
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
|
||||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
|
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
|
||||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
|
|
||||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
|
|
||||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
|
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
|
||||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
|
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
|
||||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
|
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
|
||||||
@@ -715,8 +711,6 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM
|
|||||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||||
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M=
|
|
||||||
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI=
|
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
|
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
|
||||||
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
|
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
|
||||||
@@ -744,6 +738,8 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J
|
|||||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||||
|
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||||
|
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||||
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
|
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
|
||||||
@@ -758,8 +754,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
|
|||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
|
||||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||||
|
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"regexp"
 	"strings"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -21,7 +22,7 @@ func (RarDecoder) AcceptedExtensions() []string {
 
 func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
 	return map[string]tool.MultipartExtension{
-		".part1.rar": {".part%d.rar", 2},
+		".part1.rar": {PartFileFormat: regexp.MustCompile(`^.*\.part(\d+)\.rar$`), SecondPartIndex: 2},
 	}
 }

@@ -2,6 +2,7 @@ package sevenzip
 
 import (
 	"io"
+	"regexp"
 	"strings"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -18,7 +19,7 @@ func (SevenZip) AcceptedExtensions() []string {
 
 func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
	return map[string]tool.MultipartExtension{
-		".7z.001": {".7z.%.3d", 2},
+		".7z.001": {PartFileFormat: regexp.MustCompile(`^.*\.7z\.(\d+)$`), SecondPartIndex: 2},
 	}
 }

@@ -2,13 +2,14 @@ package tool
 
 import (
 	"io"
+	"regexp"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 )
 
 type MultipartExtension struct {
-	PartFileFormat  string
+	PartFileFormat  *regexp.Regexp
 	SecondPartIndex int
 }

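The tool package now describes a multipart family with a compiled pattern instead of a printf template, so part numbers of any width match and the part index can be recovered from the first capture group. A minimal standalone sketch of that matching (the struct mirrors the new tool.MultipartExtension; names and inputs are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Mirrors the new tool.MultipartExtension shape: a pattern whose first
// capture group is the part number, plus the number carried by the part
// that follows the first volume.
type multipartExtension struct {
	partFileFormat  *regexp.Regexp
	secondPartIndex int
}

func main() {
	ext := multipartExtension{
		partFileFormat:  regexp.MustCompile(`^.*\.part(\d+)\.rar$`),
		secondPartIndex: 2,
	}
	for _, name := range []string{"a.part2.rar", "a.part10.rar", "a.txt"} {
		m := ext.partFileFormat.FindStringSubmatch(name)
		if m == nil {
			fmt.Printf("%s: not a part file\n", name)
			continue
		}
		n, _ := strconv.Atoi(m[1])
		// Same normalization the new op/archive code applies further below.
		fmt.Printf("%s: slot %d\n", name, n-ext.secondPartIndex+1)
	}
}
```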
@@ -4,22 +4,15 @@ import (
 	"bytes"
 	"io"
 	"io/fs"
-	stdpath "path"
 	"strings"
 
+	"github.com/KirCute/zip"
 	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
+	"github.com/OpenListTeam/OpenList/v4/internal/setting"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
-	"github.com/saintfish/chardet"
-	"github.com/yeka/zip"
-	"golang.org/x/text/encoding"
-	"golang.org/x/text/encoding/charmap"
-	"golang.org/x/text/encoding/japanese"
-	"golang.org/x/text/encoding/korean"
-	"golang.org/x/text/encoding/simplifiedchinese"
-	"golang.org/x/text/encoding/traditionalchinese"
-	"golang.org/x/text/encoding/unicode"
-	"golang.org/x/text/encoding/unicode/utf32"
+	"golang.org/x/text/encoding/ianaindex"
 	"golang.org/x/text/transform"
 )
 
@@ -37,10 +30,11 @@ func (r *WrapReader) Files() []tool.SubFile {
 
 type WrapFileInfo struct {
 	fs.FileInfo
+	efs bool
 }
 
 func (f *WrapFileInfo) Name() string {
-	return decodeName(f.FileInfo.Name())
+	return decodeName(f.FileInfo.Name(), f.efs)
 }
 
 type WrapFile struct {
@@ -48,11 +42,11 @@ type WrapFile struct {
 }
 
 func (f *WrapFile) Name() string {
-	return decodeName(f.f.Name)
+	return decodeName(f.f.Name, isEFS(f.f.Flags))
 }
 
 func (f *WrapFile) FileInfo() fs.FileInfo {
-	return &WrapFileInfo{FileInfo: f.f.FileInfo()}
+	return &WrapFileInfo{FileInfo: f.f.FileInfo(), efs: isEFS(f.f.Flags)}
 }
 
 func (f *WrapFile) Open() (io.ReadCloser, error) {
@@ -67,17 +61,34 @@ func (f *WrapFile) SetPassword(password string) {
 	f.f.SetPassword(password)
 }
 
-func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
-	if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
-		// FIXME: Incorrect parsing method for standard multipart zip format
-		ss = append(ss[1:], ss[0])
+func makePart(ss *stream.SeekableStream) (zip.SizeReaderAt, error) {
+	ra, err := stream.NewReadAtSeeker(ss, 0)
+	if err != nil {
+		return nil, err
 	}
+	return &inlineSizeReaderAt{ReaderAt: ra, size: ss.GetSize()}, nil
+}
+
+func (z *Zip) getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
+	if len(ss) > 1 && z.traditionalSecondPartRegExp.MatchString(ss[1].GetName()) {
+		ss = append(ss[1:], ss[0])
+		ras := make([]zip.SizeReaderAt, 0, len(ss))
+		for _, s := range ss {
+			ra, err := makePart(s)
+			if err != nil {
+				return nil, err
+			}
+			ras = append(ras, ra)
+		}
+		return zip.NewMultipartReader(ras)
+	} else {
 		reader, err := stream.NewMultiReaderAt(ss)
 		if err != nil {
 			return nil, err
 		}
 		return zip.NewReader(reader, reader.Size())
 	}
+}
 
 func filterPassword(err error) error {
 	if err != nil && strings.Contains(err.Error(), "password") {
@@ -86,110 +97,29 @@ func filterPassword(err error) error {
 	return err
 }
 
-func decodeName(name string) string {
-	b := []byte(name)
-	detector := chardet.NewTextDetector()
-	results, err := detector.DetectAll(b)
+func decodeName(name string, efs bool) string {
+	if efs {
+		return name
+	}
+	enc, err := ianaindex.IANA.Encoding(setting.GetStr(conf.NonEFSZipEncoding))
 	if err != nil {
 		return name
 	}
-	var ce, re, enc encoding.Encoding
-	for _, r := range results {
-		if r.Confidence > 30 {
-			ce = getCommonEncoding(r.Charset)
-			if ce != nil {
-				break
-			}
-		}
-		if re == nil {
-			re = getEncoding(r.Charset)
-		}
-	}
-	if ce != nil {
-		enc = ce
-	} else if re != nil {
-		enc = re
-	} else {
-		return name
-	}
-	i := bytes.NewReader(b)
+	i := bytes.NewReader([]byte(name))
 	decoder := transform.NewReader(i, enc.NewDecoder())
 	content, _ := io.ReadAll(decoder)
 	return string(content)
 }
 
-func getCommonEncoding(name string) (enc encoding.Encoding) {
-	switch name {
-	case "UTF-8":
-		enc = unicode.UTF8
-	case "UTF-16LE":
-		enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
-	case "Shift_JIS":
-		enc = japanese.ShiftJIS
-	case "GB-18030":
-		enc = simplifiedchinese.GB18030
-	case "EUC-KR":
-		enc = korean.EUCKR
-	case "Big5":
-		enc = traditionalchinese.Big5
-	default:
-		enc = nil
-	}
-	return
+func isEFS(flags uint16) bool {
+	return (flags & 0x800) > 0
 }
 
-func getEncoding(name string) (enc encoding.Encoding) {
-	switch name {
-	case "UTF-8":
-		enc = unicode.UTF8
-	case "UTF-16BE":
-		enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
-	case "UTF-16LE":
-		enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
-	case "UTF-32BE":
-		enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM)
-	case "UTF-32LE":
-		enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM)
-	case "ISO-8859-1":
-		enc = charmap.ISO8859_1
-	case "ISO-8859-2":
-		enc = charmap.ISO8859_2
-	case "ISO-8859-3":
-		enc = charmap.ISO8859_3
-	case "ISO-8859-4":
-		enc = charmap.ISO8859_4
-	case "ISO-8859-5":
-		enc = charmap.ISO8859_5
-	case "ISO-8859-6":
-		enc = charmap.ISO8859_6
-	case "ISO-8859-7":
-		enc = charmap.ISO8859_7
-	case "ISO-8859-8":
-		enc = charmap.ISO8859_8
-	case "ISO-8859-8-I":
-		enc = charmap.ISO8859_8I
-	case "ISO-8859-9":
-		enc = charmap.ISO8859_9
-	case "windows-1251":
-		enc = charmap.Windows1251
-	case "windows-1256":
-		enc = charmap.Windows1256
-	case "KOI8-R":
-		enc = charmap.KOI8R
-	case "Shift_JIS":
-		enc = japanese.ShiftJIS
-	case "GB-18030":
-		enc = simplifiedchinese.GB18030
-	case "EUC-JP":
-		enc = japanese.EUCJP
-	case "EUC-KR":
-		enc = korean.EUCKR
-	case "Big5":
-		enc = traditionalchinese.Big5
-	case "ISO-2022-JP":
-		enc = japanese.ISO2022JP
-	default:
-		enc = nil
-	}
-	return
+type inlineSizeReaderAt struct {
+	io.ReaderAt
+	size int64
+}
+
+func (i *inlineSizeReaderAt) Size() int64 {
+	return i.size
 }

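For ZIP entries, general-purpose flag bit 11 (0x800, the EFS flag) declares the name and comment to be UTF-8; when it is unset, the raw name bytes are now decoded with the code page configured under NonEFSZipEncoding, resolved through the IANA registry, instead of statistical charset detection. A self-contained sketch of the same decode path (the CP437 byte string is a made-up example):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/text/encoding/ianaindex"
	"golang.org/x/text/transform"
)

// isEFS reports whether ZIP general-purpose flag bit 11 (0x800) is set,
// which marks the entry name and comment as UTF-8.
func isEFS(flags uint16) bool { return flags&0x800 != 0 }

// decodeLegacyName decodes raw name bytes with an IANA-named code page,
// e.g. "IBM437" (the classic PKZIP default) or "GB18030".
func decodeLegacyName(raw []byte, codePage string) (string, error) {
	enc, err := ianaindex.IANA.Encoding(codePage)
	if err != nil {
		return "", err
	}
	if enc == nil {
		return "", fmt.Errorf("unsupported code page %q", codePage)
	}
	r := transform.NewReader(bytes.NewReader(raw), enc.NewDecoder())
	out, err := io.ReadAll(r)
	return string(out), err
}

func main() {
	// "héllo" encoded in code page 437 uses 0x82 for é.
	name, err := decodeLegacyName([]byte{'h', 0x82, 'l', 'l', 'o'}, "IBM437")
	fmt.Println(name, err)
}
```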
@@ -3,6 +3,7 @@ package zip
 import (
 	"io"
 	stdpath "path"
+	"regexp"
 	"strings"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
@@ -12,34 +13,39 @@ import (
 )
 
 type Zip struct {
+	traditionalSecondPartRegExp *regexp.Regexp
 }
 
-func (Zip) AcceptedExtensions() []string {
+func (z *Zip) AcceptedExtensions() []string {
 	return []string{}
 }
 
-func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+func (z *Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
 	return map[string]tool.MultipartExtension{
-		".zip":     {".z%.2d", 1},
-		".zip.001": {".zip.%.3d", 2},
+		".zip":     {PartFileFormat: regexp.MustCompile(`^.*\.z(\d+)$`), SecondPartIndex: 1},
+		".zip.001": {PartFileFormat: regexp.MustCompile(`^.*\.zip\.(\d+)$`), SecondPartIndex: 2},
 	}
 }
 
-func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
-	zipReader, err := getReader(ss)
+func (z *Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+	zipReader, err := z.getReader(ss)
 	if err != nil {
 		return nil, err
 	}
+	efs := true
+	if len(zipReader.File) > 0 {
+		efs = isEFS(zipReader.File[0].Flags)
+	}
 	encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
 	return &model.ArchiveMetaInfo{
-		Comment:   zipReader.Comment,
+		Comment:   decodeName(zipReader.Comment, efs),
 		Encrypted: encrypted,
 		Tree:      tree,
 	}, nil
 }
 
-func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
-	zipReader, err := getReader(ss)
+func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+	zipReader, err := z.getReader(ss)
 	if err != nil {
 		return nil, err
 	}
@@ -57,7 +63,7 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
 			_ = rc.Close()
 			passVerified = true
 		}
-		name := strings.TrimSuffix(decodeName(file.Name), "/")
+		name := strings.TrimSuffix(decodeName(file.Name, isEFS(file.Flags)), "/")
 		if strings.Contains(name, "/") {
 			// some archives do not compress the first folder
 			strs := strings.Split(name, "/")
@@ -70,7 +76,7 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
 			}
 			continue
 		}
-		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
+		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo(), efs: isEFS(file.Flags)}))
 	}
 	if len(ret) == 0 && dir != nil {
 		ret = append(ret, dir)
@@ -81,13 +87,13 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
 	ret := make([]model.Obj, 0)
 	exist := false
 	for _, file := range zipReader.File {
-		name := decodeName(file.Name)
+		name := decodeName(file.Name, isEFS(file.Flags))
 		dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
 		if dir != innerPath {
 			continue
 		}
 		exist = true
-		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
+		ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo(), isEFS(file.Flags)}))
 	}
 	if !exist {
 		return nil, errs.ObjectNotFound
@@ -96,14 +102,14 @@ func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]mod
 	}
 }
 
-func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
-	zipReader, err := getReader(ss)
+func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+	zipReader, err := z.getReader(ss)
 	if err != nil {
 		return nil, 0, err
 	}
 	innerPath := strings.TrimPrefix(args.InnerPath, "/")
 	for _, file := range zipReader.File {
-		if decodeName(file.Name) == innerPath {
+		if decodeName(file.Name, isEFS(file.Flags)) == innerPath {
 			if file.IsEncrypted() {
 				file.SetPassword(args.Password)
 			}
@@ -117,8 +123,8 @@ func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io
 	return nil, 0, errs.ObjectNotFound
 }
 
-func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
-	zipReader, err := getReader(ss)
+func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+	zipReader, err := z.getReader(ss)
 	if err != nil {
 		return err
 	}
@@ -128,5 +134,7 @@ func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model
 var _ tool.Tool = (*Zip)(nil)
 
 func init() {
-	tool.RegisterTool(Zip{})
+	tool.RegisterTool(&Zip{
+		traditionalSecondPartRegExp: regexp.MustCompile(`^.*\.z0*1$`),
+	})
 }

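A quick check of the traditionalSecondPartRegExp behavior: it recognizes the second stream of a traditional split archive (.z01, .z001, ...), which is what tells getReader to rotate the .zip part, the one carrying the central directory, to the end before stitching the volumes together. Illustrative file names:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// First volume of a "traditional" split zip: a.z01, a.z001, ...
	traditional := regexp.MustCompile(`^.*\.z0*1$`)
	for _, name := range []string{"backup.z01", "backup.z001", "backup.z02", "backup.zip.001"} {
		fmt.Println(name, traditional.MatchString(name)) // true, true, false, false
	}
}
```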
@@ -154,6 +154,7 @@ func InitialSettings() []model.SettingItem {
 		{Key: conf.SharePreviewArchivesByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
 		{Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
 		{Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
+		{Key: conf.NonEFSZipEncoding, Value: "IBM437", Type: conf.TypeString, Group: model.PREVIEW},
 		// global settings
 		{Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL},
 		{Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL},
@@ -176,6 +177,9 @@ func InitialSettings() []model.SettingItem {
 		{Key: conf.ShareArchivePreview, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
 		{Key: conf.ShareForceProxy, Value: "true", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
 		{Key: conf.ShareSummaryContent, Value: "@{{creator}} shared {{#each files}}{{#if @first}}\"{{filename this}}\"{{/if}}{{#if @last}}{{#unless (eq @index 0)}} and {{@index}} more files{{/unless}}{{/if}}{{/each}} from {{site_title}}: {{base_url}}/@s/{{id}}{{#if pwd}} , the share code is {{pwd}}{{/if}}{{#if expires}}, please access before {{dateLocaleString expires}}.{{/if}}", Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PUBLIC},
+		{Key: conf.HandleHookAfterWriting, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
+		{Key: conf.HandleHookRateLimit, Value: "0", Type: conf.TypeNumber, Group: model.GLOBAL, Flag: model.PRIVATE},
+		{Key: conf.IgnoreSystemFiles, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE, Help: `When enabled, ignores common system files during upload (.DS_Store, desktop.ini, Thumbs.db, and files starting with ._)`},
 		// single settings
 		{Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE},

@@ -38,6 +38,7 @@ const (
 	SharePreviewArchivesByDefault = "share_preview_archives_by_default"
 	ReadMeAutoRender              = "readme_autorender"
 	FilterReadMeScripts           = "filter_readme_scripts"
+	NonEFSZipEncoding             = "non_efs_zip_encoding"
 
 	// global
 	HideFiles = "hide_files"
@@ -55,6 +56,9 @@ const (
 	ShareArchivePreview = "share_archive_preview"
 	ShareForceProxy     = "share_force_proxy"
 	ShareSummaryContent = "share_summary_content"
+	HandleHookAfterWriting = "handle_hook_after_writing"
+	HandleHookRateLimit    = "handle_hook_rate_limit"
+	IgnoreSystemFiles      = "ignore_system_files"
 
 	// index
 	SearchIndex = "search_index"

@@ -38,6 +38,7 @@ func GetSharingsByCreatorId(creator uint, pageIndex, pageSize int) (sharings []m
 }
 
 func CreateSharing(s *model.SharingDB) (string, error) {
+	if s.ID == "" {
 		id := random.String(8)
 		for len(id) < 12 {
 			old := model.SharingDB{
@@ -50,6 +51,13 @@ func CreateSharing(s *model.SharingDB) (string, error) {
 			id += random.String(1)
 		}
 		return "", errors.New("failed find valid id")
+	} else {
+		query := model.SharingDB{ID: s.ID}
+		if err := db.Where(query).First(&query).Error; err == nil {
+			return "", errors.New("sharing already exist")
+		}
+		return s.ID, errors.WithStack(db.Create(s).Error)
+	}
 }
 
 func UpdateSharing(s *model.SharingDB) error {

@@ -22,6 +22,8 @@ type Config struct {
 	// - LinkCacheNone: no extra info added to cache key (default)
 	// - flags (OR-able) can add more attributes to cache key (IP, UA, ...)
 	LinkCacheMode `json:"-"`
+	// if the driver only store indices of files (e.g. UrlTree)
+	OnlyIndices bool `json:"only_indices"`
 }
 type LinkCacheMode int8

@@ -218,3 +218,12 @@ type LinkCacheModeResolver interface {
 	// ResolveLinkCacheMode returns the LinkCacheMode for the given path.
 	ResolveLinkCacheMode(path string) LinkCacheMode
 }
+
+type DirectUploader interface {
+	// GetDirectUploadTools returns available frontend-direct upload tools
+	GetDirectUploadTools() []string
+	// GetDirectUploadInfo returns the information needed for direct upload from client to storage
+	// actualPath is the path relative to the storage root (after removing mount path prefix)
+	// return errs.NotImplement if the driver does not support the given direct upload tool
+	GetDirectUploadInfo(ctx context.Context, tool string, dstDir model.Obj, fileName string, fileSize int64) (any, error)
+}

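A hypothetical driver implementing the new interface might look like the sketch below; the "HttpDirect" tool name and the endpoint field are invented for illustration, and the returned value uses the HttpDirectUploadInfo struct added later in this changeset:

```go
package exampledriver

import (
	"context"

	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
)

// ExampleDriver is illustrative only; it is not part of this changeset.
type ExampleDriver struct {
	endpoint string
}

func (d *ExampleDriver) GetDirectUploadTools() []string {
	return []string{"HttpDirect"}
}

func (d *ExampleDriver) GetDirectUploadInfo(ctx context.Context, tool string, dstDir model.Obj, fileName string, fileSize int64) (any, error) {
	if tool != "HttpDirect" {
		// contract from the interface comment: unsupported tools get errs.NotImplement
		return nil, errs.NotImplement
	}
	return &model.HttpDirectUploadInfo{
		UploadURL: d.endpoint + "/upload/" + fileName,
		ChunkSize: 0, // single-request upload
		Method:    "PUT",
	}, nil
}
```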
@@ -8,8 +8,10 @@ import (
 
 var (
 	ObjectNotFound      = errors.New("object not found")
+	ObjectAlreadyExists = errors.New("object already exists")
 	NotFolder           = errors.New("not a folder")
 	NotFile             = errors.New("not a file")
+	IgnoredSystemFile   = errors.New("system file upload ignored")
 )
 
 func IsObjectNotFound(err error) bool {

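Callers can branch on the new sentinels with errors.Is, just like the existing ones; doUpload below is a placeholder:

```go
// doUpload stands in for any operation that may hit the new sentinels.
if err := doUpload(); errors.Is(err, errs.ObjectAlreadyExists) {
	// surface as a user-facing conflict instead of a generic server error
}
```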
@@ -125,6 +125,7 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
 		DstActualPath: t.DstActualPath,
 		dstStorage:    t.DstStorage,
 		DstStorageMp:  t.DstStorageMp,
+		overwrite:     t.Overwrite,
 	}
 	return uploadTask, nil
 }
@@ -142,6 +143,7 @@ type ArchiveContentUploadTask struct {
 	DstStorageMp string
 	finalized    bool
 	groupID      string
+	overwrite    bool
 }
 
 func (t *ArchiveContentUploadTask) GetName() string {
@@ -232,6 +234,7 @@ func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *Arch
 				dstStorage:    t.dstStorage,
 				DstStorageMp:  t.DstStorageMp,
 				groupID:       t.groupID,
+				overwrite:     t.overwrite,
 			})
 			if err != nil {
 				es = stderrors.Join(es, err)
@@ -241,6 +244,12 @@ func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTask *Arch
 			return es
 		}
 	} else {
+		if !t.overwrite {
+			dstPath := stdpath.Join(t.DstActualPath, t.ObjName)
+			if res, _ := op.Get(t.Ctx(), t.dstStorage, dstPath); res != nil {
+				return errs.ObjectAlreadyExists
+			}
+		}
 		file, err := os.Open(t.FilePath)
 		if err != nil {
 			return err

@@ -24,14 +24,17 @@ type taskType uint8
 func (t taskType) String() string {
 	if t == 0 {
 		return "copy"
-	} else {
+	} else if t == 1 {
 		return "move"
+	} else {
+		return "merge"
 	}
 }
 
 const (
 	copy taskType = iota
 	move
+	merge
 )
 
 type FileTransferTask struct {
@@ -67,7 +70,7 @@ func (t *FileTransferTask) Run() error {
 	return t.RunWithNextTaskCallback(func(nextTask *FileTransferTask) error {
 		nextTask.groupID = t.groupID
 		task_group.TransferCoordinator.AddTask(t.groupID, nil)
-		if t.TaskType == copy {
+		if t.TaskType == copy || t.TaskType == merge {
 			CopyTaskManager.Add(nextTask)
 		} else {
 			MoveTaskManager.Add(nextTask)
@@ -109,7 +112,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
 	}
 
 	if srcStorage.GetStorage() == dstStorage.GetStorage() {
-		if taskType == copy {
+		if taskType == copy || taskType == merge {
 			err = op.Copy(ctx, srcStorage, srcObjActualPath, dstDirActualPath, lazyCache...)
 			if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.NotSupport) {
 				return nil, err
@@ -161,7 +164,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
 	t.Creator, _ = ctx.Value(conf.UserKey).(*model.User)
 	t.ApiUrl = common.GetApiUrl(ctx)
 	t.groupID = dstDirPath
-	if taskType == copy {
+	if taskType == copy || taskType == merge {
 		task_group.TransferCoordinator.AddTask(dstDirPath, nil)
 		CopyTaskManager.Add(t)
 	} else {
@@ -177,6 +180,7 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
 	if err != nil {
 		return errors.WithMessagef(err, "failed get src [%s] file", t.SrcActualPath)
 	}
+
 	if srcObj.IsDir() {
 		t.Status = "src object is dir, listing objs"
 		objs, err := op.List(t.Ctx(), t.SrcStorage, t.SrcActualPath, model.ListArgs{})
@@ -184,17 +188,34 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
 			return errors.WithMessagef(err, "failed list src [%s] objs", t.SrcActualPath)
 		}
 		dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
-		if t.TaskType == copy {
+		if t.TaskType == copy || t.TaskType == merge {
 			if t.Ctx().Value(conf.NoTaskKey) != nil {
 				defer op.Cache.DeleteDirectory(t.DstStorage, dstActualPath)
 			} else {
 				task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath))
 			}
 		}
+
+		existedObjs := make(map[string]bool)
+		if t.TaskType == merge {
+			dstObjs, _ := op.List(t.Ctx(), t.DstStorage, dstActualPath, model.ListArgs{})
+			for _, obj := range dstObjs {
+				if !obj.IsDir() {
+					existedObjs[obj.GetName()] = true
+				}
+			}
+		}
+
 		for _, obj := range objs {
 			if utils.IsCanceled(t.Ctx()) {
 				return nil
 			}
+
+			if t.TaskType == merge && !obj.IsDir() && existedObjs[obj.GetName()] {
+				// skip existed file
+				continue
+			}
+
 			err = f(&FileTransferTask{
 				TaskType: t.TaskType,
 				TaskData: TaskData{

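The merge path lists the destination directory once, records the names of existing files, and skips colliding source files while still descending into directories. The skip-set idea in isolation (pure sketch, no OpenList types):

```go
package main

import "fmt"

// mergeNames returns the source names that still need transferring,
// given the names already present at the destination.
func mergeNames(src, dst []string) []string {
	existed := make(map[string]bool, len(dst))
	for _, name := range dst {
		existed[name] = true
	}
	var toTransfer []string
	for _, name := range src {
		if existed[name] {
			continue // skip existed file
		}
		toTransfer = append(toTransfer, name)
	}
	return toTransfer
}

func main() {
	fmt.Println(mergeNames([]string{"a.txt", "b.txt"}, []string{"b.txt"})) // [a.txt]
}
```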
@@ -84,6 +84,14 @@ func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool)
 	return res, err
 }
 
+func Merge(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskExtensionInfo, error) {
+	res, err := transfer(ctx, merge, srcObjPath, dstDirPath, lazyCache...)
+	if err != nil {
+		log.Errorf("failed merge %s to %s: %+v", srcObjPath, dstDirPath, err)
+	}
+	return res, err
+}
+
 func Rename(ctx context.Context, srcPath, dstName string, lazyCache ...bool) error {
 	err := rename(ctx, srcPath, dstName, lazyCache...)
 	if err != nil {
@@ -167,6 +175,14 @@ func GetStorage(path string, args *GetStoragesArgs) (driver.Driver, error) {
 	return storageDriver, nil
 }
 
+func GetStorageAndActualPath(path string) (driver.Driver, string, error) {
+	return op.GetStorageAndActualPath(path)
+}
+
+func GetByActualPath(ctx context.Context, storage driver.Driver, actualPath string) (model.Obj, error) {
+	return op.Get(ctx, storage, actualPath)
+}
+
 func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
 	res, err := other(ctx, args)
 	if err != nil {
@@ -190,3 +206,11 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error {
 	}
 	return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
 }
+
+func GetDirectUploadInfo(ctx context.Context, tool, path, dstName string, fileSize int64) (any, error) {
+	info, err := getDirectUploadInfo(ctx, tool, path, dstName, fileSize)
+	if err != nil {
+		log.Errorf("failed get %s direct upload info for %s(%d bytes): %+v", path, dstName, fileSize, err)
+	}
+	return info, err
+}

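Assuming the usual fs package import, the new entry point is called like Copy; a sketch:

```go
// ctx should carry the requesting user, exactly as for fs.Copy.
// Merge behaves like Copy but keeps files already present at the
// destination, so re-running it fills in only the missing entries.
info, err := fs.Merge(ctx, "/storage-a/photos", "/storage-b/photos")
if err != nil {
	return err
}
_ = info // task handle, same shape Copy and Move return
```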
@@ -105,3 +105,11 @@ func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer
 	}
 	return op.Put(ctx, storage, dstDirActualPath, file, nil, lazyCache...)
 }
+
+func getDirectUploadInfo(ctx context.Context, tool, dstDirPath, dstName string, fileSize int64) (any, error) {
+	storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
+	if err != nil {
+		return nil, errors.WithMessage(err, "failed get storage")
+	}
+	return op.GetDirectUploadInfo(ctx, tool, storage, dstDirActualPath, dstName, fileSize)
+}

@@ -34,7 +34,7 @@ type Link struct {
 	//for accelerating request, use multi-thread downloading
 	Concurrency   int   `json:"concurrency"`
 	PartSize      int   `json:"part_size"`
-	ContentLength int64 `json:"-"` // transcoded video, thumbnails
+	ContentLength int64 `json:"content_length"` // transcoded video, thumbnails
 
 	utils.SyncClosers `json:"-"`
 	// set to true if the Link becomes unusable once the resources in SyncClosers are closed
@@ -77,6 +77,7 @@ type ArchiveDecompressArgs struct {
 	ArchiveInnerArgs
 	CacheFull     bool
 	PutIntoNewDir bool
+	Overwrite     bool
 }
 
 type SharingListArgs struct {

internal/model/direct_upload.go (new file, +8)
@@ -0,0 +1,8 @@
+package model
+
+type HttpDirectUploadInfo struct {
+	UploadURL string            `json:"upload_url"`        // The URL to upload the file
+	ChunkSize int64             `json:"chunk_size"`        // The chunk size for uploading, 0 means no chunking required
+	Headers   map[string]string `json:"headers,omitempty"` // Optional headers to include in the upload request
+	Method    string            `json:"method,omitempty"`  // HTTP method, default is PUT
+}

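A client that receives this struct can perform the upload itself; a minimal sketch for the unchunked case (ChunkSize == 0), assuming nothing beyond the fields above:

```go
package client

import (
	"fmt"
	"io"
	"net/http"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
)

// DirectUpload performs the transfer described by an HttpDirectUploadInfo.
// Chunked uploads (ChunkSize > 0) need tool-specific handling not shown here.
func DirectUpload(info *model.HttpDirectUploadInfo, body io.Reader, size int64) error {
	method := info.Method
	if method == "" {
		method = "PUT" // documented default
	}
	req, err := http.NewRequest(method, info.UploadURL, body)
	if err != nil {
		return err
	}
	req.ContentLength = size
	for k, v := range info.Headers {
		req.Header.Set(k, v)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("direct upload failed: %s", resp.Status)
	}
	return nil
}
```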
@@ -27,6 +27,9 @@ func (f *FileCloser) Close() error {
 	return errors.Join(errs...)
 }
 
+// FileRangeReader is a thin wrapper over RangeReaderIF, indicating that the
+// io.ReadCloser returned by RangeReaderIF.RangeRead also implements model.File
+// (i.e. supports Read/ReadAt/Seek). Use it only when that holds; otherwise use RangeReaderIF directly.
 type FileRangeReader struct {
 	RangeReaderIF
 }

@@ -48,7 +48,6 @@ type FileStreamer interface {
 	// for a non-seekable Stream, if Read is called, this function won't work.
 	// caches the full Stream and writes it to writer (if provided, even if the stream is already cached).
 	CacheFullAndWriter(up *UpdateProgress, writer io.Writer) (File, error)
-	SetTmpFile(file File)
 	// if the Stream is not a File and is not cached, returns nil.
 	GetFile() File
 }

@@ -1,9 +1,7 @@
 package net
 
 import (
-	"fmt"
 	"io"
-	"math"
 	"mime/multipart"
 	"net/http"
 	"net/textproto"
@@ -13,6 +11,7 @@ import (
 
 	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+	"github.com/rclone/rclone/lib/readers"
 
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/go-resty/resty/v2"
@@ -308,39 +307,9 @@ func rangesMIMESize(ranges []http_range.Range, contentType string, contentSize i
 	return encSize, nil
 }
 
-// LimitedReadCloser wraps a io.ReadCloser and limits the number of bytes that can be read from it.
-type LimitedReadCloser struct {
-	rc        io.ReadCloser
-	remaining int
-}
-
-func (l *LimitedReadCloser) Read(buf []byte) (int, error) {
-	if l.remaining <= 0 {
-		return 0, io.EOF
-	}
-
-	if len(buf) > l.remaining {
-		buf = buf[0:l.remaining]
-	}
-
-	n, err := l.rc.Read(buf)
-	l.remaining -= n
-
-	return n, err
-}
-
-func (l *LimitedReadCloser) Close() error {
-	return l.rc.Close()
-}
-
 // GetRangedHttpReader some http server doesn't support "Range" header,
 // so this function read readCloser with whole data, skip offset, then return ReaderCloser.
 func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.ReadCloser, error) {
-	var length_int int
-	if length > math.MaxInt {
-		return nil, fmt.Errorf("doesnot support length bigger than int32 max ")
-	}
-	length_int = int(length)
-
 	if offset > 100*1024*1024 {
 		log.Warnf("offset is more than 100MB, if loading data from internet, high-latency and wasting of bandwidth is expected")
@@ -351,7 +320,7 @@ func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.Rea
 	}
 
 	// return an io.ReadCloser that is limited to `length` bytes.
-	return &LimitedReadCloser{readCloser, length_int}, nil
+	return readers.NewLimitedReadCloser(readCloser, length), nil
 }
 
 // SetProxyIfConfigured sets proxy for HTTP Transport if configured

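The rclone helper takes an int64 limit, which is what makes the local int-based wrapper and its math.MaxInt guard unnecessary. Usage, assuming the helper keeps its current signature (negative limits return the reader unchanged):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/rclone/rclone/lib/readers"
)

func main() {
	rc := io.NopCloser(strings.NewReader("hello world"))
	// At most 5 bytes are readable; Close is forwarded to the wrapped reader.
	lrc := readers.NewLimitedReadCloser(rc, 5)
	defer lrc.Close()
	b, _ := io.ReadAll(lrc)
	fmt.Printf("%q\n", b) // "hello"
}
```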
@@ -3,28 +3,31 @@ package op
 import (
 	"context"
 	stderrors "errors"
-	"fmt"
 	"io"
 	stdpath "path"
+	"strconv"
 	"strings"
 	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
 	"github.com/OpenListTeam/OpenList/v4/internal/cache"
-	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	gocache "github.com/OpenListTeam/go-cache"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
+	"golang.org/x/time/rate"
 )
 
-var archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
-var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
+var (
+	archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
+	archiveMetaG     singleflight.Group[*model.ArchiveMetaProvider]
+)
 
 func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {

@@ -61,20 +64,25 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
 	if err != nil {
 		return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
 	}
-	baseName, ext, found := strings.Cut(obj.GetName(), ".")
-	if !found {
-		_ = l.Close()
-		return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
-	}
-	partExt, t, err := tool.GetArchiveTool("." + ext)
-	if err != nil {
-		var e error
-		partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
-		if e != nil {
-			_ = l.Close()
-			return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
-		}
-	}
+	// Get archive tool
+	var partExt *tool.MultipartExtension
+	var t tool.Tool
+	ext := obj.GetName()
+	for {
+		var found bool
+		_, ext, found = strings.Cut(ext, ".")
+		if !found {
+			_ = l.Close()
+			return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
+		}
+		partExt, t, err = tool.GetArchiveTool("." + ext)
+		if err == nil {
+			break
+		}
+	}
+
+	// Get first part stream
 	ss, err := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: obj}, l)
 	if err != nil {
 		_ = l.Close()

@@ -83,29 +91,62 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
|
|||||||
ret := []*stream.SeekableStream{ss}
|
ret := []*stream.SeekableStream{ss}
|
||||||
if partExt == nil {
|
if partExt == nil {
|
||||||
return obj, t, ret, nil
|
return obj, t, ret, nil
|
||||||
} else {
|
}
|
||||||
index := partExt.SecondPartIndex
|
|
||||||
|
// Merge multi-part archive
|
||||||
dir := stdpath.Dir(path)
|
dir := stdpath.Dir(path)
|
||||||
for {
|
objs, err := List(ctx, storage, dir, model.ListArgs{})
|
||||||
p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
|
|
||||||
var o model.Obj
|
|
||||||
l, o, err = Link(ctx, storage, p, args)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
|
||||||
}
|
|
||||||
ss, err = stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o}, l)
|
|
||||||
if err != nil {
|
|
||||||
_ = l.Close()
|
|
||||||
for _, s := range ret {
|
|
||||||
_ = s.Close()
|
|
||||||
}
|
|
||||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
|
|
||||||
}
|
|
||||||
ret = append(ret, ss)
|
|
||||||
index++
|
|
||||||
}
|
|
||||||
return obj, t, ret, nil
|
return obj, t, ret, nil
|
||||||
}
|
}
|
||||||
|
for _, o := range objs {
|
||||||
|
submatch := partExt.PartFileFormat.FindStringSubmatch(o.GetName())
|
||||||
|
if submatch == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
partIdx, e := strconv.Atoi(submatch[1])
|
||||||
|
if e != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
partIdx = partIdx - partExt.SecondPartIndex + 1
|
||||||
|
if partIdx < 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := stdpath.Join(dir, o.GetName())
|
||||||
|
l1, o1, e := Link(ctx, storage, p, args)
|
||||||
|
if e != nil {
|
||||||
|
err = errors.WithMessagef(e, "failed get [%s] link", p)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ss1, e := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o1}, l1)
|
||||||
|
if e != nil {
|
||||||
|
_ = l1.Close()
|
||||||
|
err = errors.WithMessagef(e, "failed get [%s] stream", p)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
for partIdx >= len(ret) {
|
||||||
|
ret = append(ret, nil)
|
||||||
|
}
|
||||||
|
ret[partIdx] = ss1
|
||||||
|
}
|
||||||
|
closeAll := func(r []*stream.SeekableStream) {
|
||||||
|
for _, s := range r {
|
||||||
|
if s != nil {
|
||||||
|
_ = s.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
closeAll(ret)
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
|
for i, ss1 := range ret {
|
||||||
|
if ss1 == nil {
|
||||||
|
closeAll(ret)
|
||||||
|
return nil, nil, nil, errors.Errorf("failed merge [%s] parts, missing part %d", path, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return obj, t, ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
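The rewritten merge no longer probes `part2`, `part3`, ... one link at a time via `fmt.Sprintf`; it lists the directory once and slots each matching file into position, normalizing with `partIdx = partIdx - partExt.SecondPartIndex + 1` so the first extra part lands at `ret[1]` (`ret[0]` is the archive itself). A standalone sketch of that normalization, with a hypothetical part-name pattern (the real `PartFileFormat` regexp lives in the tool package):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Hypothetical multi-part naming: "backup.part2.rar", "backup.part3.rar", ...
	partFileFormat := regexp.MustCompile(`^backup\.part(\d+)\.rar$`)
	secondPartIndex := 2 // the second physical part carries this number

	names := []string{"backup.part3.rar", "backup.part2.rar", "notes.txt"}
	ret := []string{"backup.rar"} // ret[0] is the first part (the archive itself)

	for _, name := range names {
		submatch := partFileFormat.FindStringSubmatch(name)
		if submatch == nil {
			continue // not a part of this archive
		}
		partIdx, err := strconv.Atoi(submatch[1])
		if err != nil {
			continue
		}
		// Normalize: the part numbered secondPartIndex lands at ret[1].
		partIdx = partIdx - secondPartIndex + 1
		if partIdx < 1 {
			continue
		}
		for partIdx >= len(ret) {
			ret = append(ret, "") // grow; a remaining gap signals a missing part
		}
		ret[partIdx] = name
	}
	fmt.Println(ret) // [backup.rar backup.part2.rar backup.part3.rar]
}
```

Because parts are assigned by index rather than appended in discovery order, an out-of-order directory listing still produces a correctly ordered stream list, and a hole left in `ret` is detected afterwards as a missing part.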
 func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
@@ -159,8 +200,10 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 	return obj, archiveMetaProvider, err
 }
 
-var archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
-var archiveListG singleflight.Group[[]model.Obj]
+var (
+	archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
+	archiveListG     singleflight.Group[[]model.Obj]
+)
 
 func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -360,8 +403,10 @@ type objWithLink struct {
 	obj model.Obj
 }
 
-var extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
-var extractG = singleflight.Group[*objWithLink]{}
+var (
+	extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
+	extractG     = singleflight.Group[*objWithLink]{}
+)
 
 func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -469,9 +514,9 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
 		return errors.WithMessage(err, "failed to get dst dir")
 	}
 
+	var newObjs []model.Obj
 	switch s := storage.(type) {
 	case driver.ArchiveDecompressResult:
-		var newObjs []model.Obj
 		newObjs, err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
 		if err == nil {
 			if len(newObjs) > 0 {
@@ -490,5 +535,31 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
 	default:
 		return errs.NotImplement
 	}
+	if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
+		onlyList := false
+		targetPath := dstDirPath
+		if newObjs != nil && len(newObjs) == 1 && newObjs[0].IsDir() {
+			targetPath = stdpath.Join(dstDirPath, newObjs[0].GetName())
+		} else if newObjs != nil && len(newObjs) == 1 && !newObjs[0].IsDir() {
+			onlyList = true
+		} else if args.PutIntoNewDir {
+			targetPath = stdpath.Join(dstDirPath, strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName())))
+		} else if innerBase := stdpath.Base(args.InnerPath); innerBase != "." && innerBase != "/" {
+			targetPath = stdpath.Join(dstDirPath, innerBase)
+			dstObj, e := GetUnwrap(ctx, storage, targetPath)
+			onlyList = e != nil || !dstObj.IsDir()
+		}
+		if onlyList {
+			go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
+		} else {
+			var limiter *rate.Limiter
+			if l, _ := GetSettingItemByKey(conf.HandleHookRateLimit); l != nil {
+				if f, e := strconv.ParseFloat(l.Value, 64); e == nil && f > .0 {
+					limiter = rate.NewLimiter(rate.Limit(f), 1)
+				}
+			}
+			go RecursivelyListStorage(context.Background(), storage, targetPath, limiter, nil)
+		}
+	}
 	return errors.WithStack(err)
 }
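This hook, and the matching ones added to `Move`, `Copy`, and `Put` below, builds an optional limiter from a settings string; when the setting is absent or non-positive the limiter stays nil and the recursive walk runs unthrottled. A sketch of the same `golang.org/x/time/rate` idiom in isolation (the setting lookup is faked here):

```go
package main

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"golang.org/x/time/rate"
)

func limiterFromSetting(value string) *rate.Limiter {
	f, err := strconv.ParseFloat(value, 64)
	if err != nil || f <= 0 {
		return nil // no limit configured
	}
	// Burst of 1: at most f operations per second, evenly spaced.
	return rate.NewLimiter(rate.Limit(f), 1)
}

func main() {
	limiter := limiterFromSetting("2") // 2 ops/second
	ctx := context.Background()
	start := time.Now()
	for i := 0; i < 4; i++ {
		if limiter != nil {
			if err := limiter.Wait(ctx); err != nil {
				return // context canceled
			}
		}
		fmt.Printf("op %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
}
```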
@@ -2,10 +2,11 @@ package op
 
 import (
 	"context"
-	stderrors "errors"
 	stdpath "path"
+	"strconv"
 	"time"
 
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -14,6 +15,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
+	"golang.org/x/time/rate"
 )
 
 var listG singleflight.Group[[]model.Obj]
@@ -173,10 +175,10 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
 		mode = storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(path)
 	}
 	typeKey := args.Type
-	if mode&driver.LinkCacheIP == 1 {
+	if mode&driver.LinkCacheIP == driver.LinkCacheIP {
 		typeKey += "/" + args.IP
 	}
-	if mode&driver.LinkCacheUA == 1 {
+	if mode&driver.LinkCacheUA == driver.LinkCacheUA {
 		typeKey += "/" + args.Header.Get("User-Agent")
 	}
 	key := Key(storage, path)
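The fix above matters whenever a flag is not the lowest bit: `mode&driver.LinkCacheUA` yields the flag's own value (e.g. 2), so comparing the result to `1` is never true and the branch was silently dead. Testing `mask&flag == flag` (or `!= 0` for single-bit flags) is the correct idiom; a minimal demonstration with hypothetical flag values:

```go
package main

import "fmt"

const (
	LinkCacheIP = 1 << iota // 1
	LinkCacheUA             // 2
)

func main() {
	mode := LinkCacheIP | LinkCacheUA // 3

	// Buggy: mode&LinkCacheUA is 2, never equal to 1.
	fmt.Println(mode&LinkCacheUA == 1) // false — UA caching silently skipped

	// Correct: compare against the flag itself.
	fmt.Println(mode&LinkCacheUA == LinkCacheUA) // true
}
```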
@@ -310,7 +312,7 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 	srcDirPath := stdpath.Dir(srcPath)
 	dstDirPath = utils.FixAndCleanPath(dstDirPath)
 	if dstDirPath == srcDirPath {
-		return stderrors.New("move in place")
+		return errors.New("move in place")
 	}
 	srcRawObj, err := Get(ctx, storage, srcPath)
 	if err != nil {
@@ -343,8 +345,24 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 		}
 	}
 	default:
-		return errs.NotImplement
+		err = errs.NotImplement
 	}
+	if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
+		if !srcObj.IsDir() {
+			go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
+		} else {
+			targetPath := stdpath.Join(dstDirPath, srcObj.GetName())
+			var limiter *rate.Limiter
+			if l, _ := GetSettingItemByKey(conf.HandleHookRateLimit); l != nil {
+				if f, e := strconv.ParseFloat(l.Value, 64); e == nil && f > .0 {
+					limiter = rate.NewLimiter(rate.Limit(f), 1)
+				}
+			}
+			go RecursivelyListStorage(context.Background(), storage, targetPath, limiter, nil)
+		}
+	}
 	return errors.WithStack(err)
 }
 
@@ -397,7 +415,7 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 	srcPath = utils.FixAndCleanPath(srcPath)
 	dstDirPath = utils.FixAndCleanPath(dstDirPath)
 	if dstDirPath == stdpath.Dir(srcPath) {
-		return stderrors.New("copy in place")
+		return errors.New("copy in place")
 	}
 	srcRawObj, err := Get(ctx, storage, srcPath)
 	if err != nil {
@@ -428,8 +446,24 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 		}
 	}
 	default:
-		return errs.NotImplement
+		err = errs.NotImplement
 	}
+	if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
+		if !srcObj.IsDir() {
+			go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
+		} else {
+			targetPath := stdpath.Join(dstDirPath, srcObj.GetName())
+			var limiter *rate.Limiter
+			if l, _ := GetSettingItemByKey(conf.HandleHookRateLimit); l != nil {
+				if f, e := strconv.ParseFloat(l.Value, 64); e == nil && f > .0 {
+					limiter = rate.NewLimiter(rate.Limit(f), 1)
+				}
+			}
+			go RecursivelyListStorage(context.Background(), storage, targetPath, limiter, nil)
+		}
+	}
 	return errors.WithStack(err)
 }
 
@@ -475,7 +509,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
 		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	// UrlTree PUT
-	if storage.GetStorage().Driver == "UrlTree" {
+	if storage.Config().OnlyIndices {
 		var link string
 		dstDirPath, link = urlTreeSplitLineFormPath(stdpath.Join(dstDirPath, file.GetName()))
 		file = &stream.FileStream{Obj: &model.Object{Name: link}}
@@ -557,6 +591,9 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
 			err = Remove(ctx, storage, tempPath)
 		}
 	}
+	if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
+		go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
+	}
 	return errors.WithStack(err)
 }
 
@@ -568,15 +605,15 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
 	dstPath := stdpath.Join(dstDirPath, dstName)
 	_, err := GetUnwrap(ctx, storage, dstPath)
 	if err == nil {
-		return errors.New("obj already exists")
+		return errors.WithStack(errs.ObjectAlreadyExists)
 	}
 	err = MakeDir(ctx, storage, dstDirPath)
 	if err != nil {
-		return errors.WithMessagef(err, "failed to put url")
+		return errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
 	}
 	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
 	if err != nil {
-		return errors.WithMessagef(err, "failed to put url")
+		return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
 	}
 	switch s := storage.(type) {
 	case driver.PutURLResult:
@@ -599,8 +636,56 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
 		}
 	}
 	default:
-		return errs.NotImplement
+		return errors.WithStack(errs.NotImplement)
+	}
+	if !utils.IsBool(lazyCache...) && err == nil && needHandleObjsUpdateHook() {
+		go List(context.Background(), storage, dstDirPath, model.ListArgs{Refresh: true})
 	}
 	log.Debugf("put url [%s](%s) done", dstName, url)
 	return errors.WithStack(err)
 }
+
+func GetDirectUploadTools(storage driver.Driver) []string {
+	du, ok := storage.(driver.DirectUploader)
+	if !ok {
+		return nil
+	}
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return nil
+	}
+	return du.GetDirectUploadTools()
+}
+
+func GetDirectUploadInfo(ctx context.Context, tool string, storage driver.Driver, dstDirPath, dstName string, fileSize int64) (any, error) {
+	du, ok := storage.(driver.DirectUploader)
+	if !ok {
+		return nil, errors.WithStack(errs.NotImplement)
+	}
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
+	}
+	dstDirPath = utils.FixAndCleanPath(dstDirPath)
+	dstPath := stdpath.Join(dstDirPath, dstName)
+	_, err := GetUnwrap(ctx, storage, dstPath)
+	if err == nil {
+		return nil, errors.WithStack(errs.ObjectAlreadyExists)
+	}
+	err = MakeDir(ctx, storage, dstDirPath)
+	if err != nil {
+		return nil, errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
+	}
+	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
+	if err != nil {
+		return nil, errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
+	}
+	info, err := du.GetDirectUploadInfo(ctx, tool, dstDir, dstName, fileSize)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return info, nil
+}
+
+func needHandleObjsUpdateHook() bool {
+	needHandle, _ := GetSettingItemByKey(conf.HandleHookAfterWriting)
+	return needHandle != nil && (needHandle.Value == "true" || needHandle.Value == "1")
+}
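`GetDirectUploadTools` and `GetDirectUploadInfo` gate on a type assertion to `driver.DirectUploader`, the usual Go way to probe a driver for an optional capability without widening the base interface. A self-contained sketch of that pattern (the interface and driver names below are invented for illustration):

```go
package main

import "fmt"

type Driver interface {
	Name() string
}

// Optional capability: only some drivers implement it.
type DirectUploader interface {
	GetDirectUploadTools() []string
}

type basicDriver struct{}

func (basicDriver) Name() string { return "basic" }

type s3Driver struct{}

func (s3Driver) Name() string                   { return "s3" }
func (s3Driver) GetDirectUploadTools() []string { return []string{"presigned-put"} }

func toolsOf(d Driver) []string {
	du, ok := d.(DirectUploader) // capability probe, no base-interface change
	if !ok {
		return nil
	}
	return du.GetDirectUploadTools()
}

func main() {
	fmt.Println(toolsOf(basicDriver{})) // []
	fmt.Println(toolsOf(s3Driver{}))    // [presigned-put]
}
```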
internal/op/recursive_list.go (new file, 125 lines)
@@ -0,0 +1,125 @@
+package op
+
+import (
+	"context"
+	stdpath "path"
+	"sync"
+	"sync/atomic"
+
+	"github.com/OpenListTeam/OpenList/v4/internal/driver"
+	"github.com/OpenListTeam/OpenList/v4/internal/errs"
+	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
+	"golang.org/x/time/rate"
+)
+
+var (
+	ManualScanCancel = atomic.Pointer[context.CancelFunc]{}
+	ScannedCount     = atomic.Uint64{}
+)
+
+func ManualScanRunning() bool {
+	return ManualScanCancel.Load() != nil
+}
+
+func BeginManualScan(rawPath string, limit float64) error {
+	rawPath = utils.FixAndCleanPath(rawPath)
+	ctx, cancel := context.WithCancel(context.Background())
+	if !ManualScanCancel.CompareAndSwap(nil, &cancel) {
+		cancel()
+		return errors.New("manual scan is running, please try later")
+	}
+	ScannedCount.Store(0)
+	go func() {
+		defer func() { (*ManualScanCancel.Swap(nil))() }()
+		err := RecursivelyList(ctx, rawPath, rate.Limit(limit), &ScannedCount)
+		if err != nil {
+			log.Errorf("failed recursively list: %v", err)
+		}
+	}()
+	return nil
+}
+
+func StopManualScan() {
+	c := ManualScanCancel.Load()
+	if c != nil {
+		(*c)()
+	}
+}
+
+func RecursivelyList(ctx context.Context, rawPath string, limit rate.Limit, counter *atomic.Uint64) error {
+	storage, actualPath, err := GetStorageAndActualPath(rawPath)
+	if err != nil && !errors.Is(err, errs.StorageNotFound) {
+		return err
+	} else if err == nil {
+		var limiter *rate.Limiter
+		if limit > .0 {
+			limiter = rate.NewLimiter(limit, 1)
+		}
+		RecursivelyListStorage(ctx, storage, actualPath, limiter, counter)
+	} else {
+		var wg sync.WaitGroup
+		recursivelyListVirtual(ctx, rawPath, limit, counter, &wg)
+		wg.Wait()
+	}
+	return nil
+}
+
+func recursivelyListVirtual(ctx context.Context, rawPath string, limit rate.Limit, counter *atomic.Uint64, wg *sync.WaitGroup) {
+	objs := GetStorageVirtualFilesByPath(rawPath)
+	if counter != nil {
+		counter.Add(uint64(len(objs)))
+	}
+	for _, obj := range objs {
+		if utils.IsCanceled(ctx) {
+			return
+		}
+		nextPath := stdpath.Join(rawPath, obj.GetName())
+		storage, actualPath, err := GetStorageAndActualPath(nextPath)
+		if err != nil && !errors.Is(err, errs.StorageNotFound) {
+			log.Errorf("error recursively list: failed get storage [%s]: %v", nextPath, err)
+		} else if err == nil {
+			var limiter *rate.Limiter
+			if limit > .0 {
+				limiter = rate.NewLimiter(limit, 1)
+			}
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				RecursivelyListStorage(ctx, storage, actualPath, limiter, counter)
+			}()
+		} else {
+			recursivelyListVirtual(ctx, nextPath, limit, counter, wg)
+		}
+	}
+}
+
+func RecursivelyListStorage(ctx context.Context, storage driver.Driver, actualPath string, limiter *rate.Limiter, counter *atomic.Uint64) {
+	objs, err := List(ctx, storage, actualPath, model.ListArgs{Refresh: true})
+	if err != nil {
+		if !errors.Is(err, context.Canceled) {
+			log.Errorf("error recursively list: failed list (%s)[%s]: %v", storage.GetStorage().MountPath, actualPath, err)
+		}
+		return
+	}
+	if counter != nil {
+		counter.Add(uint64(len(objs)))
+	}
+	for _, obj := range objs {
+		if utils.IsCanceled(ctx) {
+			return
+		}
+		if !obj.IsDir() {
+			continue
+		}
+		if limiter != nil {
+			if err = limiter.Wait(ctx); err != nil {
+				return
+			}
+		}
+		nextPath := stdpath.Join(actualPath, obj.GetName())
+		RecursivelyListStorage(ctx, storage, nextPath, limiter, counter)
+	}
+}
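`BeginManualScan` guarantees a single concurrent scan by CAS-ing a `*context.CancelFunc` into an `atomic.Pointer`: the `Swap(nil)` in the deferred func both clears the slot and hands back the CancelFunc so the context is released. A reduced sketch of the same guard (the scan body is a stub):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

var scanCancel atomic.Pointer[context.CancelFunc]

func beginScan(run func(ctx context.Context)) error {
	ctx, cancel := context.WithCancel(context.Background())
	if !scanCancel.CompareAndSwap(nil, &cancel) {
		cancel() // lost the race: release the context we just made
		return errors.New("scan already running")
	}
	go func() {
		// Swap clears the slot and yields the CancelFunc to release the ctx.
		defer func() { (*scanCancel.Swap(nil))() }()
		run(ctx)
	}()
	return nil
}

func main() {
	_ = beginScan(func(ctx context.Context) { time.Sleep(50 * time.Millisecond) })
	err := beginScan(func(ctx context.Context) {})
	fmt.Println(err) // scan already running
	time.Sleep(100 * time.Millisecond)
	fmt.Println(beginScan(func(ctx context.Context) {})) // <nil> — slot is free again
}
```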
@@ -358,16 +358,21 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
 				DriverName: d.Config().Name,
 			},
 		}
-		timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
-		defer cancel()
-		details, err := GetStorageDetails(timeoutCtx, d, refresh)
-		if err != nil {
-			if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
-				log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
+		resultChan := make(chan *model.StorageDetails, 1)
+		go func(dri driver.Driver) {
+			details, err := GetStorageDetails(ctx, dri, refresh)
+			if err != nil {
+				if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
+					log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, err)
+				}
+				return
 			}
-			return ret
+			resultChan <- details
+		}(d)
+		select {
+		case r := <-resultChan:
+			ret.StorageDetails = r
+		case <-time.After(time.Second):
 		}
-		ret.StorageDetails = details
 		return ret
 	})
 }
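The change above replaces `context.WithTimeout` with a buffered channel and a `select` on `time.After`: the slow `GetStorageDetails` call is no longer canceled at the deadline; it keeps running in the background and its late result is simply dropped (the 1-element buffer lets the goroutine's send complete so it never leaks blocked). A minimal sketch of the trade-off:

```go
package main

import (
	"fmt"
	"time"
)

func slowDetails() string {
	time.Sleep(2 * time.Second) // e.g. a sluggish cloud quota API
	return "quota: 100GB"
}

func main() {
	resultChan := make(chan string, 1) // buffer 1: sender never blocks
	go func() {
		resultChan <- slowDetails() // keeps running past the deadline
	}()

	select {
	case r := <-resultChan:
		fmt.Println("details:", r)
	case <-time.After(time.Second):
		fmt.Println("details: <timed out, omitted>")
	}
	// Unlike ctx cancellation, the goroutine runs to completion, so (as an
	// assumption about the motivation) its work can still warm any caches
	// for the next request.
}
```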
@@ -215,6 +215,16 @@ func Update(ctx context.Context, parent string, objs []model.Obj) {
 	if !progress.IsDone {
 		return
 	}
+
+	// Use task queue for Meilisearch to avoid race conditions with async indexing
+	if msInstance, ok := instance.(interface {
+		EnqueueUpdate(parent string, objs []model.Obj)
+	}); ok {
+		// Enqueue task for async processing (diff calculation happens at consumption time)
+		msInstance.EnqueueUpdate(parent, objs)
+		return
+	}
+
 	nodes, err := instance.Get(ctx, parent)
 	if err != nil {
 		log.Errorf("update search index error while get nodes: %+v", err)
@@ -241,28 +251,24 @@ func Update(ctx context.Context, parent string, objs []model.Obj) {
 			}
 		}
 	}
+	// collect files and folders to add in batch
+	var toAddObjs []ObjWithParent
 	for i := range objs {
 		if toAdd.Contains(objs[i].GetName()) {
-			if !objs[i].IsDir() {
-				log.Debugf("add index: %s", path.Join(parent, objs[i].GetName()))
-				err = Index(ctx, parent, objs[i])
-				if err != nil {
-					log.Errorf("update search index error while index new node: %+v", err)
-					return
-				}
-			} else {
-				// build index if it's a folder
-				dir := path.Join(parent, objs[i].GetName())
-				err = BuildIndex(ctx,
-					[]string{dir},
-					conf.SlicesMap[conf.IgnorePaths],
-					setting.GetInt(conf.MaxIndexDepth, 20)-strings.Count(dir, "/"), false)
-				if err != nil {
-					log.Errorf("update search index error while build index: %+v", err)
-					return
-				}
-			}
+			log.Debugf("add index: %s", path.Join(parent, objs[i].GetName()))
+			toAddObjs = append(toAddObjs, ObjWithParent{
+				Parent: parent,
+				Obj:    objs[i],
+			})
+		}
+	}
+	// batch index all files and folders at once
+	if len(toAddObjs) > 0 {
+		err = BatchIndex(ctx, toAddObjs)
+		if err != nil {
+			log.Errorf("update search index error while batch index new nodes: %+v", err)
+			return
 		}
 	}
 }
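The dispatch into the Meilisearch queue uses an inline anonymous-interface assertion, which lets the generic search layer probe for `EnqueueUpdate` without importing the meilisearch package. The same trick in miniature (all types below are illustrative):

```go
package main

import "fmt"

type searcher interface {
	Index(parent, name string) error
}

type meili struct{}

func (meili) Index(parent, name string) error { return nil }
func (meili) EnqueueUpdate(parent string, names []string) {
	fmt.Println("queued", len(names), "entries under", parent)
}

type bleve struct{}

func (bleve) Index(parent, name string) error { return nil }

func update(s searcher, parent string, names []string) {
	// Structural check: does this backend opt into async updates?
	if q, ok := s.(interface {
		EnqueueUpdate(parent string, names []string)
	}); ok {
		q.EnqueueUpdate(parent, names)
		return
	}
	for _, n := range names {
		_ = s.Index(parent, n) // synchronous fallback
	}
}

func main() {
	update(meili{}, "/docs", []string{"a.txt", "b.txt"}) // queued 2 entries under /docs
	update(bleve{}, "/docs", []string{"a.txt"})          // silent synchronous path
}
```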
@@ -91,6 +91,11 @@ func init() {
 			return nil, err
 		}
 	}
+
+	// Initialize and start task queue manager
+	m.taskQueue = NewTaskQueueManager(&m)
+	m.taskQueue.Start()
+
 	return &m, nil
 	})
 }
@@ -33,6 +33,7 @@ type Meilisearch struct {
 	IndexUid             string
 	FilterableAttributes []string
 	SearchableAttributes []string
+	taskQueue            *TaskQueueManager
 }
 
 func (m *Meilisearch) Config() searcher.Config {
@@ -82,14 +83,17 @@ func (m *Meilisearch) Index(ctx context.Context, node model.SearchNode) error {
 }
 
 func (m *Meilisearch) BatchIndex(ctx context.Context, nodes []model.SearchNode) error {
-	documents, _ := utils.SliceConvert(nodes, func(src model.SearchNode) (*searchDocument, error) {
+	documents, err := utils.SliceConvert(nodes, func(src model.SearchNode) (*searchDocument, error) {
 		parentHash := hashPath(src.Parent)
 		nodePath := path.Join(src.Parent, src.Name)
 		nodePathHash := hashPath(nodePath)
 		parentPaths := utils.GetPathHierarchy(src.Parent)
-		parentPathHashes, _ := utils.SliceConvert(parentPaths, func(parentPath string) (string, error) {
+		parentPathHashes, err := utils.SliceConvert(parentPaths, func(parentPath string) (string, error) {
 			return hashPath(parentPath), nil
 		})
+		if err != nil {
+			return nil, err
+		}
+
 		return &searchDocument{
 			ID: nodePathHash,
@@ -98,9 +102,12 @@ func (m *Meilisearch) BatchIndex(ctx context.Context, nodes []model.SearchNode)
 			SearchNode: src,
 		}, nil
 	})
+	if err != nil {
+		return err
+	}
+
 	// max up to 10,000 documents per batch to reduce error rate while uploading over the Internet
-	_, err := m.Client.Index(m.IndexUid).AddDocumentsInBatchesWithContext(ctx, documents, 10000)
+	_, err = m.Client.Index(m.IndexUid).AddDocumentsInBatchesWithContext(ctx, documents, 10000)
 	if err != nil {
 		return err
 	}
@@ -203,6 +210,9 @@ func (m *Meilisearch) Del(ctx context.Context, prefix string) error {
 }
 
 func (m *Meilisearch) Release(ctx context.Context) error {
+	if m.taskQueue != nil {
+		m.taskQueue.Stop()
+	}
 	return nil
 }
 
@@ -219,3 +229,115 @@ func (m *Meilisearch) getTaskStatus(ctx context.Context, taskUID int64) (meilise
 	}
 	return forTask.Status, nil
 }
+
+// EnqueueUpdate enqueues an update task to the task queue
+func (m *Meilisearch) EnqueueUpdate(parent string, objs []model.Obj) {
+	if m.taskQueue == nil {
+		return
+	}
+	m.taskQueue.Enqueue(parent, objs)
+}
+
+// batchIndexWithTaskUID indexes documents and returns all taskUIDs
+func (m *Meilisearch) batchIndexWithTaskUID(ctx context.Context, nodes []model.SearchNode) ([]int64, error) {
+	if len(nodes) == 0 {
+		return nil, nil
+	}
+
+	documents, err := utils.SliceConvert(nodes, func(src model.SearchNode) (*searchDocument, error) {
+		parentHash := hashPath(src.Parent)
+		nodePath := path.Join(src.Parent, src.Name)
+		nodePathHash := hashPath(nodePath)
+		parentPaths := utils.GetPathHierarchy(src.Parent)
+		parentPathHashes, err := utils.SliceConvert(parentPaths, func(parentPath string) (string, error) {
+			return hashPath(parentPath), nil
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		return &searchDocument{
+			ID:               nodePathHash,
+			ParentHash:       parentHash,
+			ParentPathHashes: parentPathHashes,
+			SearchNode:       src,
+		}, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// max up to 10,000 documents per batch to reduce error rate while uploading over the Internet
+	tasks, err := m.Client.Index(m.IndexUid).AddDocumentsInBatchesWithContext(ctx, documents, 10000)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return all task UIDs
+	taskUIDs := make([]int64, 0, len(tasks))
+	for _, task := range tasks {
+		taskUIDs = append(taskUIDs, task.TaskUID)
+	}
+	return taskUIDs, nil
+}
+
+// batchDeleteWithTaskUID deletes documents and returns all taskUIDs
+func (m *Meilisearch) batchDeleteWithTaskUID(ctx context.Context, paths []string) ([]int64, error) {
+	if len(paths) == 0 {
+		return nil, nil
+	}
+
+	// Deduplicate paths first
+	pathSet := make(map[string]struct{})
+	uniquePaths := make([]string, 0, len(paths))
+	for _, p := range paths {
+		p = utils.FixAndCleanPath(p)
+		if _, exists := pathSet[p]; !exists {
+			pathSet[p] = struct{}{}
+			uniquePaths = append(uniquePaths, p)
+		}
+	}
+
+	const batchSize = 100 // max paths per batch to avoid filter length limits
+	var taskUIDs []int64
+
+	// Process in batches to avoid filter length limits
+	for i := 0; i < len(uniquePaths); i += batchSize {
+		end := i + batchSize
+		if end > len(uniquePaths) {
+			end = len(uniquePaths)
+		}
+		batch := uniquePaths[i:end]
+
+		// Build combined filter to delete all children in one request
+		// Format: parent_path_hashes = 'hash1' OR parent_path_hashes = 'hash2' OR ...
+		var filters []string
+		for _, p := range batch {
+			pathHash := hashPath(p)
+			filters = append(filters, fmt.Sprintf("parent_path_hashes = '%s'", pathHash))
+		}
+		if len(filters) > 0 {
+			combinedFilter := strings.Join(filters, " OR ")
+			// Delete all children for all paths in one request
+			task, err := m.Client.Index(m.IndexUid).DeleteDocumentsByFilterWithContext(ctx, combinedFilter)
+			if err != nil {
+				return nil, err
+			}
+			taskUIDs = append(taskUIDs, task.TaskUID)
+		}
+
+		// Convert paths to document IDs and batch delete
+		documentIDs := make([]string, 0, len(batch))
+		for _, p := range batch {
+			documentIDs = append(documentIDs, hashPath(p))
+		}
+		// Use batch delete API
+		task, err := m.Client.Index(m.IndexUid).DeleteDocumentsWithContext(ctx, documentIDs)
+		if err != nil {
+			return nil, err
+		}
+		taskUIDs = append(taskUIDs, task.TaskUID)
+	}
+	return taskUIDs, nil
+}
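`batchDeleteWithTaskUID` folds up to 100 path hashes into one `OR` filter per request instead of issuing a delete per path. The string assembly in isolation, separated from the client calls (the filter syntax mirrors the code above; the hashes are placeholders):

```go
package main

import (
	"fmt"
	"strings"
)

// buildDeleteFilters groups hashes into batches and renders one OR-joined
// filter string per batch, keeping each request under filter-length limits.
func buildDeleteFilters(pathHashes []string, batchSize int) []string {
	var filters []string
	for i := 0; i < len(pathHashes); i += batchSize {
		end := i + batchSize
		if end > len(pathHashes) {
			end = len(pathHashes)
		}
		parts := make([]string, 0, end-i)
		for _, h := range pathHashes[i:end] {
			parts = append(parts, fmt.Sprintf("parent_path_hashes = '%s'", h))
		}
		filters = append(filters, strings.Join(parts, " OR "))
	}
	return filters
}

func main() {
	hashes := []string{"h1", "h2", "h3"}
	for _, f := range buildDeleteFilters(hashes, 2) {
		fmt.Println(f)
	}
	// parent_path_hashes = 'h1' OR parent_path_hashes = 'h2'
	// parent_path_hashes = 'h3'
}
```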
internal/search/meilisearch/task_queue.go (new file, 265 lines)
@@ -0,0 +1,265 @@
+package meilisearch
+
+import (
+	"context"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	mapset "github.com/deckarep/golang-set/v2"
+	log "github.com/sirupsen/logrus"
+)
+
+// QueuedTask represents a task in the queue
+type QueuedTask struct {
+	Parent    string
+	Objs      []model.Obj // current file system state
+	Depth     int         // path depth for sorting
+	EnqueueAt time.Time   // enqueue time
+}
+
+// TaskQueueManager manages the task queue for async index operations
+type TaskQueueManager struct {
+	queue        map[string]*QueuedTask // parent -> task
+	pendingTasks map[string][]int64     // parent -> all submitted taskUIDs
+	mu           sync.RWMutex
+	ticker       *time.Ticker
+	stopCh       chan struct{}
+	m            *Meilisearch
+	consuming    atomic.Bool // flag to prevent concurrent consumption
+}
+
+// NewTaskQueueManager creates a new task queue manager
+func NewTaskQueueManager(m *Meilisearch) *TaskQueueManager {
+	return &TaskQueueManager{
+		queue:        make(map[string]*QueuedTask),
+		pendingTasks: make(map[string][]int64),
+		stopCh:       make(chan struct{}),
+		m:            m,
+	}
+}
+
+// calculateDepth calculates the depth of a path
+func calculateDepth(path string) int {
+	if path == "/" {
+		return 0
+	}
+	return strings.Count(strings.Trim(path, "/"), "/") + 1
+}
+
+// Enqueue enqueues a task with current file system state
+func (tqm *TaskQueueManager) Enqueue(parent string, objs []model.Obj) {
+	tqm.mu.Lock()
+	defer tqm.mu.Unlock()
+
+	// deduplicate: overwrite existing task with the same parent
+	tqm.queue[parent] = &QueuedTask{
+		Parent:    parent,
+		Objs:      objs,
+		Depth:     calculateDepth(parent),
+		EnqueueAt: time.Now(),
+	}
+	log.Debugf("enqueued update task for parent: %s, depth: %d, objs: %d", parent, calculateDepth(parent), len(objs))
+}
+
+// Start starts the task queue consumer
+func (tqm *TaskQueueManager) Start() {
+	tqm.ticker = time.NewTicker(30 * time.Second)
+	go func() {
+		for {
+			select {
+			case <-tqm.ticker.C:
+				tqm.consume()
+			case <-tqm.stopCh:
+				log.Info("task queue manager stopped")
+				return
+			}
+		}
+	}()
+	log.Info("task queue manager started, will consume every 30 seconds")
+}
+
+// Stop stops the task queue consumer
+func (tqm *TaskQueueManager) Stop() {
+	if tqm.ticker != nil {
+		tqm.ticker.Stop()
+	}
+	close(tqm.stopCh)
+}
+
+// consume processes all tasks in the queue
+func (tqm *TaskQueueManager) consume() {
+	// Prevent concurrent consumption
+	if !tqm.consuming.CompareAndSwap(false, true) {
+		log.Warn("previous consume still running, skip this round")
+		return
+	}
+	defer tqm.consuming.Store(false)
+
+	tqm.mu.Lock()
+
+	// extract all tasks
+	tasks := make([]*QueuedTask, 0, len(tqm.queue))
+	for _, task := range tqm.queue {
+		tasks = append(tasks, task)
+	}
+
+	// clear queue
+	tqm.queue = make(map[string]*QueuedTask)
+
+	tqm.mu.Unlock()
+
+	if len(tasks) == 0 {
+		return
+	}
+
+	log.Infof("consuming task queue: %d tasks", len(tasks))
+
+	// sort tasks: shallow paths first, then by enqueue time
+	sort.Slice(tasks, func(i, j int) bool {
+		if tasks[i].Depth != tasks[j].Depth {
+			return tasks[i].Depth < tasks[j].Depth
+		}
+		return tasks[i].EnqueueAt.Before(tasks[j].EnqueueAt)
+	})
+
+	ctx := context.Background()
+
+	// execute tasks in order
+	for _, task := range tasks {
+		// Check if there are pending tasks for this parent
+		tqm.mu.RLock()
+		pendingTaskUIDs, hasPending := tqm.pendingTasks[task.Parent]
+		tqm.mu.RUnlock()
+
+		if hasPending && len(pendingTaskUIDs) > 0 {
+			// Check all pending task statuses
+			allCompleted := true
+			for _, taskUID := range pendingTaskUIDs {
+				taskStatus, err := tqm.m.getTaskStatus(ctx, taskUID)
+				if err != nil {
+					log.Errorf("failed to get task status for parent %s (taskUID: %d): %v", task.Parent, taskUID, err)
+					// If we can't get status, assume it's done and continue checking
+					continue
+				}
+
+				// Check if task is still running
+				if taskStatus == "enqueued" || taskStatus == "processing" {
+					log.Warnf("skipping task for parent %s: previous task %d still %s", task.Parent, taskUID, taskStatus)
+					allCompleted = false
+					break // No need to check remaining tasks
+				}
+			}
+
+			if !allCompleted {
+				// Re-enqueue the task if not already in queue (avoid overwriting newer snapshots)
+				tqm.mu.Lock()
+				if _, exists := tqm.queue[task.Parent]; !exists {
+					tqm.queue[task.Parent] = task
+					log.Debugf("re-enqueued skipped task for parent %s due to pending tasks", task.Parent)
+				} else {
+					log.Debugf("skipped task for parent %s not re-enqueued (newer task already in queue)", task.Parent)
+				}
+				tqm.mu.Unlock()
+				continue // Skip this task, some previous tasks are still running
+			}
+
+			// All tasks are in terminal state, remove from pending
+			log.Debugf("all previous tasks for parent %s are completed, proceeding with new task", task.Parent)
+			tqm.mu.Lock()
+			delete(tqm.pendingTasks, task.Parent)
+			tqm.mu.Unlock()
+		}
+
+		// Execute the task
+		tqm.executeTask(ctx, task)
+	}
+
+	log.Infof("task queue consumption completed")
+}
+
+// executeTask executes a single task
+func (tqm *TaskQueueManager) executeTask(ctx context.Context, task *QueuedTask) {
+	parent := task.Parent
+	currentObjs := task.Objs
+
+	// Query index to get old state
+	nodes, err := tqm.m.Get(ctx, parent)
+	if err != nil {
+		log.Errorf("failed to get indexed nodes for parent %s: %v", parent, err)
+		return
+	}
+
+	// Calculate diff based on current index state
+	now := mapset.NewSet[string]()
+	for i := range currentObjs {
+		now.Add(currentObjs[i].GetName())
+	}
+	old := mapset.NewSet[string]()
+	for i := range nodes {
+		old.Add(nodes[i].Name)
+	}
+
+	toDelete := old.Difference(now)
+	toAdd := now.Difference(old)
+
+	// Collect paths to delete
+	var pathsToDelete []string
+	for i := range nodes {
+		if toDelete.Contains(nodes[i].Name) && !op.HasStorage(path.Join(parent, nodes[i].Name)) {
+			pathsToDelete = append(pathsToDelete, path.Join(parent, nodes[i].Name))
+		}
+	}
+
+	var allTaskUIDs []int64
+
+	// Execute delete first
+	if len(pathsToDelete) > 0 {
+		log.Debugf("executing delete for parent %s: %d paths", parent, len(pathsToDelete))
+		taskUIDs, err := tqm.m.batchDeleteWithTaskUID(ctx, pathsToDelete)
+		if err != nil {
+			log.Errorf("failed to batch delete for parent %s: %v", parent, err)
+			// Continue to add even if delete fails
+		} else {
+			allTaskUIDs = append(allTaskUIDs, taskUIDs...)
+		}
+	}
+
+	// Collect objects to add
+	var nodesToAdd []model.SearchNode
+	for i := range currentObjs {
+		if toAdd.Contains(currentObjs[i].GetName()) {
+			log.Debugf("will add index: %s", path.Join(parent, currentObjs[i].GetName()))
+			nodesToAdd = append(nodesToAdd, model.SearchNode{
+				Parent: parent,
+				Name:   currentObjs[i].GetName(),
+				IsDir:  currentObjs[i].IsDir(),
+				Size:   currentObjs[i].GetSize(),
+			})
+		}
+	}
+
+	// Execute add
+	if len(nodesToAdd) > 0 {
+		log.Debugf("executing add for parent %s: %d nodes", parent, len(nodesToAdd))
+		taskUIDs, err := tqm.m.batchIndexWithTaskUID(ctx, nodesToAdd)
+		if err != nil {
+			log.Errorf("failed to batch index for parent %s: %v", parent, err)
+		} else {
+			allTaskUIDs = append(allTaskUIDs, taskUIDs...)
+		}
+	}
+
+	// Record all task UIDs for this parent
+	if len(allTaskUIDs) > 0 {
+		tqm.mu.Lock()
+		tqm.pendingTasks[parent] = allTaskUIDs
+		tqm.mu.Unlock()
+		log.Debugf("recorded %d taskUIDs for parent %s", len(allTaskUIDs), parent)
+	}
+}
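The queue's consumption loop combines a `time.Ticker` with an `atomic.Bool` CAS so a slow round is skipped rather than stacked behind the next tick. The skeleton of that loop, stripped of the Meilisearch specifics:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type consumer struct {
	consuming atomic.Bool
	stopCh    chan struct{}
}

func (c *consumer) start(interval time.Duration, work func()) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Skip the round instead of piling up goroutines.
				if !c.consuming.CompareAndSwap(false, true) {
					fmt.Println("previous round still running, skipping")
					continue
				}
				work()
				c.consuming.Store(false)
			case <-c.stopCh:
				return
			}
		}
	}()
}

func main() {
	c := &consumer{stopCh: make(chan struct{})}
	c.start(100*time.Millisecond, func() {
		time.Sleep(250 * time.Millisecond) // a slow round spans >2 ticks
	})
	time.Sleep(600 * time.Millisecond)
	close(c.stopCh)
}
```

Overwriting the map entry per parent in `Enqueue` additionally coalesces bursts of changes to one directory into a single diff at consumption time.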
@@ -1,7 +1,6 @@
 package meilisearch
 
 import (
-	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 )
 
@@ -13,16 +12,16 @@ func hashPath(path string) string {
 }
 
 func buildSearchDocumentFromResults(results map[string]any) *searchDocument {
-	searchNode := model.SearchNode{}
-	document := &searchDocument{
-		SearchNode: searchNode,
-	}
+	document := &searchDocument{}
 
 	// use assertion test to avoid panic
-	searchNode.Parent, _ = results["parent"].(string)
-	searchNode.Name, _ = results["name"].(string)
-	searchNode.IsDir, _ = results["is_dir"].(bool)
-	searchNode.Size, _ = results["size"].(int64)
+	document.SearchNode.Parent, _ = results["parent"].(string)
+	document.SearchNode.Name, _ = results["name"].(string)
+	document.SearchNode.IsDir, _ = results["is_dir"].(bool)
+	// JSON numbers are typically float64, not int64
+	if size, ok := results["size"].(float64); ok {
+		document.SearchNode.Size = int64(size)
+	}
+
 	document.ID, _ = results["id"].(string)
 	document.ParentHash, _ = results["parent_hash"].(string)
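The `size` fix above addresses the classic `encoding/json` pitfall: when decoding into `map[string]any`, every JSON number arrives as `float64`, so a direct `.(int64)` assertion always fails and previously left `Size` at zero. A demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var results map[string]any
	_ = json.Unmarshal([]byte(`{"name":"a.bin","size":1048576}`), &results)

	size, ok := results["size"].(int64)
	fmt.Println(size, ok) // 0 false — numbers decode as float64

	if f, ok := results["size"].(float64); ok {
		fmt.Println(int64(f), ok) // 1048576 true
	}
}
```

The old code also copied `searchNode` into the document *before* populating it, so the assignments mutated a local copy and were lost; writing through `document.SearchNode` fixes that as well.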
@@ -28,3 +28,11 @@ func GetInt(key string, defaultVal int) int {
 func GetBool(key string) bool {
 	return GetStr(key) == "true" || GetStr(key) == "1"
 }
+
+func GetFloat(key string, defaultVal float64) float64 {
+	f, err := strconv.ParseFloat(GetStr(key), 64)
+	if err != nil {
+		return defaultVal
+	}
+	return f
+}
@@ -7,7 +7,6 @@ import (
 
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
-	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"golang.org/x/time/rate"
 )
 
@@ -42,17 +41,14 @@ type RateLimitReader struct {
 }
 
 func (r *RateLimitReader) Read(p []byte) (n int, err error) {
-	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
-		return 0, r.Ctx.Err()
+	if err = r.Ctx.Err(); err != nil {
+		return 0, err
 	}
 	n, err = r.Reader.Read(p)
 	if err != nil {
 		return
 	}
 	if r.Limiter != nil {
-		if r.Ctx == nil {
-			r.Ctx = context.Background()
-		}
 		err = r.Limiter.WaitN(r.Ctx, n)
 	}
 	return
@@ -72,17 +68,14 @@ type RateLimitWriter struct {
 }
 
 func (w *RateLimitWriter) Write(p []byte) (n int, err error) {
-	if w.Ctx != nil && utils.IsCanceled(w.Ctx) {
-		return 0, w.Ctx.Err()
+	if err = w.Ctx.Err(); err != nil {
+		return 0, err
 	}
 	n, err = w.Writer.Write(p)
 	if err != nil {
 		return
 	}
 	if w.Limiter != nil {
-		if w.Ctx == nil {
-			w.Ctx = context.Background()
-		}
 		err = w.Limiter.WaitN(w.Ctx, n)
 	}
 	return
@@ -102,34 +95,28 @@ type RateLimitFile struct {
 }
 
 func (r *RateLimitFile) Read(p []byte) (n int, err error) {
-	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
-		return 0, r.Ctx.Err()
+	if err = r.Ctx.Err(); err != nil {
+		return 0, err
 	}
 	n, err = r.File.Read(p)
 	if err != nil {
 		return
 	}
 	if r.Limiter != nil {
-		if r.Ctx == nil {
-			r.Ctx = context.Background()
-		}
 		err = r.Limiter.WaitN(r.Ctx, n)
 	}
 	return
 }
 
 func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) {
-	if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
-		return 0, r.Ctx.Err()
+	if err = r.Ctx.Err(); err != nil {
+		return 0, err
	}
 	n, err = r.File.ReadAt(p, off)
 	if err != nil {
 		return
 	}
 	if r.Limiter != nil {
-		if r.Ctx == nil {
-			r.Ctx = context.Background()
-		}
 		err = r.Limiter.WaitN(r.Ctx, n)
 	}
 	return
@@ -145,16 +132,16 @@ func (r *RateLimitFile) Close() error {
 type RateLimitRangeReaderFunc RangeReaderFunc
 
 func (f RateLimitRangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+	if ServerDownloadLimit == nil {
+		return f(ctx, httpRange)
+	}
 	rc, err := f(ctx, httpRange)
 	if err != nil {
 		return nil, err
 	}
-	if ServerDownloadLimit != nil {
-		rc = &RateLimitReader{
-			Ctx:     ctx,
-			Reader:  rc,
-			Limiter: ServerDownloadLimit,
-		}
-	}
-	return rc, nil
+	return &RateLimitReader{
+		Ctx:     ctx,
+		Reader:  rc,
+		Limiter: ServerDownloadLimit,
+	}, nil
 }
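After this refactor `Ctx` is assumed non-nil (the nil-context fallbacks are gone), the done-check reuses `ctx.Err()` directly, and the unlimited path returns the inner reader untouched instead of allocating a wrapper. The wrapping idiom itself, reduced to essentials (a sketch, not the repo's type):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"

	"golang.org/x/time/rate"
)

type rateLimitReader struct {
	ctx     context.Context
	r       io.Reader
	limiter *rate.Limiter
}

func (r *rateLimitReader) Read(p []byte) (int, error) {
	// Fail fast once the context is done; callers guarantee ctx is non-nil.
	if err := r.ctx.Err(); err != nil {
		return 0, err
	}
	n, err := r.r.Read(p)
	if err != nil {
		return n, err
	}
	// Charge the limiter for the bytes actually read.
	return n, r.limiter.WaitN(r.ctx, n)
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 64))
	rl := &rateLimitReader{
		ctx: context.Background(),
		r:   src,
		// Burst must cover the largest single read, or WaitN errors out.
		limiter: rate.NewLimiter(rate.Limit(1024), 64),
	}
	b, _ := io.ReadAll(rl)
	fmt.Println(len(b)) // 64
}
```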
@@ -10,7 +10,6 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
|
"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||||
@@ -28,10 +27,8 @@ type FileStream struct {
|
|||||||
ForceStreamUpload bool
|
ForceStreamUpload bool
|
||||||
Exist model.Obj //the file existed in the destination, we can reuse some info since we wil overwrite it
|
Exist model.Obj //the file existed in the destination, we can reuse some info since we wil overwrite it
|
||||||
utils.Closers
|
utils.Closers
|
||||||
|
|
||||||
tmpFile model.File //if present, tmpFile has full content, it will be deleted at last
|
|
||||||
peekBuff *buffer.Reader
|
|
||||||
size int64
|
size int64
|
||||||
|
peekBuff *buffer.Reader
|
||||||
oriReader io.Reader // the original reader, used for caching
|
oriReader io.Reader // the original reader, used for caching
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -39,12 +36,6 @@ func (f *FileStream) GetSize() int64 {
|
|||||||
if f.size > 0 {
|
if f.size > 0 {
|
||||||
return f.size
|
return f.size
|
||||||
}
|
}
|
||||||
if file, ok := f.tmpFile.(*os.File); ok {
|
|
||||||
info, err := file.Stat()
|
|
||||||
if err == nil {
|
|
||||||
return info.Size()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return f.Obj.GetSize()
|
return f.Obj.GetSize()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,24 +54,10 @@ func (f *FileStream) IsForceStreamUpload() bool {
|
|||||||
func (f *FileStream) Close() error {
|
func (f *FileStream) Close() error {
|
||||||
if f.peekBuff != nil {
|
if f.peekBuff != nil {
|
||||||
f.peekBuff.Reset()
|
f.peekBuff.Reset()
|
||||||
|
f.oriReader = nil
|
||||||
f.peekBuff = nil
|
f.peekBuff = nil
|
||||||
}
|
}
|
||||||
|
return f.Closers.Close()
|
||||||
var err1, err2 error
|
|
||||||
err1 = f.Closers.Close()
|
|
||||||
if errors.Is(err1, os.ErrClosed) {
|
|
||||||
err1 = nil
|
|
||||||
}
|
|
||||||
if file, ok := f.tmpFile.(*os.File); ok {
|
|
||||||
err2 = os.RemoveAll(file.Name())
|
|
||||||
if err2 != nil {
|
|
||||||
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", file.Name())
|
|
||||||
} else {
|
|
||||||
f.tmpFile = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.Join(err1, err2)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FileStream) GetExist() model.Obj {
|
func (f *FileStream) GetExist() model.Obj {
|
||||||
@@ -94,11 +71,13 @@ func (f *FileStream) SetExist(obj model.Obj) {
|
|||||||
// It's not thread-safe!
|
// It's not thread-safe!
|
||||||
func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
|
func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
|
||||||
if cache := f.GetFile(); cache != nil {
|
if cache := f.GetFile(); cache != nil {
|
||||||
|
_, err := cache.Seek(0, io.SeekStart)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if writer == nil {
|
if writer == nil {
|
||||||
return cache, nil
|
return cache, nil
|
||||||
}
|
}
|
||||||
_, err := cache.Seek(0, io.SeekStart)
|
|
||||||
if err == nil {
|
|
||||||
reader := f.Reader
|
reader := f.Reader
|
||||||
if up != nil {
|
if up != nil {
|
||||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||||
@@ -115,7 +94,6 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
_, err = cache.Seek(0, io.SeekStart)
|
_, err = cache.Seek(0, io.SeekStart)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -123,21 +101,20 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
|
|||||||
}
|
}
|
||||||
|
|
||||||
reader := f.Reader
|
reader := f.Reader
|
||||||
if up != nil {
|
if f.peekBuff != nil {
|
||||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
f.peekBuff.Seek(0, io.SeekStart)
|
||||||
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
if writer != nil {
|
||||||
reader = &ReaderUpdatingProgress{
|
_, err := utils.CopyWithBuffer(writer, f.peekBuff)
|
||||||
Reader: &SimpleReaderWithSize{
|
if err != nil {
|
||||||
Reader: reader,
|
return nil, err
|
||||||
Size: f.GetSize(),
|
|
||||||
},
|
|
||||||
UpdateProgress: cacheProgress,
|
|
||||||
}
|
}
|
||||||
|
f.peekBuff.Seek(0, io.SeekStart)
|
||||||
|
}
|
||||||
|
reader = f.oriReader
|
||||||
}
|
}
|
||||||
if writer != nil {
|
if writer != nil {
|
||||||
reader = io.TeeReader(reader, writer)
|
reader = io.TeeReader(reader, writer)
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.GetSize() < 0 {
|
if f.GetSize() < 0 {
|
||||||
if f.peekBuff == nil {
|
if f.peekBuff == nil {
|
||||||
f.peekBuff = &buffer.Reader{}
|
f.peekBuff = &buffer.Reader{}
|
||||||
@@ -174,7 +151,6 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tmpF, err := utils.CreateTempFile(reader, 0)
|
tmpF, err := utils.CreateTempFile(reader, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -191,22 +167,42 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
 		return peekF, nil
 	}

+	if up != nil {
+		cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
+		*up = model.UpdateProgressWithRange(*up, 50, 100)
+		size := f.GetSize()
+		if f.peekBuff != nil {
+			peekSize := f.peekBuff.Size()
+			cacheProgress(float64(peekSize) / float64(size) * 100)
+			size -= peekSize
+		}
+		reader = &ReaderUpdatingProgress{
+			Reader: &SimpleReaderWithSize{
+				Reader: reader,
+				Size:   size,
+			},
+			UpdateProgress: cacheProgress,
+		}
+	}
+
+	if f.peekBuff != nil {
+		f.oriReader = reader
+	} else {
 	f.Reader = reader
+	}
 	return f.cache(f.GetSize())
 }

 func (f *FileStream) GetFile() model.File {
-	if f.tmpFile != nil {
-		return f.tmpFile
-	}
 	if file, ok := f.Reader.(model.File); ok {
 		return file
 	}
 	return nil
 }

-// RangeRead have to cache all data first since only Reader is provided.
-// It's not thread-safe!
+// Reads a chunk of the given range from the stream without consuming it.
+// Once the requested range goes past the internal threshold, the whole stream is cached.
+// Not fully thread-safe while the stream is uncached.
 func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 	if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
 		httpRange.Length = f.GetSize() - httpRange.Start
@@ -215,12 +211,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 		return io.NewSectionReader(f.GetFile(), httpRange.Start, httpRange.Length), nil
 	}

-	size := httpRange.Start + httpRange.Length
-	if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) {
-		return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
-	}
-
-	cache, err := f.cache(size)
+	cache, err := f.cache(httpRange.Start + httpRange.Length)
 	if err != nil {
 		return nil, err
 	}
@@ -232,14 +223,30 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 // Using bytes.Buffer as the write target of io.CopyBuffer makes CopyBuffer call Buffer.ReadFrom,
 // and the Buffer grows even when the amount written exactly matches Buffer.Cap.

+// Ensures that at least the given amount of data is cached.
 func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
 	if maxCacheSize > int64(conf.MaxBufferLimit) {
-		tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
+		size := f.GetSize()
+		reader := f.Reader
+		if f.peekBuff != nil {
+			size -= f.peekBuff.Size()
+			reader = f.oriReader
+		}
+		tmpF, err := utils.CreateTempFile(reader, size)
 		if err != nil {
 			return nil, err
 		}
-		f.Add(tmpF)
-		f.tmpFile = tmpF
+		f.Add(utils.CloseFunc(func() error {
+			return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name()))
+		}))
+		if f.peekBuff != nil {
+			peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF)
+			if err != nil {
+				return nil, err
+			}
+			f.Reader = peekF
+			return peekF, nil
+		}
 		f.Reader = tmpF
 		return tmpF, nil
 	}
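
Note: the peek-buffer scheme in the hunks above keeps an in-memory prefix (f.peekBuff) and chains the untouched remainder (f.oriReader) behind it, so early range reads never consume the stream. A minimal self-contained sketch of the same idea, with illustrative names only, not the OpenList types:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// peekStream keeps an in-memory prefix of a stream so the prefix can be
// re-read while the rest of the stream is consumed only once.
type peekStream struct {
	prefix *bytes.Buffer // bytes already pulled off the stream
	rest   io.Reader     // the untouched remainder
}

// peek ensures at least n bytes are buffered (or the stream is exhausted).
// Sketch only: call peek before reader() is consumed.
func (p *peekStream) peek(n int) ([]byte, error) {
	if missing := n - p.prefix.Len(); missing > 0 {
		if _, err := io.CopyN(p.prefix, p.rest, int64(missing)); err != nil && err != io.EOF {
			return nil, err
		}
	}
	b := p.prefix.Bytes()
	if n > len(b) {
		n = len(b)
	}
	return b[:n], nil
}

// reader replays the buffered prefix, then continues with the remainder.
func (p *peekStream) reader() io.Reader {
	return io.MultiReader(bytes.NewReader(p.prefix.Bytes()), p.rest)
}

func main() {
	p := &peekStream{prefix: &bytes.Buffer{}, rest: strings.NewReader("hello world")}
	head, _ := p.peek(5)
	fmt.Printf("peeked: %q\n", head) // peeked: "hello"
	all, _ := io.ReadAll(p.reader()) // the full stream is still intact
	fmt.Printf("full:   %q\n", all)  // full:   "hello world"
}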
@@ -247,8 +254,12 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
 	if f.peekBuff == nil {
 		f.peekBuff = &buffer.Reader{}
 		f.oriReader = f.Reader
+		f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
+	}
+	bufSize := maxCacheSize - f.peekBuff.Size()
+	if bufSize <= 0 {
+		return f.peekBuff, nil
 	}
-	bufSize := maxCacheSize - int64(f.peekBuff.Size())
 	var buf []byte
 	if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
 		m, err := mmap.Alloc(int(bufSize))
@@ -267,37 +278,24 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
 		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
 	}
 	f.peekBuff.Append(buf)
-	if int64(f.peekBuff.Size()) >= f.GetSize() {
+	if f.peekBuff.Size() >= f.GetSize() {
 		f.Reader = f.peekBuff
-		f.oriReader = nil
-	} else {
-		f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
 	}
 	return f.peekBuff, nil
 }

-func (f *FileStream) SetTmpFile(file model.File) {
-	f.AddIfCloser(file)
-	f.tmpFile = file
-	f.Reader = file
-}
-
 var _ model.FileStreamer = (*SeekableStream)(nil)
 var _ model.FileStreamer = (*FileStream)(nil)

-//var _ seekableStream = (*FileStream)(nil)
-
-// for most internal stream, which is either RangeReadCloser or MFile
-// Any functionality implemented based on SeekableStream should implement a Close method,
-// whose only purpose is to close the SeekableStream object. If such functionality has
-// additional resources that need to be closed, they should be added to the Closer property of
-// the SeekableStream object and be closed together when the SeekableStream object is closed.
 type SeekableStream struct {
 	*FileStream
 	// should have one of belows to support rangeRead
-	rangeReadCloser model.RangeReadCloserIF
+	rangeReader model.RangeReaderIF
 }

+// NewSeekableStream create a SeekableStream from FileStream and Link
+// if FileStream.Reader is not nil, use it directly
+// else create RangeReader from Link
 func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
 	if len(fs.Mimetype) == 0 {
 		fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
@@ -317,30 +315,31 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
 		if err != nil {
 			return nil, err
 		}
-		rrc := &model.RangeReadCloser{
-			RangeReader: rr,
-		}
 		if _, ok := rr.(*model.FileRangeReader); ok {
-			fs.Reader, err = rrc.RangeRead(fs.Ctx, http_range.Range{Length: -1})
+			var rc io.ReadCloser
+			rc, err = rr.RangeRead(fs.Ctx, http_range.Range{Length: -1})
 			if err != nil {
 				return nil, err
 			}
+			fs.Reader = rc
+			fs.Add(rc)
 		}
 		fs.size = size
 		fs.Add(link)
-		fs.Add(rrc)
-		return &SeekableStream{FileStream: fs, rangeReadCloser: rrc}, nil
+		return &SeekableStream{FileStream: fs, rangeReader: rr}, nil
 	}
 	return nil, fmt.Errorf("illegal seekableStream")
 }

-// RangeRead is not thread-safe, pls use it in single thread only.
+// Thread-safe when reading a range through the cache or the rangeReader.
+// All other behavior is inherited from FileStream.RangeRead.
 func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
-	if ss.GetFile() == nil && ss.rangeReadCloser != nil {
-		rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
+	if ss.GetFile() == nil && ss.rangeReader != nil {
+		rc, err := ss.rangeReader.RangeRead(ss.Ctx, httpRange)
 		if err != nil {
 			return nil, err
 		}
+		ss.Add(rc)
 		return rc, nil
 	}
 	return ss.FileStream.RangeRead(httpRange)
@@ -356,13 +355,14 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {

 func (ss *SeekableStream) generateReader() error {
 	if ss.Reader == nil {
-		if ss.rangeReadCloser == nil {
+		if ss.rangeReader == nil {
 			return fmt.Errorf("illegal seekableStream")
 		}
-		rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
+		rc, err := ss.rangeReader.RangeRead(ss.Ctx, http_range.Range{Length: -1})
 		if err != nil {
 			return err
 		}
+		ss.Add(rc)
 		ss.Reader = rc
 	}
 	return nil
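
Note: every ranged body handed out by RangeRead and generateReader is now registered via ss.Add(rc), so closing the SeekableStream also tears down the readers it spawned. A bare-bones version of that aggregate-closer idea (hypothetical type, not the project's Closers implementation):

package main

import (
	"errors"
	"fmt"
	"io"
)

// closers collects io.Closers so a single Close tears everything down.
type closers struct{ list []io.Closer }

func (c *closers) Add(x io.Closer) { c.list = append(c.list, x) }

func (c *closers) Close() error {
	var all []error
	for _, x := range c.list {
		all = append(all, x.Close())
	}
	return errors.Join(all...) // nil entries are ignored
}

type noisy string

func (n noisy) Close() error { fmt.Println("closed", string(n)); return nil }

func main() {
	var c closers
	c.Add(noisy("range reader 1"))
	c.Add(noisy("range reader 2"))
	_ = c.Close() // closes both, in order
}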
@@ -456,7 +456,7 @@ func (r *headCache) Close() error {
 }

 func (r *RangeReadReadAtSeeker) InitHeadCache() {
-	if r.ss.GetFile() == nil && r.masterOff == 0 {
+	if r.masterOff == 0 {
 		value, _ := r.readerMap.LoadAndDelete(int64(0))
 		r.headCache = &headCache{reader: value.(io.Reader)}
 		r.ss.Closers.Add(r.headCache)
@@ -464,12 +464,12 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() {
 }

 func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (model.File, error) {
-	if ss.GetFile() != nil {
-		_, err := ss.GetFile().Seek(offset, io.SeekStart)
+	if cache := ss.GetFile(); cache != nil {
+		_, err := cache.Seek(offset, io.SeekStart)
 		if err != nil {
 			return nil, err
 		}
-		return ss.GetFile(), nil
+		return cache, nil
 	}
 	r := &RangeReadReadAtSeeker{
 		ss: ss,
@@ -479,10 +479,11 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (mode
 		if offset < 0 || offset > ss.GetSize() {
 			return nil, errors.New("offset out of range")
 		}
-		_, err := r.getReaderAtOffset(offset)
+		reader, err := r.getReaderAtOffset(offset)
 		if err != nil {
 			return nil, err
 		}
+		r.readerMap.Store(int64(offset), reader)
 	} else {
 		r.readerMap.Store(int64(offset), ss)
 	}
@@ -502,39 +503,41 @@ func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
 }

 func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (io.Reader, error) {
-	var rr io.Reader
+	for {
 		var cur int64 = -1
 		r.readerMap.Range(func(key, value any) bool {
 			k := key.(int64)
 			if off == k {
 				cur = k
-				rr = value.(io.Reader)
 				return false
 			}
-			if off > k && off-k <= 4*utils.MB && (rr == nil || k < cur) {
-				rr = value.(io.Reader)
+			if off > k && off-k <= 4*utils.MB && k > cur {
 				cur = k
 			}
 			return true
 		})
-	if cur >= 0 {
-		r.readerMap.Delete(int64(cur))
-	}
+		if cur < 0 {
+			break
+		}
+		v, ok := r.readerMap.LoadAndDelete(int64(cur))
+		if !ok {
+			continue
+		}
+		rr := v.(io.Reader)
 		if off == int64(cur) {
 			// logrus.Debugf("getReaderAtOffset match_%d", off)
 			return rr, nil
 		}

-	if rr != nil {
 		n, _ := utils.CopyWithBufferN(io.Discard, rr, off-cur)
 		cur += n
 		if cur == off {
 			// logrus.Debugf("getReaderAtOffset old_%d", off)
 			return rr, nil
 		}
+		break
 	}
-	// logrus.Debugf("getReaderAtOffset new_%d", off)

+	// logrus.Debugf("getReaderAtOffset new_%d", off)
 	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1})
 	if err != nil {
 		return nil, err
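
Note: the rewritten getReaderAtOffset loops over cached readers and, when the requested offset is no more than 4 MiB ahead of one, advances that reader by discarding the gap instead of opening a new ranged request. The reuse rule in isolation, as a hypothetical helper:

package stream

import "io"

// advanceTo tries to reuse rd, currently positioned at cur, for a read
// at off by discarding the gap. It refuses when off is behind cur or the
// gap exceeds maxSkip (the 4 MiB threshold above); the caller then opens
// a fresh ranged reader instead.
func advanceTo(rd io.Reader, cur, off, maxSkip int64) (io.Reader, bool, error) {
	if off < cur || off-cur > maxSkip {
		return nil, false, nil
	}
	if _, err := io.CopyN(io.Discard, rd, off-cur); err != nil {
		return nil, false, err
	}
	return rd, true, nil
}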
@@ -7,13 +7,12 @@ import (
 	"io"
 	"testing"

-	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
+	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 )

 func TestFileStream_RangeRead(t *testing.T) {
-	conf.MaxBufferLimit = 16 * 1024 * 1024
 	type args struct {
 		httpRange http_range.Range
 	}
@@ -73,7 +72,6 @@ func TestFileStream_RangeRead(t *testing.T) {
 			}
 		})
 	}
-	t.Run("after", func(t *testing.T) {
 	if f.GetFile() == nil {
 		t.Error("not cached")
 	}
@@ -84,5 +82,28 @@ func TestFileStream_RangeRead(t *testing.T) {
 	if !bytes.Equal(buf, buf2) {
 		t.Errorf("FileStream.Read() = %s, want %s", buf2, buf)
 	}
-	})
+}
+
+func TestFileStream_With_PreHash(t *testing.T) {
+	buf := []byte("github.com/OpenListTeam/OpenList")
+	f := &FileStream{
+		Obj: &model.Object{
+			Size: int64(len(buf)),
+		},
+		Reader: io.NopCloser(bytes.NewReader(buf)),
+	}
+
+	const hashSize int64 = 20
+	reader, _ := f.RangeRead(http_range.Range{Start: 0, Length: hashSize})
+	preHash, _ := utils.HashReader(utils.SHA1, reader)
+	if preHash == "" {
+		t.Error("preHash is empty")
+	}
+	tmpF, fullHash, _ := CacheFullAndHash(f, nil, utils.SHA1)
+	fmt.Println(fullHash)
+	fileFullHash, _ := utils.HashFile(utils.SHA1, tmpF)
+	fmt.Println(fileFullHash)
+	if fullHash != fileFullHash {
+		t.Errorf("fullHash and fileFullHash should match: fullHash=%s fileFullHash=%s", fullHash, fileFullHash)
+	}
 }
@@ -28,44 +28,61 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran
 }

 func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
-	if link.Concurrency > 0 || link.PartSize > 0 {
+	if link.RangeReader != nil {
+		if link.Concurrency < 1 && link.PartSize < 1 {
+			return link.RangeReader, nil
+		}
 		down := net.NewDownloader(func(d *net.Downloader) {
 			d.Concurrency = link.Concurrency
 			d.PartSize = link.PartSize
+			d.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
 		})
-		var rangeReader RangeReaderFunc = func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
-			var req *net.HttpRequestParams
-			if link.RangeReader != nil {
-				req = &net.HttpRequestParams{
-					Range: httpRange,
-					Size:  size,
-				}
-			} else {
-				requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
-				header := net.ProcessHeader(requestHeader, link.Header)
-				req = &net.HttpRequestParams{
-					Range:     httpRange,
-					Size:      size,
-					URL:       link.URL,
-					HeaderRef: header,
-				}
-			}
-			return down.Download(ctx, req)
-		}
-		if link.RangeReader != nil {
-			down.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
-			return rangeReader, nil
-		}
-		return RateLimitRangeReaderFunc(rangeReader), nil
-	}
-
-	if link.RangeReader != nil {
-		return link.RangeReader, nil
+		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+			return down.Download(ctx, &net.HttpRequestParams{
+				Range: httpRange,
+				Size:  size,
+			})
+		}
+		// A RangeReader can only be rate-limited inside the driver.
+		return RangeReaderFunc(rangeReader), nil
 	}

 	if len(link.URL) == 0 {
 		return nil, errors.New("invalid link: must have at least one of URL or RangeReader")
 	}

+	if link.Concurrency > 0 || link.PartSize > 0 {
+		down := net.NewDownloader(func(d *net.Downloader) {
+			d.Concurrency = link.Concurrency
+			d.PartSize = link.PartSize
+			d.HttpClient = func(ctx context.Context, params *net.HttpRequestParams) (*http.Response, error) {
+				if ServerDownloadLimit == nil {
+					return net.DefaultHttpRequestFunc(ctx, params)
+				}
+				resp, err := net.DefaultHttpRequestFunc(ctx, params)
+				if err == nil && resp.Body != nil {
+					resp.Body = &RateLimitReader{
+						Ctx:     ctx,
+						Reader:  resp.Body,
+						Limiter: ServerDownloadLimit,
+					}
+				}
+				return resp, err
+			}
+		})
+		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+			requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
+			header := net.ProcessHeader(requestHeader, link.Header)
+			return down.Download(ctx, &net.HttpRequestParams{
+				Range:     httpRange,
+				Size:      size,
+				URL:       link.URL,
+				HeaderRef: header,
+			})
+		}
+		return RangeReaderFunc(rangeReader), nil
+	}
+
 	rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 		if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
 			httpRange.Length = size - httpRange.Start
@@ -81,7 +98,15 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
 			}
 			return nil, fmt.Errorf("http request failure, err:%w", err)
 		}
-		if httpRange.Start == 0 && (httpRange.Length == -1 || httpRange.Length == size) || response.StatusCode == http.StatusPartialContent ||
+		if ServerDownloadLimit != nil {
+			response.Body = &RateLimitReader{
+				Ctx:     ctx,
+				Reader:  response.Body,
+				Limiter: ServerDownloadLimit,
+			}
+		}
+		if httpRange.Start == 0 && httpRange.Length == size ||
+			response.StatusCode == http.StatusPartialContent ||
 			checkContentRange(&response.Header, httpRange.Start) {
 			return response.Body, nil
 		} else if response.StatusCode == http.StatusOK {
@@ -94,11 +119,10 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
 		}
 		return response.Body, nil
 	}
-	return RateLimitRangeReaderFunc(rangeReader), nil
+	return RangeReaderFunc(rangeReader), nil
 }

-func GetRangeReaderFromMFile(size int64, file model.File) model.RangeReaderIF {
+// The io.ReadCloser returned by RangeReaderIF.RangeRead keeps the file's signature.
+func GetRangeReaderFromMFile(size int64, file model.File) *model.FileRangeReader {
 	return &model.FileRangeReader{
 		RangeReaderIF: RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 			length := httpRange.Length
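
Note: with RateLimitRangeReaderFunc gone, both download paths wrap the HTTP response body directly in a RateLimitReader whenever ServerDownloadLimit is set, so throttling happens per byte read regardless of which branch produced the body. A generic sketch of that wrapper pattern, assuming a golang.org/x/time/rate limiter (field names here are illustrative, not the project's RateLimitReader):

package stream

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// limitedReader charges every successful read against a shared limiter,
// blocking until tokens for n bytes are available. The limiter's burst
// must be at least the largest single read (e.g. the copy buffer size).
type limitedReader struct {
	ctx     context.Context
	r       io.Reader
	limiter *rate.Limiter
}

func (l *limitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		if werr := l.limiter.WaitN(l.ctx, n); werr != nil {
			return n, werr
		}
	}
	return n, err
}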
@@ -5,11 +5,14 @@ import (
 	"fmt"
 	"path"

+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/driver"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"github.com/OpenListTeam/OpenList/v4/internal/setting"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
+	"golang.org/x/time/rate"
 )

 type SrcPathToRemove string
@@ -27,13 +30,32 @@ func RefreshAndRemove(dstPath string, payloads ...any) {
 	if dstNeedRefresh {
 		op.Cache.DeleteDirectory(dstStorage, dstActualPath)
 	}
+	dstNeedHandleHook := setting.GetBool(conf.HandleHookAfterWriting)
+	dstHandleHookLimit := setting.GetFloat(conf.HandleHookRateLimit, .0)
+	var listLimiter *rate.Limiter
+	if dstNeedRefresh && dstNeedHandleHook && dstHandleHookLimit > .0 {
+		listLimiter = rate.NewLimiter(rate.Limit(dstHandleHookLimit), 1)
+	}
 	var ctx context.Context
 	for _, payload := range payloads {
 		switch p := payload.(type) {
 		case DstPathToRefresh:
 			if dstNeedRefresh {
+				if dstNeedHandleHook {
+					if ctx == nil {
+						ctx = context.Background()
+					}
+					if listLimiter != nil {
+						_ = listLimiter.Wait(ctx)
+					}
+					_, e := op.List(ctx, dstStorage, string(p), model.ListArgs{Refresh: true})
+					if e != nil {
+						log.Errorf("failed handle objs update hook: %v", e)
+					}
+				} else {
 					op.Cache.DeleteDirectory(dstStorage, string(p))
 				}
+			}
 		case SrcPathToRemove:
 			if ctx == nil {
 				ctx = context.Background()
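
Note: the hook path throttles the refresh-triggered op.List calls with a token bucket built from a float setting; rate.Limit(0.5) with burst 1 means at most one listing every two seconds. A runnable illustration of that behavior:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 0.5 events/second with burst 1: one call immediately,
	// then one call every two seconds.
	lim := rate.NewLimiter(rate.Limit(0.5), 1)
	ctx := context.Background()
	start := time.Now()
	for i := 0; i < 3; i++ {
		_ = lim.Wait(ctx) // blocks until a token is available
		fmt.Printf("call %d at %v\n", i, time.Since(start).Round(time.Second))
	}
	// prints roughly: call 0 at 0s, call 1 at 2s, call 2 at 4s
}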
@@ -69,15 +69,12 @@ func (h *httpCaller) setNotifier(ctx context.Context, u url.URL, notifier Notifi
 	go func() {
 		defer h.wg.Done()
 		defer conn.Close()
-		select {
-		case <-ctx.Done():
+		<-ctx.Done()
 		conn.SetWriteDeadline(time.Now().Add(time.Second))
 		if err := conn.WriteMessage(websocket.CloseMessage,
 			websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil {
 			log.Printf("sending websocket close message: %v", err)
 		}
-			return
-		}
 	}()
 	h.wg.Add(1)
 	go func() {
@@ -120,7 +117,7 @@ func (h *httpCaller) setNotifier(ctx context.Context, u url.URL, notifier Notifi
 		return
 	}

-func (h httpCaller) Call(method string, params, reply interface{}) (err error) {
+func (h *httpCaller) Call(method string, params, reply interface{}) (err error) {
 	payload, err := EncodeClientRequest(method, params)
 	if err != nil {
 		return
@@ -236,7 +233,7 @@ func (w *websocketCaller) Close() (err error) {
 	return
 }

-func (w websocketCaller) Call(method string, params, reply interface{}) (err error) {
+func (w *websocketCaller) Call(method string, params, reply interface{}) (err error) {
 	ctx, cancel := context.WithTimeout(context.Background(), w.timeout)
 	defer cancel()
 	select {
@@ -251,12 +248,10 @@ func (w websocketCaller) Call(method string, params, reply interface{}) (err err
 		return errors.New("sending channel blocking")
 	}

-	select {
-	case <-ctx.Done():
+	<-ctx.Done()
 	if err := ctx.Err(); err == context.DeadlineExceeded {
 		return err
 	}
-	}
 	return
 }
|||||||
@@ -185,3 +185,20 @@ const (
|
|||||||
GB
|
GB
|
||||||
TB
|
TB
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// IsSystemFile checks if a filename is a common system file that should be ignored
|
||||||
|
// Returns true for files like .DS_Store, desktop.ini, Thumbs.db, and Apple Double files (._*)
|
||||||
|
func IsSystemFile(filename string) bool {
|
||||||
|
// Common system files
|
||||||
|
switch filename {
|
||||||
|
case ".DS_Store", "desktop.ini", "Thumbs.db":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apple Double files (._*)
|
||||||
|
if strings.HasPrefix(filename, "._") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|||||||
42
pkg/utils/file_test.go
Normal file
42
pkg/utils/file_test.go
Normal file
@@ -0,0 +1,42 @@
+package utils
+
+import (
+	"testing"
+)
+
+func TestIsSystemFile(t *testing.T) {
+	testCases := []struct {
+		filename string
+		expected bool
+	}{
+		// System files that should be filtered
+		{".DS_Store", true},
+		{"desktop.ini", true},
+		{"Thumbs.db", true},
+		{"._test.txt", true},
+		{"._", true},
+		{"._somefile", true},
+		{"._folder_name", true},
+
+		// Regular files that should not be filtered
+		{"test.txt", false},
+		{"file.pdf", false},
+		{"document.docx", false},
+		{".gitignore", false},
+		{".env", false},
+		{"_underscore.txt", false},
+		{"normal_file.txt", false},
+		{"", false},
+		{".hidden", false},
+		{"..special", false},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.filename, func(t *testing.T) {
+			result := IsSystemFile(tc.filename)
+			if result != tc.expected {
+				t.Errorf("IsSystemFile(%q) = %v, want %v", tc.filename, result, tc.expected)
+			}
+		})
+	}
+}
@@ -44,11 +44,15 @@ func IsSubPath(path string, subPath string) bool {
 }

 func Ext(path string) string {
+	return strings.ToLower(SourceExt(path))
+}
+
+func SourceExt(path string) string {
 	ext := stdpath.Ext(path)
 	if len(ext) > 0 && ext[0] == '.' {
 		ext = ext[1:]
 	}
-	return strings.ToLower(ext)
+	return ext
 }

 func EncodePath(path string, all ...bool) string {
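
Note: splitting the helper lets Ext keep its historical lowercasing while the new SourceExt preserves the extension's original case. Standalone copies of the two helpers, for illustration:

package main

import (
	"fmt"
	stdpath "path"
	"strings"
)

// sourceExt returns the extension with its original case, dot stripped.
func sourceExt(p string) string {
	ext := stdpath.Ext(p)
	if len(ext) > 0 && ext[0] == '.' {
		ext = ext[1:]
	}
	return ext
}

// ext is the lowercased form most lookups want.
func ext(p string) string { return strings.ToLower(sourceExt(p)) }

func main() {
	fmt.Println(sourceExt("photo.JPG")) // JPG
	fmt.Println(ext("photo.JPG"))       // jpg
}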
@@ -104,7 +104,7 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve
 		return nil, err
 	}
 	if (flags & os.O_EXCL) != 0 {
-		return nil, errors.New("file already exists")
+		return nil, errs.ObjectAlreadyExists
 	}
 	if (flags & os.O_WRONLY) != 0 {
 		return nil, errors.New("cannot write to uploading file")

@@ -122,7 +122,7 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve
 		return nil, errs.ObjectNotFound
 	}
 	if (flags&os.O_EXCL) != 0 && exists {
-		return nil, errors.New("file already exists")
+		return nil, errs.ObjectAlreadyExists
 	}
 	if (flags & os.O_WRONLY) != 0 {
 		if offset != 0 {
@@ -15,7 +15,9 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/fs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"github.com/OpenListTeam/OpenList/v4/internal/setting"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
+	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 	"github.com/OpenListTeam/OpenList/v4/server/common"
 	ftpserver "github.com/fclairamb/ftpserverlib"
 	"github.com/pkg/errors"

@@ -49,6 +51,11 @@ func OpenUpload(ctx context.Context, path string, trunc bool) (*FileUploadProxy,
 	if err != nil {
 		return nil, err
 	}
+	// Check if system file should be ignored
+	_, name := stdpath.Split(path)
+	if setting.GetBool(conf.IgnoreSystemFiles) && utils.IsSystemFile(name) {
+		return nil, errs.IgnoredSystemFile
+	}
 	tmpFile, err := os.CreateTemp(conf.Conf.TempDir, "file-*")
 	if err != nil {
 		return nil, err

@@ -150,6 +157,11 @@ func OpenUploadWithLength(ctx context.Context, path string, trunc bool, length i
 	if err != nil {
 		return nil, err
 	}
+	// Check if system file should be ignored
+	_, name := stdpath.Split(path)
+	if setting.GetBool(conf.IgnoreSystemFiles) && utils.IsSystemFile(name) {
+		return nil, errs.IgnoredSystemFile
+	}
 	if trunc {
 		_ = fs.Remove(ctx, path)
 	}
@@ -1,7 +1,6 @@
 package handles

 import (
-	"encoding/json"
 	"fmt"
 	"io"
 	stdpath "path"

@@ -229,30 +228,15 @@ func FsArchiveList(c *gin.Context, req *ArchiveListReq, user *model.User) {
 	})
 }

-type StringOrArray []string
-
-func (s *StringOrArray) UnmarshalJSON(data []byte) error {
-	var value string
-	if err := json.Unmarshal(data, &value); err == nil {
-		*s = []string{value}
-		return nil
-	}
-	var sliceValue []string
-	if err := json.Unmarshal(data, &sliceValue); err != nil {
-		return err
-	}
-	*s = sliceValue
-	return nil
-}
-
 type ArchiveDecompressReq struct {
 	SrcDir        string        `json:"src_dir" form:"src_dir"`
 	DstDir        string        `json:"dst_dir" form:"dst_dir"`
-	Name          StringOrArray `json:"name" form:"name"`
+	Name          []string      `json:"name" form:"name"`
 	ArchivePass   string        `json:"archive_pass" form:"archive_pass"`
 	InnerPath     string        `json:"inner_path" form:"inner_path"`
 	CacheFull     bool          `json:"cache_full" form:"cache_full"`
 	PutIntoNewDir bool          `json:"put_into_new_dir" form:"put_into_new_dir"`
+	Overwrite     bool          `json:"overwrite" form:"overwrite"`
 }

 func FsArchiveDecompress(c *gin.Context) {

@@ -295,6 +279,7 @@ func FsArchiveDecompress(c *gin.Context) {
 		},
 		CacheFull:     req.CacheFull,
 		PutIntoNewDir: req.PutIntoNewDir,
+		Overwrite:     req.Overwrite,
 	})
 	if e != nil {
 		if errors.Is(e, errs.WrongArchivePassword) {

server/handles/direct_upload.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+package handles
+
+import (
+	"net/url"
+
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
+	"github.com/OpenListTeam/OpenList/v4/internal/fs"
+	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/server/common"
+	"github.com/gin-gonic/gin"
+)
+
+type FsGetDirectUploadInfoReq struct {
+	Path     string `json:"path" form:"path"`
+	FileName string `json:"file_name" form:"file_name"`
+	FileSize int64  `json:"file_size" form:"file_size"`
+	Tool     string `json:"tool" form:"tool"`
+}
+
+// FsGetDirectUploadInfo returns the direct upload info if supported by the driver
+// If the driver does not support direct upload, returns null for upload_info
+func FsGetDirectUploadInfo(c *gin.Context) {
+	var req FsGetDirectUploadInfoReq
+	if err := c.ShouldBind(&req); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	// Decode path
+	path, err := url.PathUnescape(req.Path)
+	if err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	// Get user and join path
+	user := c.Request.Context().Value(conf.UserKey).(*model.User)
+	path, err = user.JoinPath(path)
+	if err != nil {
+		common.ErrorResp(c, err, 403)
+		return
+	}
+	overwrite := c.GetHeader("Overwrite") != "false"
+	if !overwrite {
+		if res, _ := fs.Get(c.Request.Context(), path, &fs.GetArgs{NoLog: true}); res != nil {
+			common.ErrorStrResp(c, "file exists", 403)
+			return
+		}
+	}
+	directUploadInfo, err := fs.GetDirectUploadInfo(c, req.Tool, path, req.FileName, req.FileSize)
+	if err != nil {
+		common.ErrorResp(c, err, 500)
+		return
+	}
+	common.SuccessResp(c, directUploadInfo)
+}
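
Note: a client is expected to call this endpoint before pushing bytes itself. A hedged example request: the /api/fs prefix, port, Authorization header, and tool name below follow common OpenList setups but are assumptions, not confirmed by this diff:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]any{
		"path":      "/mydrive/video.mp4",
		"file_name": "video.mp4",
		"file_size": 1 << 20,
		"tool":      "HttpDirect", // tool names are driver-specific; placeholder
	})
	req, _ := http.NewRequest(http.MethodPost,
		"http://localhost:5244/api/fs/get_direct_upload_info", bytes.NewReader(body))
	req.Header.Set("Authorization", "<token>") // assumption: standard OpenList auth header
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}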
@@ -61,6 +61,8 @@ type MoveCopyReq struct {
 	DstDir       string   `json:"dst_dir"`
 	Names        []string `json:"names"`
 	Overwrite    bool     `json:"overwrite"`
+	SkipExisting bool     `json:"skip_existing"`
+	Merge        bool     `json:"merge"`
 }

 func FsMove(c *gin.Context) {
@@ -89,20 +91,25 @@ func FsMove(c *gin.Context) {
 		return
 	}

+	var validNames []string
 	if !req.Overwrite {
 		for _, name := range req.Names {
-			if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil {
+			if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil && !req.SkipExisting {
 				common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
 				return
+			} else if res == nil {
+				validNames = append(validNames, name)
 			}
 		}
+	} else {
+		validNames = req.Names
 	}

 	// Create all tasks immediately without any synchronous validation
 	// All validation will be done asynchronously in the background
 	var addedTasks []task.TaskExtensionInfo
-	for i, name := range req.Names {
-		t, err := fs.Move(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
+	for i, name := range validNames {
+		t, err := fs.Move(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(validNames) > i+1)
 		if t != nil {
 			addedTasks = append(addedTasks, t)
 		}
@@ -151,20 +158,34 @@ func FsCopy(c *gin.Context) {
 		return
 	}

+	var validNames []string
 	if !req.Overwrite {
 		for _, name := range req.Names {
 			if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil {
+				if !req.SkipExisting && !req.Merge {
 					common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
 					return
+				} else if req.Merge && res.IsDir() {
+					validNames = append(validNames, name)
+				}
+			} else {
+				validNames = append(validNames, name)
 			}
 		}
+	} else {
+		validNames = req.Names
 	}

 	// Create all tasks immediately without any synchronous validation
 	// All validation will be done asynchronously in the background
 	var addedTasks []task.TaskExtensionInfo
-	for i, name := range req.Names {
-		t, err := fs.Copy(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
+	for i, name := range validNames {
+		var t task.TaskExtensionInfo
+		if req.Merge {
+			t, err = fs.Merge(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(validNames) > i+1)
+		} else {
+			t, err = fs.Copy(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(validNames) > i+1)
+		}
 		if t != nil {
 			addedTasks = append(addedTasks, t)
 		}
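
Note: FsCopy now resolves each requested name against three flags. Overwrite passes everything through; otherwise SkipExisting silently drops names that already exist, and Merge keeps existing directories so they can be merged rather than rejected. The selection condensed into one hypothetical helper:

package sketch

import "fmt"

// pickNames mirrors the branch above: which requested names survive the
// Overwrite/SkipExisting/Merge pre-check, and when the request fails
// outright because a destination already exists.
func pickNames(names []string, exists, isDir func(string) bool,
	overwrite, skipExisting, merge bool) ([]string, error) {
	if overwrite {
		return names, nil
	}
	var valid []string
	for _, n := range names {
		if !exists(n) {
			valid = append(valid, n)
			continue
		}
		if !skipExisting && !merge {
			return nil, fmt.Errorf("file [%s] exists", n)
		}
		if merge && isDir(n) {
			valid = append(valid, n) // existing directory: merge into it
		}
		// otherwise the existing entry is skipped
	}
	return valid, nil
}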
@@ -55,6 +55,7 @@ type FsListResp struct {
 	Header            string   `json:"header"`
 	Write             bool     `json:"write"`
 	Provider          string   `json:"provider"`
+	DirectUploadTools []string `json:"direct_upload_tools,omitempty"`
 }

 func FsListSplit(c *gin.Context) {

@@ -109,9 +110,11 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) {
 	}
 	total, objs := pagination(objs, &req.PageReq)
 	provider := "unknown"
-	storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
-	if err == nil {
-		provider = storage.GetStorage().Driver
+	var directUploadTools []string
+	if user.CanWrite() {
+		if storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}); err == nil {
+			directUploadTools = op.GetDirectUploadTools(storage)
+		}
 	}
 	common.SuccessResp(c, FsListResp{
 		Content: toObjsResp(objs, reqPath, isEncrypt(meta, reqPath)),

@@ -120,6 +123,7 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) {
 		Header:   getHeader(meta, reqPath),
 		Write:    user.CanWrite() || common.CanWrite(meta, reqPath),
 		Provider: provider,
+		DirectUploadTools: directUploadTools,
 	})
 }
@@ -8,8 +8,10 @@ import (
 	"time"

 	"github.com/OpenListTeam/OpenList/v4/internal/conf"
+	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/fs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
+	"github.com/OpenListTeam/OpenList/v4/internal/setting"
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/internal/task"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"

@@ -28,6 +30,14 @@ func getLastModified(c *gin.Context) time.Time {
 	return lastModified
 }

+// shouldIgnoreSystemFile checks if the filename should be ignored based on settings
+func shouldIgnoreSystemFile(filename string) bool {
+	if setting.GetBool(conf.IgnoreSystemFiles) {
+		return utils.IsSystemFile(filename)
+	}
+	return false
+}
+
 func FsStream(c *gin.Context) {
 	defer func() {
 		if n, _ := io.ReadFull(c.Request.Body, []byte{0}); n == 1 {

@@ -56,6 +66,11 @@ func FsStream(c *gin.Context) {
 		}
 	}
 	dir, name := stdpath.Split(path)
+	// Check if system file should be ignored
+	if shouldIgnoreSystemFile(name) {
+		common.ErrorStrResp(c, errs.IgnoredSystemFile.Error(), 403)
+		return
+	}
 	// If neither the Content-Length nor the X-File-Size header is present, size=-1 marks a streaming upload of unknown size
 	size := c.Request.ContentLength
 	if size < 0 {

@@ -160,6 +175,11 @@ func FsForm(c *gin.Context) {
 	}
 	defer f.Close()
 	dir, name := stdpath.Split(path)
+	// Check if system file should be ignored
+	if shouldIgnoreSystemFile(name) {
+		common.ErrorStrResp(c, errs.IgnoredSystemFile.Error(), 403)
+		return
+	}
 	h := make(map[*utils.HashType]string)
 	if md5 := c.GetHeader("X-File-Md5"); md5 != "" {
 		h[utils.MD5] = md5
server/handles/scan.go (new file, 47 lines)
@@ -0,0 +1,47 @@
+package handles
+
+import (
+	"github.com/OpenListTeam/OpenList/v4/internal/op"
+	"github.com/OpenListTeam/OpenList/v4/server/common"
+	"github.com/gin-gonic/gin"
+)
+
+type ManualScanReq struct {
+	Path  string  `json:"path"`
+	Limit float64 `json:"limit"`
+}
+
+func StartManualScan(c *gin.Context) {
+	var req ManualScanReq
+	if err := c.ShouldBind(&req); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	if err := op.BeginManualScan(req.Path, req.Limit); err != nil {
+		common.ErrorResp(c, err, 400)
+		return
+	}
+	common.SuccessResp(c)
+}
+
+func StopManualScan(c *gin.Context) {
+	if !op.ManualScanRunning() {
+		common.ErrorStrResp(c, "manual scan is not running", 400)
+		return
+	}
+	op.StopManualScan()
+	common.SuccessResp(c)
+}
+
+type ManualScanResp struct {
+	ObjCount uint64 `json:"obj_count"`
+	IsDone   bool   `json:"is_done"`
+}
+
+func GetManualScanProgress(c *gin.Context) {
+	ret := ManualScanResp{
+		ObjCount: op.ScannedCount.Load(),
+		IsDone:   !op.ManualScanRunning(),
+	}
+	common.SuccessResp(c, ret)
+}
@@ -408,7 +408,7 @@ func ListSharings(c *gin.Context) {
 	})
 }

-type CreateSharingReq struct {
+type UpdateSharingReq struct {
 	Files   []string   `json:"files"`
 	Expires *time.Time `json:"expires"`
 	Pwd     string     `json:"pwd"`

@@ -418,12 +418,9 @@ type CreateSharingReq struct {
 	Readme string `json:"readme"`
 	Header string `json:"header"`
 	model.Sort
-}
-
-type UpdateSharingReq struct {
-	ID       string `json:"id"`
+	CreatorName string `json:"creator"`
 	Accessed int    `json:"accessed"`
-	CreateSharingReq
+	ID       string `json:"id"`
 }

 func UpdateSharing(c *gin.Context) {
@@ -436,24 +433,38 @@ func UpdateSharing(c *gin.Context) {
 		common.ErrorStrResp(c, "must add at least 1 object", 400)
 		return
 	}
-	user := c.Request.Context().Value(conf.UserKey).(*model.User)
+	var user *model.User
+	var err error
+	reqUser := c.Request.Context().Value(conf.UserKey).(*model.User)
+	if reqUser.IsAdmin() && req.CreatorName != "" {
+		user, err = op.GetUserByName(req.CreatorName)
+		if err != nil {
+			common.ErrorStrResp(c, "no such a user", 400)
+			return
+		}
+	} else {
+		user = reqUser
 	if !user.CanShare() {
 		common.ErrorStrResp(c, "permission denied", 403)
 		return
 	}
+	}
 	for i, s := range req.Files {
 		s = utils.FixAndCleanPath(s)
 		req.Files[i] = s
-		if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
+		if !reqUser.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
 			common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
 			return
 		}
 	}
 	s, err := op.GetSharingById(req.ID)
-	if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
+	if err != nil || (!reqUser.IsAdmin() && s.CreatorId != user.ID) {
 		common.ErrorStrResp(c, "sharing not found", 404)
 		return
 	}
+	if reqUser.IsAdmin() && req.CreatorName == "" {
+		user = s.Creator
+	}
 	s.Files = req.Files
 	s.Expires = req.Expires
 	s.Pwd = req.Pwd

@@ -464,6 +475,7 @@ func UpdateSharing(c *gin.Context) {
 	s.Header = req.Header
 	s.Readme = req.Readme
 	s.Remark = req.Remark
+	s.Creator = user
 	if err = op.UpdateSharing(s); err != nil {
 		common.ErrorResp(c, err, 500)
 	} else {

@@ -476,7 +488,7 @@ func UpdateSharing(c *gin.Context) {
 }

 func CreateSharing(c *gin.Context) {
-	var req CreateSharingReq
+	var req UpdateSharingReq
 	var err error
 	if err = c.ShouldBind(&req); err != nil {
 		common.ErrorResp(c, err, 400)

@@ -486,24 +498,35 @@ func CreateSharing(c *gin.Context) {
 		common.ErrorStrResp(c, "must add at least 1 object", 400)
 		return
 	}
-	user := c.Request.Context().Value(conf.UserKey).(*model.User)
-	if !user.CanShare() {
+	var user *model.User
+	reqUser := c.Request.Context().Value(conf.UserKey).(*model.User)
+	if reqUser.IsAdmin() && req.CreatorName != "" {
+		user, err = op.GetUserByName(req.CreatorName)
+		if err != nil {
+			common.ErrorStrResp(c, "no such a user", 400)
+			return
+		}
+	} else {
+		user = reqUser
+		if !user.CanShare() || (!user.IsAdmin() && req.ID != "") {
 			common.ErrorStrResp(c, "permission denied", 403)
 			return
 		}
+	}
 	for i, s := range req.Files {
 		s = utils.FixAndCleanPath(s)
 		req.Files[i] = s
-		if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
+		if !reqUser.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
 			common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
 			return
 		}
 	}
 	s := &model.Sharing{
 		SharingDB: &model.SharingDB{
+			ID:          req.ID,
 			Expires:     req.Expires,
 			Pwd:         req.Pwd,
-			Accessed:    0,
+			Accessed:    req.Accessed,
 			MaxAccessed: req.MaxAccessed,
 			Disabled:    req.Disabled,
 			Sort:        req.Sort,
@@ -4,7 +4,6 @@ import (
 	"context"
 	"errors"
 	"strconv"
-	"sync"
 	"time"

 	"github.com/OpenListTeam/OpenList/v4/internal/conf"

@@ -24,9 +23,15 @@ type StorageResp struct {
 	MountDetails *model.StorageDetails `json:"mount_details,omitempty"`
 }

-func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp {
+type detailWithIndex struct {
+	idx int
+	val *model.StorageDetails
+}
+
+func makeStorageResp(ctx *gin.Context, storages []model.Storage) []*StorageResp {
 	ret := make([]*StorageResp, len(storages))
-	var wg sync.WaitGroup
+	detailsChan := make(chan detailWithIndex, len(storages))
+	workerCount := 0
 	for i, s := range storages {
 		ret[i] = &StorageResp{
 			Storage: s,

@@ -43,22 +48,26 @@ func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp {
 		if !ok {
 			continue
 		}
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			ctx, cancel := context.WithTimeout(c, time.Second*3)
-			defer cancel()
-			details, err := op.GetStorageDetails(ctx, d)
-			if err != nil {
-				if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
-					log.Errorf("failed get %s details: %+v", s.MountPath, err)
+		workerCount++
+		go func(dri driver.Driver, idx int) {
+			details, e := op.GetStorageDetails(ctx, dri)
+			if e != nil {
+				if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
+					log.Errorf("failed get %s details: %+v", dri.GetStorage().MountPath, e)
 				}
-				return
 			}
-			ret[i].MountDetails = details
-		}()
+			detailsChan <- detailWithIndex{idx: idx, val: details}
+		}(d, i)
+	}
+	for workerCount > 0 {
+		select {
+		case r := <-detailsChan:
+			ret[r.idx].MountDetails = r.val
+			workerCount--
+		case <-time.After(time.Second * 3):
+			workerCount = 0
+		}
 	}
-	wg.Wait()
 	return ret
 }

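
Note: replacing the WaitGroup with a buffered fan-in channel means the collector waits at most three seconds for each next result and then abandons the stragglers, whose sends still succeed because the channel is buffered to len(storages). The pattern in isolation, with illustrative names:

package main

import (
	"fmt"
	"time"
)

type result struct {
	idx int
	val string
}

func main() {
	jobs := []time.Duration{10 * time.Millisecond, 5 * time.Second}
	out := make(chan result, len(jobs)) // buffered: late workers never block
	for i, d := range jobs {
		go func(i int, d time.Duration) {
			time.Sleep(d) // stands in for op.GetStorageDetails
			out <- result{idx: i, val: d.String()}
		}(i, d)
	}
	collected := make([]string, len(jobs))
	pending := len(jobs)
	for pending > 0 {
		select {
		case r := <-out:
			collected[r.idx] = r.val
			pending--
		case <-time.After(3 * time.Second): // per-wait deadline: give up on stragglers
			pending = 0
		}
	}
	fmt.Println(collected) // the slow slot stays empty: [10ms ]
}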
@@ -11,7 +11,7 @@ import (
 func SearchIndex(c *gin.Context) {
 	mode := setting.GetStr(conf.SearchIndex)
 	if mode == "none" {
-		common.ErrorResp(c, errs.SearchNotAvailable, 500)
+		common.ErrorResp(c, errs.SearchNotAvailable, 404)
 		c.Abort()
 	} else {
 		c.Next()
@@ -179,6 +179,11 @@ func admin(g *gin.RouterGroup) {
 	index.POST("/stop", middlewares.SearchIndex, handles.StopIndex)
 	index.POST("/clear", middlewares.SearchIndex, handles.ClearIndex)
 	index.GET("/progress", middlewares.SearchIndex, handles.GetProgress)
+
+	scan := g.Group("/scan")
+	scan.POST("/start", handles.StartManualScan)
+	scan.POST("/stop", handles.StopManualScan)
+	scan.GET("/progress", handles.GetManualScanProgress)
 }

 func fsAndShare(g *gin.RouterGroup) {

@@ -211,6 +216,8 @@ func _fs(g *gin.RouterGroup) {
 	// g.POST("/add_transmission", handles.SetTransmission)
 	g.POST("/add_offline_download", handles.AddOfflineDownload)
 	g.POST("/archive/decompress", handles.FsArchiveDecompress)
+	// Direct upload (client-side upload to storage)
+	g.POST("/get_direct_upload_info", middlewares.FsUp, handles.FsGetDirectUploadInfo)
 }

 func _task(g *gin.RouterGroup) {
Some files were not shown because too many files have changed in this diff.