mirror of
https://github.com/OpenListTeam/OpenList.git
synced 2025-11-25 19:37:41 +08:00
Compare commits
13 Commits
mark-stale
...
plugin
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6417f71527 | ||
|
|
ae93fb0479 | ||
|
|
ce3f8e36c1 | ||
|
|
33f1fbc9fb | ||
|
|
fbc4d6d3f8 | ||
|
|
834248b9e4 | ||
|
|
9235c7dff1 | ||
|
|
7b377b1d54 | ||
|
|
d312db3db1 | ||
|
|
7e1358e686 | ||
|
|
62e381a764 | ||
|
|
bbc328d589 | ||
|
|
5780db293a |
16
.github/ISSUE_TEMPLATE/00-bug_report_zh.yml
vendored
16
.github/ISSUE_TEMPLATE/00-bug_report_zh.yml
vendored
@@ -13,7 +13,7 @@ body:
|
||||
attributes:
|
||||
label: 请确认以下事项
|
||||
description: |
|
||||
您必须确认、同意并勾选以下内容,否则您的问题一定会被直接关闭。
|
||||
您必须勾选以下内容,否则您的问题可能会被直接关闭。
|
||||
或者您可以去[讨论区](https://github.com/OpenListTeam/OpenList/discussions)。
|
||||
options:
|
||||
- label: |
|
||||
@@ -59,14 +59,6 @@ body:
|
||||
label: 问题描述(必填)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: 日志(必填)
|
||||
description: |
|
||||
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: config
|
||||
attributes:
|
||||
@@ -75,6 +67,12 @@ body:
|
||||
请提供您的`OpenList`应用的配置文件,并截图相关存储配置。(可隐藏隐私字段)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: 日志(可选)
|
||||
description: |
|
||||
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
attributes:
|
||||
|
||||
16
.github/ISSUE_TEMPLATE/01-bug_report_en.yml
vendored
16
.github/ISSUE_TEMPLATE/01-bug_report_en.yml
vendored
@@ -13,7 +13,7 @@ body:
|
||||
attributes:
|
||||
label: Please confirm the following
|
||||
description: |
|
||||
You must confirm, agree, and check all the following, otherwise your issue will definitely be closed directly.
|
||||
You must check all the following, otherwise your issue may be closed directly.
|
||||
Or you can go to the [discussions](https://github.com/OpenListTeam/OpenList/discussions).
|
||||
options:
|
||||
- label: |
|
||||
@@ -59,14 +59,6 @@ body:
|
||||
label: Bug Description (required)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: Logs (required)
|
||||
description: |
|
||||
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: config
|
||||
attributes:
|
||||
@@ -75,6 +67,12 @@ body:
|
||||
Please provide your `OpenList` application's configuration file and a screenshot of the relevant storage configuration. (You may mask sensitive fields)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: Logs (optional)
|
||||
description: |
|
||||
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
attributes:
|
||||
|
||||
27
.github/workflows/mark_stale_issues.yml
vendored
27
.github/workflows/mark_stale_issues.yml
vendored
@@ -1,27 +0,0 @@
|
||||
name: Mark stale issues
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0/2 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
mark-stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Mark issues stale using actions/stale
|
||||
uses: actions/stale@v10
|
||||
with:
|
||||
days-before-stale: 90
|
||||
days-before-close: 90
|
||||
days-before-pr-stale: -1
|
||||
days-before-pr-close: -1
|
||||
stale-issue-message: "This issue has been automatically marked as stale because it hasn't had recent activity for 90 days. If you'd like to keep it open, please reply and the label will be removed."
|
||||
stale-issue-label: "stale"
|
||||
close-issue-message: "This issue has been automatically closed due to inactivity for 90 days after being marked as stale. If you believe this was done in error, please feel free to reopen the issue or contact the maintainers."
|
||||
any-of-labels: "bug"
|
||||
exempt-labels: "WIP,has-parent,collection,Announcement"
|
||||
operations-per-run: 30
|
||||
@@ -17,6 +17,7 @@ func Init() {
|
||||
bootstrap.Log()
|
||||
bootstrap.InitDB()
|
||||
data.InitData()
|
||||
bootstrap.InitPlugins()
|
||||
bootstrap.InitStreamLimit()
|
||||
bootstrap.InitIndex()
|
||||
bootstrap.InitUpgradePatch()
|
||||
|
||||
@@ -41,9 +41,7 @@ func (d *Pan123) GetAddition() driver.Additional {
|
||||
}
|
||||
|
||||
func (d *Pan123) Init(ctx context.Context) error {
|
||||
_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
|
||||
req.SetHeader("platform", "web")
|
||||
}, nil)
|
||||
_, err := d.Request(UserInfo, http.MethodGet, nil, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -12,8 +12,7 @@ type Addition struct {
|
||||
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
|
||||
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||
AccessToken string
|
||||
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
||||
Platform string `json:"platform" type:"string" default:"web" help:"the platform header value, sent with API requests"`
|
||||
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
||||
@@ -203,7 +203,7 @@ do:
|
||||
"referer": "https://www.123pan.com/",
|
||||
"authorization": "Bearer " + d.AccessToken,
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) openlist-client",
|
||||
"platform": d.Platform,
|
||||
"platform": "web",
|
||||
"app-version": "3",
|
||||
//"user-agent": base.UserAgent,
|
||||
})
|
||||
|
||||
@@ -200,7 +200,10 @@ func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: driver.DiskUsageFromUsedAndTotal(capacityInfo.CloudCapacityInfo.UsedSize, capacityInfo.CloudCapacityInfo.TotalSize),
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
|
||||
FreeSpace: capacityInfo.CloudCapacityInfo.FreeSize,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -72,13 +72,13 @@ type CapacityResp struct {
|
||||
ResMessage string `json:"res_message"`
|
||||
Account string `json:"account"`
|
||||
CloudCapacityInfo struct {
|
||||
FreeSize int64 `json:"freeSize"`
|
||||
FreeSize uint64 `json:"freeSize"`
|
||||
MailUsedSize uint64 `json:"mail189UsedSize"`
|
||||
TotalSize uint64 `json:"totalSize"`
|
||||
UsedSize uint64 `json:"usedSize"`
|
||||
} `json:"cloudCapacityInfo"`
|
||||
FamilyCapacityInfo struct {
|
||||
FreeSize int64 `json:"freeSize"`
|
||||
FreeSize uint64 `json:"freeSize"`
|
||||
TotalSize uint64 `json:"totalSize"`
|
||||
UsedSize uint64 `json:"usedSize"`
|
||||
} `json:"familyCapacityInfo"`
|
||||
|
||||
@@ -284,15 +284,18 @@ func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var total, used uint64
|
||||
var total, free uint64
|
||||
if y.isFamily() {
|
||||
total = capacityInfo.FamilyCapacityInfo.TotalSize
|
||||
used = capacityInfo.FamilyCapacityInfo.UsedSize
|
||||
free = capacityInfo.FamilyCapacityInfo.FreeSize
|
||||
} else {
|
||||
total = capacityInfo.CloudCapacityInfo.TotalSize
|
||||
used = capacityInfo.CloudCapacityInfo.UsedSize
|
||||
free = capacityInfo.CloudCapacityInfo.FreeSize
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: free,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -322,13 +322,13 @@ type CapacityResp struct {
|
||||
ResMessage string `json:"res_message"`
|
||||
Account string `json:"account"`
|
||||
CloudCapacityInfo struct {
|
||||
FreeSize int64 `json:"freeSize"`
|
||||
FreeSize uint64 `json:"freeSize"`
|
||||
MailUsedSize uint64 `json:"mail189UsedSize"`
|
||||
TotalSize uint64 `json:"totalSize"`
|
||||
UsedSize uint64 `json:"usedSize"`
|
||||
} `json:"cloudCapacityInfo"`
|
||||
FamilyCapacityInfo struct {
|
||||
FreeSize int64 `json:"freeSize"`
|
||||
FreeSize uint64 `json:"freeSize"`
|
||||
TotalSize uint64 `json:"totalSize"`
|
||||
UsedSize uint64 `json:"usedSize"`
|
||||
} `json:"familyCapacityInfo"`
|
||||
|
||||
@@ -416,15 +416,18 @@ func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var total, used uint64
|
||||
var total, free uint64
|
||||
if y.isFamily() {
|
||||
total = capacityInfo.FamilyCapacityInfo.TotalSize
|
||||
used = capacityInfo.FamilyCapacityInfo.UsedSize
|
||||
free = capacityInfo.FamilyCapacityInfo.FreeSize
|
||||
} else {
|
||||
total = capacityInfo.CloudCapacityInfo.TotalSize
|
||||
used = capacityInfo.CloudCapacityInfo.UsedSize
|
||||
free = capacityInfo.CloudCapacityInfo.FreeSize
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: free,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -415,13 +415,13 @@ type CapacityResp struct {
|
||||
ResMessage string `json:"res_message"`
|
||||
Account string `json:"account"`
|
||||
CloudCapacityInfo struct {
|
||||
FreeSize int64 `json:"freeSize"`
|
||||
FreeSize uint64 `json:"freeSize"`
|
||||
MailUsedSize uint64 `json:"mail189UsedSize"`
|
||||
TotalSize uint64 `json:"totalSize"`
|
||||
UsedSize uint64 `json:"usedSize"`
|
||||
} `json:"cloudCapacityInfo"`
|
||||
FamilyCapacityInfo struct {
|
||||
FreeSize int64 `json:"freeSize"`
|
||||
FreeSize uint64 `json:"freeSize"`
|
||||
TotalSize uint64 `json:"totalSize"`
|
||||
UsedSize uint64 `json:"usedSize"`
|
||||
} `json:"familyCapacityInfo"`
|
||||
|
||||
@@ -5,15 +5,11 @@ import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
@@ -22,10 +18,8 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -35,20 +29,8 @@ type BaiduNetdisk struct {
|
||||
|
||||
uploadThread int
|
||||
vipType int // 会员类型,0普通用户(4G/4M)、1普通会员(10G/16M)、2超级会员(20G/32M)
|
||||
|
||||
upClient *resty.Client // 上传文件使用的http客户端
|
||||
uploadUrlG singleflight.Group[string]
|
||||
uploadUrlMu sync.RWMutex
|
||||
uploadUrlCache map[string]uploadURLCacheEntry
|
||||
}
|
||||
|
||||
type uploadURLCacheEntry struct {
|
||||
url string
|
||||
updateTime time.Time
|
||||
}
|
||||
|
||||
var ErrUploadIDExpired = errors.New("uploadid expired")
|
||||
|
||||
func (d *BaiduNetdisk) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
@@ -58,27 +40,19 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) Init(ctx context.Context) error {
|
||||
d.upClient = base.NewRestyClient().
|
||||
SetTimeout(UPLOAD_TIMEOUT).
|
||||
SetRetryCount(UPLOAD_RETRY_COUNT).
|
||||
SetRetryWaitTime(UPLOAD_RETRY_WAIT_TIME).
|
||||
SetRetryMaxWaitTime(UPLOAD_RETRY_MAX_WAIT_TIME)
|
||||
d.uploadUrlCache = make(map[string]uploadURLCacheEntry)
|
||||
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
|
||||
if d.uploadThread < 1 {
|
||||
d.uploadThread, d.UploadThread = 1, "1"
|
||||
} else if d.uploadThread > 32 {
|
||||
d.uploadThread, d.UploadThread = 32, "32"
|
||||
if d.uploadThread < 1 || d.uploadThread > 32 {
|
||||
d.uploadThread, d.UploadThread = 3, "3"
|
||||
}
|
||||
|
||||
if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
|
||||
d.UploadAPI = UPLOAD_FALLBACK_API
|
||||
d.UploadAPI = "https://d.pcs.baidu.com"
|
||||
}
|
||||
|
||||
res, err := d.get("/xpan/nas", map[string]string{
|
||||
"method": "uinfo",
|
||||
}, nil)
|
||||
log.Debugf("[baidu_netdisk] get uinfo: %s", string(res))
|
||||
log.Debugf("[baidu] get uinfo: %s", string(res))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -205,11 +179,6 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
|
||||
// **注意**: 截至 2024/04/20 百度云盘 api 接口返回的时间永远是当前时间,而不是文件时间。
|
||||
// 而实际上云盘存储的时间是文件时间,所以此处需要覆盖时间,保证缓存与云盘的数据一致
|
||||
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||
// 百度网盘不允许上传空文件
|
||||
if stream.GetSize() < 1 {
|
||||
return nil, ErrBaiduEmptyFilesNotAllowed
|
||||
}
|
||||
|
||||
// rapid upload
|
||||
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
|
||||
return newObj, nil
|
||||
@@ -245,6 +214,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
||||
|
||||
// cal md5 for first 256k data
|
||||
const SliceSize int64 = 256 * utils.KB
|
||||
// cal md5
|
||||
blockList := make([]string, 0, count)
|
||||
byteSize := sliceSize
|
||||
fileMd5H := md5.New()
|
||||
@@ -274,7 +244,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
||||
}
|
||||
if tmpF != nil {
|
||||
if written != streamSize {
|
||||
return nil, errs.NewErr(err, "CreateTempFile failed, size mismatch: %d != %d ", written, streamSize)
|
||||
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
|
||||
}
|
||||
_, err = tmpF.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
@@ -288,14 +258,31 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
||||
mtime := stream.ModTime().Unix()
|
||||
ctime := stream.CreateTime().Unix()
|
||||
|
||||
// step.1 尝试读取已保存进度
|
||||
// step.1 预上传
|
||||
// 尝试获取之前的进度
|
||||
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
|
||||
if !ok {
|
||||
// 没有进度,走预上传
|
||||
precreateResp, err = d.precreate(ctx, path, streamSize, blockListStr, contentMd5, sliceMd5, ctime, mtime)
|
||||
params := map[string]string{
|
||||
"method": "precreate",
|
||||
}
|
||||
form := map[string]string{
|
||||
"path": path,
|
||||
"size": strconv.FormatInt(streamSize, 10),
|
||||
"isdir": "0",
|
||||
"autoinit": "1",
|
||||
"rtype": "3",
|
||||
"block_list": blockListStr,
|
||||
"content-md5": contentMd5,
|
||||
"slice-md5": sliceMd5,
|
||||
}
|
||||
joinTime(form, ctime, mtime)
|
||||
|
||||
log.Debugf("[baidu_netdisk] precreate data: %s", form)
|
||||
_, err = d.postForm("/xpan/file", params, form, &precreateResp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("%+v", precreateResp)
|
||||
if precreateResp.ReturnType == 2 {
|
||||
// rapid upload, since got md5 match from baidu server
|
||||
// 修复时间,具体原因见 Put 方法注释的 **注意**
|
||||
@@ -304,96 +291,45 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
||||
return fileToObj(precreateResp.File), nil
|
||||
}
|
||||
}
|
||||
ensureUploadURL := func() {
|
||||
if precreateResp.UploadURL != "" {
|
||||
return
|
||||
}
|
||||
precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
|
||||
}
|
||||
ensureUploadURL()
|
||||
|
||||
// step.2 上传分片
|
||||
uploadLoop:
|
||||
for attempt := 0; attempt < 2; attempt++ {
|
||||
// 获取上传域名
|
||||
if precreateResp.UploadURL == "" {
|
||||
ensureUploadURL()
|
||||
}
|
||||
uploadUrl := precreateResp.UploadURL
|
||||
// 并发上传
|
||||
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
|
||||
retry.Attempts(1),
|
||||
retry.Delay(time.Second),
|
||||
retry.DelayType(retry.BackOffDelay))
|
||||
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
|
||||
retry.Attempts(1),
|
||||
retry.Delay(time.Second),
|
||||
retry.DelayType(retry.BackOffDelay))
|
||||
|
||||
cacheReaderAt, okReaderAt := cache.(io.ReaderAt)
|
||||
if !okReaderAt {
|
||||
return nil, fmt.Errorf("cache object must implement io.ReaderAt interface for upload operations")
|
||||
for i, partseq := range precreateResp.BlockList {
|
||||
if utils.IsCanceled(upCtx) {
|
||||
break
|
||||
}
|
||||
|
||||
totalParts := len(precreateResp.BlockList)
|
||||
|
||||
for i, partseq := range precreateResp.BlockList {
|
||||
if utils.IsCanceled(upCtx) || partseq < 0 {
|
||||
continue
|
||||
i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
|
||||
if partseq+1 == count {
|
||||
byteSize = lastBlockSize
|
||||
}
|
||||
threadG.Go(func(ctx context.Context) error {
|
||||
params := map[string]string{
|
||||
"method": "upload",
|
||||
"access_token": d.AccessToken,
|
||||
"type": "tmpfile",
|
||||
"path": path,
|
||||
"uploadid": precreateResp.Uploadid,
|
||||
"partseq": strconv.Itoa(partseq),
|
||||
}
|
||||
i, partseq := i, partseq
|
||||
offset, size := int64(partseq)*sliceSize, sliceSize
|
||||
if partseq+1 == count {
|
||||
size = lastBlockSize
|
||||
err := d.uploadSlice(ctx, params, stream.GetName(),
|
||||
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
threadG.Go(func(ctx context.Context) error {
|
||||
params := map[string]string{
|
||||
"method": "upload",
|
||||
"access_token": d.AccessToken,
|
||||
"type": "tmpfile",
|
||||
"path": path,
|
||||
"uploadid": precreateResp.Uploadid,
|
||||
"partseq": strconv.Itoa(partseq),
|
||||
}
|
||||
section := io.NewSectionReader(cacheReaderAt, offset, size)
|
||||
err := d.uploadSlice(ctx, uploadUrl, params, stream.GetName(), driver.NewLimitedUploadStream(ctx, section))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
precreateResp.BlockList[i] = -1
|
||||
// 当前goroutine还没退出,+1才是真正成功的数量
|
||||
success := threadG.Success() + 1
|
||||
progress := float64(success) * 100 / float64(totalParts)
|
||||
up(progress)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err = threadG.Wait()
|
||||
if err == nil {
|
||||
break uploadLoop
|
||||
}
|
||||
|
||||
// 保存进度(所有错误都会保存)
|
||||
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
|
||||
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
|
||||
|
||||
up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
|
||||
precreateResp.BlockList[i] = -1
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err = threadG.Wait(); err != nil {
|
||||
// 如果属于用户主动取消,则保存上传进度
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return nil, err
|
||||
}
|
||||
if errors.Is(err, ErrUploadIDExpired) {
|
||||
log.Warn("[baidu_netdisk] uploadid expired, will restart from scratch")
|
||||
d.clearUploadUrlCache(precreateResp.Uploadid)
|
||||
// 重新 precreate(所有分片都要重传)
|
||||
newPre, err2 := d.precreate(ctx, path, streamSize, blockListStr, "", "", ctime, mtime)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
if newPre.ReturnType == 2 {
|
||||
return fileToObj(newPre.File), nil
|
||||
}
|
||||
precreateResp = newPre
|
||||
precreateResp.UploadURL = ""
|
||||
ensureUploadURL()
|
||||
// 覆盖掉旧的进度
|
||||
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
|
||||
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
|
||||
continue uploadLoop
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -407,72 +343,23 @@ uploadLoop:
|
||||
// 修复时间,具体原因见 Put 方法注释的 **注意**
|
||||
newFile.Ctime = ctime
|
||||
newFile.Mtime = mtime
|
||||
// 上传成功清理进度
|
||||
base.SaveUploadProgress(d, nil, d.AccessToken, contentMd5)
|
||||
d.clearUploadUrlCache(precreateResp.Uploadid)
|
||||
return fileToObj(newFile), nil
|
||||
}
|
||||
|
||||
// precreate 执行预上传操作,支持首次上传和 uploadid 过期重试
|
||||
func (d *BaiduNetdisk) precreate(ctx context.Context, path string, streamSize int64, blockListStr, contentMd5, sliceMd5 string, ctime, mtime int64) (*PrecreateResp, error) {
|
||||
params := map[string]string{"method": "precreate"}
|
||||
form := map[string]string{
|
||||
"path": path,
|
||||
"size": strconv.FormatInt(streamSize, 10),
|
||||
"isdir": "0",
|
||||
"autoinit": "1",
|
||||
"rtype": "3",
|
||||
"block_list": blockListStr,
|
||||
}
|
||||
|
||||
// 只有在首次上传时才包含 content-md5 和 slice-md5
|
||||
if contentMd5 != "" && sliceMd5 != "" {
|
||||
form["content-md5"] = contentMd5
|
||||
form["slice-md5"] = sliceMd5
|
||||
}
|
||||
|
||||
joinTime(form, ctime, mtime)
|
||||
|
||||
var precreateResp PrecreateResp
|
||||
_, err := d.postForm("/xpan/file", params, form, &precreateResp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 修复时间,具体原因见 Put 方法注释的 **注意**
|
||||
if precreateResp.ReturnType == 2 {
|
||||
precreateResp.File.Ctime = ctime
|
||||
precreateResp.File.Mtime = mtime
|
||||
}
|
||||
|
||||
return &precreateResp, nil
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, uploadUrl string, params map[string]string, fileName string, file io.Reader) error {
|
||||
res, err := d.upClient.R().
|
||||
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
|
||||
res, err := base.RestyClient.R().
|
||||
SetContext(ctx).
|
||||
SetQueryParams(params).
|
||||
SetFileReader("file", fileName, file).
|
||||
Post(uploadUrl + "/rest/2.0/pcs/superfile2")
|
||||
Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugln(res.RawResponse.Status + res.String())
|
||||
if res.StatusCode() != http.StatusOK {
|
||||
return errs.NewErr(errs.StreamIncomplete, "baidu upload failed, status=%d, body=%s", res.StatusCode(), res.String())
|
||||
}
|
||||
errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
|
||||
errNo := utils.Json.Get(res.Body(), "errno").ToInt()
|
||||
respStr := res.String()
|
||||
lower := strings.ToLower(respStr)
|
||||
// 合并 uploadid 过期检测逻辑
|
||||
if strings.Contains(lower, "uploadid") &&
|
||||
(strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
|
||||
return ErrUploadIDExpired
|
||||
}
|
||||
|
||||
if errCode != 0 || errNo != 0 {
|
||||
return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", res.String())
|
||||
return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package baidu_netdisk
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
@@ -20,21 +19,11 @@ type Addition struct {
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
||||
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
|
||||
UseDynamicUploadAPI bool `json:"use_dynamic_upload_api" default:"true" help:"dynamically get upload api domain, when enabled, the 'Upload API' setting will be used as a fallback if failed to get"`
|
||||
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
|
||||
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
|
||||
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
|
||||
}
|
||||
|
||||
const (
|
||||
UPLOAD_FALLBACK_API = "https://d.pcs.baidu.com" // 备用上传地址
|
||||
UPLOAD_URL_EXPIRE_TIME = time.Minute * 60 // 上传地址有效期(分钟)
|
||||
UPLOAD_TIMEOUT = time.Minute * 30 // 上传请求超时时间
|
||||
UPLOAD_RETRY_COUNT = 3
|
||||
UPLOAD_RETRY_WAIT_TIME = time.Second * 1
|
||||
UPLOAD_RETRY_MAX_WAIT_TIME = time.Second * 5
|
||||
)
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "BaiduNetdisk",
|
||||
DefaultRoot: "/",
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package baidu_netdisk
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
@@ -10,10 +9,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBaiduEmptyFilesNotAllowed = errors.New("empty files are not allowed by baidu netdisk")
|
||||
)
|
||||
|
||||
type TokenErrResp struct {
|
||||
ErrorDescription string `json:"error_description"`
|
||||
Error string `json:"error"`
|
||||
@@ -193,32 +188,6 @@ type PrecreateResp struct {
|
||||
|
||||
// return_type=2
|
||||
File File `json:"info"`
|
||||
|
||||
UploadURL string `json:"-"` // 保存断点续传对应的上传域名
|
||||
}
|
||||
|
||||
type UploadServerResp struct {
|
||||
BakServer []any `json:"bak_server"`
|
||||
BakServers []struct {
|
||||
Server string `json:"server"`
|
||||
} `json:"bak_servers"`
|
||||
ClientIP string `json:"client_ip"`
|
||||
ErrorCode int `json:"error_code"`
|
||||
ErrorMsg string `json:"error_msg"`
|
||||
Expire int `json:"expire"`
|
||||
Host string `json:"host"`
|
||||
Newno string `json:"newno"`
|
||||
QuicServer []any `json:"quic_server"`
|
||||
QuicServers []struct {
|
||||
Server string `json:"server"`
|
||||
} `json:"quic_servers"`
|
||||
RequestID int64 `json:"request_id"`
|
||||
Server []any `json:"server"`
|
||||
ServerTime int `json:"server_time"`
|
||||
Servers []struct {
|
||||
Server string `json:"server"`
|
||||
} `json:"servers"`
|
||||
Sl int `json:"sl"`
|
||||
}
|
||||
|
||||
type QuotaResp struct {
|
||||
|
||||
@@ -115,7 +115,7 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
|
||||
errno := utils.Json.Get(res.Body(), "errno").ToInt()
|
||||
if errno != 0 {
|
||||
if utils.SliceContains([]int{111, -6}, errno) {
|
||||
log.Info("[baidu_netdisk] refreshing baidu_netdisk token.")
|
||||
log.Info("refreshing baidu_netdisk token.")
|
||||
err2 := d.refreshToken()
|
||||
if err2 != nil {
|
||||
return retry.Unrecoverable(err2)
|
||||
@@ -326,10 +326,10 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
||||
// 非会员固定为 4MB
|
||||
if d.vipType == 0 {
|
||||
if d.CustomUploadPartSize != 0 {
|
||||
log.Warnf("[baidu_netdisk] CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
|
||||
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
|
||||
}
|
||||
if filesize > MaxSliceNum*DefaultSliceSize {
|
||||
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
|
||||
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
|
||||
}
|
||||
|
||||
return DefaultSliceSize
|
||||
@@ -337,17 +337,17 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
||||
|
||||
if d.CustomUploadPartSize != 0 {
|
||||
if d.CustomUploadPartSize < DefaultSliceSize {
|
||||
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
|
||||
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
|
||||
return DefaultSliceSize
|
||||
}
|
||||
|
||||
if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
|
||||
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
|
||||
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
|
||||
return VipSliceSize
|
||||
}
|
||||
|
||||
if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
|
||||
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
|
||||
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
|
||||
return SVipSliceSize
|
||||
}
|
||||
|
||||
@@ -377,7 +377,7 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
||||
}
|
||||
|
||||
if filesize > MaxSliceNum*maxSliceSize {
|
||||
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
|
||||
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
|
||||
}
|
||||
|
||||
return maxSliceSize
|
||||
@@ -394,97 +394,6 @@ func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
|
||||
return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
|
||||
}
|
||||
|
||||
// getUploadUrl 从开放平台获取上传域名/地址,并发请求会被合并,结果会在 uploadid 生命周期内复用。
|
||||
// 如果获取失败,则返回 Upload API设置项。
|
||||
func (d *BaiduNetdisk) getUploadUrl(path, uploadId string) string {
|
||||
if !d.UseDynamicUploadAPI || uploadId == "" {
|
||||
return d.UploadAPI
|
||||
}
|
||||
getCachedUrlFunc := func() (string, bool) {
|
||||
d.uploadUrlMu.RLock()
|
||||
defer d.uploadUrlMu.RUnlock()
|
||||
if entry, ok := d.uploadUrlCache[uploadId]; ok {
|
||||
return entry.url, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
// 检查地址缓存
|
||||
if uploadUrl, ok := getCachedUrlFunc(); ok {
|
||||
return uploadUrl
|
||||
}
|
||||
|
||||
uploadUrlGetFunc := func() (string, error) {
|
||||
// 双重检查缓存
|
||||
if uploadUrl, ok := getCachedUrlFunc(); ok {
|
||||
return uploadUrl, nil
|
||||
}
|
||||
|
||||
uploadUrl, err := d.requestForUploadUrl(path, uploadId)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
d.uploadUrlMu.Lock()
|
||||
d.uploadUrlCache[uploadId] = uploadURLCacheEntry{
|
||||
url: uploadUrl,
|
||||
updateTime: time.Now(),
|
||||
}
|
||||
d.uploadUrlMu.Unlock()
|
||||
return uploadUrl, nil
|
||||
}
|
||||
|
||||
uploadUrl, err, _ := d.uploadUrlG.Do(uploadId, uploadUrlGetFunc)
|
||||
if err != nil {
|
||||
fallback := d.UploadAPI
|
||||
log.Warnf("[baidu_netdisk] get upload URL failed (%v), will use fallback URL: %s", err, fallback)
|
||||
return fallback
|
||||
}
|
||||
return uploadUrl
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) clearUploadUrlCache(uploadId string) {
|
||||
if uploadId == "" {
|
||||
return
|
||||
}
|
||||
d.uploadUrlMu.Lock()
|
||||
if _, ok := d.uploadUrlCache[uploadId]; ok {
|
||||
delete(d.uploadUrlCache, uploadId)
|
||||
}
|
||||
d.uploadUrlMu.Unlock()
|
||||
}
|
||||
|
||||
// requestForUploadUrl 请求获取上传地址。
|
||||
// 实测此接口不需要认证,传method和upload_version就行,不过还是按文档规范调用。
|
||||
// https://pan.baidu.com/union/doc/Mlvw5hfnr
|
||||
func (d *BaiduNetdisk) requestForUploadUrl(path, uploadId string) (string, error) {
|
||||
params := map[string]string{
|
||||
"method": "locateupload",
|
||||
"appid": "250528",
|
||||
"path": path,
|
||||
"uploadid": uploadId,
|
||||
"upload_version": "2.0",
|
||||
}
|
||||
apiUrl := "https://d.pcs.baidu.com/rest/2.0/pcs/file"
|
||||
var resp UploadServerResp
|
||||
_, err := d.request(apiUrl, http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(params)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// 应该是https开头的一个地址
|
||||
var uploadUrl string
|
||||
if len(resp.Servers) > 0 {
|
||||
uploadUrl = resp.Servers[0].Server
|
||||
} else if len(resp.BakServers) > 0 {
|
||||
uploadUrl = resp.BakServers[0].Server
|
||||
}
|
||||
if uploadUrl == "" {
|
||||
return "", errors.New("upload URL is empty")
|
||||
}
|
||||
return uploadUrl, nil
|
||||
}
|
||||
|
||||
// func encodeURIComponent(str string) string {
|
||||
// r := url.QueryEscape(str)
|
||||
// r = strings.ReplaceAll(r, "+", "%20")
|
||||
|
||||
@@ -25,7 +25,6 @@ func InitClient() {
|
||||
}),
|
||||
).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
||||
NoRedirectClient.SetHeader("user-agent", UserAgent)
|
||||
net.SetRestyProxyIfConfigured(NoRedirectClient)
|
||||
|
||||
RestyClient = NewRestyClient()
|
||||
HttpClient = net.NewHttpClient()
|
||||
@@ -38,7 +37,5 @@ func NewRestyClient() *resty.Client {
|
||||
SetRetryResetReaders(true).
|
||||
SetTimeout(DefaultTimeout).
|
||||
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
||||
|
||||
net.SetRestyProxyIfConfigured(client)
|
||||
return client
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/google/uuid"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
type Doubao struct {
|
||||
@@ -24,7 +23,6 @@ type Doubao struct {
|
||||
*UploadToken
|
||||
UserId string
|
||||
uploadThread int
|
||||
limiter *rate.Limiter
|
||||
}
|
||||
|
||||
func (d *Doubao) Config() driver.Config {
|
||||
@@ -63,17 +61,6 @@ func (d *Doubao) Init(ctx context.Context) error {
|
||||
d.UploadToken = uploadToken
|
||||
}
|
||||
|
||||
if d.LimitRate > 0 {
|
||||
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Doubao) WaitLimit(ctx context.Context) error {
|
||||
if d.limiter != nil {
|
||||
return d.limiter.Wait(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -82,10 +69,6 @@ func (d *Doubao) Drop(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
if err := d.WaitLimit(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var files []model.Obj
|
||||
fileList, err := d.getFiles(dir.GetID(), "")
|
||||
if err != nil {
|
||||
@@ -112,10 +95,6 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
|
||||
}
|
||||
|
||||
func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
if err := d.WaitLimit(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var downloadUrl string
|
||||
|
||||
if u, ok := file.(*Object); ok {
|
||||
@@ -181,10 +160,6 @@ func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
||||
}
|
||||
|
||||
func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
if err := d.WaitLimit(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var r UploadNodeResp
|
||||
_, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
@@ -202,10 +177,6 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
|
||||
}
|
||||
|
||||
func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
if err := d.WaitLimit(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var r UploadNodeResp
|
||||
_, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
@@ -220,10 +191,6 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
if err := d.WaitLimit(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var r BaseResp
|
||||
_, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
@@ -240,10 +207,6 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
|
||||
}
|
||||
|
||||
func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
|
||||
if err := d.WaitLimit(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var r BaseResp
|
||||
_, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
|
||||
@@ -252,10 +215,6 @@ func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||
if err := d.WaitLimit(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 根据MIME类型确定数据类型
|
||||
mimetype := file.GetMimetype()
|
||||
dataType := FileDataType
|
||||
|
||||
@@ -10,10 +10,9 @@ type Addition struct {
|
||||
// driver.RootPath
|
||||
driver.RootID
|
||||
// define other
|
||||
Cookie string `json:"cookie" type:"text"`
|
||||
UploadThread string `json:"upload_thread" default:"3"`
|
||||
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
|
||||
LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
|
||||
Cookie string `json:"cookie" type:"text"`
|
||||
UploadThread string `json:"upload_thread" default:"3"`
|
||||
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
@@ -24,10 +23,6 @@ var config = driver.Config{
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &Doubao{
|
||||
Addition: Addition{
|
||||
LimitRate: 2,
|
||||
},
|
||||
}
|
||||
return &Doubao{}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -113,7 +113,9 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
|
||||
RangeReader: &model.FileRangeReader{
|
||||
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
|
||||
},
|
||||
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -51,9 +51,6 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
|
||||
if d.Addition.ShowReadme {
|
||||
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
|
||||
}
|
||||
if d.Addition.ShowSourceCode{
|
||||
files = append(files, point.GetSourceCode()...)
|
||||
}
|
||||
} else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录
|
||||
nextDir := GetNextDir(point.Point, path)
|
||||
if nextDir == "" {
|
||||
@@ -120,10 +117,6 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
|
||||
}
|
||||
|
||||
files = append(files, point.GetReleaseByTagName(tagName)...)
|
||||
|
||||
if d.Addition.ShowSourceCode{
|
||||
files = append(files, point.GetSourceCodeByTagName(tagName)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ type Addition struct {
|
||||
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"OpenListTeam/OpenList" help:"structure:[path:]org/repo"`
|
||||
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
|
||||
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
|
||||
ShowSourceCode bool `json:"show_source_code" type:"bool" default:"false" help:"show Source code (zip/tar.gz)"`
|
||||
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
|
||||
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
|
||||
}
|
||||
|
||||
@@ -143,60 +143,6 @@ func (m *MountPoint) GetAllVersionSize() int64 {
|
||||
return size
|
||||
}
|
||||
|
||||
func (m *MountPoint) GetSourceCode() []File {
|
||||
files := make([]File, 0)
|
||||
|
||||
// 无法获取文件大小,此处设为 1
|
||||
files = append(files, File{
|
||||
Path: m.Point + "/" + "Source code (zip)",
|
||||
FileName: "Source code (zip)",
|
||||
Size: 1,
|
||||
Type: "file",
|
||||
UpdateAt: m.Release.CreatedAt,
|
||||
CreateAt: m.Release.CreatedAt,
|
||||
Url: m.Release.ZipballUrl,
|
||||
})
|
||||
files = append(files, File{
|
||||
Path: m.Point + "/" + "Source code (tar.gz)",
|
||||
FileName: "Source code (tar.gz)",
|
||||
Size: 1,
|
||||
Type: "file",
|
||||
UpdateAt: m.Release.CreatedAt,
|
||||
CreateAt: m.Release.CreatedAt,
|
||||
Url: m.Release.TarballUrl,
|
||||
})
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
func (m *MountPoint) GetSourceCodeByTagName(tagName string) []File {
|
||||
for _, item := range *m.Releases {
|
||||
if item.TagName == tagName {
|
||||
files := make([]File, 0)
|
||||
files = append(files, File{
|
||||
Path: m.Point + "/" + "Source code (zip)",
|
||||
FileName: "Source code (zip)",
|
||||
Size: 1,
|
||||
Type: "file",
|
||||
UpdateAt: item.CreatedAt,
|
||||
CreateAt: item.CreatedAt,
|
||||
Url: item.ZipballUrl,
|
||||
})
|
||||
files = append(files, File{
|
||||
Path: m.Point + "/" + "Source code (tar.gz)",
|
||||
FileName: "Source code (tar.gz)",
|
||||
Size: 1,
|
||||
Type: "file",
|
||||
UpdateAt: item.CreatedAt,
|
||||
CreateAt: item.CreatedAt,
|
||||
Url: item.TarballUrl,
|
||||
})
|
||||
return files
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
|
||||
if m.OtherFile == nil || refresh {
|
||||
resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")
|
||||
|
||||
@@ -27,14 +27,6 @@ import (
|
||||
|
||||
// do others that not defined in Driver interface
|
||||
|
||||
// Google Drive API field constants
|
||||
const (
|
||||
// File list query fields
|
||||
FilesListFields = "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken"
|
||||
// Single file query fields
|
||||
FileInfoFields = "id,name,mimeType,size,md5Checksum,sha1Checksum,sha256Checksum"
|
||||
)
|
||||
|
||||
type googleDriveServiceAccount struct {
|
||||
// Type string `json:"type"`
|
||||
// ProjectID string `json:"project_id"`
|
||||
@@ -243,7 +235,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
|
||||
}
|
||||
query := map[string]string{
|
||||
"orderBy": orderBy,
|
||||
"fields": FilesListFields,
|
||||
"fields": "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
|
||||
"pageSize": "1000",
|
||||
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
|
||||
//"includeItemsFromAllDrives": "true",
|
||||
@@ -257,82 +249,11 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
|
||||
return nil, err
|
||||
}
|
||||
pageToken = resp.NextPageToken
|
||||
|
||||
// Batch process shortcuts, API calls only for file shortcuts
|
||||
shortcutTargetIds := make([]string, 0)
|
||||
shortcutIndices := make([]int, 0)
|
||||
|
||||
// Collect target IDs of all file shortcuts (skip folder shortcuts)
|
||||
for i := range resp.Files {
|
||||
if resp.Files[i].MimeType == "application/vnd.google-apps.shortcut" &&
|
||||
resp.Files[i].ShortcutDetails.TargetId != "" &&
|
||||
resp.Files[i].ShortcutDetails.TargetMimeType != "application/vnd.google-apps.folder" {
|
||||
shortcutTargetIds = append(shortcutTargetIds, resp.Files[i].ShortcutDetails.TargetId)
|
||||
shortcutIndices = append(shortcutIndices, i)
|
||||
}
|
||||
}
|
||||
|
||||
// Batch get target file info (only for file shortcuts)
|
||||
if len(shortcutTargetIds) > 0 {
|
||||
targetFiles := d.batchGetTargetFilesInfo(shortcutTargetIds)
|
||||
// Update shortcut file info
|
||||
for j, targetId := range shortcutTargetIds {
|
||||
if targetFile, exists := targetFiles[targetId]; exists {
|
||||
fileIndex := shortcutIndices[j]
|
||||
if targetFile.Size != "" {
|
||||
resp.Files[fileIndex].Size = targetFile.Size
|
||||
}
|
||||
if targetFile.MD5Checksum != "" {
|
||||
resp.Files[fileIndex].MD5Checksum = targetFile.MD5Checksum
|
||||
}
|
||||
if targetFile.SHA1Checksum != "" {
|
||||
resp.Files[fileIndex].SHA1Checksum = targetFile.SHA1Checksum
|
||||
}
|
||||
if targetFile.SHA256Checksum != "" {
|
||||
resp.Files[fileIndex].SHA256Checksum = targetFile.SHA256Checksum
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res = append(res, resp.Files...)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// getTargetFileInfo gets target file details for shortcuts
|
||||
func (d *GoogleDrive) getTargetFileInfo(targetId string) (File, error) {
|
||||
var targetFile File
|
||||
url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s", targetId)
|
||||
query := map[string]string{
|
||||
"fields": FileInfoFields,
|
||||
}
|
||||
_, err := d.request(url, http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(query)
|
||||
}, &targetFile)
|
||||
if err != nil {
|
||||
return File{}, err
|
||||
}
|
||||
return targetFile, nil
|
||||
}
|
||||
|
||||
// batchGetTargetFilesInfo batch gets target file info, sequential processing to avoid concurrency complexity
|
||||
func (d *GoogleDrive) batchGetTargetFilesInfo(targetIds []string) map[string]File {
|
||||
if len(targetIds) == 0 {
|
||||
return make(map[string]File)
|
||||
}
|
||||
|
||||
result := make(map[string]File)
|
||||
// Sequential processing to avoid concurrency complexity
|
||||
for _, targetId := range targetIds {
|
||||
file, err := d.getTargetFileInfo(targetId)
|
||||
if err == nil {
|
||||
result[targetId] = file
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
|
||||
defaultChunkSize := d.ChunkSize * 1024 * 1024
|
||||
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
|
||||
|
||||
@@ -236,19 +236,4 @@ func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Onedrive) GetDirectUploadTools() []string {
|
||||
if !d.EnableDirectUpload {
|
||||
return nil
|
||||
}
|
||||
return []string{"HttpDirect"}
|
||||
}
|
||||
|
||||
// GetDirectUploadInfo returns the direct upload info for OneDrive
|
||||
func (d *Onedrive) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
|
||||
if !d.EnableDirectUpload {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Onedrive)(nil)
|
||||
|
||||
@@ -19,7 +19,6 @@ type Addition struct {
|
||||
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
|
||||
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
|
||||
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
|
||||
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
||||
@@ -133,7 +133,7 @@ func (d *Onedrive) _refreshToken() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
|
||||
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||
if d.ref != nil {
|
||||
return d.ref.Request(url, method, callback, resp)
|
||||
}
|
||||
@@ -152,7 +152,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback,
|
||||
return nil, err
|
||||
}
|
||||
if e.Error.Code != "" {
|
||||
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
|
||||
if e.Error.Code == "InvalidAuthenticationToken" {
|
||||
err = d.refreshToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -310,36 +310,9 @@ func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
|
||||
var resp DriveResp
|
||||
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
|
||||
req.SetContext(ctx)
|
||||
}, &resp, true)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Onedrive) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
|
||||
// Create upload session
|
||||
url := d.GetMetaUrl(false, path) + "/createUploadSession"
|
||||
metadata := map[string]any{
|
||||
"item": map[string]any{
|
||||
"@microsoft.graph.conflictBehavior": "rename",
|
||||
},
|
||||
}
|
||||
|
||||
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(metadata).SetContext(ctx)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
if uploadUrl == "" {
|
||||
return nil, fmt.Errorf("failed to get upload URL from response")
|
||||
}
|
||||
return &model.HttpDirectUploadInfo{
|
||||
UploadURL: uploadUrl,
|
||||
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
|
||||
Method: "PUT",
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -222,18 +222,4 @@ func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, er
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *OnedriveAPP) GetDirectUploadTools() []string {
|
||||
if !d.EnableDirectUpload {
|
||||
return nil
|
||||
}
|
||||
return []string{"HttpDirect"}
|
||||
}
|
||||
|
||||
func (d *OnedriveAPP) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
|
||||
if !d.EnableDirectUpload {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*OnedriveAPP)(nil)
|
||||
|
||||
@@ -7,15 +7,14 @@ import (
|
||||
|
||||
type Addition struct {
|
||||
driver.RootPath
|
||||
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
|
||||
ClientID string `json:"client_id" required:"true"`
|
||||
ClientSecret string `json:"client_secret" required:"true"`
|
||||
TenantID string `json:"tenant_id"`
|
||||
Email string `json:"email"`
|
||||
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
|
||||
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
|
||||
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
|
||||
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
|
||||
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
|
||||
ClientID string `json:"client_id" required:"true"`
|
||||
ClientSecret string `json:"client_secret" required:"true"`
|
||||
TenantID string `json:"tenant_id"`
|
||||
Email string `json:"email"`
|
||||
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
|
||||
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
|
||||
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
||||
@@ -88,7 +88,7 @@ func (d *OnedriveAPP) _accessToken() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
|
||||
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
||||
if callback != nil {
|
||||
@@ -104,7 +104,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallba
|
||||
return nil, err
|
||||
}
|
||||
if e.Error.Code != "" {
|
||||
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
|
||||
if e.Error.Code == "InvalidAuthenticationToken" {
|
||||
err = d.accessToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -216,36 +216,9 @@ func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
|
||||
var resp DriveResp
|
||||
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
|
||||
req.SetContext(ctx)
|
||||
}, &resp, true)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *OnedriveAPP) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
|
||||
// Create upload session
|
||||
url := d.GetMetaUrl(false, path) + "/createUploadSession"
|
||||
metadata := map[string]any{
|
||||
"item": map[string]any{
|
||||
"@microsoft.graph.conflictBehavior": "rename",
|
||||
},
|
||||
}
|
||||
|
||||
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(metadata).SetContext(ctx)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
if uploadUrl == "" {
|
||||
return nil, fmt.Errorf("failed to get upload URL from response")
|
||||
}
|
||||
return &model.HttpDirectUploadInfo{
|
||||
UploadURL: uploadUrl,
|
||||
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
|
||||
Method: "PUT",
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -190,7 +190,9 @@ func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkA
|
||||
|
||||
expiration := time.Minute
|
||||
return &model.Link{
|
||||
RangeReader: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
|
||||
RangeReader: &model.FileRangeReader{
|
||||
RangeReaderIF: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
|
||||
},
|
||||
ContentLength: size,
|
||||
Expiration: &expiration,
|
||||
}, nil
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Strm struct {
|
||||
@@ -41,9 +40,6 @@ func (d *Strm) Init(ctx context.Context) error {
|
||||
if d.Paths == "" {
|
||||
return errors.New("paths is required")
|
||||
}
|
||||
if d.SaveStrmToLocal && len(d.SaveStrmLocalPath) <= 0 {
|
||||
return errors.New("SaveStrmLocalPath is required")
|
||||
}
|
||||
d.pathMap = make(map[string][]string)
|
||||
for _, path := range strings.Split(d.Paths, "\n") {
|
||||
path = strings.TrimSpace(path)
|
||||
@@ -52,13 +48,6 @@ func (d *Strm) Init(ctx context.Context) error {
|
||||
}
|
||||
k, v := getPair(path)
|
||||
d.pathMap[k] = append(d.pathMap[k], v)
|
||||
if d.SaveStrmToLocal {
|
||||
err := InsertStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
|
||||
if err != nil {
|
||||
log.Errorf("insert strmTrie error: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(d.pathMap) == 1 {
|
||||
for k := range d.pathMap {
|
||||
@@ -70,51 +59,26 @@ func (d *Strm) Init(ctx context.Context) error {
|
||||
d.autoFlatten = false
|
||||
}
|
||||
|
||||
var supportTypes []string
|
||||
if d.FilterFileTypes == "" {
|
||||
d.FilterFileTypes = "mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac"
|
||||
}
|
||||
supportTypes = strings.Split(d.FilterFileTypes, ",")
|
||||
d.supportSuffix = map[string]struct{}{}
|
||||
for _, ext := range supportTypes {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.supportSuffix[ext] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var downloadTypes []string
|
||||
if d.DownloadFileTypes == "" {
|
||||
d.DownloadFileTypes = "ass,srt,vtt,sub,strm"
|
||||
}
|
||||
downloadTypes = strings.Split(d.DownloadFileTypes, ",")
|
||||
d.downloadSuffix = map[string]struct{}{}
|
||||
for _, ext := range downloadTypes {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.downloadSuffix[ext] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if d.Version != 3 {
|
||||
types := strings.Split("mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac", ",")
|
||||
d.supportSuffix = supportSuffix()
|
||||
if d.FilterFileTypes != "" {
|
||||
types := strings.Split(d.FilterFileTypes, ",")
|
||||
for _, ext := range types {
|
||||
if _, ok := d.supportSuffix[ext]; !ok {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.supportSuffix[ext] = struct{}{}
|
||||
supportTypes = append(supportTypes, ext)
|
||||
}
|
||||
}
|
||||
d.FilterFileTypes = strings.Join(supportTypes, ",")
|
||||
}
|
||||
|
||||
types = strings.Split("ass,srt,vtt,sub,strm", ",")
|
||||
for _, ext := range types {
|
||||
if _, ok := d.downloadSuffix[ext]; !ok {
|
||||
d.supportSuffix[ext] = struct{}{}
|
||||
downloadTypes = append(downloadTypes, ext)
|
||||
d.downloadSuffix = downloadSuffix()
|
||||
if d.DownloadFileTypes != "" {
|
||||
downloadTypes := strings.Split(d.DownloadFileTypes, ",")
|
||||
for _, ext := range downloadTypes {
|
||||
ext = strings.ToLower(strings.TrimSpace(ext))
|
||||
if ext != "" {
|
||||
d.downloadSuffix[ext] = struct{}{}
|
||||
}
|
||||
}
|
||||
d.DownloadFileTypes = strings.Join(downloadTypes, ",")
|
||||
d.Version = 3
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -123,9 +87,6 @@ func (d *Strm) Drop(ctx context.Context) error {
|
||||
d.pathMap = nil
|
||||
d.downloadSuffix = nil
|
||||
d.supportSuffix = nil
|
||||
for _, path := range strings.Split(d.Paths, "\n") {
|
||||
RemoveStrm(utils.FixAndCleanPath(strings.TrimSpace(path)), d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,175 +0,0 @@
|
||||
package strm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tchap/go-patricia/v2/patricia"
|
||||
)
|
||||
|
||||
var strmTrie = patricia.NewTrie()
|
||||
|
||||
// UpdateLocalStrm regenerates locally saved strm files for every Strm driver
// whose registered mount prefix covers the given path. It is registered as an
// objs-update hook (see init), so it runs whenever a directory listing changes.
func UpdateLocalStrm(ctx context.Context, path string, objs []model.Obj) {
	path = utils.FixAndCleanPath(path)
	// updateLocal writes one strm file per object under the driver's local
	// save path, then prunes local files with no corresponding object.
	updateLocal := func(driver *Strm, basePath string, objs []model.Obj) {
		relParent := strings.TrimPrefix(basePath, driver.MountPath)
		localParentPath := stdpath.Join(driver.SaveStrmLocalPath, relParent)
		for _, obj := range objs {
			localPath := stdpath.Join(localParentPath, obj.GetName())
			generateStrm(ctx, driver, obj, localPath)
		}
		deleteExtraFiles(localParentPath, objs)
	}

	// Visit every registered prefix of path in the trie; each prefix may
	// carry several Strm drivers.
	_ = strmTrie.VisitPrefixes(patricia.Prefix(path), func(needPathPrefix patricia.Prefix, item patricia.Item) error {
		strmDrivers := item.([]*Strm)
		needPath := string(needPathPrefix)
		restPath := strings.TrimPrefix(path, needPath)
		// Reject partial path-component matches (e.g. prefix "/a/b" must
		// not match "/a/bc"): the remainder must start at a separator.
		if len(restPath) > 0 && restPath[0] != '/' {
			return nil
		}
		for _, strmDriver := range strmDrivers {
			strmObjs := strmDriver.convert2strmObjs(ctx, path, objs)
			updateLocal(strmDriver, stdpath.Join(stdpath.Base(needPath), restPath), strmObjs)
		}
		return nil
	})
}
|
||||
|
||||
func InsertStrm(dstPath string, d *Strm) error {
|
||||
prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
|
||||
existing := strmTrie.Get(prefix)
|
||||
|
||||
if existing == nil {
|
||||
if !strmTrie.Insert(prefix, []*Strm{d}) {
|
||||
return errors.New("failed to insert strm")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if lst, ok := existing.([]*Strm); ok {
|
||||
strmTrie.Set(prefix, append(lst, d))
|
||||
} else {
|
||||
return errors.New("invalid trie item type")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func RemoveStrm(dstPath string, d *Strm) {
|
||||
prefix := patricia.Prefix(strings.TrimRight(dstPath, "/"))
|
||||
existing := strmTrie.Get(prefix)
|
||||
if existing == nil {
|
||||
return
|
||||
}
|
||||
lst, ok := existing.([]*Strm)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if len(lst) == 1 && lst[0] == d {
|
||||
strmTrie.Delete(prefix)
|
||||
return
|
||||
}
|
||||
|
||||
for i, di := range lst {
|
||||
if di == d {
|
||||
newList := append(lst[:i], lst[i+1:]...)
|
||||
strmTrie.Set(prefix, newList)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// generateStrm materializes obj at localPath: directories become real local
// directories, files are streamed from the driver into a local file. Errors
// are logged rather than returned because this runs from a best-effort
// update hook.
func generateStrm(ctx context.Context, driver *Strm, obj model.Obj, localPath string) {
	if obj.IsDir() {
		err := utils.CreateNestedDirectory(localPath)
		if err != nil {
			log.Warnf("failed to generate strm dir %s: failed to create dir: %v", localPath, err)
			return
		}
	} else {
		link, err := driver.Link(ctx, obj, model.LinkArgs{})
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to link: %v", localPath, err)
			return
		}
		defer link.Close()
		// Prefer the link's reported content length; fall back to the
		// object's size when the link does not report one.
		size := link.ContentLength
		if size <= 0 {
			size = obj.GetSize()
		}
		rrf, err := stream.GetRangeReaderFromLink(size, link)
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to get range reader: %v", localPath, err)
			return
		}
		// Length -1 requests the full content from offset 0.
		rc, err := rrf.RangeRead(ctx, http_range.Range{Length: -1})
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to read range: %v", localPath, err)
			return
		}
		defer rc.Close()
		file, err := utils.CreateNestedFile(localPath)
		if err != nil {
			log.Warnf("failed to generate strm of obj %s: failed to create local file: %v", localPath, err)
			return
		}
		defer file.Close()
		if _, err := utils.CopyWithBuffer(file, rc); err != nil {
			log.Warnf("failed to generate strm of obj %s: copy failed: %v", localPath, err)
		}
	}
}
|
||||
|
||||
func deleteExtraFiles(localPath string, objs []model.Obj) {
|
||||
localFiles, err := getLocalFiles(localPath)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to read local files from %s: %v", localPath, err)
|
||||
return
|
||||
}
|
||||
|
||||
objsSet := make(map[string]struct{})
|
||||
for _, obj := range objs {
|
||||
if obj.IsDir() {
|
||||
continue
|
||||
}
|
||||
objsSet[stdpath.Join(localPath, obj.GetName())] = struct{}{}
|
||||
}
|
||||
|
||||
for _, localFile := range localFiles {
|
||||
if _, exists := objsSet[localFile]; !exists {
|
||||
err := os.Remove(localFile)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to delete file: %s, error: %v\n", localFile, err)
|
||||
} else {
|
||||
log.Infof("Deleted file %s", localFile)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getLocalFiles(localPath string) ([]string, error) {
|
||||
var files []string
|
||||
entries, err := os.ReadDir(localPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
files = append(files, stdpath.Join(localPath, entry.Name()))
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// init wires UpdateLocalStrm into the op layer so that every directory
// listing update triggers regeneration of locally saved strm files.
func init() {
	op.RegisterObjsUpdateHook(UpdateLocalStrm)
}
|
||||
@@ -8,13 +8,10 @@ import (
|
||||
type Addition struct {
|
||||
Paths string `json:"paths" required:"true" type:"text"`
|
||||
SiteUrl string `json:"siteUrl" type:"text" required:"false" help:"The prefix URL of the strm file"`
|
||||
DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass,srt,vtt,sub,strm" required:"false" help:"Files need to download with strm (usally subtitles)"`
|
||||
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"mp4,mkv,flv,avi,wmv,ts,rmvb,webm,mp3,flac,aac,wav,ogg,m4a,wma,alac" required:"false" help:"Supports suffix name of strm file"`
|
||||
FilterFileTypes string `json:"filterFileTypes" type:"text" default:"strm" required:"false" help:"Supports suffix name of strm file"`
|
||||
DownloadFileTypes string `json:"downloadFileTypes" type:"text" default:"ass" required:"false" help:"Files need to download with strm (usally subtitles)"`
|
||||
EncodePath bool `json:"encodePath" default:"true" required:"true" help:"encode the path in the strm file"`
|
||||
WithoutUrl bool `json:"withoutUrl" default:"false" help:"strm file content without URL prefix"`
|
||||
SaveStrmToLocal bool `json:"SaveStrmToLocal" default:"false" help:"save strm file locally"`
|
||||
SaveStrmLocalPath string `json:"SaveStrmLocalPath" type:"text" help:"save strm file local path"`
|
||||
Version int
|
||||
LocalModel bool `json:"localModel" default:"false" help:"enable local mode"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
||||
36
drivers/strm/types.go
Normal file
36
drivers/strm/types.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package strm
|
||||
|
||||
func supportSuffix() map[string]struct{} {
|
||||
return map[string]struct{}{
|
||||
// video
|
||||
"mp4": {},
|
||||
"mkv": {},
|
||||
"flv": {},
|
||||
"avi": {},
|
||||
"wmv": {},
|
||||
"ts": {},
|
||||
"rmvb": {},
|
||||
"webm": {},
|
||||
// audio
|
||||
"mp3": {},
|
||||
"flac": {},
|
||||
"aac": {},
|
||||
"wav": {},
|
||||
"ogg": {},
|
||||
"m4a": {},
|
||||
"wma": {},
|
||||
"alac": {},
|
||||
}
|
||||
}
|
||||
|
||||
func downloadSuffix() map[string]struct{} {
|
||||
return map[string]struct{}{
|
||||
// strm
|
||||
"strm": {},
|
||||
// subtitles
|
||||
"ass": {},
|
||||
"srt": {},
|
||||
"vtt": {},
|
||||
"sub": {},
|
||||
}
|
||||
}
|
||||
@@ -58,10 +58,7 @@ func (d *Strm) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.convert2strmObjs(ctx, reqPath, objs), nil
|
||||
}
|
||||
|
||||
func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []model.Obj) []model.Obj {
|
||||
var validObjs []model.Obj
|
||||
for _, obj := range objs {
|
||||
id, name, path := "", obj.GetName(), ""
|
||||
@@ -69,12 +66,12 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
|
||||
if !obj.IsDir() {
|
||||
path = stdpath.Join(reqPath, obj.GetName())
|
||||
ext := strings.ToLower(utils.Ext(name))
|
||||
if _, ok := d.downloadSuffix[ext]; ok {
|
||||
size = obj.GetSize()
|
||||
} else if _, ok := d.supportSuffix[ext]; ok {
|
||||
if _, ok := d.supportSuffix[ext]; ok {
|
||||
id = "strm"
|
||||
name = strings.TrimSuffix(name, ext) + "strm"
|
||||
size = int64(len(d.getLink(ctx, path)))
|
||||
} else if _, ok := d.downloadSuffix[ext]; ok {
|
||||
size = obj.GetSize()
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
@@ -87,11 +84,13 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
|
||||
Modified: obj.ModTime(),
|
||||
IsFolder: obj.IsDir(),
|
||||
}
|
||||
|
||||
thumb, ok := model.GetThumb(obj)
|
||||
if !ok {
|
||||
validObjs = append(validObjs, &objRes)
|
||||
continue
|
||||
}
|
||||
|
||||
validObjs = append(validObjs, &model.ObjThumb{
|
||||
Object: objRes,
|
||||
Thumbnail: model.Thumbnail{
|
||||
@@ -99,7 +98,7 @@ func (d *Strm) convert2strmObjs(ctx context.Context, reqPath string, objs []mode
|
||||
},
|
||||
})
|
||||
}
|
||||
return validObjs
|
||||
return validObjs, nil
|
||||
}
|
||||
|
||||
func (d *Strm) getLink(ctx context.Context, path string) string {
|
||||
@@ -111,7 +110,7 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
|
||||
signPath := sign.Sign(path)
|
||||
finalPath = fmt.Sprintf("%s?sign=%s", finalPath, signPath)
|
||||
}
|
||||
if d.WithoutUrl {
|
||||
if d.LocalModel {
|
||||
return finalPath
|
||||
}
|
||||
apiUrl := d.SiteUrl
|
||||
@@ -120,9 +119,7 @@ func (d *Strm) getLink(ctx context.Context, path string) string {
|
||||
} else {
|
||||
apiUrl = common.GetApiUrl(ctx)
|
||||
}
|
||||
if !strings.HasPrefix(finalPath, "/") {
|
||||
finalPath = "/" + finalPath
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s/d%s",
|
||||
apiUrl,
|
||||
finalPath)
|
||||
|
||||
@@ -88,7 +88,7 @@ func (d *Terabox) request(rurl string, method string, callback base.ReqCallback,
|
||||
return nil, err
|
||||
}
|
||||
errno := utils.Json.Get(res.Body(), "errno").ToInt()
|
||||
if errno == 4000023 || errno == 450016 {
|
||||
if errno == 4000023 || errno == 4500016 {
|
||||
// reget jsToken
|
||||
err = d.resetJsToken()
|
||||
if err != nil {
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -69,7 +68,6 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
|
||||
PackageName: "com.xunlei.downloadprovider",
|
||||
UserAgent: "ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)",
|
||||
DownloadUserAgent: "Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)",
|
||||
Space: x.Space,
|
||||
refreshCTokenCk: func(token string) {
|
||||
x.CaptchaToken = token
|
||||
op.MustSaveDriverStorage(x)
|
||||
@@ -169,7 +167,6 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {
|
||||
UserAgent: x.UserAgent,
|
||||
DownloadUserAgent: x.DownloadUserAgent,
|
||||
UseVideoUrl: x.UseVideoUrl,
|
||||
Space: x.Space,
|
||||
|
||||
refreshCTokenCk: func(token string) {
|
||||
x.CaptchaToken = token
|
||||
@@ -284,7 +281,7 @@ func (xc *XunLeiCommon) Link(ctx context.Context, file model.Obj, args model.Lin
|
||||
_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodGet, func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
r.SetPathParam("fileID", file.GetID())
|
||||
r.SetQueryParam("space", xc.Space)
|
||||
//r.SetQueryParam("space", "")
|
||||
}, &lFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -325,7 +322,6 @@ func (xc *XunLeiCommon) MakeDir(ctx context.Context, parentDir model.Obj, dirNam
|
||||
"kind": FOLDER,
|
||||
"name": dirName,
|
||||
"parent_id": parentDir.GetID(),
|
||||
"space": xc.Space,
|
||||
})
|
||||
}, nil)
|
||||
return err
|
||||
@@ -335,9 +331,8 @@ func (xc *XunLeiCommon) Move(ctx context.Context, srcObj, dstDir model.Obj) erro
|
||||
_, err := xc.Request(FILE_API_URL+":batchMove", http.MethodPost, func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
r.SetBody(&base.Json{
|
||||
"to": base.Json{"parent_id": dstDir.GetID()},
|
||||
"ids": []string{srcObj.GetID()},
|
||||
"space": xc.Space,
|
||||
"to": base.Json{"parent_id": dstDir.GetID()},
|
||||
"ids": []string{srcObj.GetID()},
|
||||
})
|
||||
}, nil)
|
||||
return err
|
||||
@@ -347,10 +342,7 @@ func (xc *XunLeiCommon) Rename(ctx context.Context, srcObj model.Obj, newName st
|
||||
_, err := xc.Request(FILE_API_URL+"/{fileID}", http.MethodPatch, func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
r.SetPathParam("fileID", srcObj.GetID())
|
||||
r.SetBody(&base.Json{
|
||||
"name": newName,
|
||||
"space": xc.Space,
|
||||
})
|
||||
r.SetBody(&base.Json{"name": newName})
|
||||
}, nil)
|
||||
return err
|
||||
}
|
||||
@@ -359,9 +351,8 @@ func (xc *XunLeiCommon) Copy(ctx context.Context, srcObj, dstDir model.Obj) erro
|
||||
_, err := xc.Request(FILE_API_URL+":batchCopy", http.MethodPost, func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
r.SetBody(&base.Json{
|
||||
"to": base.Json{"parent_id": dstDir.GetID()},
|
||||
"ids": []string{srcObj.GetID()},
|
||||
"space": xc.Space,
|
||||
"to": base.Json{"parent_id": dstDir.GetID()},
|
||||
"ids": []string{srcObj.GetID()},
|
||||
})
|
||||
}, nil)
|
||||
return err
|
||||
@@ -371,7 +362,6 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
|
||||
_, err := xc.Request(FILE_API_URL+"/{fileID}/trash", http.MethodPatch, func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
r.SetPathParam("fileID", obj.GetID())
|
||||
r.SetQueryParam("space", xc.Space)
|
||||
r.SetBody("{}")
|
||||
}, nil)
|
||||
return err
|
||||
@@ -397,7 +387,6 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
|
||||
"size": file.GetSize(),
|
||||
"hash": gcid,
|
||||
"upload_type": UPLOAD_TYPE_RESUMABLE,
|
||||
"space": xc.Space,
|
||||
})
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
@@ -441,7 +430,7 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
|
||||
_, err := xc.Request(FILE_API_URL, http.MethodGet, func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
r.SetQueryParams(map[string]string{
|
||||
"space": xc.Space,
|
||||
"space": "",
|
||||
"__type": "drive",
|
||||
"refresh": "true",
|
||||
"__sync": "true",
|
||||
@@ -451,17 +440,6 @@ func (xc *XunLeiCommon) getFiles(ctx context.Context, folderId string) ([]model.
|
||||
"limit": "100",
|
||||
"filters": `{"phase":{"eq":"PHASE_TYPE_COMPLETE"},"trashed":{"eq":false}}`,
|
||||
})
|
||||
// 获取硬盘挂载目录等
|
||||
if xc.Space != "" {
|
||||
r.SetQueryParamsFromValues(url.Values{
|
||||
"with": []string{
|
||||
"withCategoryDiskMountPath",
|
||||
"withCategoryDriveCachePath",
|
||||
"withCategoryHistoryDownloadPath",
|
||||
"withReadOnlyFS",
|
||||
},
|
||||
})
|
||||
}
|
||||
}, &fileList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -598,7 +576,6 @@ func (xc *XunLeiCommon) OfflineDownload(ctx context.Context, fileUrl string, par
|
||||
"name": fileName,
|
||||
"parent_id": parentDir.GetID(),
|
||||
"upload_type": UPLOAD_TYPE_URL,
|
||||
"space": xc.Space,
|
||||
"url": base.Json{
|
||||
"url": fileUrl,
|
||||
},
|
||||
@@ -625,7 +602,6 @@ func (xc *XunLeiCommon) OfflineList(ctx context.Context, nextPageToken string) (
|
||||
"type": "offline",
|
||||
"limit": "10000",
|
||||
"page_token": nextPageToken,
|
||||
"space": xc.Space,
|
||||
})
|
||||
}, &resp)
|
||||
|
||||
@@ -642,7 +618,6 @@ func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string
|
||||
SetQueryParams(map[string]string{
|
||||
"task_ids": strings.Join(taskIDs, ","),
|
||||
"delete_files": strconv.FormatBool(deleteFiles),
|
||||
"space": xc.Space,
|
||||
})
|
||||
}, nil)
|
||||
if err != nil {
|
||||
|
||||
@@ -46,8 +46,6 @@ type ExpertAddition struct {
|
||||
|
||||
//优先使用视频链接代替下载链接
|
||||
UseVideoUrl bool `json:"use_video_url"`
|
||||
|
||||
Space string `json:"space" default:"" help:"device id for remote device"`
|
||||
}
|
||||
|
||||
// 登录特征,用于判断是否重新登录
|
||||
@@ -82,8 +80,6 @@ type Addition struct {
|
||||
CreditKey string `json:"credit_key" help:"credit key,used for login"`
|
||||
// 登录设备ID
|
||||
DeviceID string `json:"device_id" default:""`
|
||||
|
||||
Space string `json:"space" default:"" help:"device id for remote device"`
|
||||
}
|
||||
|
||||
// 登录特征,用于判断是否重新登录
|
||||
@@ -94,6 +90,7 @@ func (i *Addition) GetIdentity() string {
|
||||
var config = driver.Config{
|
||||
Name: "Thunder",
|
||||
LocalSort: true,
|
||||
OnlyProxy: true,
|
||||
}
|
||||
|
||||
var configExpert = driver.Config{
|
||||
|
||||
@@ -68,7 +68,6 @@ type Common struct {
|
||||
UserAgent string
|
||||
DownloadUserAgent string
|
||||
UseVideoUrl bool
|
||||
Space string
|
||||
|
||||
// 验证码token刷新成功回调
|
||||
refreshCTokenCk func(token string)
|
||||
|
||||
20
go.mod
20
go.mod
@@ -5,14 +5,13 @@ go 1.23.4
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
|
||||
github.com/KirCute/zip v1.0.1
|
||||
github.com/OpenListTeam/go-cache v0.1.0
|
||||
github.com/OpenListTeam/sftpd-openlist v1.0.1
|
||||
github.com/OpenListTeam/tache v0.2.1
|
||||
github.com/OpenListTeam/tache v0.2.0
|
||||
github.com/OpenListTeam/times v0.1.0
|
||||
github.com/OpenListTeam/wazero-wasip2 v0.0.0-20251015145605-cd3a2c9131d9
|
||||
github.com/OpenListTeam/wopan-sdk-go v0.1.5
|
||||
github.com/ProtonMail/go-crypto v1.3.0
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.9.0
|
||||
github.com/SheltonZhu/115driver v1.1.1
|
||||
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
|
||||
github.com/avast/retry-go v3.0.0+incompatible
|
||||
@@ -42,7 +41,6 @@ require (
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499
|
||||
github.com/hekmon/transmissionrpc/v3 v3.0.0
|
||||
github.com/henrybear327/go-proton-api v1.0.0
|
||||
github.com/ipfs/go-ipfs-api v0.7.0
|
||||
github.com/itsHenry35/gofakes3 v0.0.8
|
||||
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
|
||||
@@ -58,6 +56,7 @@ require (
|
||||
github.com/pquerna/otp v1.5.0
|
||||
github.com/quic-go/quic-go v0.54.1
|
||||
github.com/rclone/rclone v1.70.3
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
|
||||
github.com/shirou/gopsutil/v4 v4.25.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.14.0
|
||||
@@ -65,9 +64,11 @@ require (
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5
|
||||
github.com/tchap/go-patricia/v2 v2.3.3
|
||||
github.com/tetratelabs/wazero v1.9.0
|
||||
github.com/u2takey/ffmpeg-go v0.5.0
|
||||
github.com/upyun/go-sdk/v3 v3.0.4
|
||||
github.com/winfsp/cgofuse v1.6.0
|
||||
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
|
||||
github.com/zzzhr1990/go-common-entity v0.0.0-20250202070650-1a200048f0d3
|
||||
golang.org/x/crypto v0.40.0
|
||||
golang.org/x/image v0.29.0
|
||||
@@ -89,6 +90,7 @@ require (
|
||||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
|
||||
github.com/ProtonMail/go-srp v0.0.7 // indirect
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.10.3 // indirect
|
||||
github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.3 // indirect
|
||||
@@ -101,6 +103,7 @@ require (
|
||||
github.com/ebitengine/purego v0.8.4 // indirect
|
||||
github.com/emersion/go-message v0.18.2 // indirect
|
||||
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
|
||||
github.com/henrybear327/go-proton-api v1.0.0 // indirect
|
||||
github.com/geoffgarside/ber v1.2.0 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.3 // indirect
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
|
||||
@@ -115,11 +118,12 @@ require (
|
||||
github.com/minio/xxml v0.0.3 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/otiai10/mint v1.6.3 // indirect
|
||||
github.com/quic-go/qpack v0.5.1 // indirect
|
||||
github.com/relvacode/iso8601 v1.6.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.uber.org/mock v0.5.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
|
||||
github.com/quic-go/qpack v0.5.1 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.uber.org/mock v0.5.0 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
|
||||
)
|
||||
@@ -190,7 +194,7 @@ require (
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
|
||||
github.com/bytedance/sonic v1.13.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-semver v0.3.1
|
||||
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
|
||||
@@ -301,3 +305,5 @@ replace github.com/ProtonMail/go-proton-api => github.com/henrybear327/go-proton
|
||||
replace github.com/cronokirby/saferith => github.com/Da3zKi7/saferith v0.33.0-fixed
|
||||
|
||||
// replace github.com/OpenListTeam/115-sdk-go => ../../OpenListTeam/115-sdk-go
|
||||
|
||||
replace google.golang.org/genproto => google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822
|
||||
|
||||
24
go.sum
24
go.sum
@@ -39,10 +39,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
|
||||
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
|
||||
github.com/KirCute/zip v1.0.1 h1:L/tVZglOiDVKDi9Ud+fN49htgKdQ3Z0H80iX8OZk13c=
|
||||
github.com/KirCute/zip v1.0.1/go.mod h1:xhF7dCB+Bjvy+5a56lenYCKBsH+gxDNPZSy5Cp+nlXk=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
|
||||
github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
|
||||
@@ -53,10 +49,12 @@ github.com/OpenListTeam/gsync v0.1.0 h1:ywzGybOvA3lW8K1BUjKZ2IUlT2FSlzPO4DOazfYX
|
||||
github.com/OpenListTeam/gsync v0.1.0/go.mod h1:h/Rvv9aX/6CdW/7B8di3xK3xNV8dUg45Fehrd/ksZ9s=
|
||||
github.com/OpenListTeam/sftpd-openlist v1.0.1 h1:j4S3iPFOpnXCUKRPS7uCT4mF2VCl34GyqvH6lqwnkUU=
|
||||
github.com/OpenListTeam/sftpd-openlist v1.0.1/go.mod h1:uO/wKnbvbdq3rBLmClMTZXuCnw7XW4wlAq4dZe91a40=
|
||||
github.com/OpenListTeam/tache v0.2.1 h1:Uy/xAr05clHuMrr9+5fXAhv0Z5PGJivp4P5DnRez6cw=
|
||||
github.com/OpenListTeam/tache v0.2.1/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
|
||||
github.com/OpenListTeam/tache v0.2.0 h1:Q4MjuyECn0CZCf1ZF91JaVaZTaps1mOTAm8bFj8sr9Q=
|
||||
github.com/OpenListTeam/tache v0.2.0/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
|
||||
github.com/OpenListTeam/times v0.1.0 h1:qknxw+qj5CYKgXAwydA102UEpPcpU8TYNGRmwRyPYpg=
|
||||
github.com/OpenListTeam/times v0.1.0/go.mod h1:Jx7qen5NCYzKk2w14YuvU48YYMcPa1P9a+EJePC15Pc=
|
||||
github.com/OpenListTeam/wazero-wasip2 v0.0.0-20251015145605-cd3a2c9131d9 h1:yddTD9Fxh6bLMLmG0hSR7Eh6XkoK0RMlE4N1e6/+Iy8=
|
||||
github.com/OpenListTeam/wazero-wasip2 v0.0.0-20251015145605-cd3a2c9131d9/go.mod h1:+BpydPG2cUQHYFwH3/lVmvXyMl/zxHW+XM+XTSzqu2Q=
|
||||
github.com/OpenListTeam/wopan-sdk-go v0.1.5 h1:iKKcVzIqBgtGDbn0QbdWrCazSGxXFmYFyrnFBG+U8dI=
|
||||
github.com/OpenListTeam/wopan-sdk-go v0.1.5/go.mod h1:otynv0CgSNUClPpUgZ44qCZGcMRe0dc83Pkk65xAunI=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
|
||||
@@ -394,6 +392,8 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261 h1:47L8SHM80cXszQydLrpp9MhVkFLLWCvrU9XmJ6XtRu0=
|
||||
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
|
||||
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499 h1:4ovnBdiGDFi8putQGxhipuuhXItAgh4/YnzufPYkZkQ=
|
||||
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@@ -421,6 +421,8 @@ github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
|
||||
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
|
||||
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
|
||||
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
|
||||
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@@ -641,6 +643,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
||||
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
|
||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
|
||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
|
||||
@@ -686,6 +690,8 @@ github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6K
|
||||
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543/go.mod h1:jpwqYA8KUVEvSUJHkCXsnBRJCSKP1BMa81QZ6kvRpow=
|
||||
github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
|
||||
github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
|
||||
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
|
||||
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
|
||||
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
||||
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
||||
@@ -713,6 +719,8 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9 h1:K8gF0eekWPEX+57l30ixxzGhHH/qscI3JCnuhbN6V4M=
|
||||
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhfAXj15352hTOuVmG5Gzo8xNRINfqI=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
|
||||
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
|
||||
@@ -740,8 +748,6 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
|
||||
@@ -756,8 +762,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
|
||||
24
internal/alloc/alloc_other.go
Normal file
24
internal/alloc/alloc_other.go
Normal file
@@ -0,0 +1,24 @@
|
||||
//go:build !unix && !windows
|
||||
|
||||
package alloc // import "github.com/ncruces/go-sqlite3/internal/alloc"
|
||||
|
||||
import "github.com/tetratelabs/wazero/experimental"
|
||||
|
||||
func NewMemory(cap, max uint64) experimental.LinearMemory {
|
||||
return &sliceMemory{make([]byte, 0, cap)}
|
||||
}
|
||||
|
||||
type sliceMemory struct {
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (b *sliceMemory) Free() {}
|
||||
|
||||
func (b *sliceMemory) Reallocate(size uint64) []byte {
|
||||
if cap := uint64(cap(b.buf)); size > cap {
|
||||
b.buf = append(b.buf[:cap], make([]byte, size-cap)...)
|
||||
} else {
|
||||
b.buf = b.buf[:size]
|
||||
}
|
||||
return b.buf
|
||||
}
|
||||
14
internal/alloc/alloc_test.go
Normal file
14
internal/alloc/alloc_test.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package alloc_test // import "github.com/ncruces/go-sqlite3/internal/alloc"
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/alloc"
|
||||
)
|
||||
|
||||
func TestVirtual(t *testing.T) {
|
||||
defer func() { _ = recover() }()
|
||||
alloc.NewMemory(math.MaxInt+2, math.MaxInt+2)
|
||||
t.Error("want panic")
|
||||
}
|
||||
75
internal/alloc/alloc_unix.go
Normal file
75
internal/alloc/alloc_unix.go
Normal file
@@ -0,0 +1,75 @@
|
||||
//go:build unix
|
||||
|
||||
package alloc // import "github.com/ncruces/go-sqlite3/internal/alloc"
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/tetratelabs/wazero/experimental"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func NewMemory(cap, max uint64) experimental.LinearMemory {
|
||||
// Round up to the page size.
|
||||
rnd := uint64(unix.Getpagesize() - 1)
|
||||
res := (max + rnd) &^ rnd
|
||||
|
||||
if res > math.MaxInt {
|
||||
// This ensures int(res) overflows to a negative value,
|
||||
// and unix.Mmap returns EINVAL.
|
||||
res = math.MaxUint64
|
||||
}
|
||||
|
||||
com := res
|
||||
prot := unix.PROT_READ | unix.PROT_WRITE
|
||||
if cap < max { // Commit memory only if cap=max.
|
||||
com = 0
|
||||
prot = unix.PROT_NONE
|
||||
}
|
||||
|
||||
// Reserve res bytes of address space, to ensure we won't need to move it.
|
||||
// A protected, private, anonymous mapping should not commit memory.
|
||||
b, err := unix.Mmap(-1, 0, int(res), prot, unix.MAP_PRIVATE|unix.MAP_ANON)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &mmappedMemory{buf: b[:com]}
|
||||
}
|
||||
|
||||
// The slice covers the entire mmapped memory:
|
||||
// - len(buf) is the already committed memory,
|
||||
// - cap(buf) is the reserved address space.
|
||||
type mmappedMemory struct {
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (m *mmappedMemory) Reallocate(size uint64) []byte {
|
||||
com := uint64(len(m.buf))
|
||||
res := uint64(cap(m.buf))
|
||||
if com < size && size <= res {
|
||||
// Grow geometrically, round up to the page size.
|
||||
rnd := uint64(unix.Getpagesize() - 1)
|
||||
new := com + com>>3
|
||||
new = min(max(size, new), res)
|
||||
new = (new + rnd) &^ rnd
|
||||
|
||||
// Commit additional memory up to new bytes.
|
||||
err := unix.Mprotect(m.buf[com:new], unix.PROT_READ|unix.PROT_WRITE)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
m.buf = m.buf[:new] // Update committed memory.
|
||||
}
|
||||
// Limit returned capacity because bytes beyond
|
||||
// len(m.buf) have not yet been committed.
|
||||
return m.buf[:size:len(m.buf)]
|
||||
}
|
||||
|
||||
func (m *mmappedMemory) Free() {
|
||||
err := unix.Munmap(m.buf[:cap(m.buf)])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
m.buf = nil
|
||||
}
|
||||
76
internal/alloc/alloc_windows.go
Normal file
76
internal/alloc/alloc_windows.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package alloc // import "github.com/ncruces/go-sqlite3/internal/alloc"
|
||||
|
||||
import (
|
||||
"math"
|
||||
"unsafe"
|
||||
|
||||
"github.com/tetratelabs/wazero/experimental"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func NewMemory(cap, max uint64) experimental.LinearMemory {
|
||||
// Round up to the page size.
|
||||
rnd := uint64(windows.Getpagesize() - 1)
|
||||
res := (max + rnd) &^ rnd
|
||||
|
||||
if res > math.MaxInt {
|
||||
// This ensures uintptr(res) overflows to a large value,
|
||||
// and windows.VirtualAlloc returns an error.
|
||||
res = math.MaxUint64
|
||||
}
|
||||
|
||||
com := res
|
||||
kind := windows.MEM_COMMIT
|
||||
if cap < max { // Commit memory only if cap=max.
|
||||
com = 0
|
||||
kind = windows.MEM_RESERVE
|
||||
}
|
||||
|
||||
// Reserve res bytes of address space, to ensure we won't need to move it.
|
||||
r, err := windows.VirtualAlloc(0, uintptr(res), uint32(kind), windows.PAGE_READWRITE)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
buf := unsafe.Slice((*byte)(unsafe.Pointer(r)), int(max))
|
||||
mem := virtualMemory{addr: r, buf: buf[:com:res]}
|
||||
return &mem
|
||||
}
|
||||
|
||||
// The slice covers the entire mmapped memory:
|
||||
// - len(buf) is the already committed memory,
|
||||
// - cap(buf) is the reserved address space.
|
||||
type virtualMemory struct {
|
||||
buf []byte
|
||||
addr uintptr
|
||||
}
|
||||
|
||||
func (m *virtualMemory) Reallocate(size uint64) []byte {
|
||||
com := uint64(len(m.buf))
|
||||
res := uint64(cap(m.buf))
|
||||
if com < size && size <= res {
|
||||
// Grow geometrically, round up to the page size.
|
||||
rnd := uint64(windows.Getpagesize() - 1)
|
||||
new := com + com>>3
|
||||
new = min(max(size, new), res)
|
||||
new = (new + rnd) &^ rnd
|
||||
|
||||
// Commit additional memory up to new bytes.
|
||||
_, err := windows.VirtualAlloc(m.addr, uintptr(new), windows.MEM_COMMIT, windows.PAGE_READWRITE)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
m.buf = m.buf[:new] // Update committed memory.
|
||||
}
|
||||
// Limit returned capacity because bytes beyond
|
||||
// len(m.buf) have not yet been committed.
|
||||
return m.buf[:size:len(m.buf)]
|
||||
}
|
||||
|
||||
func (m *virtualMemory) Free() {
|
||||
err := windows.VirtualFree(m.addr, 0, windows.MEM_RELEASE)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
m.addr = 0
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
@@ -22,7 +21,7 @@ func (RarDecoder) AcceptedExtensions() []string {
|
||||
|
||||
func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{
|
||||
".part1.rar": {regexp.MustCompile("^.*\\.part(\\d+)\\.rar$"), 2},
|
||||
".part1.rar": {".part%d.rar", 2},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package sevenzip
|
||||
|
||||
import (
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
@@ -19,7 +18,7 @@ func (SevenZip) AcceptedExtensions() []string {
|
||||
|
||||
func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{
|
||||
".7z.001": {regexp.MustCompile("^.*\\.7z\\.(\\d+)$"), 2},
|
||||
".7z.001": {".7z.%.3d", 2},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,14 +2,13 @@ package tool
|
||||
|
||||
import (
|
||||
"io"
|
||||
"regexp"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
)
|
||||
|
||||
type MultipartExtension struct {
|
||||
PartFileFormat *regexp.Regexp
|
||||
PartFileFormat string
|
||||
SecondPartIndex int
|
||||
}
|
||||
|
||||
|
||||
@@ -4,15 +4,22 @@ import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/fs"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/KirCute/zip"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/setting"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"golang.org/x/text/encoding/ianaindex"
|
||||
"github.com/saintfish/chardet"
|
||||
"github.com/yeka/zip"
|
||||
"golang.org/x/text/encoding"
|
||||
"golang.org/x/text/encoding/charmap"
|
||||
"golang.org/x/text/encoding/japanese"
|
||||
"golang.org/x/text/encoding/korean"
|
||||
"golang.org/x/text/encoding/simplifiedchinese"
|
||||
"golang.org/x/text/encoding/traditionalchinese"
|
||||
"golang.org/x/text/encoding/unicode"
|
||||
"golang.org/x/text/encoding/unicode/utf32"
|
||||
"golang.org/x/text/transform"
|
||||
)
|
||||
|
||||
@@ -30,11 +37,10 @@ func (r *WrapReader) Files() []tool.SubFile {
|
||||
|
||||
type WrapFileInfo struct {
|
||||
fs.FileInfo
|
||||
efs bool
|
||||
}
|
||||
|
||||
func (f *WrapFileInfo) Name() string {
|
||||
return decodeName(f.FileInfo.Name(), f.efs)
|
||||
return decodeName(f.FileInfo.Name())
|
||||
}
|
||||
|
||||
type WrapFile struct {
|
||||
@@ -42,11 +48,11 @@ type WrapFile struct {
|
||||
}
|
||||
|
||||
func (f *WrapFile) Name() string {
|
||||
return decodeName(f.f.Name, isEFS(f.f.Flags))
|
||||
return decodeName(f.f.Name)
|
||||
}
|
||||
|
||||
func (f *WrapFile) FileInfo() fs.FileInfo {
|
||||
return &WrapFileInfo{FileInfo: f.f.FileInfo(), efs: isEFS(f.f.Flags)}
|
||||
return &WrapFileInfo{FileInfo: f.f.FileInfo()}
|
||||
}
|
||||
|
||||
func (f *WrapFile) Open() (io.ReadCloser, error) {
|
||||
@@ -61,33 +67,16 @@ func (f *WrapFile) SetPassword(password string) {
|
||||
f.f.SetPassword(password)
|
||||
}
|
||||
|
||||
func makePart(ss *stream.SeekableStream) (zip.SizeReaderAt, error) {
|
||||
ra, err := stream.NewReadAtSeeker(ss, 0)
|
||||
func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
|
||||
if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
|
||||
// FIXME: Incorrect parsing method for standard multipart zip format
|
||||
ss = append(ss[1:], ss[0])
|
||||
}
|
||||
reader, err := stream.NewMultiReaderAt(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &inlineSizeReaderAt{ReaderAt: ra, size: ss.GetSize()}, nil
|
||||
}
|
||||
|
||||
func (z *Zip) getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
|
||||
if len(ss) > 1 && z.traditionalSecondPartRegExp.MatchString(ss[1].GetName()) {
|
||||
ss = append(ss[1:], ss[0])
|
||||
ras := make([]zip.SizeReaderAt, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
ra, err := makePart(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ras = append(ras, ra)
|
||||
}
|
||||
return zip.NewMultipartReader(ras)
|
||||
} else {
|
||||
reader, err := stream.NewMultiReaderAt(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return zip.NewReader(reader, reader.Size())
|
||||
}
|
||||
return zip.NewReader(reader, reader.Size())
|
||||
}
|
||||
|
||||
func filterPassword(err error) error {
|
||||
@@ -97,29 +86,110 @@ func filterPassword(err error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func decodeName(name string, efs bool) string {
|
||||
if efs {
|
||||
return name
|
||||
}
|
||||
enc, err := ianaindex.IANA.Encoding(setting.GetStr(conf.NonEFSZipEncoding))
|
||||
func decodeName(name string) string {
|
||||
b := []byte(name)
|
||||
detector := chardet.NewTextDetector()
|
||||
results, err := detector.DetectAll(b)
|
||||
if err != nil {
|
||||
return name
|
||||
}
|
||||
i := bytes.NewReader([]byte(name))
|
||||
var ce, re, enc encoding.Encoding
|
||||
for _, r := range results {
|
||||
if r.Confidence > 30 {
|
||||
ce = getCommonEncoding(r.Charset)
|
||||
if ce != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if re == nil {
|
||||
re = getEncoding(r.Charset)
|
||||
}
|
||||
}
|
||||
if ce != nil {
|
||||
enc = ce
|
||||
} else if re != nil {
|
||||
enc = re
|
||||
} else {
|
||||
return name
|
||||
}
|
||||
i := bytes.NewReader(b)
|
||||
decoder := transform.NewReader(i, enc.NewDecoder())
|
||||
content, _ := io.ReadAll(decoder)
|
||||
return string(content)
|
||||
}
|
||||
|
||||
func isEFS(flags uint16) bool {
|
||||
return (flags & 0x800) > 0
|
||||
func getCommonEncoding(name string) (enc encoding.Encoding) {
|
||||
switch name {
|
||||
case "UTF-8":
|
||||
enc = unicode.UTF8
|
||||
case "UTF-16LE":
|
||||
enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
|
||||
case "Shift_JIS":
|
||||
enc = japanese.ShiftJIS
|
||||
case "GB-18030":
|
||||
enc = simplifiedchinese.GB18030
|
||||
case "EUC-KR":
|
||||
enc = korean.EUCKR
|
||||
case "Big5":
|
||||
enc = traditionalchinese.Big5
|
||||
default:
|
||||
enc = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type inlineSizeReaderAt struct {
|
||||
io.ReaderAt
|
||||
size int64
|
||||
}
|
||||
|
||||
func (i *inlineSizeReaderAt) Size() int64 {
|
||||
return i.size
|
||||
func getEncoding(name string) (enc encoding.Encoding) {
|
||||
switch name {
|
||||
case "UTF-8":
|
||||
enc = unicode.UTF8
|
||||
case "UTF-16BE":
|
||||
enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
|
||||
case "UTF-16LE":
|
||||
enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
|
||||
case "UTF-32BE":
|
||||
enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM)
|
||||
case "UTF-32LE":
|
||||
enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM)
|
||||
case "ISO-8859-1":
|
||||
enc = charmap.ISO8859_1
|
||||
case "ISO-8859-2":
|
||||
enc = charmap.ISO8859_2
|
||||
case "ISO-8859-3":
|
||||
enc = charmap.ISO8859_3
|
||||
case "ISO-8859-4":
|
||||
enc = charmap.ISO8859_4
|
||||
case "ISO-8859-5":
|
||||
enc = charmap.ISO8859_5
|
||||
case "ISO-8859-6":
|
||||
enc = charmap.ISO8859_6
|
||||
case "ISO-8859-7":
|
||||
enc = charmap.ISO8859_7
|
||||
case "ISO-8859-8":
|
||||
enc = charmap.ISO8859_8
|
||||
case "ISO-8859-8-I":
|
||||
enc = charmap.ISO8859_8I
|
||||
case "ISO-8859-9":
|
||||
enc = charmap.ISO8859_9
|
||||
case "windows-1251":
|
||||
enc = charmap.Windows1251
|
||||
case "windows-1256":
|
||||
enc = charmap.Windows1256
|
||||
case "KOI8-R":
|
||||
enc = charmap.KOI8R
|
||||
case "Shift_JIS":
|
||||
enc = japanese.ShiftJIS
|
||||
case "GB-18030":
|
||||
enc = simplifiedchinese.GB18030
|
||||
case "EUC-JP":
|
||||
enc = japanese.EUCJP
|
||||
case "EUC-KR":
|
||||
enc = korean.EUCKR
|
||||
case "Big5":
|
||||
enc = traditionalchinese.Big5
|
||||
case "ISO-2022-JP":
|
||||
enc = japanese.ISO2022JP
|
||||
default:
|
||||
enc = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package zip
|
||||
import (
|
||||
"io"
|
||||
stdpath "path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
@@ -13,39 +12,34 @@ import (
|
||||
)
|
||||
|
||||
type Zip struct {
|
||||
traditionalSecondPartRegExp *regexp.Regexp
|
||||
}
|
||||
|
||||
func (z *Zip) AcceptedExtensions() []string {
|
||||
func (Zip) AcceptedExtensions() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (z *Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
|
||||
return map[string]tool.MultipartExtension{
|
||||
".zip": {regexp.MustCompile("^.*\\.z(\\d+)$"), 1},
|
||||
".zip.001": {regexp.MustCompile("^.*\\.zip\\.(\\d+)$"), 2},
|
||||
".zip": {".z%.2d", 1},
|
||||
".zip.001": {".zip.%.3d", 2},
|
||||
}
|
||||
}
|
||||
|
||||
func (z *Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
zipReader, err := z.getReader(ss)
|
||||
func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
efs := true
|
||||
if len(zipReader.File) > 0 {
|
||||
efs = isEFS(zipReader.File[0].Flags)
|
||||
}
|
||||
encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
|
||||
return &model.ArchiveMetaInfo{
|
||||
Comment: decodeName(zipReader.Comment, efs),
|
||||
Comment: zipReader.Comment,
|
||||
Encrypted: encrypted,
|
||||
Tree: tree,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
zipReader, err := z.getReader(ss)
|
||||
func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -63,7 +57,7 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
|
||||
_ = rc.Close()
|
||||
passVerified = true
|
||||
}
|
||||
name := strings.TrimSuffix(decodeName(file.Name, isEFS(file.Flags)), "/")
|
||||
name := strings.TrimSuffix(decodeName(file.Name), "/")
|
||||
if strings.Contains(name, "/") {
|
||||
// 有些压缩包不压缩第一个文件夹
|
||||
strs := strings.Split(name, "/")
|
||||
@@ -76,7 +70,7 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
|
||||
}
|
||||
continue
|
||||
}
|
||||
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo(), efs: isEFS(file.Flags)}))
|
||||
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
|
||||
}
|
||||
if len(ret) == 0 && dir != nil {
|
||||
ret = append(ret, dir)
|
||||
@@ -87,13 +81,13 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
|
||||
ret := make([]model.Obj, 0)
|
||||
exist := false
|
||||
for _, file := range zipReader.File {
|
||||
name := decodeName(file.Name, isEFS(file.Flags))
|
||||
name := decodeName(file.Name)
|
||||
dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
|
||||
if dir != innerPath {
|
||||
continue
|
||||
}
|
||||
exist = true
|
||||
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo(), isEFS(file.Flags)}))
|
||||
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
|
||||
}
|
||||
if !exist {
|
||||
return nil, errs.ObjectNotFound
|
||||
@@ -102,14 +96,14 @@ func (z *Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]
|
||||
}
|
||||
}
|
||||
|
||||
func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
zipReader, err := z.getReader(ss)
|
||||
func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
innerPath := strings.TrimPrefix(args.InnerPath, "/")
|
||||
for _, file := range zipReader.File {
|
||||
if decodeName(file.Name, isEFS(file.Flags)) == innerPath {
|
||||
if decodeName(file.Name) == innerPath {
|
||||
if file.IsEncrypted() {
|
||||
file.SetPassword(args.Password)
|
||||
}
|
||||
@@ -123,8 +117,8 @@ func (z *Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs)
|
||||
return nil, 0, errs.ObjectNotFound
|
||||
}
|
||||
|
||||
func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
zipReader, err := z.getReader(ss)
|
||||
func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
|
||||
zipReader, err := getReader(ss)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -134,7 +128,5 @@ func (z *Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args mo
|
||||
var _ tool.Tool = (*Zip)(nil)
|
||||
|
||||
func init() {
|
||||
tool.RegisterTool(&Zip{
|
||||
traditionalSecondPartRegExp: regexp.MustCompile("^.*\\.z0*1$"),
|
||||
})
|
||||
tool.RegisterTool(Zip{})
|
||||
}
|
||||
|
||||
@@ -140,10 +140,6 @@ func InitConfig() {
|
||||
log.Fatalf("create temp dir error: %+v", err)
|
||||
}
|
||||
log.Debugf("config: %+v", conf.Conf)
|
||||
|
||||
// Validate and display proxy configuration status
|
||||
validateProxyConfig()
|
||||
|
||||
base.InitClient()
|
||||
initURL()
|
||||
}
|
||||
@@ -183,14 +179,3 @@ func CleanTempDir() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// validateProxyConfig validates proxy configuration and displays status at startup
|
||||
func validateProxyConfig() {
|
||||
if conf.Conf.ProxyAddress != "" {
|
||||
if _, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
|
||||
log.Infof("Proxy enabled: %s", conf.Conf.ProxyAddress)
|
||||
} else {
|
||||
log.Errorf("Invalid proxy address format: %s, error: %v", conf.Conf.ProxyAddress, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,7 +154,6 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: conf.SharePreviewArchivesByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
|
||||
{Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
|
||||
{Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
|
||||
{Key: conf.NonEFSZipEncoding, Value: "IBM437", Type: conf.TypeString, Group: model.PREVIEW},
|
||||
// global settings
|
||||
{Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL},
|
||||
{Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL},
|
||||
|
||||
23
internal/bootstrap/plugin.go
Normal file
23
internal/bootstrap/plugin.go
Normal file
@@ -0,0 +1,23 @@
|
||||
// internal/bootstrap/plugin.go
|
||||
package bootstrap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/cmd/flags"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/plugin"
|
||||
)
|
||||
|
||||
// InitPlugins 初始化插件管理器
|
||||
func InitPlugins() {
|
||||
// 2. 创建并初始化 Manager
|
||||
// "data" 目录应从配置中获取
|
||||
manager, err := plugin.NewManager(context.Background(), flags.DataDir)
|
||||
if err != nil {
|
||||
// 在启动时,如果插件系统失败,应该 panic
|
||||
panic(fmt.Sprintf("Failed to initialize plugin manager: %v", err))
|
||||
}
|
||||
|
||||
plugin.PluginManager = manager
|
||||
}
|
||||
@@ -131,7 +131,6 @@ type Config struct {
|
||||
FTP FTP `json:"ftp" envPrefix:"FTP_"`
|
||||
SFTP SFTP `json:"sftp" envPrefix:"SFTP_"`
|
||||
LastLaunchedVersion string `json:"last_launched_version"`
|
||||
ProxyAddress string `json:"proxy_address" env:"PROXY_ADDRESS"`
|
||||
}
|
||||
|
||||
func DefaultConfig(dataDir string) *Config {
|
||||
@@ -245,6 +244,5 @@ func DefaultConfig(dataDir string) *Config {
|
||||
Listen: ":5222",
|
||||
},
|
||||
LastLaunchedVersion: "",
|
||||
ProxyAddress: "",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,7 +38,6 @@ const (
|
||||
SharePreviewArchivesByDefault = "share_preview_archives_by_default"
|
||||
ReadMeAutoRender = "readme_autorender"
|
||||
FilterReadMeScripts = "filter_readme_scripts"
|
||||
NonEFSZipEncoding = "non_efs_zip_encoding"
|
||||
|
||||
// global
|
||||
HideFiles = "hide_files"
|
||||
|
||||
@@ -12,7 +12,7 @@ var db *gorm.DB
|
||||
|
||||
func Init(d *gorm.DB) {
|
||||
db = d
|
||||
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB))
|
||||
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB), new(model.Plugin))
|
||||
if err != nil {
|
||||
log.Fatalf("failed migrate database: %s", err.Error())
|
||||
}
|
||||
|
||||
47
internal/db/plugin.go
Normal file
47
internal/db/plugin.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// CreatePlugin 在数据库中插入一条新的插件记录
|
||||
// 如果记录已存在,则会更新它 (Upsert)
|
||||
func CreatePlugin(ctx context.Context, plugin *model.Plugin) error {
|
||||
return db.WithContext(ctx).Save(plugin).Error
|
||||
}
|
||||
|
||||
// GetPluginByID 从数据库中根据 ID 查询单个插件
|
||||
func GetPluginByID(ctx context.Context, id string) (*model.Plugin, error) {
|
||||
var plugin model.Plugin
|
||||
err := db.WithContext(ctx).First(&plugin, "id = ?", id).Error
|
||||
if err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
return nil, nil // 返回 nil, nil 表示未找到
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &plugin, nil
|
||||
}
|
||||
|
||||
// GetAllPlugins 从数据库中获取所有已安装的插件
|
||||
func GetAllPlugins(ctx context.Context) ([]*model.Plugin, error) {
|
||||
var plugins []*model.Plugin
|
||||
err := db.WithContext(ctx).Find(&plugins).Error
|
||||
return plugins, err
|
||||
}
|
||||
|
||||
// DeletePluginByID 从数据库中根据 ID 删除一个插件
|
||||
func DeletePluginByID(ctx context.Context, id string) error {
|
||||
return db.WithContext(ctx).Delete(&model.Plugin{}, "id = ?", id).Error
|
||||
}
|
||||
|
||||
// UpdatePluginStatus 更新指定插件的状态和消息
|
||||
func UpdatePluginStatus(ctx context.Context, pluginID string, status model.PluginStatus, message string) error {
|
||||
return db.WithContext(ctx).Model(&model.Plugin{}).Where("id = ?", pluginID).Updates(map[string]interface{}{
|
||||
"status": status,
|
||||
"message": message,
|
||||
}).Error
|
||||
}
|
||||
@@ -38,26 +38,18 @@ func GetSharingsByCreatorId(creator uint, pageIndex, pageSize int) (sharings []m
|
||||
}
|
||||
|
||||
func CreateSharing(s *model.SharingDB) (string, error) {
|
||||
if s.ID == "" {
|
||||
id := random.String(8)
|
||||
for len(id) < 12 {
|
||||
old := model.SharingDB{
|
||||
ID: id,
|
||||
}
|
||||
if err := db.Where(old).First(&old).Error; err != nil {
|
||||
s.ID = id
|
||||
return id, errors.WithStack(db.Create(s).Error)
|
||||
}
|
||||
id += random.String(1)
|
||||
id := random.String(8)
|
||||
for len(id) < 12 {
|
||||
old := model.SharingDB{
|
||||
ID: id,
|
||||
}
|
||||
return "", errors.New("failed find valid id")
|
||||
} else {
|
||||
query := model.SharingDB{ID: s.ID}
|
||||
if err := db.Where(query).First(&query).Error; err == nil {
|
||||
return "", errors.New("sharing already exist")
|
||||
if err := db.Where(old).First(&old).Error; err != nil {
|
||||
s.ID = id
|
||||
return id, errors.WithStack(db.Create(s).Error)
|
||||
}
|
||||
return s.ID, errors.WithStack(db.Create(s).Error)
|
||||
id += random.String(1)
|
||||
}
|
||||
return "", errors.New("failed find valid id")
|
||||
}
|
||||
|
||||
func UpdateSharing(s *model.SharingDB) error {
|
||||
|
||||
@@ -218,12 +218,3 @@ type LinkCacheModeResolver interface {
|
||||
// ResolveLinkCacheMode returns the LinkCacheMode for the given path.
|
||||
ResolveLinkCacheMode(path string) LinkCacheMode
|
||||
}
|
||||
|
||||
type DirectUploader interface {
|
||||
// GetDirectUploadTools returns available frontend-direct upload tools
|
||||
GetDirectUploadTools() []string
|
||||
// GetDirectUploadInfo returns the information needed for direct upload from client to storage
|
||||
// actualPath is the path relative to the storage root (after removing mount path prefix)
|
||||
// return errs.NotImplement if the driver does not support the given direct upload tool
|
||||
GetDirectUploadInfo(ctx context.Context, tool string, dstDir model.Obj, fileName string, fileSize int64) (any, error)
|
||||
}
|
||||
|
||||
@@ -19,6 +19,10 @@ type Info struct {
|
||||
Config Config `json:"config"`
|
||||
}
|
||||
|
||||
type IGetItem interface {
|
||||
GetItems() []Item
|
||||
}
|
||||
|
||||
type IRootPath interface {
|
||||
GetRootPath() string
|
||||
}
|
||||
|
||||
@@ -7,10 +7,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ObjectNotFound = errors.New("object not found")
|
||||
ObjectAlreadyExists = errors.New("object already exists")
|
||||
NotFolder = errors.New("not a folder")
|
||||
NotFile = errors.New("not a file")
|
||||
ObjectNotFound = errors.New("object not found")
|
||||
NotFolder = errors.New("not a folder")
|
||||
NotFile = errors.New("not a file")
|
||||
)
|
||||
|
||||
func IsObjectNotFound(err error) bool {
|
||||
|
||||
@@ -167,14 +167,6 @@ func GetStorage(path string, args *GetStoragesArgs) (driver.Driver, error) {
|
||||
return storageDriver, nil
|
||||
}
|
||||
|
||||
func GetStorageAndActualPath(path string) (driver.Driver, string, error) {
|
||||
return op.GetStorageAndActualPath(path)
|
||||
}
|
||||
|
||||
func GetByActualPath(ctx context.Context, storage driver.Driver, actualPath string) (model.Obj, error) {
|
||||
return op.Get(ctx, storage, actualPath)
|
||||
}
|
||||
|
||||
func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
|
||||
res, err := other(ctx, args)
|
||||
if err != nil {
|
||||
@@ -198,11 +190,3 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error {
|
||||
}
|
||||
return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
|
||||
}
|
||||
|
||||
func GetDirectUploadInfo(ctx context.Context, tool, path, dstName string, fileSize int64) (any, error) {
|
||||
info, err := getDirectUploadInfo(ctx, tool, path, dstName, fileSize)
|
||||
if err != nil {
|
||||
log.Errorf("failed get %s direct upload info for %s(%d bytes): %+v", path, dstName, fileSize, err)
|
||||
}
|
||||
return info, err
|
||||
}
|
||||
|
||||
@@ -105,11 +105,3 @@ func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer
|
||||
}
|
||||
return op.Put(ctx, storage, dstDirActualPath, file, nil, lazyCache...)
|
||||
}
|
||||
|
||||
func getDirectUploadInfo(ctx context.Context, tool, dstDirPath, dstName string, fileSize int64) (any, error) {
|
||||
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "failed get storage")
|
||||
}
|
||||
return op.GetDirectUploadInfo(ctx, tool, storage, dstDirActualPath, dstName, fileSize)
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ type Link struct {
|
||||
//for accelerating request, use multi-thread downloading
|
||||
Concurrency int `json:"concurrency"`
|
||||
PartSize int `json:"part_size"`
|
||||
ContentLength int64 `json:"content_length"` // 转码视频、缩略图
|
||||
ContentLength int64 `json:"-"` // 转码视频、缩略图
|
||||
|
||||
utils.SyncClosers `json:"-"`
|
||||
// 如果SyncClosers中的资源被关闭后Link将不可用,则此值应为 true
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
package model
|
||||
|
||||
type HttpDirectUploadInfo struct {
|
||||
UploadURL string `json:"upload_url"` // The URL to upload the file
|
||||
ChunkSize int64 `json:"chunk_size"` // The chunk size for uploading, 0 means no chunking required
|
||||
Headers map[string]string `json:"headers,omitempty"` // Optional headers to include in the upload request
|
||||
Method string `json:"method,omitempty"` // HTTP method, default is PUT
|
||||
}
|
||||
@@ -27,9 +27,6 @@ func (f *FileCloser) Close() error {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
// FileRangeReader 是对 RangeReaderIF 的轻量包装,表明由 RangeReaderIF.RangeRead
|
||||
// 返回的 io.ReadCloser 同时实现了 model.File(即支持 Read/ReadAt/Seek)。
|
||||
// 只有满足这些才需要使用 FileRangeReader,否则直接使用 RangeReaderIF 即可。
|
||||
type FileRangeReader struct {
|
||||
RangeReaderIF
|
||||
}
|
||||
|
||||
@@ -48,6 +48,7 @@ type FileStreamer interface {
|
||||
// for a non-seekable Stream, if Read is called, this function won't work.
|
||||
// caches the full Stream and writes it to writer (if provided, even if the stream is already cached).
|
||||
CacheFullAndWriter(up *UpdateProgress, writer io.Writer) (File, error)
|
||||
SetTmpFile(file File)
|
||||
// if the Stream is not a File and is not cached, returns nil.
|
||||
GetFile() File
|
||||
}
|
||||
|
||||
42
internal/model/plugin.go
Normal file
42
internal/model/plugin.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package model
|
||||
|
||||
import "time"
|
||||
|
||||
// PluginStatus 定义了插件的几种可能状态
|
||||
type PluginStatus string
|
||||
|
||||
const (
|
||||
// StatusActive 表示插件已成功加载并正在运行
|
||||
StatusActive PluginStatus = "active"
|
||||
// StatusInactive 表示插件已安装但未加载(例如,等待重启)
|
||||
StatusInactive PluginStatus = "inactive"
|
||||
// StatusError 表示插件在加载或运行时遇到错误
|
||||
StatusError PluginStatus = "error"
|
||||
)
|
||||
|
||||
type Plugin struct {
|
||||
// 插件的唯一标识符,例如 "com.openlist.driver.s3"
|
||||
// 这是主键
|
||||
ID string `gorm:"primaryKey" json:"id"`
|
||||
|
||||
// --- 来自插件元数据 ---
|
||||
Name string `json:"name"`
|
||||
Version string `json:"version"`
|
||||
Author string `json:"author"`
|
||||
Description string `gorm:"type:text" json:"description"`
|
||||
IconURL string `json:"icon_url"`
|
||||
|
||||
// --- 管理器需要的信息 ---
|
||||
// 插件的下载源地址
|
||||
SourceURL string `json:"source_url"`
|
||||
// Wasm 文件在本地的存储路径
|
||||
WasmPath string `json:"wasm_path"`
|
||||
|
||||
// 新增状态字段
|
||||
Status PluginStatus `gorm:"default:'inactive'" json:"status"`
|
||||
Message string `gorm:"type:text" json:"message"` // 用于存储错误信息
|
||||
|
||||
// --- GORM 自动管理字段 ---
|
||||
CreatedAt time.Time `json:"-"`
|
||||
UpdatedAt time.Time `json:"-"`
|
||||
}
|
||||
@@ -283,15 +283,11 @@ func HttpClient() *http.Client {
|
||||
}
|
||||
|
||||
func NewHttpClient() *http.Client {
|
||||
transport := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
|
||||
}
|
||||
|
||||
SetProxyIfConfigured(transport)
|
||||
|
||||
return &http.Client{
|
||||
Timeout: time.Hour * 48,
|
||||
Transport: transport,
|
||||
Timeout: time.Hour * 48,
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -307,9 +305,39 @@ func rangesMIMESize(ranges []http_range.Range, contentType string, contentSize i
|
||||
return encSize, nil
|
||||
}
|
||||
|
||||
// LimitedReadCloser wraps a io.ReadCloser and limits the number of bytes that can be read from it.
|
||||
type LimitedReadCloser struct {
|
||||
rc io.ReadCloser
|
||||
remaining int
|
||||
}
|
||||
|
||||
func (l *LimitedReadCloser) Read(buf []byte) (int, error) {
|
||||
if l.remaining <= 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if len(buf) > l.remaining {
|
||||
buf = buf[0:l.remaining]
|
||||
}
|
||||
|
||||
n, err := l.rc.Read(buf)
|
||||
l.remaining -= n
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (l *LimitedReadCloser) Close() error {
|
||||
return l.rc.Close()
|
||||
}
|
||||
|
||||
// GetRangedHttpReader some http server doesn't support "Range" header,
|
||||
// so this function read readCloser with whole data, skip offset, then return ReaderCloser.
|
||||
func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.ReadCloser, error) {
|
||||
var length_int int
|
||||
if length > math.MaxInt {
|
||||
return nil, fmt.Errorf("doesnot support length bigger than int32 max ")
|
||||
}
|
||||
length_int = int(length)
|
||||
|
||||
if offset > 100*1024*1024 {
|
||||
log.Warnf("offset is more than 100MB, if loading data from internet, high-latency and wasting of bandwidth is expected")
|
||||
@@ -320,25 +348,5 @@ func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.Rea
|
||||
}
|
||||
|
||||
// return an io.ReadCloser that is limited to `length` bytes.
|
||||
return readers.NewLimitedReadCloser(readCloser, length), nil
|
||||
}
|
||||
|
||||
// SetProxyIfConfigured sets proxy for HTTP Transport if configured
|
||||
func SetProxyIfConfigured(transport *http.Transport) {
|
||||
// If proxy address is configured, override environment variable settings
|
||||
if conf.Conf.ProxyAddress != "" {
|
||||
if proxyURL, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
|
||||
transport.Proxy = http.ProxyURL(proxyURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetRestyProxyIfConfigured sets proxy for Resty client if configured
|
||||
func SetRestyProxyIfConfigured(client *resty.Client) {
|
||||
// If proxy address is configured, override environment variable settings
|
||||
if conf.Conf.ProxyAddress != "" {
|
||||
if proxyURL, err := url.Parse(conf.Conf.ProxyAddress); err == nil {
|
||||
client.SetProxy(proxyURL.String())
|
||||
}
|
||||
}
|
||||
return &LimitedReadCloser{readCloser, length_int}, nil
|
||||
}
|
||||
|
||||
@@ -3,18 +3,19 @@ package op
|
||||
import (
|
||||
"context"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
stdpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/cache"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
gocache "github.com/OpenListTeam/go-cache"
|
||||
@@ -60,25 +61,20 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
|
||||
}
|
||||
|
||||
// Get archive tool
|
||||
var partExt *tool.MultipartExtension
|
||||
var t tool.Tool
|
||||
ext := obj.GetName()
|
||||
for {
|
||||
var found bool
|
||||
_, ext, found = strings.Cut(ext, ".")
|
||||
if !found {
|
||||
baseName, ext, found := strings.Cut(obj.GetName(), ".")
|
||||
if !found {
|
||||
_ = l.Close()
|
||||
return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
|
||||
}
|
||||
partExt, t, err := tool.GetArchiveTool("." + ext)
|
||||
if err != nil {
|
||||
var e error
|
||||
partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
|
||||
if e != nil {
|
||||
_ = l.Close()
|
||||
return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
|
||||
}
|
||||
partExt, t, err = tool.GetArchiveTool("." + ext)
|
||||
if err == nil {
|
||||
break
|
||||
return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
|
||||
}
|
||||
}
|
||||
|
||||
// Get first part stream
|
||||
ss, err := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: obj}, l)
|
||||
if err != nil {
|
||||
_ = l.Close()
|
||||
@@ -87,62 +83,29 @@ func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path st
|
||||
ret := []*stream.SeekableStream{ss}
|
||||
if partExt == nil {
|
||||
return obj, t, ret, nil
|
||||
}
|
||||
|
||||
// Merge multi-part archive
|
||||
dir := stdpath.Dir(path)
|
||||
objs, err := List(ctx, storage, dir, model.ListArgs{})
|
||||
if err != nil {
|
||||
} else {
|
||||
index := partExt.SecondPartIndex
|
||||
dir := stdpath.Dir(path)
|
||||
for {
|
||||
p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
|
||||
var o model.Obj
|
||||
l, o, err = Link(ctx, storage, p, args)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
ss, err = stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o}, l)
|
||||
if err != nil {
|
||||
_ = l.Close()
|
||||
for _, s := range ret {
|
||||
_ = s.Close()
|
||||
}
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
|
||||
}
|
||||
ret = append(ret, ss)
|
||||
index++
|
||||
}
|
||||
return obj, t, ret, nil
|
||||
}
|
||||
for _, o := range objs {
|
||||
submatch := partExt.PartFileFormat.FindStringSubmatch(o.GetName())
|
||||
if submatch == nil {
|
||||
continue
|
||||
}
|
||||
partIdx, e := strconv.Atoi(submatch[1])
|
||||
if e != nil {
|
||||
continue
|
||||
}
|
||||
partIdx = partIdx - partExt.SecondPartIndex + 1
|
||||
if partIdx < 1 {
|
||||
continue
|
||||
}
|
||||
p := stdpath.Join(dir, o.GetName())
|
||||
l1, o1, e := Link(ctx, storage, p, args)
|
||||
if e != nil {
|
||||
err = errors.WithMessagef(e, "failed get [%s] link", p)
|
||||
break
|
||||
}
|
||||
ss1, e := stream.NewSeekableStream(&stream.FileStream{Ctx: ctx, Obj: o1}, l1)
|
||||
if e != nil {
|
||||
_ = l1.Close()
|
||||
err = errors.WithMessagef(e, "failed get [%s] stream", p)
|
||||
break
|
||||
}
|
||||
for partIdx >= len(ret) {
|
||||
ret = append(ret, nil)
|
||||
}
|
||||
ret[partIdx] = ss1
|
||||
}
|
||||
closeAll := func(r []*stream.SeekableStream) {
|
||||
for _, s := range r {
|
||||
if s != nil {
|
||||
_ = s.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
closeAll(ret)
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
for i, ss1 := range ret {
|
||||
if ss1 == nil {
|
||||
closeAll(ret)
|
||||
return nil, nil, nil, errors.Errorf("failed merge [%s] parts, missing part %d", path, i)
|
||||
}
|
||||
}
|
||||
return obj, t, ret, nil
|
||||
}
|
||||
|
||||
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
|
||||
|
||||
@@ -15,12 +15,27 @@ type DriverConstructor func() driver.Driver
|
||||
var driverMap = map[string]DriverConstructor{}
|
||||
var driverInfoMap = map[string]driver.Info{}
|
||||
|
||||
func RegisterDriver(driver DriverConstructor) {
|
||||
func RegisterDriver(driver DriverConstructor) error {
|
||||
// log.Infof("register driver: [%s]", config.Name)
|
||||
tempDriver := driver()
|
||||
if tempDriver == nil {
|
||||
return errors.New("register driver is null")
|
||||
}
|
||||
tempConfig := tempDriver.Config()
|
||||
|
||||
if driverMap[tempConfig.Name] != nil {
|
||||
return errors.New("driver is registered")
|
||||
}
|
||||
registerDriverItems(tempConfig, tempDriver.GetAddition())
|
||||
driverMap[tempConfig.Name] = driver
|
||||
return nil
|
||||
}
|
||||
|
||||
func UnRegisterDriver(driver DriverConstructor) {
|
||||
if tempDriver := driver(); tempDriver != nil {
|
||||
tempConfig := tempDriver.Config()
|
||||
delete(driverMap, tempConfig.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func GetDriver(name string) (DriverConstructor, error) {
|
||||
@@ -45,12 +60,18 @@ func GetDriverInfoMap() map[string]driver.Info {
|
||||
|
||||
func registerDriverItems(config driver.Config, addition driver.Additional) {
|
||||
// log.Debugf("addition of %s: %+v", config.Name, addition)
|
||||
tAddition := reflect.TypeOf(addition)
|
||||
for tAddition.Kind() == reflect.Pointer {
|
||||
tAddition = tAddition.Elem()
|
||||
var additionalItems []driver.Item
|
||||
if v, ok := addition.(driver.IGetItem); ok {
|
||||
additionalItems = v.GetItems()
|
||||
} else {
|
||||
tAddition := reflect.TypeOf(addition)
|
||||
for tAddition.Kind() == reflect.Pointer {
|
||||
tAddition = tAddition.Elem()
|
||||
}
|
||||
additionalItems = getAdditionalItems(tAddition, config.DefaultRoot)
|
||||
}
|
||||
|
||||
mainItems := getMainItems(config)
|
||||
additionalItems := getAdditionalItems(tAddition, config.DefaultRoot)
|
||||
driverInfoMap[config.Name] = driver.Info{
|
||||
Common: mainItems,
|
||||
Additional: additionalItems,
|
||||
|
||||
@@ -57,7 +57,7 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
|
||||
model.WrapObjsName(files)
|
||||
// call hooks
|
||||
go func(reqPath string, files []model.Obj) {
|
||||
HandleObjsUpdateHook(context.WithoutCancel(ctx), reqPath, files)
|
||||
HandleObjsUpdateHook(reqPath, files)
|
||||
}(utils.GetFullPath(storage.GetStorage().MountPath, path), files)
|
||||
|
||||
// sort objs
|
||||
@@ -568,15 +568,15 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
|
||||
dstPath := stdpath.Join(dstDirPath, dstName)
|
||||
_, err := GetUnwrap(ctx, storage, dstPath)
|
||||
if err == nil {
|
||||
return errors.WithStack(errs.ObjectAlreadyExists)
|
||||
return errors.New("obj already exists")
|
||||
}
|
||||
err = MakeDir(ctx, storage, dstDirPath)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
|
||||
return errors.WithMessagef(err, "failed to put url")
|
||||
}
|
||||
dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
|
||||
return errors.WithMessagef(err, "failed to put url")
|
||||
}
|
||||
switch s := storage.(type) {
|
||||
case driver.PutURLResult:
|
||||
@@ -599,48 +599,8 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
|
||||
}
|
||||
}
|
||||
default:
|
||||
return errors.WithStack(errs.NotImplement)
|
||||
return errs.NotImplement
|
||||
}
|
||||
log.Debugf("put url [%s](%s) done", dstName, url)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
func GetDirectUploadTools(storage driver.Driver) []string {
|
||||
du, ok := storage.(driver.DirectUploader)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
||||
return nil
|
||||
}
|
||||
return du.GetDirectUploadTools()
|
||||
}
|
||||
|
||||
func GetDirectUploadInfo(ctx context.Context, tool string, storage driver.Driver, dstDirPath, dstName string, fileSize int64) (any, error) {
|
||||
du, ok := storage.(driver.DirectUploader)
|
||||
if !ok {
|
||||
return nil, errors.WithStack(errs.NotImplement)
|
||||
}
|
||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
||||
return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
|
||||
}
|
||||
dstDirPath = utils.FixAndCleanPath(dstDirPath)
|
||||
dstPath := stdpath.Join(dstDirPath, dstName)
|
||||
_, err := GetUnwrap(ctx, storage, dstPath)
|
||||
if err == nil {
|
||||
return nil, errors.WithStack(errs.ObjectAlreadyExists)
|
||||
}
|
||||
err = MakeDir(ctx, storage, dstDirPath)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
|
||||
}
|
||||
dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
|
||||
}
|
||||
info, err := du.GetDirectUploadInfo(ctx, tool, dstDir, dstName, fileSize)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package op
|
||||
|
||||
import (
|
||||
"context"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
@@ -14,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
// Obj
|
||||
type ObjsUpdateHook = func(ctx context.Context, parent string, objs []model.Obj)
|
||||
type ObjsUpdateHook = func(parent string, objs []model.Obj)
|
||||
|
||||
var (
|
||||
objsUpdateHooks = make([]ObjsUpdateHook, 0)
|
||||
@@ -24,9 +23,9 @@ func RegisterObjsUpdateHook(hook ObjsUpdateHook) {
|
||||
objsUpdateHooks = append(objsUpdateHooks, hook)
|
||||
}
|
||||
|
||||
func HandleObjsUpdateHook(ctx context.Context, parent string, objs []model.Obj) {
|
||||
func HandleObjsUpdateHook(parent string, objs []model.Obj) {
|
||||
for _, hook := range objsUpdateHooks {
|
||||
hook(ctx, parent, objs)
|
||||
hook(parent, objs)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
909
internal/plugin/driver.go
Normal file
909
internal/plugin/driver.go
Normal file
@@ -0,0 +1,909 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"context"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/alloc"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
plugin_warp "github.com/OpenListTeam/OpenList/v4/internal/plugin/warp"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
pool "github.com/jolestar/go-commons-pool/v2"
|
||||
|
||||
manager_io "github.com/OpenListTeam/wazero-wasip2/manager/io"
|
||||
io_v_0_2 "github.com/OpenListTeam/wazero-wasip2/wasip2/io/v0_2"
|
||||
witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/tetratelabs/wazero"
|
||||
"github.com/tetratelabs/wazero/api"
|
||||
"github.com/tetratelabs/wazero/experimental"
|
||||
"github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1"
|
||||
)
|
||||
|
||||
var PluginPrefix = "openlist:plugin-driver/exports@0.1.0#"
|
||||
|
||||
// DriverPlugin 是*插件*管理器 (每个 .wasm 文件一个)
|
||||
// 它管理共享的 wazero 资源
|
||||
type DriverPlugin struct {
|
||||
plugin *PluginInfo
|
||||
runtime wazero.Runtime // 共享的 wazero 运行时
|
||||
compiledModule wazero.CompiledModule // 共享的已编译模块
|
||||
host *DriverHost // 注册的 wasi host 资源, 这里的self.driver始终为nil
|
||||
}
|
||||
|
||||
// WasmInstance 代表池中的一个可重用对象
|
||||
// 它包含一个活动的 WASM 实例及其宿主/Guest API
|
||||
type WasmInstance struct {
|
||||
instance api.Module
|
||||
exports *DriverHost
|
||||
guest *witgo.Host
|
||||
}
|
||||
|
||||
// 内部函数,用于动态调用 Guest 以获取属性
|
||||
func (d *WasmInstance) GetProperties(ctx context.Context) (plugin_warp.DriverProps, error) {
|
||||
var propertiesResult plugin_warp.DriverProps
|
||||
err := d.guest.Call(ctx, PluginPrefix+"get-properties", &propertiesResult)
|
||||
if err != nil {
|
||||
return plugin_warp.DriverProps{}, err
|
||||
}
|
||||
return propertiesResult, nil
|
||||
}
|
||||
|
||||
// 内部函数,用于动态调用 Guest 以获取表单
|
||||
func (d *WasmInstance) GetFormMeta(ctx context.Context) ([]plugin_warp.FormField, error) {
|
||||
var formMeta []plugin_warp.FormField
|
||||
err := d.guest.Call(ctx, PluginPrefix+"get-form-meta", &formMeta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return formMeta, nil
|
||||
}
|
||||
|
||||
func (i *WasmInstance) Close() error {
|
||||
return i.instance.Close(context.Background())
|
||||
// exports 借用WasmDriver的资源这里不销毁
|
||||
}
|
||||
|
||||
// 用于创建和管理 WasmInstance
|
||||
type driverPoolFactory struct {
|
||||
ctx context.Context
|
||||
driver *WasmDriver // 指向 WasmDriver (状态持有者)
|
||||
compiledModule wazero.CompiledModule // 共享的模块
|
||||
runtime wazero.Runtime // 共享的运行时
|
||||
host *DriverHost
|
||||
}
|
||||
|
||||
func (f *driverPoolFactory) makeObject(ctx context.Context) (*WasmInstance, error) {
|
||||
// 1. 配置模块
|
||||
moduleConfig := wazero.NewModuleConfig().
|
||||
WithFS(os.DirFS("/")).
|
||||
WithStartFunctions("_initialize").
|
||||
WithStdout(os.Stdout).
|
||||
WithStderr(os.Stderr).
|
||||
WithStdin(os.Stdin).
|
||||
// WithSysNanosleep().
|
||||
// WithSysNanotime().
|
||||
// WithSysWalltime().
|
||||
WithOsyield(func() {
|
||||
runtime.Gosched()
|
||||
}).
|
||||
WithName(f.driver.plugin.plugin.ID)
|
||||
|
||||
instanceCtx := experimental.WithMemoryAllocator(f.ctx, experimental.MemoryAllocatorFunc(alloc.NewMemory))
|
||||
|
||||
// 2. 实例化共享的已编译模块
|
||||
instance, err := f.runtime.InstantiateModule(instanceCtx, f.compiledModule, moduleConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to instantiate module: %w", err)
|
||||
}
|
||||
|
||||
// 3. 创建 Guest API
|
||||
guest, err := witgo.NewHost(instance)
|
||||
if err != nil {
|
||||
instance.Close(ctx)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 5. 组装 WasmInstance
|
||||
wasmInstance := &WasmInstance{
|
||||
instance: instance,
|
||||
exports: f.host,
|
||||
guest: guest,
|
||||
}
|
||||
return wasmInstance, nil
|
||||
}
|
||||
|
||||
// MakeObject 创建一个新的 WasmInstance 并将其放入池中
|
||||
func (f *driverPoolFactory) MakeObject(ctx context.Context) (*pool.PooledObject, error) {
|
||||
wasmInstance, err := f.makeObject(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 设置Host端句柄,用于配置获取等host端方法
|
||||
if err := wasmInstance.guest.Call(ctx, PluginPrefix+"set-handle", nil, uint32(f.driver.ID)); err != nil {
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
wasmInstance.Close()
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
// 调用实例的初始化方法
|
||||
ctxHandle := f.host.ContextManager().Add(ctx)
|
||||
defer f.host.ContextManager().Remove(ctxHandle)
|
||||
|
||||
var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]
|
||||
if err := wasmInstance.guest.Call(ctx, PluginPrefix+"init", &result, ctxHandle); err != nil {
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
wasmInstance.Close()
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
if result.Err != nil {
|
||||
wasmInstance.Close()
|
||||
return nil, result.Err.ToError()
|
||||
}
|
||||
|
||||
return pool.NewPooledObject(wasmInstance), nil
|
||||
}
|
||||
|
||||
// DestroyObject 销毁池中的 WasmInstance
|
||||
func (f *driverPoolFactory) DestroyObject(ctx context.Context, object *pool.PooledObject) error {
|
||||
instance := object.Object.(*WasmInstance)
|
||||
log.Debugf("Destroying pooled WASM instance for plugin: %s", f.driver.Storage.MountPath)
|
||||
|
||||
var err error
|
||||
// 4. 调用实例的销毁化方法
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]
|
||||
if err = instance.guest.Call(ctx, PluginPrefix+"drop", &result, ctxHandle); err != nil {
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
err = errors.New("Internal error in plugin")
|
||||
} else if result.Err != nil {
|
||||
err = result.Err.ToError()
|
||||
}
|
||||
|
||||
return stderrors.Join(err, instance.Close())
|
||||
}
|
||||
|
||||
// ValidateObject 验证实例是否仍然有效
|
||||
func (f *driverPoolFactory) ValidateObject(ctx context.Context, object *pool.PooledObject) bool {
|
||||
instance := object.Object.(*WasmInstance)
|
||||
return instance.instance != nil && !instance.instance.IsClosed()
|
||||
}
|
||||
|
||||
// ActivateObject 在借用时调用
|
||||
func (f *driverPoolFactory) ActivateObject(ctx context.Context, object *pool.PooledObject) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PassivateObject 在归还时调用
|
||||
func (f *driverPoolFactory) PassivateObject(ctx context.Context, object *pool.PooledObject) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WasmDriver 是*驱动*实例 (每个挂载点一个)
|
||||
// 它管理池和*状态*
|
||||
type WasmDriver struct {
|
||||
model.Storage
|
||||
flag uint32
|
||||
|
||||
plugin *DriverPlugin
|
||||
|
||||
host *DriverHost
|
||||
pool *pool.ObjectPool
|
||||
|
||||
config plugin_warp.DriverProps
|
||||
additional plugin_warp.Additional
|
||||
}
|
||||
|
||||
// NewDriverPlugin
|
||||
// 创建插件管理器
|
||||
func NewDriverPlugin(ctx context.Context, plugin *PluginInfo) (*DriverPlugin, error) {
|
||||
wasmBytes, err := os.ReadFile(plugin.WasmPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read wasm file '%s': %w", plugin.WasmPath, err)
|
||||
}
|
||||
|
||||
// 1. 创建共享的 wazero 运行时
|
||||
rt := wazero.NewRuntime(ctx)
|
||||
|
||||
// 2. 注册 wasip1/wasip2 资源
|
||||
wasi_snapshot_preview1.MustInstantiate(ctx, rt)
|
||||
host := NewDriverHost()
|
||||
if err := host.Instantiate(ctx, rt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 3. 编译共享的模块
|
||||
compiledModule, err := rt.CompileModule(ctx, wasmBytes)
|
||||
if err != nil {
|
||||
rt.Close(ctx)
|
||||
return nil, fmt.Errorf("failed to compile wasm module for plugin '%s': %w", plugin.ID, err)
|
||||
}
|
||||
|
||||
// 4. 创建 DriverPlugin 实例(管理器)
|
||||
driverPlugin := &DriverPlugin{
|
||||
plugin: plugin,
|
||||
runtime: rt,
|
||||
compiledModule: compiledModule,
|
||||
host: host,
|
||||
}
|
||||
return driverPlugin, nil
|
||||
}
|
||||
|
||||
// Close 关闭共享的 wazero 运行时
|
||||
func (dp *DriverPlugin) Close(ctx context.Context) error {
|
||||
log.Infof("Closing plugin runtime for: %s", dp.plugin.ID)
|
||||
if dp.runtime != nil {
|
||||
return dp.runtime.Close(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewWasmDriver
|
||||
// 创建*驱动实例* (每个挂载一个)
|
||||
func (dp *DriverPlugin) NewWasmDriver() (driver.Driver, error) {
|
||||
ctx := context.Background() // Factory/Pool context
|
||||
|
||||
// 1. 创建 WasmDriver 实例 (状态持有者)
|
||||
driver := &WasmDriver{
|
||||
plugin: dp, // 指向共享资源的管理器
|
||||
host: dp.host,
|
||||
}
|
||||
|
||||
type WasmDirverWarp struct {
|
||||
*WasmDriver
|
||||
}
|
||||
driverWarp := &WasmDirverWarp{driver}
|
||||
runtime.SetFinalizer(driverWarp, func(driver *WasmDirverWarp) {
|
||||
dp.host.driver.Remove(uint32(driver.ID))
|
||||
})
|
||||
|
||||
// 3. 创建池工厂
|
||||
factory := &driverPoolFactory{
|
||||
ctx: ctx,
|
||||
driver: driver,
|
||||
compiledModule: dp.compiledModule,
|
||||
runtime: dp.runtime,
|
||||
host: dp.host,
|
||||
}
|
||||
|
||||
// 4. 配置并创建池
|
||||
poolConfig := pool.NewDefaultPoolConfig()
|
||||
poolConfig.MaxIdle = 2
|
||||
poolConfig.MaxTotal = 8
|
||||
poolConfig.TestOnBorrow = true
|
||||
poolConfig.BlockWhenExhausted = true
|
||||
driver.pool = pool.NewObjectPool(ctx, factory, poolConfig)
|
||||
|
||||
// 5. 首次获取插件信息
|
||||
initConfig := func() error {
|
||||
instance, err := factory.makeObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer instance.Close()
|
||||
|
||||
props, err := instance.GetProperties(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to refresh properties: %w", err)
|
||||
}
|
||||
driver.config = props
|
||||
|
||||
forms, err := instance.GetFormMeta(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to refresh forms: %w", err)
|
||||
}
|
||||
driver.additional.Forms = forms
|
||||
return nil
|
||||
}
|
||||
if err := initConfig(); err != nil {
|
||||
driver.Close(ctx) // 构造失败,关闭池
|
||||
return nil, err
|
||||
}
|
||||
return driverWarp, nil
|
||||
}
|
||||
|
||||
// Close (在 WasmDriver 上) 关闭此*实例*的池
|
||||
func (d *WasmDriver) Close(ctx context.Context) error {
|
||||
log.Infof("Closing pool for driver: %s", d.MountPath)
|
||||
if d.pool != nil {
|
||||
d.pool.Close(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleError 处理 wasm 驱动返回的错误
|
||||
func (d *WasmDriver) handleError(errcode *plugin_warp.ErrCode) error {
|
||||
if errcode != nil {
|
||||
err := errcode.ToError()
|
||||
if errcode.Unauthorized != nil && d.Status == op.WORK {
|
||||
if atomic.CompareAndSwapUint32(&d.flag, 0, 1) {
|
||||
d.Status = err.Error()
|
||||
op.MustSaveDriverStorage(d)
|
||||
atomic.StoreUint32(&d.flag, 0)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// // 内部函数,用于动态调用 Guest 以获取属性
|
||||
// func (d *WasmDriver) getProperties(ctx context.Context) (plugin_warp.DriverProps, error) {
|
||||
// obj, err := d.pool.BorrowObject(ctx)
|
||||
// if err != nil {
|
||||
// return plugin_warp.DriverProps{}, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
// }
|
||||
// instance := obj.(*WasmInstance)
|
||||
// defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
// return instance.GetProperties(ctx)
|
||||
// }
|
||||
|
||||
// // 内部函数,用于动态调用 Guest 以获取表单
|
||||
// func (d *WasmDriver) getFormMeta(ctx context.Context) ([]plugin_warp.FormField, error) {
|
||||
// obj, err := d.pool.BorrowObject(ctx)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
// }
|
||||
// instance := obj.(*WasmInstance)
|
||||
// defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
// return instance.GetFormMeta(ctx)
|
||||
// }
|
||||
|
||||
// Config 返回缓存的配置
|
||||
func (d *WasmDriver) Config() driver.Config {
|
||||
// props, err := d.getProperties(context.Background())
|
||||
// if err != nil {
|
||||
// log.Errorf("failed to get properties: %s", err)
|
||||
// return d.config.ToConfig()
|
||||
// }
|
||||
|
||||
// d.config = props
|
||||
return d.config.ToConfig()
|
||||
}
|
||||
|
||||
func (d *WasmDriver) GetAddition() driver.Additional {
|
||||
// newFormMeta, err := d.getFormMeta(context.Background())
|
||||
// if err != nil {
|
||||
// log.Errorf("failed to get form meta: %s", err)
|
||||
// return &d.additional
|
||||
// }
|
||||
// d.additional.Forms = newFormMeta
|
||||
return &d.additional
|
||||
}
|
||||
|
||||
// Init 初始化驱动
|
||||
func (d *WasmDriver) Init(ctx context.Context) error {
|
||||
log.Debugf("Re-initializing pool for plugin %s by clearing idle.", d.MountPath)
|
||||
d.pool.Clear(ctx)
|
||||
|
||||
// 注册
|
||||
d.host.driver.Set(uint32(d.ID), d)
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to pre-warm pool after re-init: %w", err)
|
||||
}
|
||||
d.pool.ReturnObject(ctx, obj)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Drop 销毁驱动 (由 Guest 调用)
|
||||
func (d *WasmDriver) Drop(ctx context.Context) error {
|
||||
log.Infof("Guest triggered Drop, closing pool for driver: %s", d.MountPath)
|
||||
return d.Close(ctx)
|
||||
}
|
||||
|
||||
func (d *WasmDriver) GetRoot(ctx context.Context) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.ListFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
var result witgo.Result[plugin_warp.Object, plugin_warp.ErrCode]
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"get-root", &result, ctxHandle)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok, nil
|
||||
}
|
||||
|
||||
// GetFile 获取文件信息
|
||||
func (d *WasmDriver) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.GetFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
var result witgo.Result[plugin_warp.Object, plugin_warp.ErrCode]
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"get-file", &result, ctxHandle, path)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok, nil
|
||||
}
|
||||
|
||||
// List 列出文件
|
||||
func (d *WasmDriver) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
if !d.config.Capabilitys.ListFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
robj := dir.(*plugin_warp.Object)
|
||||
var result witgo.Result[[]plugin_warp.Object, plugin_warp.ErrCode]
|
||||
|
||||
param := struct {
|
||||
Handle plugin_warp.Context
|
||||
Obj *plugin_warp.Object
|
||||
}{ctxHandle, robj}
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"list-files", &result, param)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
return utils.MustSliceConvert(*result.Ok, func(o plugin_warp.Object) model.Obj { return &o }), nil
|
||||
}
|
||||
|
||||
// Link 获取文件直链或读取流
|
||||
func (d *WasmDriver) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
if !d.config.Capabilitys.LinkFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
// 这部分资源全由Host端管理
|
||||
// TODO: 或许应该把创建的Stream生命周期一同绑定到此处结束,防止忘记关闭导致的资源泄漏
|
||||
|
||||
pobj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := pobj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, pobj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
headersHandle := instance.exports.HTTPManager().Fields.Add(args.Header)
|
||||
defer instance.exports.HTTPManager().Fields.Remove(headersHandle)
|
||||
|
||||
obj := file.(*plugin_warp.Object)
|
||||
|
||||
var result witgo.Result[plugin_warp.LinkResult, plugin_warp.ErrCode]
|
||||
|
||||
param := struct {
|
||||
Handle plugin_warp.Context
|
||||
Obj *plugin_warp.Object
|
||||
LinkArgs plugin_warp.LinkArgs
|
||||
}{ctxHandle, obj, plugin_warp.LinkArgs{IP: args.IP, Header: headersHandle}}
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"link-file", &result, param)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
// 覆盖旧的Obj
|
||||
if result.Ok.File.IsSome() {
|
||||
*obj = *result.Ok.File.Some
|
||||
}
|
||||
|
||||
if result.Ok.Resource.Direct != nil {
|
||||
direct := result.Ok.Resource.Direct
|
||||
header, _ := instance.exports.HTTPManager().Fields.Pop(direct.Header)
|
||||
link := &model.Link{URL: direct.Url, Header: http.Header(header)}
|
||||
if direct.Expiratcion.IsSome() {
|
||||
exp := direct.Expiratcion.Some.ToDuration()
|
||||
link.Expiration = &exp
|
||||
}
|
||||
return link, nil
|
||||
}
|
||||
|
||||
if result.Ok.Resource.RangeStream != nil {
|
||||
fileSize := obj.GetSize()
|
||||
return &model.Link{
|
||||
RangeReader: stream.RateLimitRangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
var size uint64
|
||||
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > fileSize {
|
||||
size = uint64(fileSize - httpRange.Start)
|
||||
} else {
|
||||
size = uint64(httpRange.Length)
|
||||
}
|
||||
|
||||
pobj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
instance := pobj.(*WasmInstance)
|
||||
|
||||
r, w := io.Pipe()
|
||||
cw := &checkWriter{W: w, N: size}
|
||||
streamHandle := instance.exports.StreamManager().Add(&manager_io.Stream{
|
||||
Writer: cw,
|
||||
CheckWriter: cw,
|
||||
})
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
|
||||
type RangeSpec struct {
|
||||
Offset uint64
|
||||
Size uint64
|
||||
Stream io_v_0_2.OutputStream
|
||||
}
|
||||
|
||||
var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]
|
||||
param := struct {
|
||||
Handle plugin_warp.Context
|
||||
Obj *plugin_warp.Object
|
||||
LinkArgs plugin_warp.LinkArgs
|
||||
RangeSpec RangeSpec
|
||||
}{ctxHandle, obj, plugin_warp.LinkArgs{IP: args.IP, Header: headersHandle}, RangeSpec{Offset: uint64(httpRange.Start), Size: size, Stream: streamHandle}}
|
||||
|
||||
go func() {
|
||||
defer d.pool.ReturnObject(ctx, instance)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
if err := instance.guest.Call(ctx, PluginPrefix+"link-range", &result, param); err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
w.CloseWithError(errs.NotImplement)
|
||||
return
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
w.CloseWithError(d.handleError(result.Err))
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
return utils.NewReadCloser(r, func() error {
|
||||
instance.exports.StreamManager().Remove(streamHandle)
|
||||
return r.Close()
|
||||
}), nil
|
||||
}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
type checkWriter struct {
|
||||
W io.Writer
|
||||
N uint64
|
||||
}
|
||||
|
||||
func (c *checkWriter) Write(p []byte) (n int, err error) {
|
||||
if c.N <= 0 {
|
||||
return 0, stderrors.New("write limit exceeded")
|
||||
}
|
||||
n, err = c.W.Write(p[:min(uint64(len(p)), c.N)])
|
||||
c.N -= uint64(n)
|
||||
return
|
||||
}
|
||||
func (c *checkWriter) CheckWrite() uint64 {
|
||||
return max(c.N, 1)
|
||||
}
|
||||
|
||||
func (d *WasmDriver) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.MkdirFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
robj := parentDir.(*plugin_warp.Object)
|
||||
var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]
|
||||
|
||||
if err := instance.guest.Call(ctx, PluginPrefix+"make-dir", &result, ctxHandle, robj, dirName); err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok.Some, nil
|
||||
}
|
||||
|
||||
func (d *WasmDriver) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.RenameFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
robj := srcObj.(*plugin_warp.Object)
|
||||
var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]
|
||||
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"rename-file", &result, ctxHandle, robj, newName)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok.Some, nil
|
||||
}
|
||||
|
||||
func (d *WasmDriver) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.MoveFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
srcobj := srcObj.(*plugin_warp.Object)
|
||||
dstobj := dstDir.(*plugin_warp.Object)
|
||||
|
||||
var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]
|
||||
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"move-file", &result, ctxHandle, srcobj, dstobj)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok.Some, nil
|
||||
}
|
||||
|
||||
func (d *WasmDriver) Remove(ctx context.Context, srcObj model.Obj) error {
|
||||
if !d.config.Capabilitys.RemoveFile {
|
||||
return errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
srcobj := srcObj.(*plugin_warp.Object)
|
||||
|
||||
var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]
|
||||
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"remove-file", &result, ctxHandle, srcobj)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *WasmDriver) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.CopyFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
srcobj := srcObj.(*plugin_warp.Object)
|
||||
dstobj := dstDir.(*plugin_warp.Object)
|
||||
|
||||
var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]
|
||||
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"copy-file", &result, ctxHandle, srcobj, dstobj)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok.Some, nil
|
||||
}
|
||||
|
||||
func (d *WasmDriver) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.UploadFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
stream := instance.exports.uploads.Add(&plugin_warp.UploadReadableType{FileStreamer: file, UpdateProgress: up})
|
||||
defer instance.exports.uploads.Remove(stream)
|
||||
|
||||
dstobj := dstDir.(*plugin_warp.Object)
|
||||
|
||||
var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]
|
||||
|
||||
exist := witgo.None[plugin_warp.Object]()
|
||||
if file.GetExist() != nil {
|
||||
exist = witgo.Some(plugin_warp.ConvertObjToObject(file.GetExist()))
|
||||
}
|
||||
|
||||
uploadReq := &plugin_warp.UploadRequest{
|
||||
Target: plugin_warp.ConvertObjToObject(file),
|
||||
Content: stream,
|
||||
Exist: exist,
|
||||
}
|
||||
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"upload-file", &result, ctxHandle, dstobj, uploadReq)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok.Some, nil
|
||||
}
|
||||
|
||||
// Compile-time assertions that WasmDriver implements every driver
// capability interface it is used through.
var _ driver.Meta = (*WasmDriver)(nil)
var _ driver.Reader = (*WasmDriver)(nil)
var _ driver.Getter = (*WasmDriver)(nil)
var _ driver.GetRooter = (*WasmDriver)(nil)
var _ driver.MkdirResult = (*WasmDriver)(nil)
var _ driver.RenameResult = (*WasmDriver)(nil)
var _ driver.MoveResult = (*WasmDriver)(nil)
var _ driver.Remove = (*WasmDriver)(nil)
var _ driver.CopyResult = (*WasmDriver)(nil)
var _ driver.PutResult = (*WasmDriver)(nil)
|
||||
284
internal/plugin/host.go
Normal file
284
internal/plugin/host.go
Normal file
@@ -0,0 +1,284 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"maps"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tetratelabs/wazero"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
plugin_warp "github.com/OpenListTeam/OpenList/v4/internal/plugin/warp"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
|
||||
manager_io "github.com/OpenListTeam/wazero-wasip2/manager/io"
|
||||
"github.com/OpenListTeam/wazero-wasip2/wasip2"
|
||||
wasi_clocks "github.com/OpenListTeam/wazero-wasip2/wasip2/clocks"
|
||||
wasi_filesystem "github.com/OpenListTeam/wazero-wasip2/wasip2/filesystem"
|
||||
wasi_http "github.com/OpenListTeam/wazero-wasip2/wasip2/http"
|
||||
wasi_io "github.com/OpenListTeam/wazero-wasip2/wasip2/io"
|
||||
io_v0_2 "github.com/OpenListTeam/wazero-wasip2/wasip2/io/v0_2"
|
||||
wasi_random "github.com/OpenListTeam/wazero-wasip2/wasip2/random"
|
||||
wasi_sockets "github.com/OpenListTeam/wazero-wasip2/wasip2/sockets"
|
||||
witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"
|
||||
)
|
||||
|
||||
// DriverHost is the host side of the driver-plugin ABI. It embeds the
// generic WASI preview-2 host and adds the driver-specific resource
// managers shared with guest code.
type DriverHost struct {
	*wasip2.Host
	contexts *plugin_warp.ContextManaget        // context handles handed to guests
	uploads  *plugin_warp.UploadReadableManager // in-flight upload streams exposed to guests

	driver *witgo.ResourceManager[*WasmDriver] // driver instances addressable by handle
}
|
||||
|
||||
func NewDriverHost() *DriverHost {
|
||||
waspi2_host := wasip2.NewHost(
|
||||
wasi_io.Module("0.2.2"),
|
||||
wasi_filesystem.Module("0.2.2"),
|
||||
wasi_random.Module("0.2.2"),
|
||||
wasi_clocks.Module("0.2.2"),
|
||||
wasi_sockets.Module("0.2.0"),
|
||||
wasi_http.Module("0.2.0"),
|
||||
)
|
||||
return &DriverHost{
|
||||
Host: waspi2_host,
|
||||
contexts: plugin_warp.NewContextManager(),
|
||||
uploads: plugin_warp.NewUploadManager(),
|
||||
driver: witgo.NewResourceManager[*WasmDriver](nil),
|
||||
}
|
||||
}
|
||||
|
||||
// Instantiate registers the embedded WASI host modules plus the two
// driver-plugin host modules ("host" and "types") with the wazero
// runtime. It must run before any guest module that imports them.
func (host *DriverHost) Instantiate(ctx context.Context, rt wazero.Runtime) error {
	if err := host.Host.Instantiate(ctx, rt); err != nil {
		return err
	}

	// openlist:plugin-driver/host — general host services for guests.
	module := rt.NewHostModuleBuilder("openlist:plugin-driver/host@0.1.0")
	exports := witgo.NewExporter(module)

	exports.Export("log", host.Log)
	exports.Export("load-config", host.LoadConfig)
	exports.Export("save-config", host.SaveConfig)
	if _, err := exports.Instantiate(ctx); err != nil {
		return err
	}

	// openlist:plugin-driver/types — methods/destructors for the
	// host-owned `cancellable` and `readable` resources.
	moduleType := rt.NewHostModuleBuilder("openlist:plugin-driver/types@0.1.0")
	exportsType := witgo.NewExporter(moduleType)
	exportsType.Export("[resource-drop]cancellable", host.DropContext)
	exportsType.Export("[method]cancellable.subscribe", host.Subscribe)

	exportsType.Export("[resource-drop]readable", host.DropReadable)
	exportsType.Export("[method]readable.streams", host.Stream)
	exportsType.Export("[method]readable.peek", host.StreamPeek)
	exportsType.Export("[method]readable.chunks", host.Chunks)
	exportsType.Export("[method]readable.next-chunk", host.NextChunk)
	exportsType.Export("[method]readable.chunk-reset", host.ChunkReset)
	exportsType.Export("[method]readable.get-hasher", host.GetHasher)
	exportsType.Export("[method]readable.update-progress", host.UpdateProgress)
	if _, err := exportsType.Instantiate(ctx); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// ContextManager returns the manager for context handles shared with guests.
func (host *DriverHost) ContextManager() *plugin_warp.ContextManaget {
	return host.contexts
}
|
||||
|
||||
// UploadManager returns the manager for upload-stream handles shared with guests.
func (host *DriverHost) UploadManager() *plugin_warp.UploadReadableManager {
	return host.uploads
}
|
||||
|
||||
// DropReadable implements `[resource-drop]readable`: it releases the
// upload resource when the guest drops its handle.
func (host *DriverHost) DropReadable(this plugin_warp.UploadReadable) {
	host.uploads.Remove(this)
}
|
||||
|
||||
// DropContext implements `[resource-drop]cancellable`: it releases the
// context resource when the guest drops its handle.
func (host *DriverHost) DropContext(this plugin_warp.Context) {
	host.contexts.Remove(this)
}
|
||||
|
||||
// log: func(level: log-level, message: string);
|
||||
func (host *DriverHost) Log(level plugin_warp.LogLevel, message string) {
|
||||
if level.Debug != nil {
|
||||
log.Debugln(message)
|
||||
} else if level.Error != nil {
|
||||
log.Errorln(message)
|
||||
} else if level.Info != nil {
|
||||
log.Infoln(message)
|
||||
} else if level.Warn != nil {
|
||||
log.Warnln(message)
|
||||
} else {
|
||||
log.Traceln(message)
|
||||
}
|
||||
}
|
||||
|
||||
// load-config: func(driver: u32) -> result<list<u8>, string>;
|
||||
func (host *DriverHost) LoadConfig(driverHandle uint32) witgo.Result[[]byte, string] {
|
||||
driver, ok := host.driver.Get(driverHandle)
|
||||
if !ok || driver == nil {
|
||||
return witgo.Err[[]byte]("host.driver is null, loading timing too early")
|
||||
}
|
||||
return witgo.Ok[[]byte, string](driver.additional.Bytes())
|
||||
}
|
||||
|
||||
// save-config: func(driver: u32, config: list<u8>) -> result<_, string>;
|
||||
func (host *DriverHost) SaveConfig(driverHandle uint32, config []byte) witgo.Result[witgo.Unit, string] {
|
||||
driver, ok := host.driver.Get(driverHandle)
|
||||
if !ok || driver == nil {
|
||||
return witgo.Err[witgo.Unit]("host.driver is null, loading timing too early")
|
||||
}
|
||||
|
||||
driver.additional.SetBytes(config)
|
||||
op.MustSaveDriverStorage(driver)
|
||||
return witgo.Ok[witgo.Unit, string](witgo.Unit{})
|
||||
}
|
||||
|
||||
// streams: func() -> result<input-stream, string>;
|
||||
func (host *DriverHost) Stream(this plugin_warp.UploadReadable) witgo.Result[io_v0_2.InputStream, string] {
|
||||
upload, ok := host.uploads.Get(this)
|
||||
if !ok {
|
||||
return witgo.Err[io_v0_2.InputStream]("UploadReadable::Stream: ErrorCodeBadDescriptor")
|
||||
}
|
||||
if upload.StreamConsume {
|
||||
return witgo.Err[io_v0_2.InputStream]("UploadReadable::Stream: StreamConsume")
|
||||
}
|
||||
|
||||
upload.StreamConsume = true
|
||||
streamHandle := host.StreamManager().Add(&manager_io.Stream{Reader: upload, Seeker: upload.GetFile()})
|
||||
return witgo.Ok[io_v0_2.InputStream, string](streamHandle)
|
||||
}
|
||||
|
||||
// peek: func(offset: u64, len: u64) -> result<input-stream, string>;
|
||||
func (host *DriverHost) StreamPeek(this plugin_warp.UploadReadable, offset uint64, len uint64) witgo.Result[io_v0_2.InputStream, string] {
|
||||
upload, ok := host.uploads.Get(this)
|
||||
if !ok {
|
||||
return witgo.Err[io_v0_2.InputStream]("UploadReadable::StreamPeek: ErrorCodeBadDescriptor")
|
||||
}
|
||||
if upload.StreamConsume {
|
||||
return witgo.Err[io_v0_2.InputStream]("UploadReadable::StreamPeek: StreamConsume")
|
||||
}
|
||||
|
||||
peekReader, err := upload.RangeRead(http_range.Range{Start: int64(offset), Length: int64(len)})
|
||||
if err != nil {
|
||||
return witgo.Err[io_v0_2.InputStream](err.Error())
|
||||
}
|
||||
seeker, _ := peekReader.(io.Seeker)
|
||||
streamHandle := host.StreamManager().Add(&manager_io.Stream{Reader: peekReader, Seeker: seeker})
|
||||
return witgo.Ok[io_v0_2.InputStream, string](streamHandle)
|
||||
}
|
||||
|
||||
// chunks: func(len: u32) -> result<u32, string>;
|
||||
func (host *DriverHost) Chunks(this plugin_warp.UploadReadable, len uint32) witgo.Result[uint32, string] {
|
||||
upload, ok := host.uploads.Get(this)
|
||||
if !ok {
|
||||
return witgo.Err[uint32]("UploadReadable::Chunks: ErrorCodeBadDescriptor")
|
||||
}
|
||||
if upload.StreamConsume {
|
||||
return witgo.Err[uint32]("UploadReadable::Chunks: StreamConsume")
|
||||
}
|
||||
if upload.SectionReader != nil {
|
||||
return witgo.Err[uint32]("UploadReadable::Chunks: Already exist chunk reader")
|
||||
}
|
||||
|
||||
ss, err := stream.NewStreamSectionReader(upload, int(len), &upload.UpdateProgress)
|
||||
if err != nil {
|
||||
return witgo.Err[uint32](err.Error())
|
||||
}
|
||||
chunkSize := int64(len)
|
||||
upload.SectionReader = &plugin_warp.StreamSectionReader{StreamSectionReaderIF: ss, CunketSize: chunkSize}
|
||||
return witgo.Ok[uint32, string](uint32((upload.GetSize() + chunkSize - 1) / chunkSize))
|
||||
}
|
||||
|
||||
// next-chunk: func() -> result<input-stream, string>;
//
// NextChunk carves the next section out of the upload (the final chunk
// may be shorter than the configured chunk size), advances the section
// offset, and returns the section as an input stream. The section's
// buffer is returned to the reader when the guest closes the stream.
// Chunks must have been called first to create the section reader.
func (host *DriverHost) NextChunk(this plugin_warp.UploadReadable) witgo.Result[io_v0_2.InputStream, string] {
	upload, ok := host.uploads.Get(this)
	if !ok {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::NextChunk: ErrorCodeBadDescriptor")
	}
	if upload.SectionReader == nil {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::NextChunk: No chunk reader")
	}

	// Clamp the last chunk to the bytes remaining past the current offset.
	chunkSize := min(upload.SectionReader.CunketSize, upload.GetSize()-upload.SectionReader.Offset)
	sr, err := upload.SectionReader.GetSectionReader(upload.SectionReader.Offset, chunkSize)
	if err != nil {
		return witgo.Err[io_v0_2.InputStream](err.Error())
	}
	upload.SectionReader.Offset += chunkSize
	streamHandle := host.StreamManager().Add(&manager_io.Stream{Reader: sr, Seeker: sr, Closer: utils.CloseFunc(func() error {
		// Recycle the section buffer once the guest closes the stream.
		upload.SectionReader.FreeSectionReader(sr)
		return nil
	})})
	return witgo.Ok[io_v0_2.InputStream, string](streamHandle)
}
|
||||
|
||||
// chunk-reset: func(chunk: input-stream) -> result<_, string>;
|
||||
func (host *DriverHost) ChunkReset(this plugin_warp.UploadReadable, chunk io_v0_2.InputStream) witgo.Result[witgo.Unit, string] {
|
||||
stream, ok := host.StreamManager().Get(chunk)
|
||||
if !ok {
|
||||
return witgo.Err[witgo.Unit]("UploadReadable::ChunkReset: ErrorCodeBadDescriptor")
|
||||
}
|
||||
if stream.Seeker == nil {
|
||||
return witgo.Err[witgo.Unit]("UploadReadable::ChunkReset: Not Seeker")
|
||||
}
|
||||
_, err := stream.Seeker.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return witgo.Err[witgo.Unit](err.Error())
|
||||
}
|
||||
return witgo.Ok[witgo.Unit, string](witgo.Unit{})
|
||||
}
|
||||
|
||||
// get-hasher: func(hashs: list<hash-alg>) -> result<list<hash-info>, string>;
//
// GetHasher returns the requested hashes for the upload. It first tries
// to answer from hash info already attached to the object; otherwise it
// caches the entire stream while computing the missing hashes.
func (host *DriverHost) GetHasher(this plugin_warp.UploadReadable, hashs []plugin_warp.HashAlg) witgo.Result[[]plugin_warp.HashInfo, string] {
	upload, ok := host.uploads.Get(this)
	if !ok {
		return witgo.Err[[]plugin_warp.HashInfo]("UploadReadable: ErrorCodeBadDescriptor")
	}

	// Fast path: every requested hash is already known for this object.
	resultHashs := plugin_warp.HashInfoConvert2(upload.GetHash(), hashs)
	if resultHashs != nil {
		return witgo.Ok[[]plugin_warp.HashInfo, string](resultHashs)
	}

	// Once the stream has been consumed it can no longer be re-read for hashing.
	if upload.StreamConsume {
		return witgo.Err[[]plugin_warp.HashInfo]("UploadReadable: StreamConsume")
	}

	// The required hashes cannot be obtained from the object, or are
	// incomplete: cache the whole file and hash it while copying.
	hashTypes := plugin_warp.HashAlgConverts(hashs)

	hashers := utils.NewMultiHasher(hashTypes)
	if _, err := upload.CacheFullAndWriter(&upload.UpdateProgress, hashers); err != nil {
		return witgo.Err[[]plugin_warp.HashInfo](err.Error())
	}

	// NOTE(review): this copies the freshly computed hashes into whatever
	// map Export() returns; it only updates the upload's stored hash info
	// if Export() exposes the live map rather than a copy — confirm.
	maps.Copy(upload.GetHash().Export(), hashers.GetHashInfo().Export())

	return witgo.Ok[[]plugin_warp.HashInfo, string](plugin_warp.HashInfoConvert(*hashers.GetHashInfo()))
}
|
||||
|
||||
// update-progress: func(progress: f64);
//
// UpdateProgress forwards a guest progress report to the upload's
// progress callback. Unknown handles are silently ignored.
func (host *DriverHost) UpdateProgress(this plugin_warp.UploadReadable, progress float64) {
	upload, ok := host.uploads.Get(this)
	if ok {
		upload.UpdateProgress(progress)
	}
}
|
||||
|
||||
// resource cancellable { subscribe: func() -> pollable; }
|
||||
func (host *DriverHost) Subscribe(this plugin_warp.Context) io_v0_2.Pollable {
|
||||
poll := host.Host.PollManager()
|
||||
|
||||
ctx, ok := host.contexts.Get(this)
|
||||
if !ok {
|
||||
return poll.Add(manager_io.ReadyPollable)
|
||||
}
|
||||
|
||||
return poll.Add(&plugin_warp.ContextPollable{Context: ctx})
|
||||
}
|
||||
650
internal/plugin/manager.go
Normal file
650
internal/plugin/manager.go
Normal file
@@ -0,0 +1,650 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/db"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
)
|
||||
|
||||
var (
	// PluginManager is the process-wide plugin manager instance.
	PluginManager *Manager
)
|
||||
|
||||
// PluginInfo holds the plugin metadata loaded from the database together
// with runtime state attached after registration.
type PluginInfo struct {
	*model.Plugin
	handler PluginHandler // cached handler matching this plugin's ID prefix
	driver  *DriverPlugin // cached driver plugin instance, if one was created
}
|
||||
|
||||
// PluginHandler is the interface implemented by each supported plugin
// category; a handler is selected by matching Prefix against a plugin ID.
type PluginHandler interface {
	// Prefix returns the plugin-ID prefix this handler can process.
	Prefix() string
	// Register activates a plugin.
	Register(ctx context.Context, plugin *PluginInfo) error
	// Unregister deactivates a plugin.
	Unregister(ctx context.Context, plugin *PluginInfo) error
}
|
||||
|
||||
// Manager 负责管理插件的生命周期(安装、卸载、加载元数据)。
|
||||
type Manager struct {
|
||||
sync.RWMutex
|
||||
plugins map[string]*PluginInfo // Key: 插件 ID
|
||||
pluginDir string
|
||||
httpClient *http.Client
|
||||
handlers []PluginHandler // 插件处理器列表
|
||||
}
|
||||
|
||||
// NewManager 创建一个新的、轻量级的插件管理器。
|
||||
func NewManager(ctx context.Context, dataDir string) (*Manager, error) {
|
||||
pluginDir := filepath.Join(dataDir, "plugins")
|
||||
if err := os.MkdirAll(pluginDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create plugin directory: %w", err)
|
||||
}
|
||||
|
||||
m := &Manager{
|
||||
plugins: make(map[string]*PluginInfo),
|
||||
pluginDir: pluginDir,
|
||||
httpClient: &http.Client{},
|
||||
// 在这里注册所有支持的插件处理器
|
||||
handlers: []PluginHandler{
|
||||
&DriverPluginHandler{}, // 注册驱动插件处理器
|
||||
// 未来可以添加 newThemePluginHandler(), newOtherPluginHandler() 等
|
||||
},
|
||||
}
|
||||
|
||||
if err := m.loadPluginsFromDB(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to load plugins from database: %w", err)
|
||||
}
|
||||
|
||||
// 在 NewManager 中直接调用 RegisterAll,确保启动时所有插件都被注册
|
||||
m.RegisterAll(ctx)
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// loadPluginsFromDB 在启动时仅从数据库加载插件元数据。
|
||||
func (m *Manager) loadPluginsFromDB(ctx context.Context) error {
|
||||
storedPlugins, err := db.GetAllPlugins(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("Found %d installed plugins in the database.", len(storedPlugins))
|
||||
for _, p := range storedPlugins {
|
||||
if _, err := os.Stat(p.WasmPath); os.IsNotExist(err) {
|
||||
log.Warnf("Plugin '%s' found in database but its wasm file is missing at %s. Skipping.", p.ID, p.WasmPath)
|
||||
continue
|
||||
}
|
||||
pluginInfo := &PluginInfo{Plugin: p}
|
||||
// 为插件找到匹配的处理器
|
||||
for _, h := range m.handlers {
|
||||
if strings.HasPrefix(p.ID, h.Prefix()) {
|
||||
pluginInfo.handler = h
|
||||
break
|
||||
}
|
||||
}
|
||||
if pluginInfo.handler == nil {
|
||||
log.Warnf("No handler found for plugin type with ID '%s'. Skipping registration.", p.ID)
|
||||
}
|
||||
m.plugins[p.ID] = pluginInfo
|
||||
log.Infof("Loaded plugin metadata: %s (v%s)", p.Name, p.Version)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterAll 遍历所有已加载的插件,并使用对应的处理器进行注册。
|
||||
func (m *Manager) RegisterAll(ctx context.Context) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
log.Infof("Registering all loaded plugins...")
|
||||
for id, pluginInfo := range m.plugins {
|
||||
if pluginInfo.handler != nil {
|
||||
if err := pluginInfo.handler.Register(ctx, pluginInfo); err != nil {
|
||||
// 注册失败,更新数据库状态
|
||||
log.Errorf("Failed to register plugin '%s': %v", id, err)
|
||||
pluginInfo.Status = model.StatusError
|
||||
pluginInfo.Message = err.Error()
|
||||
// 更新数据库
|
||||
if err := db.UpdatePluginStatus(ctx, id, model.StatusError, err.Error()); err != nil {
|
||||
log.Errorf("Failed to update status for plugin '%s' in database: %v", id, err)
|
||||
}
|
||||
} else {
|
||||
// 注册成功,更新状态
|
||||
pluginInfo.Status = model.StatusActive
|
||||
pluginInfo.Message = ""
|
||||
if err := db.UpdatePluginStatus(ctx, id, model.StatusActive, ""); err != nil {
|
||||
log.Errorf("Failed to update status for plugin '%s' in database: %v", id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Install 根据源字符串的格式自动选择安装方式。
|
||||
func (m *Manager) Install(ctx context.Context, source string) (*PluginInfo, error) {
|
||||
if strings.HasSuffix(source, ".zip") {
|
||||
log.Infof("Installing plugin from archive URL: %s", source)
|
||||
return m.InstallFromArchiveURL(ctx, source)
|
||||
}
|
||||
if strings.HasPrefix(source, "https://github.com/") {
|
||||
log.Infof("Installing plugin from GitHub repository: %s", source)
|
||||
return m.InstallFromGitHub(ctx, source)
|
||||
}
|
||||
// 默认认为是本地文件系统路径
|
||||
log.Infof("Installing plugin from local path: %s", source)
|
||||
return m.InstallFromLocal(ctx, source, "")
|
||||
}
|
||||
|
||||
// InstallFromLocal 从本地清单和 Wasm 文件安装插件。
|
||||
// manifestPath 是必需的,wasmPath 是可选的(如果为空,则在 manifestPath 相同目录下查找 .wasm 文件)。
|
||||
func (m *Manager) InstallFromLocal(ctx context.Context, manifestPath string, wasmPath string) (*PluginInfo, error) {
|
||||
manifestBytes, err := os.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read manifest file '%s': %w", manifestPath, err)
|
||||
}
|
||||
|
||||
if wasmPath == "" {
|
||||
wasmPath = strings.TrimSuffix(manifestPath, filepath.Ext(manifestPath)) + ".wasm"
|
||||
}
|
||||
|
||||
wasmBytes, err := os.ReadFile(wasmPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read wasm file at '%s': %w", wasmPath, err)
|
||||
}
|
||||
|
||||
return m.install(ctx, manifestBytes, wasmBytes, "local:"+manifestPath)
|
||||
}
|
||||
|
||||
// InstallFromUpload 从一个上传的文件流 (io.Reader) 安装插件。
|
||||
func (m *Manager) InstallFromUpload(ctx context.Context, fileReader io.Reader, originalFileName string) (*PluginInfo, error) {
|
||||
// 1. 将上传的文件内容保存到一个临时文件中
|
||||
tmpFile, err := os.CreateTemp("", "plugin-upload-*.zip")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temporary file for upload: %w", err)
|
||||
}
|
||||
defer os.Remove(tmpFile.Name())
|
||||
|
||||
_, err = io.Copy(tmpFile, fileReader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to save uploaded file to temporary location: %w", err)
|
||||
}
|
||||
// 必须关闭文件,以便 zip.OpenReader 能够读取它
|
||||
tmpFile.Close()
|
||||
|
||||
// 2. 从这个临时的 zip 文件中提取 manifest 和 wasm
|
||||
manifestBytes, wasmBytes, err := extractPluginFromZip(tmpFile.Name())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to extract plugin from uploaded archive: %w", err)
|
||||
}
|
||||
|
||||
// 3. 调用核心安装逻辑,使用 "upload:[filename]" 作为来源标识
|
||||
return m.install(ctx, manifestBytes, wasmBytes, "upload:"+originalFileName)
|
||||
}
|
||||
|
||||
// InstallFromArchiveURL 从一个 zip 压缩包的 URL 安装插件。
|
||||
func (m *Manager) InstallFromArchiveURL(ctx context.Context, url string) (*PluginInfo, error) {
|
||||
tmpFile, err := downloadTempFile(m.httpClient, url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to download archive from %s: %w", url, err)
|
||||
}
|
||||
defer os.Remove(tmpFile.Name())
|
||||
|
||||
manifestBytes, wasmBytes, err := extractPluginFromZip(tmpFile.Name())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to extract plugin from archive '%s': %w", url, err)
|
||||
}
|
||||
|
||||
return m.install(ctx, manifestBytes, wasmBytes, url)
|
||||
}
|
||||
|
||||
// InstallFromGitHub 从 GitHub 仓库的最新 release 安装插件。
|
||||
func (m *Manager) InstallFromGitHub(ctx context.Context, repoURL string) (*PluginInfo, error) {
|
||||
repoURL = strings.TrimSuffix(repoURL, ".git")
|
||||
parts := strings.Split(strings.TrimPrefix(repoURL, "https://github.com/"), "/")
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("invalid github repo URL format: %s", repoURL)
|
||||
}
|
||||
owner, repo := parts[0], parts[1]
|
||||
|
||||
// 1. 获取最新 release 信息
|
||||
apiURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
|
||||
log.Infof("Fetching latest release from GitHub API: %s", apiURL)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Accept", "application/vnd.github.v3+json")
|
||||
|
||||
resp, err := m.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to call GitHub API: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("GitHub API returned non-200 status: %s", resp.Status)
|
||||
}
|
||||
|
||||
var release struct {
|
||||
Assets []struct {
|
||||
Name string `json:"name"`
|
||||
DownloadURL string `json:"browser_download_url"`
|
||||
} `json:"assets"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse GitHub API response: %w", err)
|
||||
}
|
||||
|
||||
// 2. 查找包含插件的 zip 资产
|
||||
var assetURL string
|
||||
for _, asset := range release.Assets {
|
||||
// 寻找第一个 .zip 文件作为目标
|
||||
if strings.HasSuffix(asset.Name, ".zip") {
|
||||
assetURL = asset.DownloadURL
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if assetURL == "" {
|
||||
return nil, fmt.Errorf("no .zip asset found in the latest release of %s/%s", owner, repo)
|
||||
}
|
||||
|
||||
log.Infof("Found release asset to download: %s", assetURL)
|
||||
return m.InstallFromArchiveURL(ctx, assetURL)
|
||||
}
|
||||
|
||||
// install 是安装插件的核心逻辑
|
||||
func (m *Manager) install(ctx context.Context, manifestBytes []byte, wasmBytes []byte, sourceURL string) (*PluginInfo, error) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
var meta model.Plugin
|
||||
if err := json.Unmarshal(manifestBytes, &meta); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse plugin manifest: %w", err)
|
||||
}
|
||||
if meta.ID == "" || meta.Name == "" || meta.Version == "" {
|
||||
return nil, fmt.Errorf("plugin manifest is missing required fields (id, name, version)")
|
||||
}
|
||||
|
||||
// 1. 查找匹配的处理器并检查插件类型
|
||||
var handler PluginHandler
|
||||
for _, h := range m.handlers {
|
||||
if strings.HasPrefix(meta.ID, h.Prefix()) {
|
||||
handler = h
|
||||
break
|
||||
}
|
||||
}
|
||||
if handler == nil {
|
||||
return nil, fmt.Errorf("unsupported plugin type for ID '%s'", meta.ID)
|
||||
}
|
||||
|
||||
if _, exists := m.plugins[meta.ID]; exists {
|
||||
return nil, fmt.Errorf("plugin with id '%s' already exists", meta.ID)
|
||||
}
|
||||
|
||||
fileName := formatPluginFileName(meta.Author, meta.ID)
|
||||
wasmPath := filepath.Join(m.pluginDir, fileName)
|
||||
if err := os.WriteFile(wasmPath, wasmBytes, 0644); err != nil {
|
||||
return nil, fmt.Errorf("failed to save wasm file: %w", err)
|
||||
}
|
||||
|
||||
pluginModel := &model.Plugin{
|
||||
ID: meta.ID,
|
||||
Name: meta.Name,
|
||||
Version: meta.Version,
|
||||
Author: meta.Author,
|
||||
Description: meta.Description,
|
||||
IconURL: meta.IconURL,
|
||||
SourceURL: sourceURL,
|
||||
WasmPath: wasmPath,
|
||||
}
|
||||
|
||||
// 先存入数据库,初始状态为 'inactive'
|
||||
if err := db.CreatePlugin(ctx, pluginModel); err != nil {
|
||||
os.Remove(wasmPath)
|
||||
return nil, fmt.Errorf("failed to save plugin metadata to database: %w", err)
|
||||
}
|
||||
log.Infof("Plugin '%s' metadata saved to database with status: inactive.", pluginModel.ID)
|
||||
|
||||
pluginInfo := &PluginInfo{Plugin: pluginModel, handler: handler}
|
||||
m.plugins[pluginInfo.ID] = pluginInfo
|
||||
|
||||
// 使用找到的处理器进行注册
|
||||
if err := handler.Register(ctx, pluginInfo); err != nil {
|
||||
// 注册失败,更新数据库状态
|
||||
log.Errorf("Failed to register newly installed plugin '%s': %v", pluginInfo.ID, err)
|
||||
pluginInfo.Status = model.StatusError
|
||||
pluginInfo.Message = err.Error()
|
||||
if dbErr := db.UpdatePluginStatus(ctx, pluginInfo.ID, model.StatusError, err.Error()); dbErr != nil {
|
||||
log.Errorf("Failed to update error status for plugin '%s' in database: %v", pluginInfo.ID, dbErr)
|
||||
}
|
||||
} else {
|
||||
// 注册成功,更新状态
|
||||
pluginInfo.Status = model.StatusActive
|
||||
pluginInfo.Message = ""
|
||||
if dbErr := db.UpdatePluginStatus(ctx, pluginInfo.ID, model.StatusActive, ""); dbErr != nil {
|
||||
log.Errorf("Failed to update active status for plugin '%s' in database: %v", pluginInfo.ID, dbErr)
|
||||
}
|
||||
}
|
||||
|
||||
return pluginInfo, nil
|
||||
}
|
||||
|
||||
// Uninstall removes a plugin: it unregisters it from its handler, closes its
// runtime resources, deletes its database record and wasm file, and finally
// drops it from the in-memory map. Unregister/close/file-removal failures are
// logged but do not abort the uninstall; only a database delete failure does.
func (m *Manager) Uninstall(ctx context.Context, pluginID string) error {
	m.Lock()
	defer m.Unlock()

	plugin, ok := m.plugins[pluginID]
	if !ok {
		return fmt.Errorf("plugin with ID '%s' not found", pluginID)
	}

	// 1. Unregister via the plugin's handler.
	if plugin.handler != nil {
		if err := plugin.handler.Unregister(ctx, plugin); err != nil {
			// Even if unregistration fails, continue with the removal.
			log.Warnf("Failed to unregister plugin '%s', but continuing with uninstallation: %v", pluginID, err)
		}
	}

	// 2. Close the plugin's internal resources (if a driver instance exists).
	if plugin.driver != nil {
		if err := plugin.driver.Close(ctx); err != nil {
			log.Warnf("Error closing driver resources for plugin %s: %v", pluginID, err)
		}
	}

	// 3. Delete the database record; this is the only fatal step.
	if err := db.DeletePluginByID(ctx, pluginID); err != nil {
		return fmt.Errorf("failed to delete plugin '%s' from database: %w", pluginID, err)
	}

	// 4. Delete the wasm file; a missing file is not an error.
	if err := os.Remove(plugin.WasmPath); err != nil && !os.IsNotExist(err) {
		log.Warnf("Failed to remove wasm file %s, but database entry was removed: %v", plugin.WasmPath, err)
	}

	// 5. Drop the in-memory entry.
	delete(m.plugins, pluginID)
	log.Infof("Plugin '%s' has been successfully uninstalled.", pluginID)
	return nil
}
|
||||
|
||||
// CheckForUpdate checks a single plugin for a newer release.
// It returns the latest version string when an update is available, or an
// empty string when the plugin is already up to date. Only plugins whose
// SourceURL points at GitHub are supported.
func (m *Manager) CheckForUpdate(ctx context.Context, pluginID string) (string, error) {
	// Only a read lock is needed for the map lookup; the network call below
	// runs without holding the lock.
	m.RLock()
	plugin, ok := m.plugins[pluginID]
	m.RUnlock()

	if !ok {
		return "", fmt.Errorf("plugin with ID '%s' not found", pluginID)
	}

	if !strings.HasPrefix(plugin.SourceURL, "https://github.com/") {
		return "", fmt.Errorf("only plugins installed from GitHub can be checked for updates")
	}

	latestVersionStr, err := m.getLatestGitHubVersionTag(ctx, plugin.SourceURL)
	if err != nil {
		return "", fmt.Errorf("failed to check for updates for plugin '%s': %w", pluginID, err)
	}

	latestVersion, err := semver.NewVersion(latestVersionStr)
	if err != nil {
		return "", fmt.Errorf("invalid latest version format '%s' for plugin '%s': %w", latestVersionStr, pluginID, err)
	}

	currentVersion, err := semver.NewVersion(plugin.Version)
	if err != nil {
		return "", fmt.Errorf("invalid current version format '%s' for plugin '%s': %w", plugin.Version, pluginID, err)
	}

	if latestVersion.Compare(*currentVersion) > 0 {
		return latestVersion.String(), nil
	}

	// No update available.
	return "", nil
}
|
||||
|
||||
// CheckForUpdates checks every installed plugin for a newer release and
// returns a map of plugin ID to the latest available version. Plugins not
// sourced from GitHub, or whose version strings fail to parse, are skipped
// with a warning rather than failing the whole scan.
//
// NOTE(review): the GitHub API calls run while holding the manager's read
// lock, which blocks writers (install/uninstall) for the duration of the
// network round-trips — confirm this is acceptable for large plugin sets.
func (m *Manager) CheckForUpdates(ctx context.Context) (map[string]string, error) {
	m.RLock()
	defer m.RUnlock()

	updatesAvailable := make(map[string]string)

	for id, plugin := range m.plugins {
		if !strings.HasPrefix(plugin.SourceURL, "https://github.com/") {
			continue // only GitHub-sourced plugins can be checked
		}

		latestVersionStr, err := m.getLatestGitHubVersionTag(ctx, plugin.SourceURL)
		if err != nil {
			log.Warnf("Failed to check for updates for plugin '%s': %v", id, err)
			continue
		}

		latestVersion, err := semver.NewVersion(latestVersionStr)
		if err != nil {
			log.Warnf("Invalid latest version format '%s' for plugin '%s': %v", latestVersionStr, id, err)
			continue
		}

		currentVersion, err := semver.NewVersion(plugin.Version)
		if err != nil {
			log.Warnf("Invalid current version format '%s' for plugin '%s': %v", plugin.Version, id, err)
			continue
		}

		// Record the plugin only when the remote release is strictly newer.
		if latestVersion.Compare(*currentVersion) > 0 {
			updatesAvailable[id] = latestVersion.String()
			log.Infof("Update available for plugin '%s': %s -> %s", id, currentVersion.String(), latestVersion.String())
		}
	}

	return updatesAvailable, nil
}
|
||||
|
||||
// Update 更新指定的插件到最新版本。
|
||||
func (m *Manager) Update(ctx context.Context, pluginID string) (*PluginInfo, error) {
|
||||
m.Lock()
|
||||
plugin, ok := m.plugins[pluginID]
|
||||
m.Unlock() // 提前解锁
|
||||
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("plugin with ID '%s' not found", pluginID)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(plugin.SourceURL, "https://github.com/") {
|
||||
return nil, fmt.Errorf("only plugins installed from GitHub can be updated automatically")
|
||||
}
|
||||
|
||||
log.Infof("Updating plugin '%s' from %s", pluginID, plugin.SourceURL)
|
||||
|
||||
// 先卸载旧版本
|
||||
if err := m.Uninstall(ctx, pluginID); err != nil {
|
||||
return nil, fmt.Errorf("failed to uninstall old version of plugin '%s' during update: %w", pluginID, err)
|
||||
}
|
||||
|
||||
// 重新从 GitHub 安装
|
||||
return m.Install(ctx, plugin.SourceURL)
|
||||
}
|
||||
|
||||
// getLatestGitHubVersionTag 从 GitHub API 获取最新的 release tag 字符串。
|
||||
func (m *Manager) getLatestGitHubVersionTag(ctx context.Context, repoURL string) (string, error) {
|
||||
// 规范化 URL 并解析 owner/repo
|
||||
repoURL = strings.TrimSuffix(repoURL, ".git")
|
||||
parts := strings.Split(strings.TrimPrefix(repoURL, "https://github.com/"), "/")
|
||||
if len(parts) < 2 {
|
||||
return "", fmt.Errorf("invalid github repo URL format: %s", repoURL)
|
||||
}
|
||||
owner, repo := parts[0], parts[1]
|
||||
|
||||
// 构建 API URL
|
||||
apiURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
|
||||
|
||||
// 创建带上下文的 HTTP 请求
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create request for GitHub API: %w", err)
|
||||
}
|
||||
// 根据 GitHub API v3 的要求设置 Accept header
|
||||
req.Header.Set("Accept", "application/vnd.github.v3+json")
|
||||
|
||||
// 执行请求
|
||||
resp, err := m.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to call GitHub API at %s: %w", apiURL, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// 检查响应状态码
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
// 读取响应体以获取更详细的错误信息
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return "", fmt.Errorf("GitHub API returned non-200 status: %s, body: %s", resp.Status, string(body))
|
||||
}
|
||||
|
||||
// 定义一个结构体来仅解析我们需要的字段 (tag_name)
|
||||
var release struct {
|
||||
TagName string `json:"tag_name"`
|
||||
}
|
||||
|
||||
// 解析 JSON 响应
|
||||
if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
|
||||
return "", fmt.Errorf("failed to parse GitHub API response: %w", err)
|
||||
}
|
||||
|
||||
if release.TagName == "" {
|
||||
return "", errors.New("no tag_name found in the latest release")
|
||||
}
|
||||
|
||||
return release.TagName, nil
|
||||
}
|
||||
|
||||
// --- 辅助函数 ---
|
||||
|
||||
// extractPluginFromZip 从 zip 文件中提取 plugin.json 和 .wasm 文件
|
||||
func extractPluginFromZip(zipPath string) ([]byte, []byte, error) {
|
||||
r, err := zip.OpenReader(zipPath)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
var manifestBytes, wasmBytes []byte
|
||||
|
||||
for _, f := range r.File {
|
||||
// 忽略目录和非插件文件
|
||||
if f.FileInfo().IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
baseName := filepath.Base(f.Name)
|
||||
if baseName == "plugin.json" {
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
manifestBytes, err = io.ReadAll(rc)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
} else if strings.HasSuffix(baseName, ".wasm") {
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
wasmBytes, err = io.ReadAll(rc)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if manifestBytes == nil {
|
||||
return nil, nil, errors.New("manifest 'plugin.json' not found in archive")
|
||||
}
|
||||
if wasmBytes == nil {
|
||||
return nil, nil, errors.New("no .wasm file found in archive")
|
||||
}
|
||||
|
||||
return manifestBytes, wasmBytes, nil
|
||||
}
|
||||
|
||||
// downloadTempFile 将文件从 URL 下载到临时目录
|
||||
func downloadTempFile(client *http.Client, url string) (*os.File, error) {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("server returned status: %s", resp.Status)
|
||||
}
|
||||
|
||||
tmpFile, err := os.CreateTemp("", "plugin-download-*.zip")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = io.Copy(tmpFile, resp.Body)
|
||||
if err != nil {
|
||||
tmpFile.Close()
|
||||
os.Remove(tmpFile.Name())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 确保内容写入磁盘
|
||||
if err := tmpFile.Sync(); err != nil {
|
||||
tmpFile.Close()
|
||||
os.Remove(tmpFile.Name())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tmpFile.Close()
|
||||
return tmpFile, nil
|
||||
}
|
||||
|
||||
// nonAlphanumericRegex matches every run of characters outside the safe
// filename set [a-zA-Z0-9_.-].
var nonAlphanumericRegex = regexp.MustCompile(`[^a-zA-Z0-9_.-]+`)

// sanitize collapses each run of unsafe characters in s into a single
// underscore, yielding a string safe to embed in a file name.
// An empty input maps to the placeholder "unknown".
func sanitize(s string) string {
	if len(s) == 0 {
		return "unknown"
	}
	return nonAlphanumericRegex.ReplaceAllString(s, "_")
}
|
||||
|
||||
func formatPluginFileName(author, id string) string {
|
||||
return fmt.Sprintf("%s-%s.wasm", sanitize(author), sanitize(id))
|
||||
}
|
||||
70
internal/plugin/manager_driver.go
Normal file
70
internal/plugin/manager_driver.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// --- 驱动插件处理器 ---
|
||||
|
||||
// DriverPluginHandler 实现了 PluginHandler 接口,专门处理驱动插件
|
||||
type DriverPluginHandler struct{}
|
||||
|
||||
func (h *DriverPluginHandler) Prefix() string {
|
||||
return "openlist.driver."
|
||||
}
|
||||
|
||||
// Register loads the plugin's wasm driver runtime and registers a driver
// factory with op so new storage instances can be created on demand.
// It is a no-op when the plugin already has a driver runtime.
func (h *DriverPluginHandler) Register(ctx context.Context, plugin *PluginInfo) error {
	if plugin.driver != nil {
		return nil // already registered
	}

	var err error
	plugin.driver, err = NewDriverPlugin(ctx, plugin)
	if err != nil {
		return fmt.Errorf("load driver plugin err: %w", err)
	}

	err = op.RegisterDriver(func() driver.Driver {
		tempDriver, err := plugin.driver.NewWasmDriver()
		if err != nil {
			// The factory is invoked lazily, long after Register returned,
			// so the only option here is to log and return nil.
			log.Errorf("deferred load driver plugin err: %v", err)
			return nil
		}
		return tempDriver
	})
	if err != nil {
		// Registration failed: tear the wasm runtime down again.
		plugin.driver.Close(ctx)
		return fmt.Errorf("failed to register driver in op: %w", err)
	}

	log.Infof("Successfully registered driver for plugin: %s", plugin.ID)
	return nil
}
|
||||
|
||||
// Unregister removes the plugin's driver from op and shuts down its wasm
// runtime. It requires the driver runtime to still exist (to derive the
// driver config); runtime-close errors are logged but not returned.
func (h *DriverPluginHandler) Unregister(ctx context.Context, plugin *PluginInfo) error {
	if plugin.driver == nil {
		log.Errorf("plugin.driver is nil during unregister for plugin '%s', cannot get config", plugin.ID)
		return fmt.Errorf("plugin.driver instance not found, cannot properly unregister from op")
	}

	// A temporary driver instance is created so op can identify which
	// driver registration to remove.
	op.UnRegisterDriver(func() driver.Driver {
		tempDriver, err := plugin.driver.NewWasmDriver()
		if err != nil {
			log.Warnf("Failed to create temp driver for unregister: %v", err)
			return nil
		}
		return tempDriver
	})

	if err := plugin.driver.Close(ctx); err != nil {
		log.Warnf("Error closing driver plugin runtime for %s: %v", plugin.ID, err)
	}

	return nil
}
|
||||
44
internal/plugin/warp/context.go
Normal file
44
internal/plugin/warp/context.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package plugin_warp
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"
|
||||
)
|
||||
|
||||
type ContextManaget = witgo.ResourceManager[context.Context]
|
||||
type Context = uint32
|
||||
|
||||
func NewContextManager() *ContextManaget {
|
||||
return witgo.NewResourceManager[context.Context](nil)
|
||||
}
|
||||
|
||||
type ContextPollable struct {
|
||||
context.Context
|
||||
}
|
||||
|
||||
func (c *ContextPollable) IsReady() bool {
|
||||
select {
|
||||
case <-c.Done():
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Block 阻塞直到 Pollable 就绪。
|
||||
func (c *ContextPollable) Block() {
|
||||
<-c.Done()
|
||||
}
|
||||
|
||||
func (*ContextPollable) SetReady() {
|
||||
|
||||
}
|
||||
|
||||
func (ContextPollable) Close() {
|
||||
|
||||
}
|
||||
|
||||
func (c *ContextPollable) Channel() <-chan struct{} {
|
||||
return c.Done()
|
||||
}
|
||||
45
internal/plugin/warp/errors.go
Normal file
45
internal/plugin/warp/errors.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package plugin_warp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
)
|
||||
|
||||
type ErrCode struct {
|
||||
InvalidHandle *struct{} `wit:"case(0)"`
|
||||
// 表示功能未实现。
|
||||
NotImplemented *struct{} `wit:"case(1)"`
|
||||
// 表示功能不支持。
|
||||
NotSupport *struct{} `wit:"case(2)"`
|
||||
// 表示资源未找到。
|
||||
NotFound *struct{} `wit:"case(3)"`
|
||||
// 表示路径是文件而非目录。
|
||||
NotFolder *struct{} `wit:"case(4)"`
|
||||
// 表示路径是目录而非文件。
|
||||
NotFile *struct{} `wit:"case(5)"`
|
||||
// 包含描述信息的通用错误。
|
||||
Generic *string `wit:"case(6)"`
|
||||
// 授权失效,此时驱动处于无法自动恢复的状态
|
||||
Unauthorized *string `wit:"case(7)"`
|
||||
}
|
||||
|
||||
func (e ErrCode) ToError() error {
|
||||
if e.InvalidHandle != nil {
|
||||
return errs.StorageNotFound
|
||||
} else if e.NotImplemented != nil {
|
||||
return errs.NotImplement
|
||||
} else if e.NotSupport != nil {
|
||||
return errs.NotSupport
|
||||
} else if e.NotFound != nil {
|
||||
return errs.ObjectNotFound
|
||||
} else if e.NotFile != nil {
|
||||
return errs.NotFile
|
||||
} else if e.NotFolder != nil {
|
||||
return errs.NotFolder
|
||||
} else if e.Unauthorized != nil {
|
||||
return errors.New(*e.Unauthorized)
|
||||
}
|
||||
|
||||
return errors.New(*e.Generic)
|
||||
}
|
||||
98
internal/plugin/warp/object.go
Normal file
98
internal/plugin/warp/object.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package plugin_warp
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
clocks "github.com/OpenListTeam/wazero-wasip2/wasip2/clocks/v0_2"
|
||||
witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"
|
||||
)
|
||||
|
||||
type Object struct {
|
||||
// 对象的绝对路径。
|
||||
Path string
|
||||
// 对象的id信息
|
||||
ID string
|
||||
// 对象的名称。
|
||||
Name string
|
||||
// 对象的大小(字节)。
|
||||
Size int64
|
||||
// 是否为目录。
|
||||
IsFolder bool
|
||||
// 创建时间戳
|
||||
Created clocks.Duration
|
||||
// 修改时间戳
|
||||
Modified clocks.Duration
|
||||
// 缩略图链接。
|
||||
Thumbnail witgo.Option[string]
|
||||
// 文件的哈希信息列表。
|
||||
Hashes []HashInfo
|
||||
// 用于存储驱动特定的、非标准的元数据。
|
||||
Extra [][2]string
|
||||
}
|
||||
|
||||
func (o *Object) GetName() string {
|
||||
return o.Name
|
||||
}
|
||||
|
||||
func (o *Object) GetSize() int64 {
|
||||
return o.Size
|
||||
}
|
||||
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.Modified.ToTime()
|
||||
}
|
||||
func (o *Object) CreateTime() time.Time {
|
||||
if o.Created == 0 {
|
||||
return o.ModTime()
|
||||
}
|
||||
return o.Created.ToTime()
|
||||
}
|
||||
|
||||
func (o *Object) IsDir() bool {
|
||||
return o.IsFolder
|
||||
}
|
||||
|
||||
func (o *Object) GetID() string {
|
||||
return o.ID
|
||||
}
|
||||
|
||||
func (o *Object) GetPath() string {
|
||||
return o.Path
|
||||
}
|
||||
|
||||
func (o *Object) SetPath(path string) {
|
||||
o.Path = path
|
||||
}
|
||||
|
||||
func (o *Object) GetHash() utils.HashInfo {
|
||||
return HashInfoConvert3(o.Hashes)
|
||||
}
|
||||
|
||||
func (o *Object) Thumb() string {
|
||||
return o.Thumbnail.UnwrapOr("")
|
||||
}
|
||||
|
||||
var _ model.Obj = (*Object)(nil)
|
||||
var _ model.Thumb = (*Object)(nil)
|
||||
var _ model.SetPath = (*Object)(nil)
|
||||
|
||||
func ConvertObjToObject(obj model.Obj) Object {
|
||||
|
||||
thumbnail := witgo.None[string]()
|
||||
if t, ok := obj.(model.Thumb); ok {
|
||||
thumbnail = witgo.Some(t.Thumb())
|
||||
}
|
||||
return Object{
|
||||
Path: obj.GetPath(),
|
||||
ID: obj.GetID(),
|
||||
Name: obj.GetName(),
|
||||
Size: obj.GetSize(),
|
||||
IsFolder: obj.IsDir(),
|
||||
Created: clocks.Duration(obj.CreateTime().UnixNano()),
|
||||
Modified: clocks.Duration(obj.ModTime().UnixNano()),
|
||||
Thumbnail: thumbnail,
|
||||
Hashes: HashInfoConvert(obj.GetHash()),
|
||||
}
|
||||
}
|
||||
198
internal/plugin/warp/types.go
Normal file
198
internal/plugin/warp/types.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package plugin_warp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
clocks "github.com/OpenListTeam/wazero-wasip2/wasip2/clocks/v0_2"
|
||||
wasi_http "github.com/OpenListTeam/wazero-wasip2/wasip2/http/v0_2"
|
||||
witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"
|
||||
)
|
||||
|
||||
type LogLevel struct {
|
||||
Debug *struct{} `wit:"case(0)"`
|
||||
Info *struct{} `wit:"case(1)"`
|
||||
Warn *struct{} `wit:"case(2)"`
|
||||
Error *struct{} `wit:"case(3)"`
|
||||
}
|
||||
|
||||
type HashAlg struct {
|
||||
MD5 *struct{} `wit:"case(0)"`
|
||||
SHA1 *struct{} `wit:"case(1)"`
|
||||
SHA256 *struct{} `wit:"case(2)"`
|
||||
GCID *struct{} `wit:"case(3)"`
|
||||
}
|
||||
|
||||
type HashInfo struct {
|
||||
Alg HashAlg
|
||||
Val string
|
||||
}
|
||||
|
||||
type LinkResult struct {
|
||||
File witgo.Option[Object]
|
||||
Resource LinkResource
|
||||
}
|
||||
type LinkResource struct {
|
||||
Direct *struct {
|
||||
Url string
|
||||
Header wasi_http.Headers
|
||||
Expiratcion witgo.Option[clocks.Duration]
|
||||
} `wit:"case(0)"`
|
||||
RangeStream *struct{} `wit:"case(1)"`
|
||||
}
|
||||
|
||||
type Capability struct {
|
||||
GetFile bool
|
||||
ListFile bool
|
||||
LinkFile bool
|
||||
MkdirFile bool
|
||||
RenameFile bool
|
||||
MoveFile bool
|
||||
RemoveFile bool
|
||||
CopyFile bool
|
||||
UploadFile bool
|
||||
}
|
||||
|
||||
func (Capability) IsFlags() {}
|
||||
|
||||
type DriverProps struct {
|
||||
Name string
|
||||
|
||||
OnlyProxy bool
|
||||
NoCache bool
|
||||
|
||||
Alert string
|
||||
|
||||
NoOverwriteUpload bool
|
||||
ProxyRange bool
|
||||
|
||||
// 网盘能力标记
|
||||
Capabilitys Capability
|
||||
}
|
||||
|
||||
func (c DriverProps) ToConfig() driver.Config {
|
||||
return driver.Config{
|
||||
Name: c.Name,
|
||||
LocalSort: true,
|
||||
OnlyProxy: c.OnlyProxy,
|
||||
NoCache: c.NoCache,
|
||||
NoUpload: !c.Capabilitys.UploadFile,
|
||||
|
||||
CheckStatus: true,
|
||||
Alert: c.Alert,
|
||||
|
||||
NoOverwriteUpload: c.NoOverwriteUpload,
|
||||
ProxyRangeOption: c.ProxyRange,
|
||||
}
|
||||
}
|
||||
|
||||
type FormField struct {
|
||||
// 字段的唯一标识符(键)。
|
||||
Name string
|
||||
// 显示给用户的标签。
|
||||
Label string
|
||||
// 字段的输入类型,用于 UI 渲染。
|
||||
Kind FieldKind
|
||||
// 是否必填
|
||||
Required bool
|
||||
// 字段的帮助或提示信息。
|
||||
Help string
|
||||
}
|
||||
|
||||
type FieldKind struct {
|
||||
String *string `wit:"case(0)"`
|
||||
Password *string `wit:"case(1)"`
|
||||
Number *float64 `wit:"case(2)"`
|
||||
Boolean *bool `wit:"case(3)"`
|
||||
Text *string `wit:"case(4)"`
|
||||
Select *[]string `wit:"case(5)"`
|
||||
}
|
||||
|
||||
type Additional struct {
|
||||
Json []byte
|
||||
Forms []FormField
|
||||
}
|
||||
|
||||
func NewAdditional(forms []FormField) Additional {
|
||||
return Additional{
|
||||
Forms: forms,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Additional) String() string {
|
||||
return string(m.Json)
|
||||
}
|
||||
func (m *Additional) SetString(config string) {
|
||||
m.Json = []byte(config)
|
||||
}
|
||||
|
||||
func (m *Additional) Bytes() []byte {
|
||||
return m.Json
|
||||
}
|
||||
|
||||
func (m *Additional) SetBytes(config []byte) {
|
||||
m.Json = config
|
||||
}
|
||||
|
||||
// MarshalJSON returns m as the JSON encoding of m.
|
||||
func (m Additional) MarshalJSON() ([]byte, error) {
|
||||
return m.Json, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets *m to a copy of data.
|
||||
func (m *Additional) UnmarshalJSON(data []byte) error {
|
||||
if m == nil {
|
||||
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
|
||||
}
|
||||
m.Json = slices.Clone(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (addit *Additional) GetItems() []driver.Item {
|
||||
return utils.MustSliceConvert(addit.Forms, func(item FormField) driver.Item {
|
||||
var typ string
|
||||
var def string
|
||||
var opts string
|
||||
if item.Kind.Boolean != nil {
|
||||
typ = conf.TypeBool
|
||||
def = fmt.Sprintf("%t", *item.Kind.Boolean)
|
||||
} else if item.Kind.Password != nil {
|
||||
typ = conf.TypeString
|
||||
def = *item.Kind.Password
|
||||
} else if item.Kind.Number != nil {
|
||||
typ = conf.TypeNumber
|
||||
def = fmt.Sprintf("%f", *item.Kind.Number)
|
||||
} else if item.Kind.Select != nil {
|
||||
typ = conf.TypeSelect
|
||||
if len(*item.Kind.Select) > 0 {
|
||||
def = (*item.Kind.Select)[0]
|
||||
opts = strings.Join((*item.Kind.Select), ",")
|
||||
}
|
||||
} else if item.Kind.String != nil {
|
||||
typ = conf.TypeString
|
||||
def = *item.Kind.String
|
||||
} else if item.Kind.Text != nil {
|
||||
typ = conf.TypeText
|
||||
def = *item.Kind.Text
|
||||
}
|
||||
|
||||
return driver.Item{
|
||||
Name: item.Name,
|
||||
Type: typ,
|
||||
Default: def,
|
||||
Options: opts,
|
||||
Required: item.Required,
|
||||
Help: item.Help,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type LinkArgs struct {
|
||||
IP string
|
||||
Header wasi_http.Headers
|
||||
}
|
||||
105
internal/plugin/warp/upload.go
Normal file
105
internal/plugin/warp/upload.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package plugin_warp
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
hash_extend "github.com/OpenListTeam/OpenList/v4/pkg/utils/hash"
|
||||
witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"
|
||||
)
|
||||
|
||||
type UploadRequest struct {
|
||||
Target Object
|
||||
// 指向宿主端文件内容的句柄
|
||||
// 由host控制释放
|
||||
Content UploadReadable
|
||||
// 如果是覆盖上传,宿主会提供被覆盖文件的原始对象数据
|
||||
Exist witgo.Option[Object]
|
||||
}
|
||||
|
||||
type UploadReadableType struct {
|
||||
model.FileStreamer
|
||||
StreamConsume bool
|
||||
UpdateProgress driver.UpdateProgress
|
||||
SectionReader *StreamSectionReader
|
||||
}
|
||||
|
||||
type StreamSectionReader struct {
|
||||
stream.StreamSectionReaderIF
|
||||
Offset int64
|
||||
CunketSize int64
|
||||
}
|
||||
|
||||
type UploadReadableManager = witgo.ResourceManager[*UploadReadableType]
|
||||
type UploadReadable = uint32
|
||||
|
||||
func NewUploadManager() *UploadReadableManager {
|
||||
return witgo.NewResourceManager[*UploadReadableType](nil)
|
||||
}
|
||||
|
||||
func HashTypeConvert(typ *utils.HashType) HashAlg {
|
||||
switch typ {
|
||||
case utils.MD5:
|
||||
return HashAlg{MD5: &struct{}{}}
|
||||
case utils.SHA1:
|
||||
return HashAlg{SHA1: &struct{}{}}
|
||||
case utils.SHA256:
|
||||
return HashAlg{SHA256: &struct{}{}}
|
||||
case hash_extend.GCID:
|
||||
return HashAlg{GCID: &struct{}{}}
|
||||
}
|
||||
panic("plase add hash convert")
|
||||
}
|
||||
func HashAlgConvert(hash HashAlg) *utils.HashType {
|
||||
if hash.MD5 != nil {
|
||||
return utils.MD5
|
||||
} else if hash.SHA1 != nil {
|
||||
return utils.SHA1
|
||||
} else if hash.SHA256 != nil {
|
||||
return utils.SHA256
|
||||
} else if hash.GCID != nil {
|
||||
return hash_extend.GCID
|
||||
}
|
||||
panic("plase add hash convert")
|
||||
}
|
||||
func HashAlgConverts(HashAlgs []HashAlg) []*utils.HashType {
|
||||
hashTypes := make([]*utils.HashType, 0, len(HashAlgs))
|
||||
for _, needHash := range HashAlgs {
|
||||
hashTypes = append(hashTypes, HashAlgConvert(needHash))
|
||||
}
|
||||
return hashTypes
|
||||
}
|
||||
|
||||
func HashInfoConvert(hashInfo utils.HashInfo) []HashInfo {
|
||||
result := make([]HashInfo, 0, 4)
|
||||
for hash, val := range hashInfo.All() {
|
||||
if hash.Width != len(val) {
|
||||
continue
|
||||
}
|
||||
result = append(result, HashInfo{Alg: HashTypeConvert(hash), Val: val})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func HashInfoConvert2(hashInfo utils.HashInfo, needHashs []HashAlg) []HashInfo {
|
||||
resultHashs := make([]HashInfo, 0, len(needHashs))
|
||||
|
||||
for _, needHash := range needHashs {
|
||||
hashType := HashAlgConvert(needHash)
|
||||
hash := hashInfo.GetHash(hashType)
|
||||
if hashType.Width != len(hash) {
|
||||
return nil
|
||||
}
|
||||
resultHashs = append(resultHashs, HashInfo{Alg: needHash, Val: hash})
|
||||
}
|
||||
return resultHashs
|
||||
}
|
||||
func HashInfoConvert3(hashInfo []HashInfo) utils.HashInfo {
|
||||
newHashInfo := make(map[*utils.HashType]string, len(hashInfo))
|
||||
for _, hashInfo := range hashInfo {
|
||||
newHashInfo[HashAlgConvert(hashInfo.Alg)] = hashInfo.Val
|
||||
}
|
||||
return utils.NewHashInfoByMap(newHashInfo)
|
||||
}
|
||||
@@ -199,13 +199,14 @@ func Config(ctx context.Context) searcher.Config {
|
||||
return instance.Config()
|
||||
}
|
||||
|
||||
func Update(ctx context.Context, parent string, objs []model.Obj) {
|
||||
func Update(parent string, objs []model.Obj) {
|
||||
if instance == nil || !instance.Config().AutoUpdate || !setting.GetBool(conf.AutoUpdateIndex) || Running() {
|
||||
return
|
||||
}
|
||||
if isIgnorePath(parent) {
|
||||
return
|
||||
}
|
||||
ctx := context.Background()
|
||||
// only update when index have built
|
||||
progress, err := Progress()
|
||||
if err != nil {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
@@ -41,14 +42,17 @@ type RateLimitReader struct {
|
||||
}
|
||||
|
||||
func (r *RateLimitReader) Read(p []byte) (n int, err error) {
|
||||
if err = r.Ctx.Err(); err != nil {
|
||||
return 0, err
|
||||
if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
|
||||
return 0, r.Ctx.Err()
|
||||
}
|
||||
n, err = r.Reader.Read(p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if r.Limiter != nil {
|
||||
if r.Ctx == nil {
|
||||
r.Ctx = context.Background()
|
||||
}
|
||||
err = r.Limiter.WaitN(r.Ctx, n)
|
||||
}
|
||||
return
|
||||
@@ -68,14 +72,17 @@ type RateLimitWriter struct {
|
||||
}
|
||||
|
||||
func (w *RateLimitWriter) Write(p []byte) (n int, err error) {
|
||||
if err = w.Ctx.Err(); err != nil {
|
||||
return 0, err
|
||||
if w.Ctx != nil && utils.IsCanceled(w.Ctx) {
|
||||
return 0, w.Ctx.Err()
|
||||
}
|
||||
n, err = w.Writer.Write(p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if w.Limiter != nil {
|
||||
if w.Ctx == nil {
|
||||
w.Ctx = context.Background()
|
||||
}
|
||||
err = w.Limiter.WaitN(w.Ctx, n)
|
||||
}
|
||||
return
|
||||
@@ -95,28 +102,34 @@ type RateLimitFile struct {
|
||||
}
|
||||
|
||||
func (r *RateLimitFile) Read(p []byte) (n int, err error) {
|
||||
if err = r.Ctx.Err(); err != nil {
|
||||
return 0, err
|
||||
if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
|
||||
return 0, r.Ctx.Err()
|
||||
}
|
||||
n, err = r.File.Read(p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if r.Limiter != nil {
|
||||
if r.Ctx == nil {
|
||||
r.Ctx = context.Background()
|
||||
}
|
||||
err = r.Limiter.WaitN(r.Ctx, n)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *RateLimitFile) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if err = r.Ctx.Err(); err != nil {
|
||||
return 0, err
|
||||
if r.Ctx != nil && utils.IsCanceled(r.Ctx) {
|
||||
return 0, r.Ctx.Err()
|
||||
}
|
||||
n, err = r.File.ReadAt(p, off)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if r.Limiter != nil {
|
||||
if r.Ctx == nil {
|
||||
r.Ctx = context.Background()
|
||||
}
|
||||
err = r.Limiter.WaitN(r.Ctx, n)
|
||||
}
|
||||
return
|
||||
@@ -132,16 +145,16 @@ func (r *RateLimitFile) Close() error {
|
||||
type RateLimitRangeReaderFunc RangeReaderFunc
|
||||
|
||||
func (f RateLimitRangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
if ServerDownloadLimit == nil {
|
||||
return f(ctx, httpRange)
|
||||
}
|
||||
rc, err := f(ctx, httpRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &RateLimitReader{
|
||||
Ctx: ctx,
|
||||
Reader: rc,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}, nil
|
||||
if ServerDownloadLimit != nil {
|
||||
rc = &RateLimitReader{
|
||||
Ctx: ctx,
|
||||
Reader: rc,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}
|
||||
}
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
@@ -27,8 +28,10 @@ type FileStream struct {
|
||||
ForceStreamUpload bool
|
||||
Exist model.Obj //the file existed in the destination, we can reuse some info since we wil overwrite it
|
||||
utils.Closers
|
||||
size int64
|
||||
|
||||
tmpFile model.File //if present, tmpFile has full content, it will be deleted at last
|
||||
peekBuff *buffer.Reader
|
||||
size int64
|
||||
oriReader io.Reader // the original reader, used for caching
|
||||
}
|
||||
|
||||
@@ -36,6 +39,12 @@ func (f *FileStream) GetSize() int64 {
|
||||
if f.size > 0 {
|
||||
return f.size
|
||||
}
|
||||
if file, ok := f.tmpFile.(*os.File); ok {
|
||||
info, err := file.Stat()
|
||||
if err == nil {
|
||||
return info.Size()
|
||||
}
|
||||
}
|
||||
return f.Obj.GetSize()
|
||||
}
|
||||
|
||||
@@ -54,10 +63,24 @@ func (f *FileStream) IsForceStreamUpload() bool {
|
||||
func (f *FileStream) Close() error {
|
||||
if f.peekBuff != nil {
|
||||
f.peekBuff.Reset()
|
||||
f.oriReader = nil
|
||||
f.peekBuff = nil
|
||||
}
|
||||
return f.Closers.Close()
|
||||
|
||||
var err1, err2 error
|
||||
err1 = f.Closers.Close()
|
||||
if errors.Is(err1, os.ErrClosed) {
|
||||
err1 = nil
|
||||
}
|
||||
if file, ok := f.tmpFile.(*os.File); ok {
|
||||
err2 = os.RemoveAll(file.Name())
|
||||
if err2 != nil {
|
||||
err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", file.Name())
|
||||
} else {
|
||||
f.tmpFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Join(err1, err2)
|
||||
}
|
||||
|
||||
func (f *FileStream) GetExist() model.Obj {
|
||||
@@ -71,28 +94,27 @@ func (f *FileStream) SetExist(obj model.Obj) {
|
||||
// It's not thread-safe!
|
||||
func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writer) (model.File, error) {
|
||||
if cache := f.GetFile(); cache != nil {
|
||||
_, err := cache.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if writer == nil {
|
||||
return cache, nil
|
||||
}
|
||||
reader := f.Reader
|
||||
if up != nil {
|
||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: &SimpleReaderWithSize{
|
||||
Reader: reader,
|
||||
Size: f.GetSize(),
|
||||
},
|
||||
UpdateProgress: cacheProgress,
|
||||
}
|
||||
}
|
||||
_, err = utils.CopyWithBuffer(writer, reader)
|
||||
_, err := cache.Seek(0, io.SeekStart)
|
||||
if err == nil {
|
||||
_, err = cache.Seek(0, io.SeekStart)
|
||||
reader := f.Reader
|
||||
if up != nil {
|
||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: &SimpleReaderWithSize{
|
||||
Reader: reader,
|
||||
Size: f.GetSize(),
|
||||
},
|
||||
UpdateProgress: cacheProgress,
|
||||
}
|
||||
}
|
||||
_, err = utils.CopyWithBuffer(writer, reader)
|
||||
if err == nil {
|
||||
_, err = cache.Seek(0, io.SeekStart)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -101,20 +123,21 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
|
||||
}
|
||||
|
||||
reader := f.Reader
|
||||
if f.peekBuff != nil {
|
||||
f.peekBuff.Seek(0, io.SeekStart)
|
||||
if writer != nil {
|
||||
_, err := utils.CopyWithBuffer(writer, f.peekBuff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.peekBuff.Seek(0, io.SeekStart)
|
||||
if up != nil {
|
||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: &SimpleReaderWithSize{
|
||||
Reader: reader,
|
||||
Size: f.GetSize(),
|
||||
},
|
||||
UpdateProgress: cacheProgress,
|
||||
}
|
||||
reader = f.oriReader
|
||||
}
|
||||
if writer != nil {
|
||||
reader = io.TeeReader(reader, writer)
|
||||
}
|
||||
|
||||
if f.GetSize() < 0 {
|
||||
if f.peekBuff == nil {
|
||||
f.peekBuff = &buffer.Reader{}
|
||||
@@ -151,6 +174,7 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tmpF, err := utils.CreateTempFile(reader, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -167,42 +191,22 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ
|
||||
return peekF, nil
|
||||
}
|
||||
|
||||
if up != nil {
|
||||
cacheProgress := model.UpdateProgressWithRange(*up, 0, 50)
|
||||
*up = model.UpdateProgressWithRange(*up, 50, 100)
|
||||
size := f.GetSize()
|
||||
if f.peekBuff != nil {
|
||||
peekSize := f.peekBuff.Size()
|
||||
cacheProgress(float64(peekSize) / float64(size) * 100)
|
||||
size -= peekSize
|
||||
}
|
||||
reader = &ReaderUpdatingProgress{
|
||||
Reader: &SimpleReaderWithSize{
|
||||
Reader: reader,
|
||||
Size: size,
|
||||
},
|
||||
UpdateProgress: cacheProgress,
|
||||
}
|
||||
}
|
||||
|
||||
if f.peekBuff != nil {
|
||||
f.oriReader = reader
|
||||
} else {
|
||||
f.Reader = reader
|
||||
}
|
||||
f.Reader = reader
|
||||
return f.cache(f.GetSize())
|
||||
}
|
||||
|
||||
func (f *FileStream) GetFile() model.File {
|
||||
if f.tmpFile != nil {
|
||||
return f.tmpFile
|
||||
}
|
||||
if file, ok := f.Reader.(model.File); ok {
|
||||
return file
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// 从流读取指定范围的一块数据,并且不消耗流。
|
||||
// 当读取的边界超过内部设置大小后会缓存整个流。
|
||||
// 流未缓存时线程不完全
|
||||
// RangeRead have to cache all data first since only Reader is provided.
|
||||
// It's not thread-safe!
|
||||
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > f.GetSize() {
|
||||
httpRange.Length = f.GetSize() - httpRange.Start
|
||||
@@ -211,7 +215,12 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
return io.NewSectionReader(f.GetFile(), httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
cache, err := f.cache(httpRange.Start + httpRange.Length)
|
||||
size := httpRange.Start + httpRange.Length
|
||||
if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) {
|
||||
return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
|
||||
}
|
||||
|
||||
cache, err := f.cache(size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -223,30 +232,14 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
// 使用bytes.Buffer作为io.CopyBuffer的写入对象,CopyBuffer会调用Buffer.ReadFrom
|
||||
// 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大
|
||||
|
||||
// 确保指定大小的数据被缓存
|
||||
func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
|
||||
if maxCacheSize > int64(conf.MaxBufferLimit) {
|
||||
size := f.GetSize()
|
||||
reader := f.Reader
|
||||
if f.peekBuff != nil {
|
||||
size -= f.peekBuff.Size()
|
||||
reader = f.oriReader
|
||||
}
|
||||
tmpF, err := utils.CreateTempFile(reader, size)
|
||||
tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Add(utils.CloseFunc(func() error {
|
||||
return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name()))
|
||||
}))
|
||||
if f.peekBuff != nil {
|
||||
peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Reader = peekF
|
||||
return peekF, nil
|
||||
}
|
||||
f.Add(tmpF)
|
||||
f.tmpFile = tmpF
|
||||
f.Reader = tmpF
|
||||
return tmpF, nil
|
||||
}
|
||||
@@ -254,12 +247,8 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
|
||||
if f.peekBuff == nil {
|
||||
f.peekBuff = &buffer.Reader{}
|
||||
f.oriReader = f.Reader
|
||||
f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
|
||||
}
|
||||
bufSize := maxCacheSize - f.peekBuff.Size()
|
||||
if bufSize <= 0 {
|
||||
return f.peekBuff, nil
|
||||
}
|
||||
bufSize := maxCacheSize - int64(f.peekBuff.Size())
|
||||
var buf []byte
|
||||
if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
|
||||
m, err := mmap.Alloc(int(bufSize))
|
||||
@@ -278,24 +267,37 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
|
||||
return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
|
||||
}
|
||||
f.peekBuff.Append(buf)
|
||||
if f.peekBuff.Size() >= f.GetSize() {
|
||||
if int64(f.peekBuff.Size()) >= f.GetSize() {
|
||||
f.Reader = f.peekBuff
|
||||
f.oriReader = nil
|
||||
} else {
|
||||
f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
|
||||
}
|
||||
return f.peekBuff, nil
|
||||
}
|
||||
|
||||
func (f *FileStream) SetTmpFile(file model.File) {
|
||||
f.AddIfCloser(file)
|
||||
f.tmpFile = file
|
||||
f.Reader = file
|
||||
}
|
||||
|
||||
var _ model.FileStreamer = (*SeekableStream)(nil)
|
||||
var _ model.FileStreamer = (*FileStream)(nil)
|
||||
|
||||
//var _ seekableStream = (*FileStream)(nil)
|
||||
|
||||
// for most internal stream, which is either RangeReadCloser or MFile
|
||||
// Any functionality implemented based on SeekableStream should implement a Close method,
|
||||
// whose only purpose is to close the SeekableStream object. If such functionality has
|
||||
// additional resources that need to be closed, they should be added to the Closer property of
|
||||
// the SeekableStream object and be closed together when the SeekableStream object is closed.
|
||||
type SeekableStream struct {
|
||||
*FileStream
|
||||
// should have one of belows to support rangeRead
|
||||
rangeReader model.RangeReaderIF
|
||||
rangeReadCloser model.RangeReadCloserIF
|
||||
}
|
||||
|
||||
// NewSeekableStream create a SeekableStream from FileStream and Link
|
||||
// if FileStream.Reader is not nil, use it directly
|
||||
// else create RangeReader from Link
|
||||
func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error) {
|
||||
if len(fs.Mimetype) == 0 {
|
||||
fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
|
||||
@@ -315,31 +317,30 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rrc := &model.RangeReadCloser{
|
||||
RangeReader: rr,
|
||||
}
|
||||
if _, ok := rr.(*model.FileRangeReader); ok {
|
||||
var rc io.ReadCloser
|
||||
rc, err = rr.RangeRead(fs.Ctx, http_range.Range{Length: -1})
|
||||
fs.Reader, err = rrc.RangeRead(fs.Ctx, http_range.Range{Length: -1})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Reader = rc
|
||||
fs.Add(rc)
|
||||
}
|
||||
fs.size = size
|
||||
fs.Add(link)
|
||||
return &SeekableStream{FileStream: fs, rangeReader: rr}, nil
|
||||
fs.Add(rrc)
|
||||
return &SeekableStream{FileStream: fs, rangeReadCloser: rrc}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("illegal seekableStream")
|
||||
}
|
||||
|
||||
// 如果使用缓存或者rangeReader读取指定范围的数据,是线程安全的
|
||||
// 其他特性继承自FileStream.RangeRead
|
||||
// RangeRead is not thread-safe, pls use it in single thread only.
|
||||
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
|
||||
if ss.GetFile() == nil && ss.rangeReader != nil {
|
||||
rc, err := ss.rangeReader.RangeRead(ss.Ctx, httpRange)
|
||||
if ss.GetFile() == nil && ss.rangeReadCloser != nil {
|
||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ss.Add(rc)
|
||||
return rc, nil
|
||||
}
|
||||
return ss.FileStream.RangeRead(httpRange)
|
||||
@@ -355,14 +356,13 @@ func (ss *SeekableStream) Read(p []byte) (n int, err error) {
|
||||
|
||||
func (ss *SeekableStream) generateReader() error {
|
||||
if ss.Reader == nil {
|
||||
if ss.rangeReader == nil {
|
||||
if ss.rangeReadCloser == nil {
|
||||
return fmt.Errorf("illegal seekableStream")
|
||||
}
|
||||
rc, err := ss.rangeReader.RangeRead(ss.Ctx, http_range.Range{Length: -1})
|
||||
rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ss.Add(rc)
|
||||
ss.Reader = rc
|
||||
}
|
||||
return nil
|
||||
@@ -456,7 +456,7 @@ func (r *headCache) Close() error {
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) InitHeadCache() {
|
||||
if r.masterOff == 0 {
|
||||
if r.ss.GetFile() == nil && r.masterOff == 0 {
|
||||
value, _ := r.readerMap.LoadAndDelete(int64(0))
|
||||
r.headCache = &headCache{reader: value.(io.Reader)}
|
||||
r.ss.Closers.Add(r.headCache)
|
||||
@@ -464,12 +464,12 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() {
|
||||
}
|
||||
|
||||
func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (model.File, error) {
|
||||
if cache := ss.GetFile(); cache != nil {
|
||||
_, err := cache.Seek(offset, io.SeekStart)
|
||||
if ss.GetFile() != nil {
|
||||
_, err := ss.GetFile().Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cache, nil
|
||||
return ss.GetFile(), nil
|
||||
}
|
||||
r := &RangeReadReadAtSeeker{
|
||||
ss: ss,
|
||||
@@ -479,11 +479,10 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (mode
|
||||
if offset < 0 || offset > ss.GetSize() {
|
||||
return nil, errors.New("offset out of range")
|
||||
}
|
||||
reader, err := r.getReaderAtOffset(offset)
|
||||
_, err := r.getReaderAtOffset(offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.readerMap.Store(int64(offset), reader)
|
||||
} else {
|
||||
r.readerMap.Store(int64(offset), ss)
|
||||
}
|
||||
@@ -503,41 +502,39 @@ func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
|
||||
}
|
||||
|
||||
func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (io.Reader, error) {
|
||||
for {
|
||||
var cur int64 = -1
|
||||
r.readerMap.Range(func(key, value any) bool {
|
||||
k := key.(int64)
|
||||
if off == k {
|
||||
cur = k
|
||||
return false
|
||||
}
|
||||
if off > k && off-k <= 4*utils.MB && k > cur {
|
||||
cur = k
|
||||
}
|
||||
return true
|
||||
})
|
||||
if cur < 0 {
|
||||
break
|
||||
var rr io.Reader
|
||||
var cur int64 = -1
|
||||
r.readerMap.Range(func(key, value any) bool {
|
||||
k := key.(int64)
|
||||
if off == k {
|
||||
cur = k
|
||||
rr = value.(io.Reader)
|
||||
return false
|
||||
}
|
||||
v, ok := r.readerMap.LoadAndDelete(int64(cur))
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
rr := v.(io.Reader)
|
||||
if off == int64(cur) {
|
||||
// logrus.Debugf("getReaderAtOffset match_%d", off)
|
||||
return rr, nil
|
||||
if off > k && off-k <= 4*utils.MB && (rr == nil || k < cur) {
|
||||
rr = value.(io.Reader)
|
||||
cur = k
|
||||
}
|
||||
return true
|
||||
})
|
||||
if cur >= 0 {
|
||||
r.readerMap.Delete(int64(cur))
|
||||
}
|
||||
if off == int64(cur) {
|
||||
// logrus.Debugf("getReaderAtOffset match_%d", off)
|
||||
return rr, nil
|
||||
}
|
||||
|
||||
if rr != nil {
|
||||
n, _ := utils.CopyWithBufferN(io.Discard, rr, off-cur)
|
||||
cur += n
|
||||
if cur == off {
|
||||
// logrus.Debugf("getReaderAtOffset old_%d", off)
|
||||
return rr, nil
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// logrus.Debugf("getReaderAtOffset new_%d", off)
|
||||
|
||||
reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -7,12 +7,13 @@ import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
)
|
||||
|
||||
func TestFileStream_RangeRead(t *testing.T) {
|
||||
conf.MaxBufferLimit = 16 * 1024 * 1024
|
||||
type args struct {
|
||||
httpRange http_range.Range
|
||||
}
|
||||
@@ -72,38 +73,16 @@ func TestFileStream_RangeRead(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
if f.GetFile() == nil {
|
||||
t.Error("not cached")
|
||||
}
|
||||
buf2 := make([]byte, len(buf))
|
||||
if _, err := io.ReadFull(f, buf2); err != nil {
|
||||
t.Errorf("FileStream.Read() error = %v", err)
|
||||
}
|
||||
if !bytes.Equal(buf, buf2) {
|
||||
t.Errorf("FileStream.Read() = %s, want %s", buf2, buf)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileStream_With_PreHash(t *testing.T) {
|
||||
buf := []byte("github.com/OpenListTeam/OpenList")
|
||||
f := &FileStream{
|
||||
Obj: &model.Object{
|
||||
Size: int64(len(buf)),
|
||||
},
|
||||
Reader: io.NopCloser(bytes.NewReader(buf)),
|
||||
}
|
||||
|
||||
const hashSize int64 = 20
|
||||
reader, _ := f.RangeRead(http_range.Range{Start: 0, Length: hashSize})
|
||||
preHash, _ := utils.HashReader(utils.SHA1, reader)
|
||||
if preHash == "" {
|
||||
t.Error("preHash is empty")
|
||||
}
|
||||
tmpF, fullHash, _ := CacheFullAndHash(f, nil, utils.SHA1)
|
||||
fmt.Println(fullHash)
|
||||
fileFullHash, _ := utils.HashFile(utils.SHA1, tmpF)
|
||||
fmt.Println(fileFullHash)
|
||||
if fullHash != fileFullHash {
|
||||
t.Errorf("fullHash and fileFullHash should match: fullHash=%s fileFullHash=%s", fullHash, fileFullHash)
|
||||
}
|
||||
t.Run("after", func(t *testing.T) {
|
||||
if f.GetFile() == nil {
|
||||
t.Error("not cached")
|
||||
}
|
||||
buf2 := make([]byte, len(buf))
|
||||
if _, err := io.ReadFull(f, buf2); err != nil {
|
||||
t.Errorf("FileStream.Read() error = %v", err)
|
||||
}
|
||||
if !bytes.Equal(buf, buf2) {
|
||||
t.Errorf("FileStream.Read() = %s, want %s", buf2, buf)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -28,61 +28,44 @@ func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Ran
|
||||
}
|
||||
|
||||
func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
|
||||
if link.RangeReader != nil {
|
||||
if link.Concurrency < 1 && link.PartSize < 1 {
|
||||
return link.RangeReader, nil
|
||||
}
|
||||
if link.Concurrency > 0 || link.PartSize > 0 {
|
||||
down := net.NewDownloader(func(d *net.Downloader) {
|
||||
d.Concurrency = link.Concurrency
|
||||
d.PartSize = link.PartSize
|
||||
d.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
|
||||
})
|
||||
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
return down.Download(ctx, &net.HttpRequestParams{
|
||||
Range: httpRange,
|
||||
Size: size,
|
||||
})
|
||||
var rangeReader RangeReaderFunc = func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
var req *net.HttpRequestParams
|
||||
if link.RangeReader != nil {
|
||||
req = &net.HttpRequestParams{
|
||||
Range: httpRange,
|
||||
Size: size,
|
||||
}
|
||||
} else {
|
||||
requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
|
||||
header := net.ProcessHeader(requestHeader, link.Header)
|
||||
req = &net.HttpRequestParams{
|
||||
Range: httpRange,
|
||||
Size: size,
|
||||
URL: link.URL,
|
||||
HeaderRef: header,
|
||||
}
|
||||
}
|
||||
return down.Download(ctx, req)
|
||||
}
|
||||
// RangeReader只能在驱动限速
|
||||
return RangeReaderFunc(rangeReader), nil
|
||||
if link.RangeReader != nil {
|
||||
down.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
|
||||
return rangeReader, nil
|
||||
}
|
||||
return RateLimitRangeReaderFunc(rangeReader), nil
|
||||
}
|
||||
|
||||
if link.RangeReader != nil {
|
||||
return link.RangeReader, nil
|
||||
}
|
||||
|
||||
if len(link.URL) == 0 {
|
||||
return nil, errors.New("invalid link: must have at least one of URL or RangeReader")
|
||||
}
|
||||
|
||||
if link.Concurrency > 0 || link.PartSize > 0 {
|
||||
down := net.NewDownloader(func(d *net.Downloader) {
|
||||
d.Concurrency = link.Concurrency
|
||||
d.PartSize = link.PartSize
|
||||
d.HttpClient = func(ctx context.Context, params *net.HttpRequestParams) (*http.Response, error) {
|
||||
if ServerDownloadLimit == nil {
|
||||
return net.DefaultHttpRequestFunc(ctx, params)
|
||||
}
|
||||
resp, err := net.DefaultHttpRequestFunc(ctx, params)
|
||||
if err == nil && resp.Body != nil {
|
||||
resp.Body = &RateLimitReader{
|
||||
Ctx: ctx,
|
||||
Reader: resp.Body,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
})
|
||||
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
|
||||
header := net.ProcessHeader(requestHeader, link.Header)
|
||||
return down.Download(ctx, &net.HttpRequestParams{
|
||||
Range: httpRange,
|
||||
Size: size,
|
||||
URL: link.URL,
|
||||
HeaderRef: header,
|
||||
})
|
||||
}
|
||||
return RangeReaderFunc(rangeReader), nil
|
||||
}
|
||||
|
||||
rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
|
||||
httpRange.Length = size - httpRange.Start
|
||||
@@ -98,15 +81,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
|
||||
}
|
||||
return nil, fmt.Errorf("http request failure, err:%w", err)
|
||||
}
|
||||
if ServerDownloadLimit != nil {
|
||||
response.Body = &RateLimitReader{
|
||||
Ctx: ctx,
|
||||
Reader: response.Body,
|
||||
Limiter: ServerDownloadLimit,
|
||||
}
|
||||
}
|
||||
if httpRange.Start == 0 && httpRange.Length == size ||
|
||||
response.StatusCode == http.StatusPartialContent ||
|
||||
if httpRange.Start == 0 && (httpRange.Length == -1 || httpRange.Length == size) || response.StatusCode == http.StatusPartialContent ||
|
||||
checkContentRange(&response.Header, httpRange.Start) {
|
||||
return response.Body, nil
|
||||
} else if response.StatusCode == http.StatusOK {
|
||||
@@ -119,10 +94,11 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF,
|
||||
}
|
||||
return response.Body, nil
|
||||
}
|
||||
return RangeReaderFunc(rangeReader), nil
|
||||
return RateLimitRangeReaderFunc(rangeReader), nil
|
||||
}
|
||||
|
||||
func GetRangeReaderFromMFile(size int64, file model.File) *model.FileRangeReader {
|
||||
// RangeReaderIF.RangeRead返回的io.ReadCloser保留file的签名。
|
||||
func GetRangeReaderFromMFile(size int64, file model.File) model.RangeReaderIF {
|
||||
return &model.FileRangeReader{
|
||||
RangeReaderIF: RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||
length := httpRange.Length
|
||||
|
||||
@@ -104,7 +104,7 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve
|
||||
return nil, err
|
||||
}
|
||||
if (flags & os.O_EXCL) != 0 {
|
||||
return nil, errs.ObjectAlreadyExists
|
||||
return nil, errors.New("file already exists")
|
||||
}
|
||||
if (flags & os.O_WRONLY) != 0 {
|
||||
return nil, errors.New("cannot write to uploading file")
|
||||
@@ -122,7 +122,7 @@ func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserve
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
if (flags&os.O_EXCL) != 0 && exists {
|
||||
return nil, errs.ObjectAlreadyExists
|
||||
return nil, errors.New("file already exists")
|
||||
}
|
||||
if (flags & os.O_WRONLY) != 0 {
|
||||
if offset != 0 {
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package handles
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/conf"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/fs"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type FsGetDirectUploadInfoReq struct {
|
||||
Path string `json:"path" form:"path"`
|
||||
FileName string `json:"file_name" form:"file_name"`
|
||||
FileSize int64 `json:"file_size" form:"file_size"`
|
||||
Tool string `json:"tool" form:"tool"`
|
||||
}
|
||||
|
||||
// FsGetDirectUploadInfo returns the direct upload info if supported by the driver
|
||||
// If the driver does not support direct upload, returns null for upload_info
|
||||
func FsGetDirectUploadInfo(c *gin.Context) {
|
||||
var req FsGetDirectUploadInfoReq
|
||||
if err := c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
// Decode path
|
||||
path, err := url.PathUnescape(req.Path)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
return
|
||||
}
|
||||
// Get user and join path
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
path, err = user.JoinPath(path)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 403)
|
||||
return
|
||||
}
|
||||
overwrite := c.GetHeader("Overwrite") != "false"
|
||||
if !overwrite {
|
||||
if res, _ := fs.Get(c.Request.Context(), path, &fs.GetArgs{NoLog: true}); res != nil {
|
||||
common.ErrorStrResp(c, "file exists", 403)
|
||||
return
|
||||
}
|
||||
}
|
||||
directUploadInfo, err := fs.GetDirectUploadInfo(c, req.Tool, path, req.FileName, req.FileSize)
|
||||
if err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
return
|
||||
}
|
||||
common.SuccessResp(c, directUploadInfo)
|
||||
}
|
||||
@@ -49,13 +49,12 @@ type ObjResp struct {
|
||||
}
|
||||
|
||||
type FsListResp struct {
|
||||
Content []ObjResp `json:"content"`
|
||||
Total int64 `json:"total"`
|
||||
Readme string `json:"readme"`
|
||||
Header string `json:"header"`
|
||||
Write bool `json:"write"`
|
||||
Provider string `json:"provider"`
|
||||
DirectUploadTools []string `json:"direct_upload_tools,omitempty"`
|
||||
Content []ObjResp `json:"content"`
|
||||
Total int64 `json:"total"`
|
||||
Readme string `json:"readme"`
|
||||
Header string `json:"header"`
|
||||
Write bool `json:"write"`
|
||||
Provider string `json:"provider"`
|
||||
}
|
||||
|
||||
func FsListSplit(c *gin.Context) {
|
||||
@@ -110,20 +109,17 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) {
|
||||
}
|
||||
total, objs := pagination(objs, &req.PageReq)
|
||||
provider := "unknown"
|
||||
var directUploadTools []string
|
||||
if user.CanWrite() {
|
||||
if storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}); err == nil {
|
||||
directUploadTools = op.GetDirectUploadTools(storage)
|
||||
}
|
||||
storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
|
||||
if err == nil {
|
||||
provider = storage.GetStorage().Driver
|
||||
}
|
||||
common.SuccessResp(c, FsListResp{
|
||||
Content: toObjsResp(objs, reqPath, isEncrypt(meta, reqPath)),
|
||||
Total: int64(total),
|
||||
Readme: getReadme(meta, reqPath),
|
||||
Header: getHeader(meta, reqPath),
|
||||
Write: user.CanWrite() || common.CanWrite(meta, reqPath),
|
||||
Provider: provider,
|
||||
DirectUploadTools: directUploadTools,
|
||||
Content: toObjsResp(objs, reqPath, isEncrypt(meta, reqPath)),
|
||||
Total: int64(total),
|
||||
Readme: getReadme(meta, reqPath),
|
||||
Header: getHeader(meta, reqPath),
|
||||
Write: user.CanWrite() || common.CanWrite(meta, reqPath),
|
||||
Provider: provider,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
245
server/handles/plugin.go
Normal file
245
server/handles/plugin.go
Normal file
@@ -0,0 +1,245 @@
|
||||
package handles
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/db"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/plugin"
|
||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||
"github.com/gin-gonic/gin"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// InstallPluginReq 定义了安装插件 API 的请求体结构
|
||||
type InstallPluginReq struct {
|
||||
// Source 是插件的来源地址,可以是:
|
||||
// 1. GitHub 仓库 URL (e.g., "https://github.com/user/repo")
|
||||
// 2. Zip 压缩包 URL (e.g., "https://example.com/plugin.zip")
|
||||
// 3. 本地 manifest 文件路径 (e.g., "/path/to/plugin.json")
|
||||
Source string `json:"source" binding:"required"`
|
||||
}
|
||||
|
||||
// PluginIDReq 定义了需要插件 ID 的通用请求体结构
|
||||
type PluginIDReq struct {
|
||||
ID string `json:"id" binding:"required"`
|
||||
}
|
||||
|
||||
// --- API 处理器 ---
|
||||
|
||||
// ListPlugins godoc
|
||||
// @Summary List all installed plugins
|
||||
// @Description Get a list of all plugins that are currently installed.
|
||||
// @Tags plugin
|
||||
// @Produce json
|
||||
// @Success 200 {object} common.Resp{data=[]model.Plugin} "A list of installed plugins"
|
||||
// @Failure 500 {object} common.Resp "Internal server error"
|
||||
// @Router /api/plugin/list [get]
|
||||
func ListPlugins(c *gin.Context) {
|
||||
// 直接从数据库获取最新的插件列表,确保状态是最新的
|
||||
plugins, err := db.GetAllPlugins(c.Request.Context())
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get all plugins from database: %v", err)
|
||||
common.ErrorResp(c, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
common.SuccessResp(c, plugins)
|
||||
}
|
||||
|
||||
// InstallPlugin godoc
|
||||
// @Summary Install a new plugin
|
||||
// @Description Install a plugin from a source URL (GitHub, Zip) or a local path.
|
||||
// @Tags plugin
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param req body InstallPluginReq true "Plugin source"
|
||||
// @Success 200 {object} common.Resp{data=model.Plugin} "Plugin installed successfully"
|
||||
// @Failure 400 {object} common.Resp "Bad request"
|
||||
// @Failure 500 {object} common.Resp "Internal server error"
|
||||
// @Router /api/plugin/install [post]
|
||||
func InstallPlugin(c *gin.Context) {
|
||||
var req InstallPluginReq
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
common.ErrorResp(c, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Attempting to install plugin from source: %s", req.Source)
|
||||
|
||||
pluginInfo, err := plugin.PluginManager.Install(c.Request.Context(), req.Source)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to install plugin from source '%s': %v", req.Source, err)
|
||||
common.ErrorResp(c, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Successfully installed plugin: %s (v%s)", pluginInfo.Name, pluginInfo.Version)
|
||||
common.SuccessResp(c, pluginInfo.Plugin)
|
||||
}
|
||||
|
||||
// InstallPluginFromUpload godoc
|
||||
// @Summary Install a plugin from an uploaded zip file
|
||||
// @Description Upload a .zip file containing plugin.json and a .wasm file to install a new plugin.
|
||||
// @Tags plugin
|
||||
// @Accept multipart/form-data
|
||||
// @Produce json
|
||||
// @Param file formData file true "The plugin zip file to upload"
|
||||
// @Success 200 {object} common.Resp{data=model.Plugin} "Plugin installed successfully"
|
||||
// @Failure 400 {object} common.Resp "Bad request (e.g., no file uploaded)"
|
||||
// @Failure 500 {object} common.Resp "Internal server error"
|
||||
// @Router /api/plugin/upload [post]
|
||||
func InstallPluginFromUpload(c *gin.Context) {
|
||||
// "file" 必须是前端上传文件时使用的表单字段名 (form field name)
|
||||
file, err := c.FormFile("file")
|
||||
if err != nil {
|
||||
common.ErrorResp(c, fmt.Errorf("failed to get 'file' from form: %w", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Attempting to install plugin from uploaded file: %s", file.Filename)
|
||||
|
||||
// 打开上传的文件以获取 io.Reader
|
||||
f, err := file.Open()
|
||||
if err != nil {
|
||||
common.ErrorResp(c, fmt.Errorf("failed to open uploaded file: %w", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// 调用管理器的 InstallFromUpload 方法
|
||||
pluginInfo, err := plugin.PluginManager.InstallFromUpload(c.Request.Context(), f, file.Filename)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to install plugin from uploaded file '%s': %v", file.Filename, err)
|
||||
common.ErrorResp(c, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Successfully installed plugin from upload: %s (v%s)", pluginInfo.Name, pluginInfo.Version)
|
||||
common.SuccessResp(c, pluginInfo.Plugin)
|
||||
}
|
||||
|
||||
// UninstallPlugin godoc
|
||||
// @Summary Uninstall a plugin
|
||||
// @Description Uninstall a plugin by its ID.
|
||||
// @Tags plugin
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param req body PluginIDReq true "Plugin ID to uninstall"
|
||||
// @Success 200 {object} common.Resp "Plugin uninstalled successfully"
|
||||
// @Failure 400 {object} common.Resp "Bad request"
|
||||
// @Failure 500 {object} common.Resp "Internal server error"
|
||||
// @Router /api/plugin/uninstall [post]
|
||||
func UninstallPlugin(c *gin.Context) {
|
||||
var req PluginIDReq
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
common.ErrorResp(c, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Attempting to uninstall plugin with ID: %s", req.ID)
|
||||
|
||||
if err := plugin.PluginManager.Uninstall(c.Request.Context(), req.ID); err != nil {
|
||||
log.Errorf("Failed to uninstall plugin '%s': %v", req.ID, err)
|
||||
common.ErrorResp(c, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Successfully uninstalled plugin: %s", req.ID)
|
||||
common.SuccessResp(c, "Plugin uninstalled successfully")
|
||||
}
|
||||
|
||||
// CheckForUpdates godoc
|
||||
// @Summary Check for plugin updates
|
||||
// @Description Checks all installed plugins from GitHub for available updates.
|
||||
// @Tags plugin
|
||||
// @Produce json
|
||||
// @Success 200 {object} common.Resp{data=map[string]string} "A map of plugins with available updates (id: new_version)"
|
||||
// @Failure 500 {object} common.Resp "Internal server error"
|
||||
// @Router /api/plugin/updates/check [get]
|
||||
func CheckForUpdates(c *gin.Context) {
|
||||
log.Info("Checking for plugin updates...")
|
||||
|
||||
updates, err := plugin.PluginManager.CheckForUpdates(c.Request.Context())
|
||||
if err != nil {
|
||||
log.Errorf("Failed to check for plugin updates: %v", err)
|
||||
common.ErrorResp(c, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Found %d available plugin updates.", len(updates))
|
||||
common.SuccessResp(c, updates)
|
||||
}
|
||||
|
||||
// UpdatePlugin godoc
|
||||
// @Summary Update a plugin
|
||||
// @Description Update a specific plugin to its latest version. The plugin must have been installed from GitHub.
|
||||
// @Tags plugin
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param req body PluginIDReq true "Plugin ID to update"
|
||||
// @Success 200 {object} common.Resp{data=model.Plugin} "Plugin updated successfully"
|
||||
// @Failure 400 {object} common.Resp "Bad request"
|
||||
// @Failure 500 {object} common.Resp "Internal server error"
|
||||
// @Router /api/plugin/update [post]
|
||||
func UpdatePlugin(c *gin.Context) {
|
||||
var req PluginIDReq
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
common.ErrorResp(c, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Attempting to update plugin with ID: %s", req.ID)
|
||||
|
||||
updatedPluginInfo, err := plugin.PluginManager.Update(c.Request.Context(), req.ID)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to update plugin '%s': %v", req.ID, err)
|
||||
common.ErrorResp(c, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Successfully updated plugin: %s", req.ID)
|
||||
common.SuccessResp(c, updatedPluginInfo.Plugin)
|
||||
}
|
||||
|
||||
// internal/server/handles/plugin.go
|
||||
|
||||
// CheckForUpdateSingle godoc
|
||||
// @Summary Check for a single plugin update
|
||||
// @Description Checks a specific plugin for an available update.
|
||||
// @Tags plugin
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param req body PluginIDReq true "Plugin ID to check"
|
||||
// @Success 200 {object} common.Resp{data=map[string]string} "A map containing the new version if an update is available (e.g., {\"new_version\": \"1.1.0\"})"
|
||||
// @Failure 400 {object} common.Resp "Bad request"
|
||||
// @Failure 404 {object} common.Resp "Plugin not found or not eligible for update"
|
||||
// @Failure 500 {object} common.Resp "Internal server error"
|
||||
// @Router /api/plugin/updates/check_one [post]
|
||||
func CheckForUpdateSingle(c *gin.Context) {
|
||||
var req PluginIDReq
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
common.ErrorResp(c, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Checking for update for plugin: %s", req.ID)
|
||||
|
||||
newVersion, err := plugin.PluginManager.CheckForUpdate(c.Request.Context(), req.ID)
|
||||
if err != nil {
|
||||
// 区分是插件找不到还是检查过程出错
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
common.ErrorResp(c, err, http.StatusNotFound)
|
||||
} else {
|
||||
common.ErrorResp(c, err, http.StatusInternalServerError)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
response := make(map[string]string)
|
||||
if newVersion != "" {
|
||||
response["new_version"] = newVersion
|
||||
}
|
||||
|
||||
common.SuccessResp(c, response)
|
||||
}
|
||||
@@ -408,7 +408,7 @@ func ListSharings(c *gin.Context) {
|
||||
})
|
||||
}
|
||||
|
||||
type UpdateSharingReq struct {
|
||||
type CreateSharingReq struct {
|
||||
Files []string `json:"files"`
|
||||
Expires *time.Time `json:"expires"`
|
||||
Pwd string `json:"pwd"`
|
||||
@@ -418,9 +418,12 @@ type UpdateSharingReq struct {
|
||||
Readme string `json:"readme"`
|
||||
Header string `json:"header"`
|
||||
model.Sort
|
||||
CreatorName string `json:"creator"`
|
||||
Accessed int `json:"accessed"`
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
type UpdateSharingReq struct {
|
||||
ID string `json:"id"`
|
||||
Accessed int `json:"accessed"`
|
||||
CreateSharingReq
|
||||
}
|
||||
|
||||
func UpdateSharing(c *gin.Context) {
|
||||
@@ -433,38 +436,24 @@ func UpdateSharing(c *gin.Context) {
|
||||
common.ErrorStrResp(c, "must add at least 1 object", 400)
|
||||
return
|
||||
}
|
||||
var user *model.User
|
||||
var err error
|
||||
reqUser := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if reqUser.IsAdmin() && req.CreatorName != "" {
|
||||
user, err = op.GetUserByName(req.CreatorName)
|
||||
if err != nil {
|
||||
common.ErrorStrResp(c, "no such a user", 400)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
user = reqUser
|
||||
if !user.CanShare() {
|
||||
common.ErrorStrResp(c, "permission denied", 403)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if !user.CanShare() {
|
||||
common.ErrorStrResp(c, "permission denied", 403)
|
||||
return
|
||||
}
|
||||
for i, s := range req.Files {
|
||||
s = utils.FixAndCleanPath(s)
|
||||
req.Files[i] = s
|
||||
if !reqUser.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
|
||||
if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
|
||||
common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
s, err := op.GetSharingById(req.ID)
|
||||
if err != nil || (!reqUser.IsAdmin() && s.CreatorId != user.ID) {
|
||||
if err != nil || (!user.IsAdmin() && s.CreatorId != user.ID) {
|
||||
common.ErrorStrResp(c, "sharing not found", 404)
|
||||
return
|
||||
}
|
||||
if reqUser.IsAdmin() && req.CreatorName == "" {
|
||||
user = s.Creator
|
||||
}
|
||||
s.Files = req.Files
|
||||
s.Expires = req.Expires
|
||||
s.Pwd = req.Pwd
|
||||
@@ -475,7 +464,6 @@ func UpdateSharing(c *gin.Context) {
|
||||
s.Header = req.Header
|
||||
s.Readme = req.Readme
|
||||
s.Remark = req.Remark
|
||||
s.Creator = user
|
||||
if err = op.UpdateSharing(s); err != nil {
|
||||
common.ErrorResp(c, err, 500)
|
||||
} else {
|
||||
@@ -488,7 +476,7 @@ func UpdateSharing(c *gin.Context) {
|
||||
}
|
||||
|
||||
func CreateSharing(c *gin.Context) {
|
||||
var req UpdateSharingReq
|
||||
var req CreateSharingReq
|
||||
var err error
|
||||
if err = c.ShouldBind(&req); err != nil {
|
||||
common.ErrorResp(c, err, 400)
|
||||
@@ -498,35 +486,24 @@ func CreateSharing(c *gin.Context) {
|
||||
common.ErrorStrResp(c, "must add at least 1 object", 400)
|
||||
return
|
||||
}
|
||||
var user *model.User
|
||||
reqUser := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if reqUser.IsAdmin() && req.CreatorName != "" {
|
||||
user, err = op.GetUserByName(req.CreatorName)
|
||||
if err != nil {
|
||||
common.ErrorStrResp(c, "no such a user", 400)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
user = reqUser
|
||||
if !user.CanShare() || (!user.IsAdmin() && req.ID != "") {
|
||||
common.ErrorStrResp(c, "permission denied", 403)
|
||||
return
|
||||
}
|
||||
user := c.Request.Context().Value(conf.UserKey).(*model.User)
|
||||
if !user.CanShare() {
|
||||
common.ErrorStrResp(c, "permission denied", 403)
|
||||
return
|
||||
}
|
||||
for i, s := range req.Files {
|
||||
s = utils.FixAndCleanPath(s)
|
||||
req.Files[i] = s
|
||||
if !reqUser.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
|
||||
if !user.IsAdmin() && !strings.HasPrefix(s, user.BasePath) {
|
||||
common.ErrorStrResp(c, fmt.Sprintf("permission denied to share path [%s]", s), 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
s := &model.Sharing{
|
||||
SharingDB: &model.SharingDB{
|
||||
ID: req.ID,
|
||||
Expires: req.Expires,
|
||||
Pwd: req.Pwd,
|
||||
Accessed: req.Accessed,
|
||||
Accessed: 0,
|
||||
MaxAccessed: req.MaxAccessed,
|
||||
Disabled: req.Disabled,
|
||||
Sort: req.Sort,
|
||||
|
||||
@@ -166,6 +166,23 @@ func admin(g *gin.RouterGroup) {
|
||||
setting.POST("/set_thunderx", handles.SetThunderX)
|
||||
setting.POST("/set_thunder_browser", handles.SetThunderBrowser)
|
||||
|
||||
// 添加插件管理 API 路由组
|
||||
plugin := g.Group("/plugin")
|
||||
{
|
||||
plugin.GET("/list", handles.ListPlugins)
|
||||
plugin.POST("/install", handles.InstallPlugin)
|
||||
plugin.POST("/upload", handles.InstallPluginFromUpload)
|
||||
plugin.POST("/uninstall", handles.UninstallPlugin)
|
||||
plugin.POST("/update", handles.UpdatePlugin)
|
||||
|
||||
// 将检查更新的路由放在一个子组中,更符合 RESTful 风格
|
||||
updates := plugin.Group("/updates")
|
||||
{
|
||||
updates.GET("/check", handles.CheckForUpdates)
|
||||
updates.POST("/check_one", handles.CheckForUpdateSingle)
|
||||
}
|
||||
}
|
||||
|
||||
// retain /admin/task API to ensure compatibility with legacy automation scripts
|
||||
_task(g.Group("/task"))
|
||||
|
||||
@@ -211,8 +228,6 @@ func _fs(g *gin.RouterGroup) {
|
||||
// g.POST("/add_transmission", handles.SetTransmission)
|
||||
g.POST("/add_offline_download", handles.AddOfflineDownload)
|
||||
g.POST("/archive/decompress", handles.FsArchiveDecompress)
|
||||
// Direct upload (client-side upload to storage)
|
||||
g.POST("/get_direct_upload_info", middlewares.FsUp, handles.FsGetDirectUploadInfo)
|
||||
}
|
||||
|
||||
func _task(g *gin.RouterGroup) {
|
||||
|
||||
Reference in New Issue
Block a user