Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-11-25 11:29:29 +08:00)

Compare commits: renovate/g...plugin (27 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 6417f71527 |  |
|  | ae93fb0479 |  |
|  | ce3f8e36c1 |  |
|  | 33f1fbc9fb |  |
|  | fbc4d6d3f8 |  |
|  | 834248b9e4 |  |
|  | 9235c7dff1 |  |
|  | 7b377b1d54 |  |
|  | d312db3db1 |  |
|  | 7e1358e686 |  |
|  | 62e381a764 |  |
|  | 8f18e34da0 |  |
|  | 525f26dc23 |  |
|  | a0fcfa3ed2 |  |
|  | 15f276537c |  |
|  | 623a12050e |  |
|  | ae2d2d1021 |  |
|  | a109152a13 |  |
|  | febbcd6027 |  |
|  | 549e60136b |  |
|  | 14d2b8290a |  |
|  | bbc328d589 |  |
|  | 5780db293a |  |
|  | cdc069d8e7 |  |
|  | fb5094f688 |  |
|  | 670e0bdc45 |  |
|  | 89235012af |  |
@@ -64,8 +64,9 @@ Thank you for your support and understanding of the OpenList project.
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -64,8 +64,9 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
- [x] [分秒帧](https://www.mediatrack.cn)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [分秒帧](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [百度网盘](http://pan.baidu.com)
@@ -65,6 +65,7 @@ OpenListプロジェクトへのご支援とご理解をありがとうございます。
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国際](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com)(個人、家族、グループ)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -66,6 +66,7 @@ Dank u voor uw ondersteuning en begrip
- [x] Teambition([China](https://www.teambition.com), [Internationaal](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Persoonlijk, Familie, Groep)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -17,6 +17,7 @@ func Init() {
	bootstrap.Log()
	bootstrap.InitDB()
	data.InitData()
	bootstrap.InitPlugins()
	bootstrap.InitStreamLimit()
	bootstrap.InitIndex()
	bootstrap.InitUpgradePatch()
@@ -2,6 +2,7 @@ package flags

var (
	DataDir    string
	ConfigPath string
	Debug      bool
	NoPrefix   bool
	Dev        bool
@@ -27,7 +27,8 @@ func Execute() {
}

func init() {
	RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
	RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
	RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
	RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
	RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
	RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
@@ -27,6 +27,8 @@ import (
	"github.com/spf13/cobra"
	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"

	"github.com/quic-go/quic-go/http3"
)

// ServerCmd represents the server command
@@ -63,6 +65,7 @@ the address is defined in config file`,
			httpHandler = h2c.NewHandler(r, &http2.Server{})
		}
		var httpSrv, httpsSrv, unixSrv *http.Server
		var quicSrv *http3.Server
		if conf.Conf.Scheme.HttpPort != -1 {
			httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
			fmt.Printf("start HTTP server @ %s\n", httpBase)
@@ -86,6 +89,24 @@ the address is defined in config file`,
					utils.Log.Fatalf("failed to start https: %s", err.Error())
				}
			}()
			if conf.Conf.Scheme.EnableH3 {
				fmt.Printf("start HTTP3 (quic) server @ %s\n", httpsBase)
				utils.Log.Infof("start HTTP3 (quic) server @ %s", httpsBase)
				r.Use(func(c *gin.Context) {
					if c.Request.TLS != nil {
						port := conf.Conf.Scheme.HttpsPort
						c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", port))
					}
					c.Next()
				})
				quicSrv = &http3.Server{Addr: httpsBase, Handler: r}
				go func() {
					err := quicSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
					if err != nil && !errors.Is(err, http.ErrServerClosed) {
						utils.Log.Fatalf("failed to start http3 (quic): %s", err.Error())
					}
				}()
			}
		}
		if conf.Conf.Scheme.UnixFile != "" {
			fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
@@ -203,6 +224,15 @@ the address is defined in config file`,
					utils.Log.Fatal("HTTPS server shutdown err: ", err)
				}
			}()
			if conf.Conf.Scheme.EnableH3 {
				wg.Add(1)
				go func() {
					defer wg.Done()
					if err := quicSrv.Shutdown(ctx); err != nil {
						utils.Log.Fatal("HTTP3 (quic) server shutdown err: ", err)
					}
				}()
			}
		}
		if conf.Conf.Scheme.UnixFile != "" {
			wg.Add(1)
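The hunk above wires an optional HTTP/3 (QUIC) listener next to the existing HTTPS server and advertises it to TLS clients with an `Alt-Svc` header. A minimal standalone sketch of the same pattern using quic-go's `http3.Server` (the address and certificate paths are placeholders, not OpenList configuration):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/quic-go/quic-go/http3"
)

func main() {
	addr, certFile, keyFile := ":8443", "cert.pem", "key.pem" // placeholder paths
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Advertise the QUIC listener to HTTP/1.1 and HTTP/2 clients.
		w.Header().Set("Alt-Svc", `h3=":8443"; ma=86400`)
		fmt.Fprintln(w, "hello")
	})

	quicSrv := &http3.Server{Addr: addr, Handler: mux}
	go func() {
		// UDP listener for HTTP/3.
		if err := quicSrv.ListenAndServeTLS(certFile, keyFile); err != nil {
			panic(err)
		}
	}()
	// TCP listener for HTTP/1.1 and HTTP/2 on the same port number.
	if err := http.ListenAndServeTLS(addr, certFile, keyFile, mux); err != nil {
		panic(err)
	}
}
```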
@@ -15,10 +15,9 @@ type Addition struct {
}

var config = driver.Config{
	Name:        "115 Cloud",
	DefaultRoot: "0",
	// OnlyProxy: true,
	// NoOverwriteUpload: true,
	Name:          "115 Cloud",
	DefaultRoot:   "0",
	LinkCacheMode: driver.LinkCacheUA,
}

func init() {
@@ -131,23 +131,6 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
	}, nil
}

func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	resp, err := d.client.GetFolderInfoByPath(ctx, path)
	if err != nil {
		return nil, err
	}
	return &Obj{
		Fid:  resp.FileID,
		Fn:   resp.FileName,
		Fc:   resp.FileCategory,
		Sha1: resp.Sha1,
		Pc:   resp.PickCode,
	}, nil
}

func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
@@ -17,8 +17,9 @@ type Addition struct {
}

var config = driver.Config{
	Name:        "115 Open",
	DefaultRoot: "0",
	Name:          "115 Open",
	DefaultRoot:   "0",
	LinkCacheMode: driver.LinkCacheUA,
}

func init() {
@@ -130,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	path := dir.GetPath()
	if utils.PathEqual(path, "/") && !d.autoFlatten {
		return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough), nil
		return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil
	}
	root, sub := d.getRootAndPath(path)
	dsts, ok := d.pathMap[root]
@@ -211,9 +211,6 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
	if resultLink.ContentLength == 0 {
		resultLink.ContentLength = fi.GetSize()
	}
	if resultLink.MFile != nil {
		return &resultLink, nil
	}
	if d.DownloadConcurrency > 0 {
		resultLink.Concurrency = d.DownloadConcurrency
	}
@@ -527,4 +524,25 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
	}
}

func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
	root, sub := d.getRootAndPath(path)
	dsts, ok := d.pathMap[root]
	if !ok {
		return 0
	}
	for _, dst := range dsts {
		storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
		if err != nil {
			continue
		}
		mode := storage.Config().LinkCacheMode
		if mode == -1 {
			return storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(actualPath)
		} else {
			return mode
		}
	}
	return 0
}

var _ driver.Driver = (*Alias)(nil)
@@ -26,6 +26,7 @@ var config = driver.Config{
	NoUpload:         false,
	DefaultRoot:      "/",
	ProxyRangeOption: true,
	LinkCacheMode:    driver.LinkCacheAuto,
}

func init() {
@@ -17,7 +17,7 @@ import (
	log "github.com/sirupsen/logrus"
)

func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj {
func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
	var objs []model.Obj
	var wg sync.WaitGroup
	for _, k := range d.rootOrder {
@@ -52,7 +52,7 @@ func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj {
			defer wg.Done()
			c, cancel := context.WithTimeout(ctx, time.Second)
			defer cancel()
			details, e := op.GetStorageDetails(c, remoteDriver)
			details, e := op.GetStorageDetails(c, remoteDriver, refresh)
			if e != nil {
				if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
					log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
@@ -299,10 +299,7 @@ func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails
	total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
	used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  total - used,
		},
		DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
	}, nil
}
@@ -36,6 +36,7 @@ import (
	_ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud_open"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
@@ -55,6 +56,7 @@ import (
	_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/proton_drive"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc"
	_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc_tv"
@@ -18,8 +18,9 @@ type Addition struct {
}

var config = driver.Config{
	Name:      "BaiduPhoto",
	LocalSort: true,
	Name:          "BaiduPhoto",
	LocalSort:     true,
	LinkCacheMode: driver.LinkCacheUA,
}

func init() {
@@ -10,6 +10,7 @@ import (
	"mime/multipart"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
@@ -239,7 +240,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
	if err != nil {
		return err
	}
	err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
	err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid))
	if err != nil {
		fmt.Println("Error writing param2 to request body:", err)
		return err
@@ -260,7 +261,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
		return err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
	req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
	resps, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
@@ -258,7 +258,7 @@ type UploadDoneParam struct {
func fileToObj(f File) *model.Object {
	if len(f.Content.FolderName) > 0 {
		return &model.Object{
			ID:       fmt.Sprintf("%d", f.ID),
			ID:       strconv.Itoa(f.ID),
			Name:     f.Content.FolderName,
			Size:     0,
			Modified: time.UnixMilli(f.Inserttime),
@@ -9,6 +9,7 @@ import (
	"fmt"
	"mime/multipart"
	"net/http"
	"strconv"
	"strings"

	"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -172,7 +173,7 @@ func (d *ChaoXing) Login() (string, error) {
		return "", err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
	req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
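These ChaoXing hunks replace `fmt.Sprintf("%d", …)` with `strconv.Itoa`/`strconv.FormatInt` for integer-to-string conversion. A small illustrative comparison (not OpenList code) showing why the strconv forms are preferred when no format string is actually needed:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	n := 42
	var size int64 = 1 << 20

	// fmt.Sprintf routes through the general formatting machinery.
	a := fmt.Sprintf("%d", n)

	// strconv converts directly and is explicit about the operand type.
	b := strconv.Itoa(n)             // int -> string
	c := strconv.FormatInt(size, 10) // int64 -> string, base 10

	fmt.Println(a, b, c)
}
```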
@@ -317,7 +317,8 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
			}
			return readSeeker, nil
		}),
		SyncClosers: utils.NewSyncClosers(remoteLink),
		SyncClosers:      utils.NewSyncClosers(remoteLink),
		RequireReference: remoteLink.RequireReference,
	}, nil
}
@@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
		"Authorization":       {storeInfo.Auth},
		"Content-Type":        {"application/octet-stream"},
		"Content-Crc32":       {crc32Value},
		"Content-Length":      {fmt.Sprintf("%d", file.GetSize())},
		"Content-Length":      {strconv.FormatInt(file.GetSize(), 10)},
		"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
	}
	res, err := base.HttpClient.Do(req)
@@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
		"Authorization":       {storeInfo.Auth},
		"Content-Type":        {"application/octet-stream"},
		"Content-Crc32":       {crc32Value},
		"Content-Length":      {fmt.Sprintf("%d", size)},
		"Content-Length":      {strconv.FormatInt(size, 10)},
		"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
	}
	res, err := base.HttpClient.Do(req)
@@ -16,9 +16,10 @@ type Addition struct {
}

var config = driver.Config{
	Name:        "FebBox",
	NoUpload:    true,
	DefaultRoot: "0",
	Name:          "FebBox",
	NoUpload:      true,
	DefaultRoot:   "0",
	LinkCacheMode: driver.LinkCacheIP,
}

func init() {
@@ -31,11 +31,11 @@ type Addition struct {
}

var config = driver.Config{
	Name:          "FTP",
	LocalSort:     true,
	OnlyLinkMFile: false,
	DefaultRoot:   "/",
	NoLinkURL:     true,
	Name:        "FTP",
	LocalSort:   true,
	OnlyProxy:   true,
	DefaultRoot: "/",
	NoLinkURL:   true,
}

func init() {
drivers/halalcloud_open/common.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package halalcloudopen

import (
	"sync"
	"time"

	sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
)

var (
	slicePostErrorRetryInterval = time.Second * 120
	retryTimes                  = 5
)

type halalCommon struct {
	// *AuthService // login state
	UserInfo         *sdkUser.User // user info
	refreshTokenFunc func(token string) error
	// serv *AuthService
	configs sync.Map
}

func (m *halalCommon) GetAccessToken() (string, error) {
	value, exists := m.configs.Load("access_token")
	if !exists {
		return "", nil // return an empty string if the key does not exist
	}
	return value.(string), nil // return the stored value
}

// GetRefreshToken implements ConfigStore.
func (m *halalCommon) GetRefreshToken() (string, error) {
	value, exists := m.configs.Load("refresh_token")
	if !exists {
		return "", nil // return an empty string if the key does not exist
	}
	return value.(string), nil // return the stored value
}

// SetAccessToken implements ConfigStore.
func (m *halalCommon) SetAccessToken(token string) error {
	m.configs.Store("access_token", token)
	return nil
}

// SetRefreshToken implements ConfigStore.
func (m *halalCommon) SetRefreshToken(token string) error {
	m.configs.Store("refresh_token", token)
	if m.refreshTokenFunc != nil {
		return m.refreshTokenFunc(token)
	}
	return nil
}

// SetToken implements ConfigStore.
func (m *halalCommon) SetToken(accessToken string, refreshToken string, expiresIn int64) error {
	m.configs.Store("access_token", accessToken)
	m.configs.Store("refresh_token", refreshToken)
	m.configs.Store("expires_in", expiresIn)
	if m.refreshTokenFunc != nil {
		return m.refreshTokenFunc(refreshToken)
	}
	return nil
}

// ClearConfigs implements ConfigStore.
func (m *halalCommon) ClearConfigs() error {
	m.configs = sync.Map{} // reset the map
	return nil
}

// DeleteConfig implements ConfigStore.
func (m *halalCommon) DeleteConfig(key string) error {
	_, exists := m.configs.Load(key)
	if !exists {
		return nil // nothing to do if the key does not exist
	}
	m.configs.Delete(key) // delete the entry
	return nil
}

// GetConfig implements ConfigStore.
func (m *halalCommon) GetConfig(key string) (string, error) {
	value, exists := m.configs.Load(key)
	if !exists {
		return "", nil // return an empty string if the key does not exist
	}
	return value.(string), nil // return the stored value
}

// ListConfigs implements ConfigStore.
func (m *halalCommon) ListConfigs() (map[string]string, error) {
	configs := make(map[string]string)
	m.configs.Range(func(key, value interface{}) bool {
		configs[key.(string)] = value.(string) // add each entry to the result map
		return true                            // keep iterating
	})
	return configs, nil // return all entries
}

// SetConfig implements ConfigStore.
func (m *halalCommon) SetConfig(key string, value string) error {
	m.configs.Store(key, value) // set or update the entry
	return nil                  // nil on success
}

func NewHalalCommon() *halalCommon {
	return &halalCommon{
		configs: sync.Map{},
	}
}
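common.go implements the SDK's token/config store on top of `sync.Map`. A hedged usage sketch of that pattern as a standalone type (the `tokenStore` type here is illustrative, not the SDK's `ConfigStore` interface):

```go
package main

import (
	"fmt"
	"sync"
)

// tokenStore is a minimal concurrent key/value store in the style of halalCommon.
type tokenStore struct{ m sync.Map }

func (s *tokenStore) Set(key, value string) { s.m.Store(key, value) }

func (s *tokenStore) Get(key string) string {
	if v, ok := s.m.Load(key); ok {
		return v.(string)
	}
	return "" // absent keys read as empty, mirroring GetAccessToken/GetRefreshToken above
}

func main() {
	var s tokenStore
	s.Set("refresh_token", "example-token")
	fmt.Println(s.Get("refresh_token"), s.Get("access_token") == "")
}
```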
drivers/halalcloud_open/driver.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package halalcloudopen

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	sdkClient "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
	sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

type HalalCloudOpen struct {
	*halalCommon
	model.Storage
	Addition
	sdkClient          *sdkClient.Client
	sdkUserFileService *sdkUserFile.UserFileService
	sdkUserService     *sdkUser.UserService
	uploadThread       int
}

func (d *HalalCloudOpen) Config() driver.Config {
	return config
}

func (d *HalalCloudOpen) GetAddition() driver.Additional {
	return &d.Addition
}

var _ driver.Driver = (*HalalCloudOpen)(nil)
drivers/halalcloud_open/driver_curd_impl.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package halalcloudopen

import (
	"context"
	"strconv"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
	sdkModel "github.com/halalcloud/golang-sdk-lite/halalcloud/model"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

func (d *HalalCloudOpen) getFiles(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
	files := make([]model.Obj, 0)
	limit := int64(100)
	token := ""

	for {
		result, err := d.sdkUserFileService.List(ctx, &sdkUserFile.FileListRequest{
			Parent: &sdkUserFile.File{Path: dir.GetPath()},
			ListInfo: &sdkModel.ScanListRequest{
				Limit: strconv.FormatInt(limit, 10),
				Token: token,
			},
		})
		if err != nil {
			return nil, err
		}

		for i := 0; len(result.Files) > i; i++ {
			files = append(files, NewObjFile(result.Files[i]))
		}

		if result.ListInfo == nil || result.ListInfo.Token == "" {
			break
		}
		token = result.ListInfo.Token
	}
	return files, nil
}

func (d *HalalCloudOpen) makeDir(ctx context.Context, dir model.Obj, name string) (model.Obj, error) {
	_, err := d.sdkUserFileService.Create(ctx, &sdkUserFile.File{
		Path: dir.GetPath(),
		Name: name,
	})
	return nil, err
}

func (d *HalalCloudOpen) move(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
	oldDir := obj.GetPath()
	newDir := dir.GetPath()
	_, err := d.sdkUserFileService.Move(ctx, &sdkUserFile.BatchOperationRequest{
		Source: []*sdkUserFile.File{
			{
				Path: oldDir,
			},
		},
		Dest: &sdkUserFile.File{
			Path: newDir,
		},
	})
	return nil, err
}

func (d *HalalCloudOpen) rename(ctx context.Context, obj model.Obj, name string) (model.Obj, error) {
	_, err := d.sdkUserFileService.Rename(ctx, &sdkUserFile.File{
		Path: obj.GetPath(),
		Name: name,
	})
	return nil, err
}

func (d *HalalCloudOpen) copy(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
	id := obj.GetID()
	sourcePath := obj.GetPath()
	if len(id) > 0 {
		sourcePath = ""
	}

	destID := dir.GetID()
	destPath := dir.GetPath()
	if len(destID) > 0 {
		destPath = ""
	}
	dest := &sdkUserFile.File{
		Path:     destPath,
		Identity: destID,
	}
	_, err := d.sdkUserFileService.Copy(ctx, &sdkUserFile.BatchOperationRequest{
		Source: []*sdkUserFile.File{
			{
				Path:     sourcePath,
				Identity: id,
			},
		},
		Dest: dest,
	})
	return nil, err
}

func (d *HalalCloudOpen) remove(ctx context.Context, obj model.Obj) error {
	id := obj.GetID()
	_, err := d.sdkUserFileService.Delete(ctx, &sdkUserFile.BatchOperationRequest{
		Source: []*sdkUserFile.File{
			{
				Identity: id,
				Path:     obj.GetPath(),
			},
		},
	})
	return err
}

func (d *HalalCloudOpen) details(ctx context.Context) (*model.StorageDetails, error) {
	ret, err := d.sdkUserService.GetStatisticsAndQuota(ctx)
	if err != nil {
		return nil, err
	}
	total := uint64(ret.DiskStatisticsQuota.BytesQuota)

	free := uint64(ret.DiskStatisticsQuota.BytesFree)
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  free,
		},
	}, nil
}
drivers/halalcloud_open/driver_get_link.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package halalcloudopen

import (
	"context"
	"crypto/sha1"
	"io"
	"strconv"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
	"github.com/rclone/rclone/lib/readers"
)

func (d *HalalCloudOpen) getLink(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if args.Redirect {
		// return nil, model.ErrUnsupported
		fid := file.GetID()
		fpath := file.GetPath()
		if fid != "" {
			fpath = ""
		}
		fi, err := d.sdkUserFileService.GetDirectDownloadAddress(ctx, &sdkUserFile.DirectDownloadRequest{
			Identity: fid,
			Path:     fpath,
		})
		if err != nil {
			return nil, err
		}
		expireAt := fi.ExpireAt
		duration := time.Until(time.UnixMilli(expireAt))
		return &model.Link{
			URL:        fi.DownloadAddress,
			Expiration: &duration,
		}, nil
	}
	result, err := d.sdkUserFileService.ParseFileSlice(ctx, &sdkUserFile.File{
		Identity: file.GetID(),
		Path:     file.GetPath(),
	})
	if err != nil {
		return nil, err
	}
	fileAddrs := []*sdkUserFile.SliceDownloadInfo{}
	var addressDuration int64

	nodesNumber := len(result.RawNodes)
	nodesIndex := nodesNumber - 1
	startIndex, endIndex := 0, nodesIndex
	for nodesIndex >= 0 {
		if nodesIndex >= 200 {
			endIndex = 200
		} else {
			endIndex = nodesNumber
		}
		for ; endIndex <= nodesNumber; endIndex += 200 {
			if endIndex == 0 {
				endIndex = 1
			}
			sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
				Identity: result.RawNodes[startIndex:endIndex],
				Version:  1,
			})
			if err != nil {
				return nil, err
			}
			addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
			fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
			startIndex = endIndex
			nodesIndex -= 200
		}
	}

	size, _ := strconv.ParseInt(result.FileSize, 10, 64)
	chunks := getChunkSizes(result.Sizes)
	resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
		length := httpRange.Length
		if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
			length = size - httpRange.Start
		}
		oo := &openObject{
			ctx:     ctx,
			d:       fileAddrs,
			chunk:   []byte{},
			chunks:  chunks,
			skip:    httpRange.Start,
			sha:     result.Sha1,
			shaTemp: sha1.New(),
		}

		return readers.NewLimitedReadCloser(oo, length), nil
	}

	var duration time.Duration
	if addressDuration != 0 {
		duration = time.Until(time.UnixMilli(addressDuration))
	} else {
		duration = time.Until(time.Now().Add(time.Hour))
	}

	return &model.Link{
		RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
		Expiration:  &duration,
	}, nil
}
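getLink serves ranged reads by walking a list of fixed-size slices, skipping whole slices until it reaches the requested offset (the same skip logic `openObject.Read` applies in utils.go). A simplified, illustrative sketch of that skip-then-stream idea over an in-memory chunk list (not OpenList code):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readFrom returns a reader positioned at offset within a sequence of chunks.
func readFrom(chunks [][]byte, offset int64) io.Reader {
	i := 0
	for i < len(chunks) && offset >= int64(len(chunks[i])) {
		offset -= int64(len(chunks[i])) // skip whole chunks before the offset
		i++
	}
	var readers []io.Reader
	if i < len(chunks) {
		readers = append(readers, bytes.NewReader(chunks[i][offset:]))
		for _, c := range chunks[i+1:] {
			readers = append(readers, bytes.NewReader(c))
		}
	}
	return io.MultiReader(readers...)
}

func main() {
	chunks := [][]byte{[]byte("hello "), []byte("chunked "), []byte("world")}
	b, _ := io.ReadAll(readFrom(chunks, 8))
	fmt.Println(string(b)) // "unked world"
}
```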
drivers/halalcloud_open/driver_init.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package halalcloudopen

import (
	"context"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
	sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

func (d *HalalCloudOpen) Init(ctx context.Context) error {
	if d.uploadThread < 1 || d.uploadThread > 32 {
		d.uploadThread, d.UploadThread = 3, 3
	}
	if d.halalCommon == nil {
		d.halalCommon = &halalCommon{
			UserInfo: &sdkUser.User{},
			refreshTokenFunc: func(token string) error {
				d.Addition.RefreshToken = token
				op.MustSaveDriverStorage(d)
				return nil
			},
		}
	}
	if d.Addition.RefreshToken != "" {
		d.halalCommon.SetRefreshToken(d.Addition.RefreshToken)
	}
	timeout := d.Addition.TimeOut
	if timeout <= 0 {
		timeout = 60
	}
	host := d.Addition.Host
	if host == "" {
		host = "openapi.2dland.cn"
	}

	client := apiclient.NewClient(nil, host, d.Addition.ClientID, d.Addition.ClientSecret, d.halalCommon, apiclient.WithTimeout(time.Second*time.Duration(timeout)))
	d.sdkClient = client
	d.sdkUserFileService = sdkUserFile.NewUserFileService(client)
	d.sdkUserService = sdkUser.NewUserService(client)
	userInfo, err := d.sdkUserService.Get(ctx, &sdkUser.User{})
	if err != nil {
		return err
	}
	d.halalCommon.UserInfo = userInfo
	// Fetching the user info succeeded, which already validates the RefreshToken; no further check is needed.
	return nil
}
drivers/halalcloud_open/driver_interface.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package halalcloudopen

import (
	"context"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
)

func (d *HalalCloudOpen) Drop(ctx context.Context) error {
	return nil
}

func (d *HalalCloudOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	return d.getFiles(ctx, dir)
}

func (d *HalalCloudOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	return d.getLink(ctx, file, args)
}

func (d *HalalCloudOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	return d.makeDir(ctx, parentDir, dirName)
}

func (d *HalalCloudOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return d.move(ctx, srcObj, dstDir)
}

func (d *HalalCloudOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	return d.rename(ctx, srcObj, newName)
}

func (d *HalalCloudOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return d.copy(ctx, srcObj, dstDir)
}

func (d *HalalCloudOpen) Remove(ctx context.Context, obj model.Obj) error {
	return d.remove(ctx, obj)
}

func (d *HalalCloudOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	return d.put(ctx, dstDir, stream, up)
}

func (d *HalalCloudOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	return d.details(ctx)
}
drivers/halalcloud_open/halalcloud_upload.go (new file, 258 lines)
@@ -0,0 +1,258 @@
package halalcloudopen

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
	"github.com/ipfs/go-cid"
)

func (d *HalalCloudOpen) put(ctx context.Context, dstDir model.Obj, fileStream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	newPath := path.Join(dstDir.GetPath(), fileStream.GetName())

	uploadTask, err := d.sdkUserFileService.CreateUploadTask(ctx, &sdkUserFile.File{
		Path: newPath,
		Size: fileStream.GetSize(),
	})
	if err != nil {
		return nil, err
	}

	if uploadTask.Created {
		return nil, nil
	}

	slicesList := make([]string, 0)
	codec := uint64(0x55)
	if uploadTask.BlockCodec > 0 {
		codec = uint64(uploadTask.BlockCodec)
	}
	blockHashType := uploadTask.BlockHashType
	mhType := uint64(0x12)
	if blockHashType > 0 {
		mhType = uint64(blockHashType)
	}
	prefix := cid.Prefix{
		Codec:    codec,
		MhLength: -1,
		MhType:   mhType,
		Version:  1,
	}
	blockSize := uploadTask.BlockSize
	useSingleUpload := true
	//
	if fileStream.GetSize() <= int64(blockSize) || d.uploadThread <= 1 {
		useSingleUpload = true
	}
	// Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.
	// read file
	if useSingleUpload {
		bufferSize := int(blockSize)
		buffer := make([]byte, bufferSize)
		reader := driver.NewLimitedUploadStream(ctx, fileStream)
		teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
		// fileStream.Seek(0, os.SEEK_SET)
		for {
			n, err := teeReader.Read(buffer)
			if n > 0 {
				data := buffer[:n]
				uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
				if err != nil {
					return nil, err
				}
				slicesList = append(slicesList, uploadCid.String())
			}
			if err == io.EOF || n == 0 {
				break
			}
		}
	} else {
		// TODO: implement multipart upload, currently using single-threaded upload to ensure safety.
		bufferSize := int(blockSize)
		buffer := make([]byte, bufferSize)
		reader := driver.NewLimitedUploadStream(ctx, fileStream)
		teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
		for {
			n, err := teeReader.Read(buffer)
			if n > 0 {
				data := buffer[:n]
				uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
				if err != nil {
					return nil, err
				}
				slicesList = append(slicesList, uploadCid.String())
			}
			if err == io.EOF || n == 0 {
				break
			}
		}
	}
	newFile, err := makeFile(ctx, slicesList, uploadTask.Task, uploadTask.UploadAddress, retryTimes)
	if err != nil {
		return nil, err
	}

	return NewObjFile(newFile), nil
}

func makeFile(ctx context.Context, fileSlice []string, taskID string, uploadAddress string, retry int) (*sdkUserFile.File, error) {
	var lastError error = nil
	for range retry {
		newFile, err := doMakeFile(fileSlice, taskID, uploadAddress)
		if err == nil {
			return newFile, nil
		}
		if ctx.Err() != nil {
			return nil, err
		}
		if strings.Contains(err.Error(), "not found") {
			return nil, err
		}
		lastError = err
		time.Sleep(slicePostErrorRetryInterval)
	}
	return nil, fmt.Errorf("mk file slice failed after %d times, error: %s", retry, lastError.Error())
}

func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUserFile.File, error) {
	accessUrl := uploadAddress + "/" + taskID
	getTimeOut := time.Minute * 2
	u, err := url.Parse(accessUrl)
	if err != nil {
		return nil, err
	}
	n, _ := json.Marshal(fileSlice)
	httpRequest := http.Request{
		Method: http.MethodPost,
		URL:    u,
		Header: map[string][]string{
			"Accept":       {"application/json"},
			"Content-Type": {"application/json"},
			//"Content-Length": {strconv.Itoa(len(n))},
		},
		Body: io.NopCloser(bytes.NewReader(n)),
	}
	httpClient := http.Client{
		Timeout: getTimeOut,
	}
	httpResponse, err := httpClient.Do(&httpRequest)
	if err != nil {
		return nil, err
	}
	defer httpResponse.Body.Close()
	if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
		b, _ := io.ReadAll(httpResponse.Body)
		message := string(b)
		return nil, fmt.Errorf("mk file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
	}
	b, _ := io.ReadAll(httpResponse.Body)
	var result *sdkUserFile.File
	err = json.Unmarshal(b, &result)
	if err != nil {
		return nil, err
	}
	return result, nil
}

func postFileSlice(ctx context.Context, fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix, retry int) (cid.Cid, error) {
	var lastError error = nil
	for range retry {
		newCid, err := doPostFileSlice(fileSlice, taskID, uploadAddress, preix)
		if err == nil {
			return newCid, nil
		}
		if ctx.Err() != nil {
			return cid.Undef, err
		}
		time.Sleep(slicePostErrorRetryInterval)
		lastError = err
	}
	return cid.Undef, fmt.Errorf("upload file slice failed after %d times, error: %s", retry, lastError.Error())
}

func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix) (cid.Cid, error) {
	// 1. sum file slice
	newCid, err := preix.Sum(fileSlice)
	if err != nil {
		return cid.Undef, err
	}
	// 2. post file slice
	sliceCidString := newCid.String()
	// /{taskID}/{sliceID}
	accessUrl := uploadAddress + "/" + taskID + "/" + sliceCidString
	getTimeOut := time.Second * 30
	// get {accessUrl} in {getTimeOut}
	u, err := url.Parse(accessUrl)
	if err != nil {
		return cid.Undef, err
	}
	// header: accept: application/json
	// header: content-type: application/octet-stream
	// header: content-length: {fileSlice.length}
	// header: x-content-cid: {sliceCidString}
	// header: x-task-id: {taskID}
	httpRequest := http.Request{
		Method: http.MethodGet,
		URL:    u,
		Header: map[string][]string{
			"Accept": {"application/json"},
		},
	}
	httpClient := http.Client{
		Timeout: getTimeOut,
	}
	httpResponse, err := httpClient.Do(&httpRequest)
	if err != nil {
		return cid.Undef, err
	}
	if httpResponse.StatusCode != http.StatusOK {
		return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d", httpResponse.StatusCode)
	}
	var result bool
	b, err := io.ReadAll(httpResponse.Body)
	if err != nil {
		return cid.Undef, err
	}
	err = json.Unmarshal(b, &result)
	if err != nil {
		return cid.Undef, err
	}
	if result {
		return newCid, nil
	}

	httpRequest = http.Request{
		Method: http.MethodPost,
		URL:    u,
		Header: map[string][]string{
			"Accept":       {"application/json"},
			"Content-Type": {"application/octet-stream"},
			// "Content-Length": {strconv.Itoa(len(fileSlice))},
		},
		Body: io.NopCloser(bytes.NewReader(fileSlice)),
	}
	httpResponse, err = httpClient.Do(&httpRequest)
	if err != nil {
		return cid.Undef, err
	}
	defer httpResponse.Body.Close()
	if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
		b, _ := io.ReadAll(httpResponse.Body)
		message := string(b)
		return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
	}
	//

	return newCid, nil
}
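halalcloud_upload.go identifies each uploaded block by a CID computed from a `cid.Prefix` whose codec and multihash type come from the upload task. A minimal sketch of that hashing step with `github.com/ipfs/go-cid` (the codec/multihash values mirror the fallbacks used above; everything else is illustrative):

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

func main() {
	prefix := cid.Prefix{
		Version:  1,
		Codec:    0x55, // raw, the fallback codec used above
		MhType:   0x12, // sha2-256, the fallback multihash used above
		MhLength: -1,
	}
	data := []byte("example slice payload")
	c, err := prefix.Sum(data) // content-address the slice
	if err != nil {
		panic(err)
	}
	fmt.Println(c.String()) // the kind of string appended to slicesList above
}
```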
drivers/halalcloud_open/meta.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package halalcloudopen

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
	// Usually one of two
	driver.RootPath
	// define other
	RefreshToken string `json:"refresh_token" required:"false" help:"If using a personal API approach, the RefreshToken is not required."`
	UploadThread int    `json:"upload_thread" type:"number" default:"3" help:"1 <= thread <= 32"`

	ClientID     string `json:"client_id" required:"true" default:""`
	ClientSecret string `json:"client_secret" required:"true" default:""`
	Host         string `json:"host" required:"false" default:"openapi.2dland.cn"`
	TimeOut      int    `json:"timeout" type:"number" default:"60" help:"timeout in seconds"`
}

var config = driver.Config{
	Name:        "HalalCloudOpen",
	OnlyProxy:   false,
	DefaultRoot: "/",
	NoLinkURL:   false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &HalalCloudOpen{}
	})
}
drivers/halalcloud_open/obj_file.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package halalcloudopen

import (
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

type ObjFile struct {
	sdkFile    *sdkUserFile.File
	fileSize   int64
	modTime    time.Time
	createTime time.Time
}

func NewObjFile(f *sdkUserFile.File) model.Obj {
	ofile := &ObjFile{sdkFile: f}
	ofile.fileSize = f.Size
	modTimeTs := f.UpdateTs
	ofile.modTime = time.UnixMilli(modTimeTs)
	createTimeTs := f.CreateTs
	ofile.createTime = time.UnixMilli(createTimeTs)
	return ofile
}

func (f *ObjFile) GetSize() int64 {
	return f.fileSize
}

func (f *ObjFile) GetName() string {
	return f.sdkFile.Name
}

func (f *ObjFile) ModTime() time.Time {
	return f.modTime
}

func (f *ObjFile) IsDir() bool {
	return f.sdkFile.Dir
}

func (f *ObjFile) GetHash() utils.HashInfo {
	return utils.HashInfo{
		// TODO: support more hash types
	}
}

func (f *ObjFile) GetID() string {
	return f.sdkFile.Identity
}

func (f *ObjFile) GetPath() string {
	return f.sdkFile.Path
}

func (f *ObjFile) CreateTime() time.Time {
	return f.createTime
}
drivers/halalcloud_open/utils.go (new file, 185 lines)
@@ -0,0 +1,185 @@
package halalcloudopen

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"net/http"
	"sync"
	"time"

	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
	"github.com/ipfs/go-cid"
)

// get the next chunk
func (oo *openObject) getChunk(_ context.Context) (err error) {
	if oo.id >= len(oo.chunks) {
		return io.EOF
	}
	var chunk []byte
	err = utils.Retry(3, time.Second, func() (err error) {
		chunk, err = getRawFiles(oo.d[oo.id])
		return err
	})
	if err != nil {
		return err
	}
	oo.id++
	oo.chunk = chunk
	return nil
}

// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return 0, fmt.Errorf("read on closed file")
	}
	// Skip data at the start if requested
	for oo.skip > 0 {
		//size := 1024 * 1024
		_, size, err := oo.ChunkLocation(oo.id)
		if err != nil {
			return 0, err
		}
		if oo.skip < int64(size) {
			break
		}
		oo.id++
		oo.skip -= int64(size)
	}
	if len(oo.chunk) == 0 {
		err = oo.getChunk(oo.ctx)
		if err != nil {
			return 0, err
		}
		if oo.skip > 0 {
			oo.chunk = (oo.chunk)[oo.skip:]
			oo.skip = 0
		}
	}
	n = copy(p, oo.chunk)
	oo.shaTemp.Write(p[:n])
	oo.chunk = (oo.chunk)[n:]
	return n, nil
}

// Close closed the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return nil
	}
	// verify the SHA1 checksum
	if string(oo.shaTemp.Sum(nil)) != oo.sha {
		return fmt.Errorf("failed to finish download: SHA mismatch")
	}

	oo.closed = true
	return nil
}

func GetMD5Hash(text string) string {
	tHash := md5.Sum([]byte(text))
	return hex.EncodeToString(tHash[:])
}

type chunkSize struct {
	position int64
	size     int
}

type openObject struct {
	ctx     context.Context
	mu      sync.Mutex
	d       []*sdkUserFile.SliceDownloadInfo
	id      int
	skip    int64
	chunk   []byte
	chunks  []chunkSize
	closed  bool
	sha     string
	shaTemp hash.Hash
}

func getChunkSizes(sliceSize []*sdkUserFile.SliceSize) (chunks []chunkSize) {
	chunks = make([]chunkSize, 0)
	for _, s := range sliceSize {
		// special-case the last slice
		endIndex := s.EndIndex
		startIndex := s.StartIndex
		if endIndex == 0 {
			endIndex = startIndex
		}
		for j := startIndex; j <= endIndex; j++ {
			size := s.Size
			chunks = append(chunks, chunkSize{position: j, size: int(size)})
		}
	}
	return chunks
}

func (oo *openObject) ChunkLocation(id int) (position int64, size int, err error) {
	if id < 0 || id >= len(oo.chunks) {
		return 0, 0, errors.New("invalid arguments")
	}

	return (oo.chunks)[id].position, (oo.chunks)[id].size, nil
}

func getRawFiles(addr *sdkUserFile.SliceDownloadInfo) ([]byte, error) {
	if addr == nil {
		return nil, errors.New("addr is nil")
	}

	client := http.Client{
		Timeout: time.Duration(60 * time.Second), // Set timeout to 60 seconds
	}
	resp, err := client.Get(addr.DownloadAddress)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("bad status: %s, body: %s", resp.Status, body)
	}

	if addr.Encrypt > 0 {
		cd := uint8(addr.Encrypt)
		for idx := 0; idx < len(body); idx++ {
			body[idx] = body[idx] ^ cd
		}
	}
	storeType := addr.StoreType
	if storeType != 10 {
		sourceCid, err := cid.Decode(addr.Identity)
		if err != nil {
			return nil, err
		}
		checkCid, err := sourceCid.Prefix().Sum(body)
		if err != nil {
			return nil, err
		}
		if !checkCid.Equals(sourceCid) {
			return nil, fmt.Errorf("bad cid: %s, body: %s", checkCid.String(), body)
		}
	}

	return body, nil
}
@@ -235,6 +235,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	fullPath := file.GetPath()
	link := &model.Link{}
	var MFile model.File
	if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" {
		var buf *bytes.Buffer
		var thumbPath *string
@@ -261,9 +262,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
				return nil, err
			}
			link.ContentLength = int64(stat.Size())
			link.MFile = open
			MFile = open
		} else {
			link.MFile = bytes.NewReader(buf.Bytes())
			MFile = bytes.NewReader(buf.Bytes())
			link.ContentLength = int64(buf.Len())
		}
	} else {
@@ -272,13 +273,11 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
			return nil, err
		}
		link.ContentLength = file.GetSize()
		link.MFile = open
	}
	link.AddIfCloser(link.MFile)
	if !d.Config().OnlyLinkMFile {
		link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
		link.MFile = nil
		MFile = open
	}
	link.SyncClosers.AddIfCloser(MFile)
	link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, MFile)
	link.RequireReference = link.SyncClosers.Length() > 0
	return link, nil
}

@@ -375,18 +374,26 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
			err = os.Remove(obj.GetPath())
		}
	} else {
		if !utils.Exists(d.RecycleBinPath) {
			err = os.MkdirAll(d.RecycleBinPath, 0o755)
		objPath := obj.GetPath()
		objName := obj.GetName()
		var relPath string
		relPath, err = filepath.Rel(d.GetRootPath(), filepath.Dir(objPath))
		if err != nil {
			return err
		}
		recycleBinPath := filepath.Join(d.RecycleBinPath, relPath)
		if !utils.Exists(recycleBinPath) {
			err = os.MkdirAll(recycleBinPath, 0o755)
			if err != nil {
				return err
			}
		}

		dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
		dstPath := filepath.Join(recycleBinPath, objName)
		if utils.Exists(dstPath) {
			dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
			dstPath = filepath.Join(recycleBinPath, objName+"_"+time.Now().Format("20060102150405"))
		}
		err = os.Rename(obj.GetPath(), dstPath)
		err = os.Rename(objPath, dstPath)
	}
	if err != nil {
		return err
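The Local driver change above preserves an object's directory structure inside the recycle bin by computing its path relative to the storage root. A small sketch of that path math with `path/filepath` (the paths are made up):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	root := "/srv/storage"
	recycleBin := "/srv/recycle"
	objPath := "/srv/storage/photos/2024/cat.jpg"

	// Directory of the object relative to the storage root: "photos/2024".
	rel, err := filepath.Rel(root, filepath.Dir(objPath))
	if err != nil {
		panic(err)
	}
	// Destination keeps the original hierarchy: /srv/recycle/photos/2024/cat.jpg.
	dst := filepath.Join(recycleBin, rel, filepath.Base(objPath))
	fmt.Println(dst)
}
```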
@@ -18,12 +18,12 @@ type Addition struct {
}

var config = driver.Config{
	Name:          "Local",
	OnlyLinkMFile: false,
	LocalSort:     true,
	NoCache:       true,
	DefaultRoot:   "/",
	NoLinkURL:     true,
	Name:        "Local",
	LocalSort:   true,
	OnlyProxy:   true,
	NoCache:     true,
	DefaultRoot: "/",
	NoLinkURL:   true,
}

func init() {
@@ -36,7 +36,6 @@ type Addition struct {
var config = driver.Config{
	Name:          "MediaFire",
	LocalSort:     false,
	OnlyLinkMFile: false,
	OnlyProxy:     false,
	NoCache:       false,
	NoUpload:      false,
@@ -22,6 +22,7 @@ type Onedrive struct {
	AccessToken string
	root        *Object
	mutex       sync.Mutex
	ref         *Onedrive
}

func (d *Onedrive) Config() driver.Config {
@@ -36,10 +37,22 @@ func (d *Onedrive) Init(ctx context.Context) error {
	if d.ChunkSize < 1 {
		d.ChunkSize = 5
	}
	if d.ref != nil {
		return nil
	}
	return d.refreshToken()
}

func (d *Onedrive) InitReference(refStorage driver.Driver) error {
	if ref, ok := refStorage.(*Onedrive); ok {
		d.ref = ref
		return nil
	}
	return errs.NotSupport
}

func (d *Onedrive) Drop(ctx context.Context) error {
	d.ref = nil
	return nil
}

@@ -134,6 +134,9 @@ func (d *Onedrive) _refreshToken() error {
}

func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	if d.ref != nil {
		return d.ref.Request(url, method, callback, resp)
	}
	req := base.RestyClient.R()
	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
	if callback != nil {
@@ -110,19 +110,29 @@ func (d *OpenList) List(ctx context.Context, dir model.Obj, args model.ListArgs)

func (d *OpenList) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	var resp common.Resp[FsGetResp]
	headers := map[string]string{
		"User-Agent": base.UserAgent,
	}
	// if PassUAToUpsteam is true, then pass the user-agent to the upstream
	userAgent := base.UserAgent
	if d.PassUAToUpsteam {
		userAgent = args.Header.Get("user-agent")
		if userAgent == "" {
			userAgent = base.UserAgent
		userAgent := args.Header.Get("user-agent")
		if userAgent != "" {
			headers["User-Agent"] = base.UserAgent
		}
	}
	// if PassIPToUpsteam is true, then pass the ip address to the upstream
	if d.PassIPToUpsteam {
		ip := args.IP
		if ip != "" {
			headers["X-Forwarded-For"] = ip
			headers["X-Real-Ip"] = ip
		}
	}
	_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
		req.SetResult(&resp).SetBody(FsGetReq{
			Path:     file.GetPath(),
			Password: d.MetaPassword,
		}).SetHeader("user-agent", userAgent)
		}).SetHeaders(headers)
	})
	if err != nil {
		return nil, err
@@ -355,8 +365,15 @@ func (d *OpenList) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
	return err
}

//func (d *OpenList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}
func (d *OpenList) ResolveLinkCacheMode(_ string) driver.LinkCacheMode {
	var mode driver.LinkCacheMode
	if d.PassIPToUpsteam {
		mode |= driver.LinkCacheIP
	}
	if d.PassUAToUpsteam {
		mode |= driver.LinkCacheUA
	}
	return mode
}

var _ driver.Driver = (*OpenList)(nil)
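ResolveLinkCacheMode above composes the cache mode from independent bit flags, so a cached link can be keyed by client IP, by User-Agent, or both. A hedged illustration of that flag composition (the type and constant values are assumptions for the sketch, not OpenList's actual definitions):

```go
package main

import "fmt"

// Illustrative bit flags in the spirit of driver.LinkCacheMode.
type LinkCacheMode int

const (
	LinkCacheIP LinkCacheMode = 1 << iota // key cached links by client IP
	LinkCacheUA                           // key cached links by User-Agent
)

func resolve(passIP, passUA bool) LinkCacheMode {
	var mode LinkCacheMode
	if passIP {
		mode |= LinkCacheIP
	}
	if passUA {
		mode |= LinkCacheUA
	}
	return mode
}

func main() {
	mode := resolve(true, true)
	fmt.Println(mode&LinkCacheIP != 0, mode&LinkCacheUA != 0) // true true
}
```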
@@ -12,6 +12,7 @@ type Addition struct {
	Username          string `json:"username"`
	Password          string `json:"password"`
	Token             string `json:"token"`
	PassIPToUpsteam   bool   `json:"pass_ip_to_upsteam" default:"true"`
	PassUAToUpsteam   bool   `json:"pass_ua_to_upsteam" default:"true"`
	ForwardArchiveReq bool   `json:"forward_archive_requests" default:"true"`
}
@@ -22,6 +23,7 @@ var config = driver.Config{
	DefaultRoot:      "/",
	CheckStatus:      true,
	ProxyRangeOption: true,
	LinkCacheMode:    driver.LinkCacheAuto,
}

func init() {
drivers/proton_drive/driver.go (new file, 290 lines)
@@ -0,0 +1,290 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Да здравствует Родина))
*/

import (
    "context"
    "fmt"
    "io"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/setting"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/ProtonMail/gopenpgp/v2/crypto"
    proton_api_bridge "github.com/henrybear327/Proton-API-Bridge"
    "github.com/henrybear327/Proton-API-Bridge/common"
    "github.com/henrybear327/go-proton-api"
)

type ProtonDrive struct {
    model.Storage
    Addition

    protonDrive *proton_api_bridge.ProtonDrive

    apiBase    string
    appVersion string
    protonJson string
    userAgent  string
    sdkVersion string
    webDriveAV string

    c *proton.Client

    // userKR *crypto.KeyRing
    addrKRs  map[string]*crypto.KeyRing
    addrData map[string]proton.Address

    MainShare *proton.Share

    DefaultAddrKR *crypto.KeyRing
    MainShareKR   *crypto.KeyRing
}

func (d *ProtonDrive) Config() driver.Config {
    return config
}

func (d *ProtonDrive) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *ProtonDrive) Init(ctx context.Context) (err error) {
    defer func() {
        if r := recover(); err == nil && r != nil {
            err = fmt.Errorf("ProtonDrive initialization panic: %v", r)
        }
    }()

    if d.Email == "" {
        return fmt.Errorf("email is required")
    }
    if d.Password == "" {
        return fmt.Errorf("password is required")
    }

    config := &common.Config{
        AppVersion: d.appVersion,
        UserAgent:  d.userAgent,
        FirstLoginCredential: &common.FirstLoginCredentialData{
            Username: d.Email,
            Password: d.Password,
            TwoFA:    d.TwoFACode,
        },
        EnableCaching:              true,
        ConcurrentBlockUploadCount: setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers),
        //ConcurrentFileCryptoCount: 2,
        UseReusableLogin:     d.UseReusableLogin && d.ReusableCredential != (common.ReusableCredentialData{}),
        ReplaceExistingDraft: true,
        ReusableCredential:   &d.ReusableCredential,
    }

    protonDrive, _, err := proton_api_bridge.NewProtonDrive(
        ctx,
        config,
        d.authHandler,
        func() {},
    )

    if err != nil && config.UseReusableLogin {
        config.UseReusableLogin = false
        protonDrive, _, err = proton_api_bridge.NewProtonDrive(ctx,
            config,
            d.authHandler,
            func() {},
        )
        if err == nil {
            op.MustSaveDriverStorage(d)
        }
    }

    if err != nil {
        return fmt.Errorf("failed to initialize ProtonDrive: %w", err)
    }

    if err := d.initClient(ctx); err != nil {
        return err
    }

    d.protonDrive = protonDrive
    d.MainShare = protonDrive.MainShare
    if d.RootFolderID == "root" || d.RootFolderID == "" {
        d.RootFolderID = protonDrive.RootLink.LinkID
    }
    d.MainShareKR = protonDrive.MainShareKR
    d.DefaultAddrKR = protonDrive.DefaultAddrKR

    return nil
}

func (d *ProtonDrive) Drop(ctx context.Context) error {
    return nil
}

func (d *ProtonDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    entries, err := d.protonDrive.ListDirectory(ctx, dir.GetID())
    if err != nil {
        return nil, fmt.Errorf("failed to list directory: %w", err)
    }

    objects := make([]model.Obj, 0, len(entries))
    for _, entry := range entries {
        obj := &model.Object{
            ID:       entry.Link.LinkID,
            Name:     entry.Name,
            Size:     entry.Link.Size,
            Modified: time.Unix(entry.Link.ModifyTime, 0),
            IsFolder: entry.IsFolder,
        }
        objects = append(objects, obj)
    }

    return objects, nil
}
func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    link, err := d.getLink(ctx, file.GetID())
    if err != nil {
        return nil, fmt.Errorf("failed get file link: %+v", err)
    }
    fileSystemAttrs, err := d.protonDrive.GetActiveRevisionAttrs(ctx, link)
    if err != nil {
        return nil, fmt.Errorf("failed get file revision: %+v", err)
    }
    // size of the file after decryption
    size := fileSystemAttrs.Size

    rangeReaderFunc := func(rangeCtx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        length := httpRange.Length
        if length < 0 || httpRange.Start+length > size {
            length = size - httpRange.Start
        }
        reader, _, _, err := d.protonDrive.DownloadFile(rangeCtx, link, httpRange.Start)
        if err != nil {
            return nil, fmt.Errorf("failed start download: %+v", err)
        }
        return utils.ReadCloser{
            Reader: io.LimitReader(reader, length),
            Closer: reader,
        }, nil
    }

    expiration := time.Minute
    return &model.Link{
        RangeReader: &model.FileRangeReader{
            RangeReaderIF: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
        },
        ContentLength: size,
        Expiration:    &expiration,
    }, nil
}
func (d *ProtonDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    id, err := d.protonDrive.CreateNewFolderByID(ctx, parentDir.GetID(), dirName)
    if err != nil {
        return nil, fmt.Errorf("failed to create directory: %w", err)
    }

    newDir := &model.Object{
        ID:       id,
        Name:     dirName,
        IsFolder: true,
        Modified: time.Now(),
    }
    return newDir, nil
}

func (d *ProtonDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    return d.DirectMove(ctx, srcObj, dstDir)
}

func (d *ProtonDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    if d.protonDrive == nil {
        return nil, fmt.Errorf("protonDrive bridge is nil")
    }

    return d.DirectRename(ctx, srcObj, newName)
}

func (d *ProtonDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    if srcObj.IsDir() {
        return nil, fmt.Errorf("directory copy not supported")
    }

    srcLink, err := d.getLink(ctx, srcObj.GetID())
    if err != nil {
        return nil, err
    }

    reader, linkSize, fileSystemAttrs, err := d.protonDrive.DownloadFile(ctx, srcLink, 0)
    if err != nil {
        return nil, fmt.Errorf("failed to download source file: %w", err)
    }
    defer reader.Close()

    actualSize := linkSize
    if fileSystemAttrs != nil && fileSystemAttrs.Size > 0 {
        actualSize = fileSystemAttrs.Size
    }

    file := &stream.FileStream{
        Ctx: ctx,
        Obj: &model.Object{
            Name: srcObj.GetName(),
            // Use the accurate and real size
            Size:     actualSize,
            Modified: srcObj.ModTime(),
        },
        Reader: reader,
    }
    defer file.Close()
    return d.Put(ctx, dstDir, file, func(percentage float64) {})
}

func (d *ProtonDrive) Remove(ctx context.Context, obj model.Obj) error {
    if obj.IsDir() {
        return d.protonDrive.MoveFolderToTrashByID(ctx, obj.GetID(), false)
    } else {
        return d.protonDrive.MoveFileToTrashByID(ctx, obj.GetID())
    }
}

func (d *ProtonDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    return d.uploadFile(ctx, dstDir.GetID(), file, up)
}

func (d *ProtonDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    about, err := d.protonDrive.About(ctx)
    if err != nil {
        return nil, err
    }
    total := uint64(about.MaxSpace)
    free := total - uint64(about.UsedSpace)
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  free,
        },
    }, nil
}

var _ driver.Driver = (*ProtonDrive)(nil)
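The Link implementation above serves byte ranges by asking the bridge to start the download at an offset and then truncating client-side with io.LimitReader. A minimal, self-contained sketch of that pattern, where strings.Reader stands in for the decrypted download stream (illustration only):

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    // strings.Reader stands in for the stream returned by DownloadFile.
    full := strings.NewReader("0123456789abcdef")
    start, length := int64(4), int64(6)

    // Analogue of DownloadFile(ctx, link, httpRange.Start): position at offset.
    if _, err := full.Seek(start, io.SeekStart); err != nil {
        panic(err)
    }

    // The requested length is enforced client-side, as Link does with
    // io.LimitReader around the offset reader.
    part, err := io.ReadAll(io.LimitReader(full, length))
    if err != nil {
        panic(err)
    }
    fmt.Println(string(part)) // 456789
}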
drivers/proton_drive/meta.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Да здравствует Родина))
*/

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/henrybear327/Proton-API-Bridge/common"
)

type Addition struct {
    driver.RootID
    Email              string `json:"email" required:"true" type:"string"`
    Password           string `json:"password" required:"true" type:"string"`
    TwoFACode          string `json:"two_fa_code" type:"string"`
    ChunkSize          int64  `json:"chunk_size" type:"number" default:"100"`
    UseReusableLogin   bool   `json:"use_reusable_login" type:"bool" default:"true" help:"Use reusable login credentials instead of username/password"`
    ReusableCredential common.ReusableCredentialData
}

var config = driver.Config{
    Name:        "ProtonDrive",
    LocalSort:   true,
    OnlyProxy:   true,
    DefaultRoot: "root",
    NoLinkURL:   true,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &ProtonDrive{
            Addition: Addition{
                UseReusableLogin: true,
            },
            apiBase:    "https://drive.proton.me/api",
            appVersion: "windows-drive@1.11.3+rclone+proton",
            protonJson: "application/vnd.protonmail.v1+json",
            sdkVersion: "js@0.3.0",
            userAgent:  "ProtonDrive/v1.70.0 (Windows NT 10.0.22000; Win64; x64)",
            webDriveAV: "web-drive@5.2.0+0f69f7a8",
        }
    })
}
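For illustration only, a runnable sketch of what a stored ProtonDrive config looks like with the JSON keys declared in the Addition struct above; the type below is a local mirror and the values are placeholders, not working credentials:

package main

import (
    "encoding/json"
    "fmt"
)

// protonDriveAddition is a local mirror of the JSON-tagged fields in Addition,
// used only to show the shape of a stored driver config.
type protonDriveAddition struct {
    Email            string `json:"email"`
    Password         string `json:"password"`
    TwoFACode        string `json:"two_fa_code"`
    ChunkSize        int64  `json:"chunk_size"`
    UseReusableLogin bool   `json:"use_reusable_login"`
}

func main() {
    // Placeholder values, not working credentials.
    raw := `{"email":"user@example.com","password":"app-password","chunk_size":100,"use_reusable_login":true}`
    var add protonDriveAddition
    if err := json.Unmarshal([]byte(raw), &add); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", add)
}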
drivers/proton_drive/types.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Да здравствует Родина))
*/

type MoveRequest struct {
    ParentLinkID            string  `json:"ParentLinkID"`
    NodePassphrase          string  `json:"NodePassphrase"`
    NodePassphraseSignature *string `json:"NodePassphraseSignature"`
    Name                    string  `json:"Name"`
    NameSignatureEmail      string  `json:"NameSignatureEmail"`
    Hash                    string  `json:"Hash"`
    OriginalHash            string  `json:"OriginalHash"`
    ContentHash             *string `json:"ContentHash"` // Maybe null
}

type RenameRequest struct {
    Name               string `json:"Name"`               // PGP encrypted name
    NameSignatureEmail string `json:"NameSignatureEmail"` // User's signature email
    Hash               string `json:"Hash"`               // New name hash
    OriginalHash       string `json:"OriginalHash"`       // Current name hash
}

type RenameResponse struct {
    Code int `json:"Code"`
}
drivers/proton_drive/util.go (new file, 670 lines)
@@ -0,0 +1,670 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Да здравствует Родина))
*/

import (
    "bufio"
    "bytes"
    "context"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/ProtonMail/gopenpgp/v2/crypto"
    "github.com/henrybear327/go-proton-api"
)

func (d *ProtonDrive) uploadFile(ctx context.Context, parentLinkID string, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    _, err := d.getLink(ctx, parentLinkID)
    if err != nil {
        return nil, fmt.Errorf("failed to get parent link: %w", err)
    }

    var reader io.Reader
    // Use buffered reader with larger buffer for better performance
    var bufferSize int

    // File > 100MB (default)
    if file.GetSize() > d.ChunkSize*1024*1024 {
        // 256KB for large files
        bufferSize = 256 * 1024
        // File > 10MB
    } else if file.GetSize() > 10*1024*1024 {
        // 128KB for medium files
        bufferSize = 128 * 1024
    } else {
        // 64KB for small files
        bufferSize = 64 * 1024
    }

    // reader = bufio.NewReader(file)
    reader = bufio.NewReaderSize(file, bufferSize)
    reader = &driver.ReaderUpdatingProgress{
        Reader: &stream.SimpleReaderWithSize{
            Reader: reader,
            Size:   file.GetSize(),
        },
        UpdateProgress: up,
    }
    reader = driver.NewLimitedUploadStream(ctx, reader)

    id, _, err := d.protonDrive.UploadFileByReader(ctx, parentLinkID, file.GetName(), file.ModTime(), reader, 0)
    if err != nil {
        return nil, fmt.Errorf("failed to upload file: %w", err)
    }

    return &model.Object{
        ID:       id,
        Name:     file.GetName(),
        Size:     file.GetSize(),
        Modified: file.ModTime(),
        IsFolder: false,
    }, nil
}
func (d *ProtonDrive) encryptFileName(ctx context.Context, name string, parentLinkID string) (string, error) {
    parentLink, err := d.getLink(ctx, parentLinkID)
    if err != nil {
        return "", fmt.Errorf("failed to get parent link: %w", err)
    }

    // Get parent node keyring
    parentNodeKR, err := d.getLinkKR(ctx, parentLink)
    if err != nil {
        return "", fmt.Errorf("failed to get parent keyring: %w", err)
    }

    // Temporary file (request)
    tempReq := proton.CreateFileReq{
        SignatureAddress: d.MainShare.Creator,
    }

    // Encrypt the filename
    err = tempReq.SetName(name, d.DefaultAddrKR, parentNodeKR)
    if err != nil {
        return "", fmt.Errorf("failed to encrypt filename: %w", err)
    }

    return tempReq.Name, nil
}

func (d *ProtonDrive) generateFileNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
    parentLink, err := d.getLink(ctx, parentLinkID)
    if err != nil {
        return "", fmt.Errorf("failed to get parent link: %w", err)
    }

    // Get parent node keyring
    parentNodeKR, err := d.getLinkKR(ctx, parentLink)
    if err != nil {
        return "", fmt.Errorf("failed to get parent keyring: %w", err)
    }

    signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
    if err != nil {
        return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
    }

    parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
    if err != nil {
        return "", fmt.Errorf("failed to get parent hash key: %w", err)
    }

    nameHash, err := proton.GetNameHash(name, parentHashKey)
    if err != nil {
        return "", fmt.Errorf("failed to generate name hash: %w", err)
    }

    return nameHash, nil
}

func (d *ProtonDrive) getOriginalNameHash(link *proton.Link) (string, error) {
    if link == nil {
        return "", fmt.Errorf("link cannot be nil")
    }

    if link.Hash == "" {
        return "", fmt.Errorf("link hash is empty")
    }

    return link.Hash, nil
}

func (d *ProtonDrive) getLink(ctx context.Context, linkID string) (*proton.Link, error) {
    if linkID == "" {
        return nil, fmt.Errorf("linkID cannot be empty")
    }

    link, err := d.c.GetLink(ctx, d.MainShare.ShareID, linkID)
    if err != nil {
        return nil, err
    }

    return &link, nil
}

func (d *ProtonDrive) getLinkKR(ctx context.Context, link *proton.Link) (*crypto.KeyRing, error) {
    if link == nil {
        return nil, fmt.Errorf("link cannot be nil")
    }

    // Root Link or Root Dir
    if link.ParentLinkID == "" {
        signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
        if err != nil {
            return nil, err
        }
        return link.GetKeyRing(d.MainShareKR, signatureVerificationKR)
    }

    // Get parent keyring recursively
    parentLink, err := d.getLink(ctx, link.ParentLinkID)
    if err != nil {
        return nil, err
    }

    parentNodeKR, err := d.getLinkKR(ctx, parentLink)
    if err != nil {
        return nil, err
    }

    signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
    if err != nil {
        return nil, err
    }

    return link.GetKeyRing(parentNodeKR, signatureVerificationKR)
}

var (
    ErrKeyPassOrSaltedKeyPassMustBeNotNil = errors.New("either keyPass or saltedKeyPass must be not nil")
    ErrFailedToUnlockUserKeys             = errors.New("failed to unlock user keys")
)
func getAccountKRs(ctx context.Context, c *proton.Client, keyPass, saltedKeyPass []byte) (*crypto.KeyRing, map[string]*crypto.KeyRing, map[string]proton.Address, []byte, error) {
    user, err := c.GetUser(ctx)
    if err != nil {
        return nil, nil, nil, nil, err
    }
    // fmt.Printf("user %#v", user)

    addrsArr, err := c.GetAddresses(ctx)
    if err != nil {
        return nil, nil, nil, nil, err
    }
    // fmt.Printf("addr %#v", addr)

    if saltedKeyPass == nil {
        if keyPass == nil {
            return nil, nil, nil, nil, ErrKeyPassOrSaltedKeyPassMustBeNotNil
        }

        // Due to limitations, salts are stored using cacheCredentialToFile
        salts, err := c.GetSalts(ctx)
        if err != nil {
            return nil, nil, nil, nil, err
        }
        // fmt.Printf("salts %#v", salts)

        saltedKeyPass, err = salts.SaltForKey(keyPass, user.Keys.Primary().ID)
        if err != nil {
            return nil, nil, nil, nil, err
        }
        // fmt.Printf("saltedKeyPass ok")
    }

    userKR, addrKRs, err := proton.Unlock(user, addrsArr, saltedKeyPass, nil)
    if err != nil {
        return nil, nil, nil, nil, err
    } else if userKR.CountDecryptionEntities() == 0 {
        return nil, nil, nil, nil, ErrFailedToUnlockUserKeys
    }

    addrs := make(map[string]proton.Address)
    for _, addr := range addrsArr {
        addrs[addr.Email] = addr
    }

    return userKR, addrKRs, addrs, saltedKeyPass, nil
}

func (d *ProtonDrive) getSignatureVerificationKeyring(emailAddresses []string, verificationAddrKRs ...*crypto.KeyRing) (*crypto.KeyRing, error) {
    ret, err := crypto.NewKeyRing(nil)
    if err != nil {
        return nil, err
    }

    for _, emailAddress := range emailAddresses {
        if addr, ok := d.addrData[emailAddress]; ok {
            if addrKR, exists := d.addrKRs[addr.ID]; exists {
                err = d.addKeysFromKR(ret, addrKR)
                if err != nil {
                    return nil, err
                }
            }
        }
    }

    for _, kr := range verificationAddrKRs {
        err = d.addKeysFromKR(ret, kr)
        if err != nil {
            return nil, err
        }
    }

    if ret.CountEntities() == 0 {
        return nil, fmt.Errorf("no keyring for signature verification")
    }

    return ret, nil
}

func (d *ProtonDrive) addKeysFromKR(kr *crypto.KeyRing, newKRs ...*crypto.KeyRing) error {
    for i := range newKRs {
        for _, key := range newKRs[i].GetKeys() {
            err := kr.AddKey(key)
            if err != nil {
                return err
            }
        }
    }
    return nil
}
func (d *ProtonDrive) DirectRename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    // fmt.Printf("DEBUG DirectRename: path=%s, newName=%s", srcObj.GetPath(), newName)

    if d.MainShare == nil || d.DefaultAddrKR == nil {
        return nil, fmt.Errorf("missing required fields: MainShare=%v, DefaultAddrKR=%v",
            d.MainShare != nil, d.DefaultAddrKR != nil)
    }

    if d.protonDrive == nil {
        return nil, fmt.Errorf("protonDrive bridge is nil")
    }

    srcLink, err := d.getLink(ctx, srcObj.GetID())
    if err != nil {
        return nil, fmt.Errorf("failed to find source: %w", err)
    }

    parentLinkID := srcLink.ParentLinkID
    if parentLinkID == "" {
        return nil, fmt.Errorf("cannot rename root folder")
    }

    encryptedName, err := d.encryptFileName(ctx, newName, parentLinkID)
    if err != nil {
        return nil, fmt.Errorf("failed to encrypt filename: %w", err)
    }

    newHash, err := d.generateFileNameHash(ctx, newName, parentLinkID)
    if err != nil {
        return nil, fmt.Errorf("failed to generate new hash: %w", err)
    }

    originalHash, err := d.getOriginalNameHash(srcLink)
    if err != nil {
        return nil, fmt.Errorf("failed to get original hash: %w", err)
    }

    renameReq := RenameRequest{
        Name:               encryptedName,
        NameSignatureEmail: d.MainShare.Creator,
        Hash:               newHash,
        OriginalHash:       originalHash,
    }

    err = d.executeRenameAPI(ctx, srcLink.LinkID, renameReq)
    if err != nil {
        return nil, fmt.Errorf("rename API call failed: %w", err)
    }

    return &model.Object{
        ID:       srcLink.LinkID,
        Name:     newName,
        Size:     srcObj.GetSize(),
        Modified: srcObj.ModTime(),
        IsFolder: srcObj.IsDir(),
    }, nil
}

func (d *ProtonDrive) executeRenameAPI(ctx context.Context, linkID string, req RenameRequest) error {
    renameURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/rename",
        d.MainShare.VolumeID, linkID)

    reqBody, err := json.Marshal(req)
    if err != nil {
        return fmt.Errorf("failed to marshal rename request: %w", err)
    }

    httpReq, err := http.NewRequestWithContext(ctx, "PUT", renameURL, bytes.NewReader(reqBody))
    if err != nil {
        return fmt.Errorf("failed to create HTTP request: %w", err)
    }

    httpReq.Header.Set("Content-Type", "application/json")
    httpReq.Header.Set("Accept", d.protonJson)
    httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
    httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
    httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
    httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)

    client := &http.Client{}
    resp, err := client.Do(httpReq)
    if err != nil {
        return fmt.Errorf("failed to execute rename request: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("rename failed with status %d", resp.StatusCode)
    }

    var renameResp RenameResponse
    if err := json.NewDecoder(resp.Body).Decode(&renameResp); err != nil {
        return fmt.Errorf("failed to decode rename response: %w", err)
    }

    if renameResp.Code != 1000 {
        return fmt.Errorf("rename failed with code %d", renameResp.Code)
    }

    return nil
}
func (d *ProtonDrive) executeMoveAPI(ctx context.Context, linkID string, req MoveRequest) error {
    // fmt.Printf("DEBUG Move Request - Name: %s\n", req.Name)
    // fmt.Printf("DEBUG Move Request - Hash: %s\n", req.Hash)
    // fmt.Printf("DEBUG Move Request - OriginalHash: %s\n", req.OriginalHash)
    // fmt.Printf("DEBUG Move Request - ParentLinkID: %s\n", req.ParentLinkID)

    // fmt.Printf("DEBUG Move Request - Name length: %d\n", len(req.Name))
    // fmt.Printf("DEBUG Move Request - NameSignatureEmail: %s\n", req.NameSignatureEmail)
    // fmt.Printf("DEBUG Move Request - ContentHash: %v\n", req.ContentHash)
    // fmt.Printf("DEBUG Move Request - NodePassphrase length: %d\n", len(req.NodePassphrase))
    // fmt.Printf("DEBUG Move Request - NodePassphraseSignature length: %d\n", len(req.NodePassphraseSignature))

    // fmt.Printf("DEBUG Move Request - SrcLinkID: %s\n", linkID)
    // fmt.Printf("DEBUG Move Request - DstParentLinkID: %s\n", req.ParentLinkID)
    // fmt.Printf("DEBUG Move Request - ShareID: %s\n", d.MainShare.ShareID)

    srcLink, _ := d.getLink(ctx, linkID)
    if srcLink != nil && srcLink.ParentLinkID == req.ParentLinkID {
        return fmt.Errorf("cannot move to same parent directory")
    }

    moveURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/move",
        d.MainShare.VolumeID, linkID)

    reqBody, err := json.Marshal(req)
    if err != nil {
        return fmt.Errorf("failed to marshal move request: %w", err)
    }

    httpReq, err := http.NewRequestWithContext(ctx, "PUT", moveURL, bytes.NewReader(reqBody))
    if err != nil {
        return fmt.Errorf("failed to create HTTP request: %w", err)
    }

    httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)
    httpReq.Header.Set("Accept", d.protonJson)
    httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
    httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
    httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
    httpReq.Header.Set("Content-Type", "application/json")

    client := &http.Client{}
    resp, err := client.Do(httpReq)
    if err != nil {
        return fmt.Errorf("failed to execute move request: %w", err)
    }
    defer resp.Body.Close()

    var moveResp RenameResponse
    if err := json.NewDecoder(resp.Body).Decode(&moveResp); err != nil {
        return fmt.Errorf("failed to decode move response: %w", err)
    }

    if moveResp.Code != 1000 {
        return fmt.Errorf("move operation failed with code: %d", moveResp.Code)
    }

    return nil
}

func (d *ProtonDrive) DirectMove(ctx context.Context, srcObj model.Obj, dstDir model.Obj) (model.Obj, error) {
    // fmt.Printf("DEBUG DirectMove: srcPath=%s, dstPath=%s", srcObj.GetPath(), dstDir.GetPath())

    srcLink, err := d.getLink(ctx, srcObj.GetID())
    if err != nil {
        return nil, fmt.Errorf("failed to find source: %w", err)
    }

    dstParentLinkID := dstDir.GetID()

    if srcObj.IsDir() {
        // Check if destination is a descendant of source
        if err := d.checkCircularMove(ctx, srcLink.LinkID, dstParentLinkID); err != nil {
            return nil, err
        }
    }

    // Encrypt the filename for the new location
    encryptedName, err := d.encryptFileName(ctx, srcObj.GetName(), dstParentLinkID)
    if err != nil {
        return nil, fmt.Errorf("failed to encrypt filename: %w", err)
    }

    newHash, err := d.generateNameHash(ctx, srcObj.GetName(), dstParentLinkID)
    if err != nil {
        return nil, fmt.Errorf("failed to generate new hash: %w", err)
    }

    originalHash, err := d.getOriginalNameHash(srcLink)
    if err != nil {
        return nil, fmt.Errorf("failed to get original hash: %w", err)
    }

    // Re-encrypt node passphrase for new parent context
    reencryptedPassphrase, err := d.reencryptNodePassphrase(ctx, srcLink, dstParentLinkID)
    if err != nil {
        return nil, fmt.Errorf("failed to re-encrypt node passphrase: %w", err)
    }

    moveReq := MoveRequest{
        ParentLinkID:       dstParentLinkID,
        NodePassphrase:     reencryptedPassphrase,
        Name:               encryptedName,
        NameSignatureEmail: d.MainShare.Creator,
        Hash:               newHash,
        OriginalHash:       originalHash,
        ContentHash:        nil,

        // *** Causes rejection ***
        /* NodePassphraseSignature: srcLink.NodePassphraseSignature, */
    }

    //fmt.Printf("DEBUG MoveRequest validation:\n")
    //fmt.Printf(" Name length: %d\n", len(moveReq.Name))
    //fmt.Printf(" Hash: %s\n", moveReq.Hash)
    //fmt.Printf(" OriginalHash: %s\n", moveReq.OriginalHash)
    //fmt.Printf(" NodePassphrase length: %d\n", len(moveReq.NodePassphrase))
    /* fmt.Printf(" NodePassphraseSignature length: %d\n", len(moveReq.NodePassphraseSignature)) */
    //fmt.Printf(" NameSignatureEmail: %s\n", moveReq.NameSignatureEmail)

    err = d.executeMoveAPI(ctx, srcLink.LinkID, moveReq)
    if err != nil {
        return nil, fmt.Errorf("move API call failed: %w", err)
    }

    return &model.Object{
        ID:       srcLink.LinkID,
        Name:     srcObj.GetName(),
        Size:     srcObj.GetSize(),
        Modified: srcObj.ModTime(),
        IsFolder: srcObj.IsDir(),
    }, nil
}
func (d *ProtonDrive) reencryptNodePassphrase(ctx context.Context, srcLink *proton.Link, dstParentLinkID string) (string, error) {
    // Get source parent link with metadata
    srcParentLink, err := d.getLink(ctx, srcLink.ParentLinkID)
    if err != nil {
        return "", fmt.Errorf("failed to get source parent link: %w", err)
    }

    // Get source parent keyring using link object
    srcParentKR, err := d.getLinkKR(ctx, srcParentLink)
    if err != nil {
        return "", fmt.Errorf("failed to get source parent keyring: %w", err)
    }

    // Get destination parent link with metadata
    dstParentLink, err := d.getLink(ctx, dstParentLinkID)
    if err != nil {
        return "", fmt.Errorf("failed to get destination parent link: %w", err)
    }

    // Get destination parent keyring using link object
    dstParentKR, err := d.getLinkKR(ctx, dstParentLink)
    if err != nil {
        return "", fmt.Errorf("failed to get destination parent keyring: %w", err)
    }

    // Re-encrypt the node passphrase from source parent context to destination parent context
    reencryptedPassphrase, err := reencryptKeyPacket(srcParentKR, dstParentKR, d.DefaultAddrKR, srcLink.NodePassphrase)
    if err != nil {
        return "", fmt.Errorf("failed to re-encrypt key packet: %w", err)
    }

    return reencryptedPassphrase, nil
}

func (d *ProtonDrive) generateNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
    parentLink, err := d.getLink(ctx, parentLinkID)
    if err != nil {
        return "", fmt.Errorf("failed to get parent link: %w", err)
    }

    // Get parent node keyring
    parentNodeKR, err := d.getLinkKR(ctx, parentLink)
    if err != nil {
        return "", fmt.Errorf("failed to get parent keyring: %w", err)
    }

    // Get signature verification keyring
    signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
    if err != nil {
        return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
    }

    parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
    if err != nil {
        return "", fmt.Errorf("failed to get parent hash key: %w", err)
    }

    nameHash, err := proton.GetNameHash(name, parentHashKey)
    if err != nil {
        return "", fmt.Errorf("failed to generate name hash: %w", err)
    }

    return nameHash, nil
}

func reencryptKeyPacket(srcKR, dstKR, _ *crypto.KeyRing, passphrase string) (string, error) { // addrKR (3)
    oldSplitMessage, err := crypto.NewPGPSplitMessageFromArmored(passphrase)
    if err != nil {
        return "", err
    }

    sessionKey, err := srcKR.DecryptSessionKey(oldSplitMessage.KeyPacket)
    if err != nil {
        return "", err
    }

    newKeyPacket, err := dstKR.EncryptSessionKey(sessionKey)
    if err != nil {
        return "", err
    }

    newSplitMessage := crypto.NewPGPSplitMessage(newKeyPacket, oldSplitMessage.DataPacket)

    return newSplitMessage.GetArmored()
}

func (d *ProtonDrive) checkCircularMove(ctx context.Context, srcLinkID, dstParentLinkID string) error {
    currentLinkID := dstParentLinkID

    for currentLinkID != "" && currentLinkID != d.RootFolderID {
        if currentLinkID == srcLinkID {
            return fmt.Errorf("cannot move folder into itself or its subfolder")
        }

        currentLink, err := d.getLink(ctx, currentLinkID)
        if err != nil {
            return err
        }
        currentLinkID = currentLink.ParentLinkID
    }

    return nil
}

func (d *ProtonDrive) authHandler(auth proton.Auth) {
    if auth.AccessToken != d.ReusableCredential.AccessToken || auth.RefreshToken != d.ReusableCredential.RefreshToken {
        d.ReusableCredential.UID = auth.UID
        d.ReusableCredential.AccessToken = auth.AccessToken
        d.ReusableCredential.RefreshToken = auth.RefreshToken

        if err := d.initClient(context.Background()); err != nil {
            fmt.Printf("ProtonDrive: failed to reinitialize client after auth refresh: %v\n", err)
        }

        op.MustSaveDriverStorage(d)
    }
}

func (d *ProtonDrive) initClient(ctx context.Context) error {
    clientOptions := []proton.Option{
        proton.WithAppVersion(d.appVersion),
        proton.WithUserAgent(d.userAgent),
    }
    manager := proton.New(clientOptions...)
    d.c = manager.NewClient(d.ReusableCredential.UID, d.ReusableCredential.AccessToken, d.ReusableCredential.RefreshToken)

    saltedKeyPassBytes, err := base64.StdEncoding.DecodeString(d.ReusableCredential.SaltedKeyPass)
    if err != nil {
        return fmt.Errorf("failed to decode salted key pass: %w", err)
    }

    _, addrKRs, addrs, _, err := getAccountKRs(ctx, d.c, nil, saltedKeyPassBytes)
    if err != nil {
        return fmt.Errorf("failed to get account keyrings: %w", err)
    }

    d.addrKRs = addrKRs
    d.addrData = addrs

    return nil
}
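checkCircularMove above walks parent links upward from the destination to refuse moving a folder into its own subtree. A self-contained sketch of that ancestor walk, with a plain map standing in for the getLink / ParentLinkID lookups (assumption for illustration only):

package main

import (
    "errors"
    "fmt"
)

// checkCircular reproduces the ancestor walk: follow parent IDs upward from
// the destination and reject the move if the source folder is encountered
// before the root.
func checkCircular(parents map[string]string, rootID, srcID, dstParentID string) error {
    for cur := dstParentID; cur != "" && cur != rootID; cur = parents[cur] {
        if cur == srcID {
            return errors.New("cannot move folder into itself or its subfolder")
        }
    }
    return nil
}

func main() {
    parents := map[string]string{"a": "root", "b": "a", "c": "b"}
    fmt.Println(checkCircular(parents, "root", "b", "c")) // error: c sits under b
    fmt.Println(checkCircular(parents, "root", "c", "a")) // <nil>
}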
@@ -69,15 +69,10 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
        Limiter: stream.ServerDownloadLimit,
        Ctx:     ctx,
    }
    if !d.Config().OnlyLinkMFile {
        return &model.Link{
            RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
            SyncClosers: utils.NewSyncClosers(remoteFile),
        }, nil
    }
    return &model.Link{
        MFile:            mFile,
        SyncClosers:      utils.NewSyncClosers(remoteFile),
        RangeReader:      stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
        SyncClosers:      utils.NewSyncClosers(remoteFile),
        RequireReference: true,
    }, nil
}

@@ -16,12 +16,12 @@ type Addition struct {
}

var config = driver.Config{
    Name:          "SFTP",
    LocalSort:     true,
    OnlyLinkMFile: false,
    DefaultRoot:   "/",
    CheckStatus:   true,
    NoLinkURL:     true,
    Name:        "SFTP",
    LocalSort:   true,
    OnlyProxy:   true,
    DefaultRoot: "/",
    CheckStatus: true,
    NoLinkURL:   true,
}

func init() {

@@ -86,15 +86,10 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
        Limiter: stream.ServerDownloadLimit,
        Ctx:     ctx,
    }
    if !d.Config().OnlyLinkMFile {
        return &model.Link{
            RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
            SyncClosers: utils.NewSyncClosers(remoteFile),
        }, nil
    }
    return &model.Link{
        MFile:            mFile,
        SyncClosers:      utils.NewSyncClosers(remoteFile),
        RangeReader:      stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
        SyncClosers:      utils.NewSyncClosers(remoteFile),
        RequireReference: true,
    }, nil
}

@@ -14,12 +14,12 @@ type Addition struct {
}

var config = driver.Config{
    Name:          "SMB",
    LocalSort:     true,
    OnlyLinkMFile: false,
    DefaultRoot:   ".",
    NoCache:       true,
    NoLinkURL:     true,
    Name:        "SMB",
    LocalSort:   true,
    OnlyProxy:   true,
    DefaultRoot: ".",
    NoCache:     true,
    NoLinkURL:   true,
}

func init() {
@@ -12,6 +12,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/fs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
)
@@ -156,7 +157,7 @@ func (d *Strm) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
    if file.GetID() == "strm" {
        link := d.getLink(ctx, file.GetPath())
        return &model.Link{
            MFile:       strings.NewReader(link),
            RangeReader: stream.GetRangeReaderFromMFile(int64(len(link)), strings.NewReader(link)),
        }, nil
    }
    // ftp,s3

@@ -15,14 +15,13 @@ type Addition struct {
}

var config = driver.Config{
    Name:          "Strm",
    LocalSort:     true,
    NoCache:       true,
    NoUpload:      true,
    DefaultRoot:   "/",
    OnlyLinkMFile: true,
    OnlyProxy:     true,
    NoLinkURL:     true,
    Name:        "Strm",
    LocalSort:   true,
    OnlyProxy:   true,
    NoCache:     true,
    NoUpload:    true,
    DefaultRoot: "/",
    NoLinkURL:   true,
}

func init() {

@@ -164,7 +164,7 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
    if err := d.singleUploadRequest(fileId, func(req *resty.Request) {
        uploadParams := map[string]string{
            "partName": func() string {
                digits := len(fmt.Sprintf("%d", totalParts))
                digits := len(strconv.Itoa(totalParts))
                return file.GetName() + fmt.Sprintf(".%0*d", digits, 1)
            }(),
            "partNo": strconv.Itoa(1),
@@ -333,7 +333,7 @@ func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task ch
    err := d.singleUploadRequest(fileId, func(req *resty.Request) {
        uploadParams := map[string]string{
            "partName": func() string {
                digits := len(fmt.Sprintf("%d", totalParts))
                digits := len(strconv.Itoa(totalParts))
                return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx)
            }(),
            "partNo": strconv.Itoa(task.chunkIdx),

@@ -16,7 +16,6 @@ type Addition struct {
var config = driver.Config{
    Name:          "Template",
    LocalSort:     false,
    OnlyLinkMFile: false,
    OnlyProxy:     false,
    NoCache:       false,
    NoUpload:      false,

@@ -6,6 +6,7 @@ import (
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
    "time"

@@ -840,7 +841,7 @@ func (xc *XunLeiBrowserCommon) OfflineList(ctx context.Context, nextPageToken st
func (xc *XunLeiBrowserCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string) error {
    queryParams := map[string]string{
        "task_ids": strings.Join(taskIDs, ","),
        "_t":       fmt.Sprintf("%d", time.Now().UnixMilli()),
        "_t":       strconv.FormatInt(time.Now().UnixMilli(), 10),
    }
    if xc.UseFluentPlay {
        queryParams["space"] = ThunderBrowserDriveFluentPlayFolderType
@@ -2,11 +2,11 @@ package virtual

import (
    "context"
    "io"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils/random"
)

@@ -42,16 +42,14 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
    return res, nil
}

type DummyMFile struct {
    io.Reader
}
type DummyMFile struct{}

func (f DummyMFile) Read(p []byte) (n int, err error) {
    return f.Reader.Read(p)
    return random.Rand.Read(p)
}

func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
    return f.Reader.Read(p)
    return random.Rand.Read(p)
}

func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
@@ -60,7 +58,7 @@ func (DummyMFile) Seek(offset int64, whence int) (int64, error) {

func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    return &model.Link{
        MFile:       DummyMFile{Reader: random.Rand},
        RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), DummyMFile{}),
    }, nil
}

@@ -14,11 +14,11 @@ type Addition struct {
}

var config = driver.Config{
    Name:          "Virtual",
    OnlyLinkMFile: true,
    LocalSort:     true,
    NeedMs:        true,
    NoLinkURL:     true,
    Name:      "Virtual",
    LocalSort: true,
    OnlyProxy: true,
    NeedMs:    true,
    NoLinkURL: true,
}

func init() {
go.mod (33 changed lines)
@@ -9,6 +9,7 @@ require (
    github.com/OpenListTeam/sftpd-openlist v1.0.1
    github.com/OpenListTeam/tache v0.2.0
    github.com/OpenListTeam/times v0.1.0
    github.com/OpenListTeam/wazero-wasip2 v0.0.0-20251015145605-cd3a2c9131d9
    github.com/OpenListTeam/wopan-sdk-go v0.1.5
    github.com/ProtonMail/go-crypto v1.3.0
    github.com/SheltonZhu/115driver v1.1.1
@@ -38,6 +39,7 @@ require (
    github.com/golang-jwt/jwt/v4 v4.5.2
    github.com/google/uuid v1.6.0
    github.com/gorilla/websocket v1.5.3
    github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499
    github.com/hekmon/transmissionrpc/v3 v3.0.0
    github.com/ipfs/go-ipfs-api v0.7.0
    github.com/itsHenry35/gofakes3 v0.0.8
@@ -52,6 +54,7 @@ require (
    github.com/pkg/errors v0.9.1
    github.com/pkg/sftp v1.13.9
    github.com/pquerna/otp v1.5.0
    github.com/quic-go/quic-go v0.54.1
    github.com/rclone/rclone v1.70.3
    github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
    github.com/shirou/gopsutil/v4 v4.25.5
@@ -61,6 +64,7 @@ require (
    github.com/stretchr/testify v1.10.0
    github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5
    github.com/tchap/go-patricia/v2 v2.3.3
    github.com/tetratelabs/wazero v1.9.0
    github.com/u2takey/ffmpeg-go v0.5.0
    github.com/upyun/go-sdk/v3 v3.0.4
    github.com/winfsp/cgofuse v1.6.0
@@ -82,12 +86,24 @@ require (
require (
    cloud.google.com/go/compute/metadata v0.7.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
    github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
    github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
    github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
    github.com/ProtonMail/go-srp v0.0.7 // indirect
    github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
    github.com/PuerkitoBio/goquery v1.10.3 // indirect
    github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
    github.com/andybalholm/cascadia v1.3.3 // indirect
    github.com/bradenaw/juniper v0.15.3 // indirect
    github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
    github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
    github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/cronokirby/saferith v0.33.0 // indirect
    github.com/ebitengine/purego v0.8.4 // indirect
    github.com/emersion/go-message v0.18.2 // indirect
    github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
    github.com/henrybear327/go-proton-api v1.0.0 // indirect
    github.com/geoffgarside/ber v1.2.0 // indirect
    github.com/hashicorp/go-uuid v1.0.3 // indirect
    github.com/jcmturner/aescts/v2 v2.0.0 // indirect
@@ -102,7 +118,13 @@ require (
    github.com/minio/xxml v0.0.3 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/otiai10/mint v1.6.3 // indirect
    github.com/relvacode/iso8601 v1.6.0 // indirect
    github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
    golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
    github.com/quic-go/qpack v0.5.1 // indirect
    github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
    go.uber.org/mock v0.5.0 // indirect
    golang.org/x/mod v0.27.0 // indirect
    gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)

@@ -172,7 +194,7 @@ require (
    github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
    github.com/bytedance/sonic v1.13.3 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/coreos/go-semver v0.3.1 // indirect
    github.com/coreos/go-semver v0.3.1
    github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
@@ -194,6 +216,7 @@ require (
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/hashicorp/go-version v1.6.0 // indirect
    github.com/henrybear327/Proton-API-Bridge v1.0.0
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/ipfs/go-cid v0.5.0
    github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -267,7 +290,7 @@ require (
    golang.org/x/sys v0.34.0
    golang.org/x/term v0.33.0 // indirect
    golang.org/x/text v0.27.0
    golang.org/x/tools v0.34.0 // indirect
    golang.org/x/tools v0.35.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
    google.golang.org/grpc v1.73.0
    google.golang.org/protobuf v1.36.6 // indirect
@@ -277,4 +300,10 @@ require (
    lukechampine.com/blake3 v1.1.7 // indirect
)

replace github.com/ProtonMail/go-proton-api => github.com/henrybear327/go-proton-api v1.0.0

replace github.com/cronokirby/saferith => github.com/Da3zKi7/saferith v0.33.0-fixed

// replace github.com/OpenListTeam/115-sdk-go => ../../OpenListTeam/115-sdk-go

replace google.golang.org/genproto => google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822
go.sum (60 changed lines)
@@ -37,6 +37,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
@@ -51,10 +53,26 @@ github.com/OpenListTeam/tache v0.2.0 h1:Q4MjuyECn0CZCf1ZF91JaVaZTaps1mOTAm8bFj8s
github.com/OpenListTeam/tache v0.2.0/go.mod h1:qmnZ/VpY2DUlmjg3UoDeNFy/LRqrw0biN3hYEEGc/+A=
github.com/OpenListTeam/times v0.1.0 h1:qknxw+qj5CYKgXAwydA102UEpPcpU8TYNGRmwRyPYpg=
github.com/OpenListTeam/times v0.1.0/go.mod h1:Jx7qen5NCYzKk2w14YuvU48YYMcPa1P9a+EJePC15Pc=
github.com/OpenListTeam/wazero-wasip2 v0.0.0-20251015145605-cd3a2c9131d9 h1:yddTD9Fxh6bLMLmG0hSR7Eh6XkoK0RMlE4N1e6/+Iy8=
github.com/OpenListTeam/wazero-wasip2 v0.0.0-20251015145605-cd3a2c9131d9/go.mod h1:+BpydPG2cUQHYFwH3/lVmvXyMl/zxHW+XM+XTSzqu2Q=
github.com/OpenListTeam/wopan-sdk-go v0.1.5 h1:iKKcVzIqBgtGDbn0QbdWrCazSGxXFmYFyrnFBG+U8dI=
github.com/OpenListTeam/wopan-sdk-go v0.1.5/go.mod h1:otynv0CgSNUClPpUgZ44qCZGcMRe0dc83Pkk65xAunI=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
github.com/ProtonMail/gopenpgp/v2 v2.9.0 h1:ruLzBmwe4dR1hdnrsEJ/S7psSBmV15gFttFUPP/+/kE=
github.com/ProtonMail/gopenpgp/v2 v2.9.0/go.mod h1:IldDyh9Hv1ZCCYatTuuEt1XZJ0OPjxLpTarDfglih7s=
github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo=
github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y=
github.com/RoaringBitmap/roaring/v2 v2.4.5 h1:uGrrMreGjvAtTBobc0g5IrW1D5ldxDQYe2JW2gggRdg=
github.com/RoaringBitmap/roaring/v2 v2.4.5/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
@@ -72,6 +90,8 @@ github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAP
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -164,6 +184,9 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo=
github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0=
github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
@@ -198,6 +221,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg=
@@ -239,6 +263,10 @@ github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJL
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg=
github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
@@ -364,6 +392,10 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261 h1:47L8SHM80cXszQydLrpp9MhVkFLLWCvrU9XmJ6XtRu0=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499 h1:4ovnBdiGDFi8putQGxhipuuhXItAgh4/YnzufPYkZkQ=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -385,6 +417,12 @@ github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -585,8 +623,14 @@ github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQP
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg=
github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/rclone/rclone v1.70.3 h1:rg/WNh4DmSVZyKP2tHZ4lAaWEyMi7h/F0r7smOMA3IE=
github.com/rclone/rclone v1.70.3/go.mod h1:nLyN+hpxAsQn9Rgt5kM774lcRDad82x/KqQeBZ83cMo=
github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
|
||||
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
@@ -646,6 +690,8 @@ github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6K
|
||||
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543/go.mod h1:jpwqYA8KUVEvSUJHkCXsnBRJCSKP1BMa81QZ6kvRpow=
|
||||
github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
|
||||
github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
|
||||
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
|
||||
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
|
||||
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
||||
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
||||
@@ -702,6 +748,8 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
|
||||
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
|
||||
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
|
||||
@@ -714,6 +762,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
@@ -756,6 +805,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -775,10 +826,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -821,6 +874,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -839,6 +893,7 @@ golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXct
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
@@ -854,6 +909,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
@@ -895,8 +951,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
24 internal/alloc/alloc_other.go Normal file
@@ -0,0 +1,24 @@
//go:build !unix && !windows

package alloc // import "github.com/ncruces/go-sqlite3/internal/alloc"

import "github.com/tetratelabs/wazero/experimental"

func NewMemory(cap, max uint64) experimental.LinearMemory {
    return &sliceMemory{make([]byte, 0, cap)}
}

type sliceMemory struct {
    buf []byte
}

func (b *sliceMemory) Free() {}

func (b *sliceMemory) Reallocate(size uint64) []byte {
    if cap := uint64(cap(b.buf)); size > cap {
        b.buf = append(b.buf[:cap], make([]byte, size-cap)...)
    } else {
        b.buf = b.buf[:size]
    }
    return b.buf
}
14 internal/alloc/alloc_test.go Normal file
@@ -0,0 +1,14 @@
package alloc_test // import "github.com/ncruces/go-sqlite3/internal/alloc"

import (
    "math"
    "testing"

    "github.com/OpenListTeam/OpenList/v4/internal/alloc"
)

func TestVirtual(t *testing.T) {
    defer func() { _ = recover() }()
    alloc.NewMemory(math.MaxInt+2, math.MaxInt+2)
    t.Error("want panic")
}
75 internal/alloc/alloc_unix.go Normal file
@@ -0,0 +1,75 @@
//go:build unix

package alloc // import "github.com/ncruces/go-sqlite3/internal/alloc"

import (
    "math"

    "github.com/tetratelabs/wazero/experimental"
    "golang.org/x/sys/unix"
)

func NewMemory(cap, max uint64) experimental.LinearMemory {
    // Round up to the page size.
    rnd := uint64(unix.Getpagesize() - 1)
    res := (max + rnd) &^ rnd

    if res > math.MaxInt {
        // This ensures int(res) overflows to a negative value,
        // and unix.Mmap returns EINVAL.
        res = math.MaxUint64
    }

    com := res
    prot := unix.PROT_READ | unix.PROT_WRITE
    if cap < max { // Commit memory only if cap=max.
        com = 0
        prot = unix.PROT_NONE
    }

    // Reserve res bytes of address space, to ensure we won't need to move it.
    // A protected, private, anonymous mapping should not commit memory.
    b, err := unix.Mmap(-1, 0, int(res), prot, unix.MAP_PRIVATE|unix.MAP_ANON)
    if err != nil {
        panic(err)
    }
    return &mmappedMemory{buf: b[:com]}
}

// The slice covers the entire mmapped memory:
//   - len(buf) is the already committed memory,
//   - cap(buf) is the reserved address space.
type mmappedMemory struct {
    buf []byte
}

func (m *mmappedMemory) Reallocate(size uint64) []byte {
    com := uint64(len(m.buf))
    res := uint64(cap(m.buf))
    if com < size && size <= res {
        // Grow geometrically, round up to the page size.
        rnd := uint64(unix.Getpagesize() - 1)
        new := com + com>>3
        new = min(max(size, new), res)
        new = (new + rnd) &^ rnd

        // Commit additional memory up to new bytes.
        err := unix.Mprotect(m.buf[com:new], unix.PROT_READ|unix.PROT_WRITE)
        if err != nil {
            return nil
        }

        m.buf = m.buf[:new] // Update committed memory.
    }
    // Limit returned capacity because bytes beyond
    // len(m.buf) have not yet been committed.
    return m.buf[:size:len(m.buf)]
}

func (m *mmappedMemory) Free() {
    err := unix.Munmap(m.buf[:cap(m.buf)])
    if err != nil {
        panic(err)
    }
    m.buf = nil
}
76 internal/alloc/alloc_windows.go Normal file
@@ -0,0 +1,76 @@
package alloc // import "github.com/ncruces/go-sqlite3/internal/alloc"

import (
    "math"
    "unsafe"

    "github.com/tetratelabs/wazero/experimental"
    "golang.org/x/sys/windows"
)

func NewMemory(cap, max uint64) experimental.LinearMemory {
    // Round up to the page size.
    rnd := uint64(windows.Getpagesize() - 1)
    res := (max + rnd) &^ rnd

    if res > math.MaxInt {
        // This ensures uintptr(res) overflows to a large value,
        // and windows.VirtualAlloc returns an error.
        res = math.MaxUint64
    }

    com := res
    kind := windows.MEM_COMMIT
    if cap < max { // Commit memory only if cap=max.
        com = 0
        kind = windows.MEM_RESERVE
    }

    // Reserve res bytes of address space, to ensure we won't need to move it.
    r, err := windows.VirtualAlloc(0, uintptr(res), uint32(kind), windows.PAGE_READWRITE)
    if err != nil {
        panic(err)
    }
    buf := unsafe.Slice((*byte)(unsafe.Pointer(r)), int(max))
    mem := virtualMemory{addr: r, buf: buf[:com:res]}
    return &mem
}

// The slice covers the entire mmapped memory:
//   - len(buf) is the already committed memory,
//   - cap(buf) is the reserved address space.
type virtualMemory struct {
    buf  []byte
    addr uintptr
}

func (m *virtualMemory) Reallocate(size uint64) []byte {
    com := uint64(len(m.buf))
    res := uint64(cap(m.buf))
    if com < size && size <= res {
        // Grow geometrically, round up to the page size.
        rnd := uint64(windows.Getpagesize() - 1)
        new := com + com>>3
        new = min(max(size, new), res)
        new = (new + rnd) &^ rnd

        // Commit additional memory up to new bytes.
        _, err := windows.VirtualAlloc(m.addr, uintptr(new), windows.MEM_COMMIT, windows.PAGE_READWRITE)
        if err != nil {
            return nil
        }

        m.buf = m.buf[:new] // Update committed memory.
    }
    // Limit returned capacity because bytes beyond
    // len(m.buf) have not yet been committed.
    return m.buf[:size:len(m.buf)]
}

func (m *virtualMemory) Free() {
    err := windows.VirtualFree(m.addr, 0, windows.MEM_RELEASE)
    if err != nil {
        panic(err)
    }
    m.addr = 0
}
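For reference, a minimal sketch of how this allocator is meant to be driven. It only assumes the NewMemory/Reallocate/Free surface shown above (the wazero experimental.LinearMemory interface) and the internal/alloc import path used by the test file; it is not part of the diff.

package main

import (
    "fmt"

    "github.com/OpenListTeam/OpenList/v4/internal/alloc"
)

func main() {
    // Reserve up to 1 MiB of address space; since cap < max, nothing is committed yet.
    mem := alloc.NewMemory(64*1024, 1<<20)
    defer mem.Free()

    // Grow the committed region to 128 KiB and use it as an ordinary byte slice.
    buf := mem.Reallocate(128 * 1024)
    copy(buf, []byte("hello"))
    fmt.Println(len(buf), cap(buf))
}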
@@ -39,7 +39,21 @@ func InitConfig() {
    if !filepath.IsAbs(dataDir) {
        flags.DataDir = filepath.Join(pwd, flags.DataDir)
    }
    configPath := filepath.Join(flags.DataDir, "config.json")
    // Determine config file path: use flags.ConfigPath if provided, otherwise default to <dataDir>/config.json
    configPath := flags.ConfigPath
    if configPath == "" {
        configPath = filepath.Join(flags.DataDir, "config.json")
    } else {
        // if relative, resolve relative to working directory
        if !filepath.IsAbs(configPath) {
            if absPath, err := filepath.Abs(configPath); err == nil {
                configPath = absPath
            } else {
                configPath = filepath.Join(pwd, configPath)
            }
        }
    }
    configPath = filepath.Clean(configPath)
    log.Infof("reading config file: %s", configPath)
    if !utils.Exists(configPath) {
        log.Infof("config file not exists, creating default config file")
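The precedence implemented above (explicit config flag first, otherwise config.json under the data directory, with relative paths resolved against the working directory) can be summarised as a small standalone helper; the function name below is hypothetical and only illustrates the same resolution order.

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// resolveConfigPath mirrors the resolution order shown in InitConfig above.
// Illustrative only; not part of the OpenList code base.
func resolveConfigPath(configFlag, dataDir string) string {
    p := configFlag
    if p == "" {
        p = filepath.Join(dataDir, "config.json")
    } else if !filepath.IsAbs(p) {
        if abs, err := filepath.Abs(p); err == nil {
            p = abs
        } else if pwd, err := os.Getwd(); err == nil {
            p = filepath.Join(pwd, p)
        }
    }
    return filepath.Clean(p)
}

func main() {
    fmt.Println(resolveConfigPath("", "/opt/openlist/data"))          // <dataDir>/config.json
    fmt.Println(resolveConfigPath("conf/config.json", "/opt/openlist/data")) // resolved against CWD
}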
@@ -148,7 +148,10 @@ func InitialSettings() []model.SettingItem {
    {Key: "audio_cover", Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW},
    {Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.PreviewDownloadByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.SharePreviewDownloadByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.SharePreviewArchivesByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    // global settings
23 internal/bootstrap/plugin.go Normal file
@@ -0,0 +1,23 @@
// internal/bootstrap/plugin.go
package bootstrap

import (
    "context"
    "fmt"

    "github.com/OpenListTeam/OpenList/v4/cmd/flags"
    "github.com/OpenListTeam/OpenList/v4/internal/plugin"
)

// InitPlugins initializes the plugin manager.
func InitPlugins() {
    // Create and initialize the Manager.
    // The "data" directory should come from the configuration.
    manager, err := plugin.NewManager(context.Background(), flags.DataDir)
    if err != nil {
        // If the plugin system fails at startup, panic.
        panic(fmt.Sprintf("Failed to initialize plugin manager: %v", err))
    }

    plugin.PluginManager = manager
}
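The plugin.NewManager call above only appears here by its signature. As a rough, clearly hypothetical sketch of what such a constructor could do (resolve a plugins directory under the data dir and fail fast), under the assumption that the real manager also loads the Wasm plugins recorded in the database:

package plugin

import (
    "context"
    "fmt"
    "os"
    "path/filepath"
)

// Manager is a placeholder for the real plugin manager; only the constructor
// shape used by bootstrap.InitPlugins above is assumed here.
type Manager struct {
    pluginDir string
}

// PluginManager is the process-wide instance assigned by bootstrap.
var PluginManager *Manager

// NewManager prepares a plugins directory under dataDir. Sketch only; the real
// implementation presumably also restores plugin state from the Plugin table.
func NewManager(ctx context.Context, dataDir string) (*Manager, error) {
    dir := filepath.Join(dataDir, "plugins")
    if err := os.MkdirAll(dir, 0o755); err != nil {
        return nil, fmt.Errorf("create plugin dir: %w", err)
    }
    return &Manager{pluginDir: dir}, nil
}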
@@ -25,6 +25,6 @@ func LoadStorages() {
            storages[i].MountPath, storages[i].Driver, storages[i].Order)
        }
    }
    conf.StoragesLoaded = true
    conf.SendStoragesLoadedSignal()
}(storages)
}
101 internal/cache/keyed_cache.go vendored Normal file
@@ -0,0 +1,101 @@
package cache

import (
    "sync"
    "time"
)

type KeyedCache[T any] struct {
    entries map[string]*CacheEntry[T]
    mu      sync.RWMutex
    ttl     time.Duration
}

func NewKeyedCache[T any](ttl time.Duration) *KeyedCache[T] {
    c := &KeyedCache[T]{
        entries: make(map[string]*CacheEntry[T]),
        ttl:     ttl,
    }
    gcFuncs = append(gcFuncs, c.GC)
    return c
}

func (c *KeyedCache[T]) Set(key string, value T) {
    c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(c.ttl)))
}

func (c *KeyedCache[T]) SetWithTTL(key string, value T, ttl time.Duration) {
    c.SetWithExpirable(key, value, ExpirationTime(time.Now().Add(ttl)))
}

func (c *KeyedCache[T]) SetWithExpirable(key string, value T, exp Expirable) {
    c.mu.Lock()
    defer c.mu.Unlock()

    c.entries[key] = &CacheEntry[T]{
        data:      value,
        Expirable: exp,
    }
}

func (c *KeyedCache[T]) Get(key string) (T, bool) {
    c.mu.RLock()
    entry, exists := c.entries[key]
    if !exists {
        c.mu.RUnlock()
        return *new(T), false
    }

    expired := entry.Expired()
    c.mu.RUnlock()

    if !expired {
        return entry.data, true
    }

    c.mu.Lock()
    if c.entries[key] == entry {
        delete(c.entries, key)
        c.mu.Unlock()
        return *new(T), false
    }
    c.mu.Unlock()
    return *new(T), false
}

func (c *KeyedCache[T]) Delete(key string) {
    c.mu.Lock()
    defer c.mu.Unlock()

    delete(c.entries, key)
}

func (c *KeyedCache[T]) Take(key string) (T, bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if entry, exists := c.entries[key]; exists {
        delete(c.entries, key)
        return entry.data, true
    }
    return *new(T), false
}

func (c *KeyedCache[T]) Clear() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.entries = make(map[string]*CacheEntry[T])
}

func (c *KeyedCache[T]) GC() {
    c.mu.Lock()
    defer c.mu.Unlock()
    expiredKeys := make([]string, 0, len(c.entries))
    for key, entry := range c.entries {
        if entry.Expired() {
            expiredKeys = append(expiredKeys, key)
        }
    }
    for _, key := range expiredKeys {
        delete(c.entries, key)
    }
}
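A short usage sketch of the generic cache above, using the import path shown in the alloc test file earlier in this comparison; values and keys are illustrative.

package main

import (
    "fmt"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/cache"
)

func main() {
    c := cache.NewKeyedCache[string](time.Minute) // default TTL of one minute

    c.Set("greeting", "hello")                     // expires after the default TTL
    c.SetWithTTL("token", "abc123", 5*time.Second) // per-entry TTL

    if v, ok := c.Get("greeting"); ok {
        fmt.Println(v)
    }
    if v, ok := c.Take("token"); ok { // Take returns the value and removes the entry
        fmt.Println(v)
    }
}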
18 internal/cache/type.go vendored Normal file
@@ -0,0 +1,18 @@
package cache

import "time"

type Expirable interface {
    Expired() bool
}

type ExpirationTime time.Time

func (e ExpirationTime) Expired() bool {
    return time.Now().After(time.Time(e))
}

type CacheEntry[T any] struct {
    Expirable
    data T
}
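Because CacheEntry embeds the Expirable interface, expiry policies other than a fixed timestamp can be plugged in via SetWithExpirable; elsewhere in this diff op/archive passes a utils.SyncClosers value as the Expirable for extracted links. A minimal, purely illustrative alternative:

package cache

// neverExpires keeps an entry alive until it is explicitly Deleted or Taken.
// Hypothetical type for illustration; the diff itself only ships ExpirationTime.
type neverExpires struct{}

func (neverExpires) Expired() bool { return false }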
113 internal/cache/typed_cache.go vendored Normal file
@@ -0,0 +1,113 @@
package cache

import (
    "sync"
    "time"
)

type TypedCache[T any] struct {
    entries map[string]map[string]*CacheEntry[T]
    mu      sync.RWMutex
    ttl     time.Duration
}

func NewTypedCache[T any](ttl time.Duration) *TypedCache[T] {
    c := &TypedCache[T]{
        entries: make(map[string]map[string]*CacheEntry[T]),
        ttl:     ttl,
    }
    gcFuncs = append(gcFuncs, c.GC)
    return c
}

func (c *TypedCache[T]) SetType(key, typeKey string, value T) {
    c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(time.Now().Add(c.ttl)))
}

func (c *TypedCache[T]) SetTypeWithTTL(key, typeKey string, value T, ttl time.Duration) {
    c.SetTypeWithExpirable(key, typeKey, value, ExpirationTime(time.Now().Add(ttl)))
}

func (c *TypedCache[T]) SetTypeWithExpirable(key, typeKey string, value T, exp Expirable) {
    c.mu.Lock()
    defer c.mu.Unlock()
    cache, exists := c.entries[key]
    if !exists {
        cache = make(map[string]*CacheEntry[T])
        c.entries[key] = cache
    }

    cache[typeKey] = &CacheEntry[T]{
        data:      value,
        Expirable: exp,
    }
}

func (c *TypedCache[T]) GetType(key, typeKey string) (T, bool) {
    c.mu.RLock()
    cache, exists := c.entries[key]
    if !exists {
        c.mu.RUnlock()
        return *new(T), false
    }
    entry, exists := cache[typeKey]
    if !exists {
        c.mu.RUnlock()
        return *new(T), false
    }
    expired := entry.Expired()
    c.mu.RUnlock()

    if !expired {
        return entry.data, true
    }

    c.mu.Lock()
    if cache[typeKey] == entry {
        delete(cache, typeKey)
        if len(cache) == 0 {
            delete(c.entries, key)
        }
        c.mu.Unlock()
        return *new(T), false
    }
    c.mu.Unlock()
    return *new(T), false
}

func (c *TypedCache[T]) DeleteKey(key string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    delete(c.entries, key)
}

func (c *TypedCache[T]) Clear() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.entries = make(map[string]map[string]*CacheEntry[T])
}

func (c *TypedCache[T]) GC() {
    c.mu.Lock()
    defer c.mu.Unlock()
    expiredKeys := make(map[string][]string)
    for tk, entries := range c.entries {
        for key, entry := range entries {
            if !entry.Expired() {
                continue
            }
            if _, ok := expiredKeys[tk]; !ok {
                expiredKeys[tk] = make([]string, 0, len(entries))
            }
            expiredKeys[tk] = append(expiredKeys[tk], key)
        }
    }
    for tk, keys := range expiredKeys {
        for _, key := range keys {
            delete(c.entries[tk], key)
        }
        if len(c.entries[tk]) == 0 {
            delete(c.entries, tk)
        }
    }
}
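The two-level map lets several variants be cached under one primary key and dropped together with DeleteKey. A brief sketch (keys and URLs are illustrative only):

package main

import (
    "fmt"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/cache"
)

func main() {
    // Primary key: file path; type key: a per-client variant such as an IP bucket.
    links := cache.NewTypedCache[string](30 * time.Minute)

    links.SetType("/movies/a.mkv", "ip:203.0.113.7", "https://cdn.example.com/a?sig=1")
    links.SetType("/movies/a.mkv", "ip:198.51.100.9", "https://cdn.example.com/a?sig=2")

    if u, ok := links.GetType("/movies/a.mkv", "ip:203.0.113.7"); ok {
        fmt.Println(u)
    }

    // Invalidate every variant of the path at once.
    links.DeleteKey("/movies/a.mkv")
}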
24 internal/cache/utils.go vendored Normal file
@@ -0,0 +1,24 @@
package cache

import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/pkg/cron"
    log "github.com/sirupsen/logrus"
)

var (
    cacheGcCron *cron.Cron
    gcFuncs     []func()
)

func init() {
    // TODO Move to bootstrap
    cacheGcCron = cron.NewCron(time.Hour)
    cacheGcCron.Do(func() {
        log.Infof("Start cache GC")
        for _, f := range gcFuncs {
            f()
        }
    })
}
@@ -35,6 +35,7 @@ type Scheme struct {
    UnixFile     string `json:"unix_file" env:"UNIX_FILE"`
    UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"`
    EnableH2c    bool   `json:"enable_h2c" env:"ENABLE_H2C"`
    EnableH3     bool   `json:"enable_h3" env:"ENABLE_H3"`
}

type LogConfig struct {
@@ -24,17 +24,20 @@ const (
    HideStorageDetailsInManagePage = "hide_storage_details_in_manage_page"

    // preview
    TextTypes = "text_types"
    AudioTypes = "audio_types"
    VideoTypes = "video_types"
    ImageTypes = "image_types"
    ProxyTypes = "proxy_types"
    ProxyIgnoreHeaders = "proxy_ignore_headers"
    AudioAutoplay = "audio_autoplay"
    VideoAutoplay = "video_autoplay"
    PreviewArchivesByDefault = "preview_archives_by_default"
    ReadMeAutoRender = "readme_autorender"
    FilterReadMeScripts = "filter_readme_scripts"
    TextTypes = "text_types"
    AudioTypes = "audio_types"
    VideoTypes = "video_types"
    ImageTypes = "image_types"
    ProxyTypes = "proxy_types"
    ProxyIgnoreHeaders = "proxy_ignore_headers"
    AudioAutoplay = "audio_autoplay"
    VideoAutoplay = "video_autoplay"
    PreviewDownloadByDefault = "preview_download_by_default"
    PreviewArchivesByDefault = "preview_archives_by_default"
    SharePreviewDownloadByDefault = "share_preview_download_by_default"
    SharePreviewArchivesByDefault = "share_preview_archives_by_default"
    ReadMeAutoRender = "readme_autorender"
    FilterReadMeScripts = "filter_readme_scripts"

    // global
    HideFiles = "hide_files"
@@ -3,6 +3,7 @@ package conf
import (
    "net/url"
    "regexp"
    "sync"
)

var (
@@ -23,8 +24,6 @@ var FilenameCharMap = make(map[string]string)
var PrivacyReg []*regexp.Regexp

var (
    // StoragesLoaded loaded success if empty
    StoragesLoaded = false
    // maximum size of a single buffer
    MaxBufferLimit = 16 * 1024 * 1024
    // buffers above this threshold are allocated with mmap so the memory can be released proactively
@@ -35,3 +34,39 @@ var (
    ManageHtml string
    IndexHtml string
)

var (
    // StoragesLoaded loaded success if empty
    StoragesLoaded = false
    storagesLoadMu sync.RWMutex
    storagesLoadSignal chan struct{} = make(chan struct{})
)

func StoragesLoadSignal() <-chan struct{} {
    storagesLoadMu.RLock()
    ch := storagesLoadSignal
    storagesLoadMu.RUnlock()
    return ch
}
func SendStoragesLoadedSignal() {
    storagesLoadMu.Lock()
    select {
    case <-storagesLoadSignal:
        // already closed
    default:
        StoragesLoaded = true
        close(storagesLoadSignal)
    }
    storagesLoadMu.Unlock()
}
func ResetStoragesLoadSignal() {
    storagesLoadMu.Lock()
    select {
    case <-storagesLoadSignal:
        StoragesLoaded = false
        storagesLoadSignal = make(chan struct{})
    default:
        // not closed -> nothing to do
    }
    storagesLoadMu.Unlock()
}
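Further down in this diff the task layer blocks on this channel before reading persisted data. The same pattern works for any goroutine that must wait until storages are mounted, for example:

package example

import (
    "log"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
)

// waitForStorages runs fn once the storages-loaded signal fires.
// Illustrative helper; not part of the diff.
func waitForStorages(fn func()) {
    go func() {
        <-conf.StoragesLoadSignal() // closed by SendStoragesLoadedSignal()
        log.Println("storages loaded:", conf.StoragesLoaded)
        fn()
    }()
}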
@@ -12,7 +12,7 @@ var db *gorm.DB

func Init(d *gorm.DB) {
    db = d
    err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB))
    err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB), new(model.Plugin))
    if err != nil {
        log.Fatalf("failed migrate database: %s", err.Error())
    }
47 internal/db/plugin.go Normal file
@@ -0,0 +1,47 @@
package db

import (
    "context"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "gorm.io/gorm"
)

// CreatePlugin inserts a new plugin record into the database.
// If the record already exists, it is updated (upsert).
func CreatePlugin(ctx context.Context, plugin *model.Plugin) error {
    return db.WithContext(ctx).Save(plugin).Error
}

// GetPluginByID looks up a single plugin by its ID.
func GetPluginByID(ctx context.Context, id string) (*model.Plugin, error) {
    var plugin model.Plugin
    err := db.WithContext(ctx).First(&plugin, "id = ?", id).Error
    if err != nil {
        if err == gorm.ErrRecordNotFound {
            return nil, nil // nil, nil means "not found"
        }
        return nil, err
    }
    return &plugin, nil
}

// GetAllPlugins returns all installed plugins.
func GetAllPlugins(ctx context.Context) ([]*model.Plugin, error) {
    var plugins []*model.Plugin
    err := db.WithContext(ctx).Find(&plugins).Error
    return plugins, err
}

// DeletePluginByID deletes a plugin by its ID.
func DeletePluginByID(ctx context.Context, id string) error {
    return db.WithContext(ctx).Delete(&model.Plugin{}, "id = ?", id).Error
}

// UpdatePluginStatus updates the status and message of the given plugin.
func UpdatePluginStatus(ctx context.Context, pluginID string, status model.PluginStatus, message string) error {
    return db.WithContext(ctx).Model(&model.Plugin{}).Where("id = ?", pluginID).Updates(map[string]interface{}{
        "status":  status,
        "message": message,
    }).Error
}
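A usage sketch of these helpers, assuming db.Init has been called with a GORM handle as shown earlier in this diff; the plugin ID and names below are hypothetical.

package example

import (
    "context"
    "log"

    "github.com/OpenListTeam/OpenList/v4/internal/db"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
)

func registerPlugin(ctx context.Context) error {
    p := &model.Plugin{
        ID:      "com.example.driver.demo", // hypothetical plugin ID
        Name:    "Demo Driver",
        Version: "0.1.0",
        Status:  model.StatusInactive,
    }
    if err := db.CreatePlugin(ctx, p); err != nil { // upsert
        return err
    }

    got, err := db.GetPluginByID(ctx, p.ID)
    if err != nil {
        return err
    }
    if got == nil {
        log.Println("plugin not found") // nil, nil is the "not found" convention
        return nil
    }
    return db.UpdatePluginStatus(ctx, got.ID, model.StatusActive, "")
}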
@@ -60,3 +60,7 @@ func DeleteSharingById(id string) error {
    s := model.SharingDB{ID: id}
    return errors.WithStack(db.Where(s).Delete(&s).Error)
}

func DeleteSharingsByCreatorId(creatorId uint) error {
    return errors.WithStack(db.Where("creator_id = ?", creatorId).Delete(&model.SharingDB{}).Error)
}
@@ -1,6 +1,7 @@
package db

import (
    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/pkg/errors"
)
@@ -30,6 +31,7 @@ func GetTaskDataFunc(type_s string, enabled bool) func() ([]byte, error) {
        return nil
    }
    return func() ([]byte, error) {
        <-conf.StoragesLoadSignal()
        return []byte(task.PersistData), nil
    }
}
@@ -3,11 +3,9 @@ package driver

type Config struct {
    Name      string `json:"name"`
    LocalSort bool   `json:"local_sort"`
    // if the driver returns Link with MFile, this should be set to true
    OnlyLinkMFile bool `json:"only_local"`
    OnlyProxy     bool `json:"only_proxy"`
    NoCache       bool `json:"no_cache"`
    NoUpload      bool `json:"no_upload"`
    OnlyProxy bool `json:"only_proxy"`
    NoCache   bool `json:"no_cache"`
    NoUpload  bool `json:"no_upload"`
    // if need get message from user, such as validate code
    NeedMs      bool   `json:"need_ms"`
    DefaultRoot string `json:"default_root"`
@@ -19,8 +17,24 @@ type Config struct {
    ProxyRangeOption bool `json:"-"`
    // if the driver returns Link without URL, this should be set to true
    NoLinkURL bool `json:"-"`
    // Link cache behaviour:
    // - LinkCacheAuto: let driver decide per-path (implement driver.LinkCacheModeResolver)
    // - LinkCacheNone: no extra info added to cache key (default)
    // - flags (OR-able) can add more attributes to cache key (IP, UA, ...)
    LinkCacheMode `json:"-"`
}

type LinkCacheMode int8

const (
    LinkCacheAuto LinkCacheMode = -1 // Let the driver decide per-path (use driver.LinkCacheModeResolver)
    LinkCacheNone LinkCacheMode = 0  // No extra info added to cache key (default)
)

const (
    LinkCacheIP LinkCacheMode = 1 << iota // include client IP in cache key
    LinkCacheUA                           // include User-Agent in cache key
)

func (c Config) MustProxy() bool {
    return c.OnlyProxy || c.OnlyLinkMFile || c.NoLinkURL
    return c.OnlyProxy || c.NoLinkURL
}
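Since LinkCacheIP and LinkCacheUA are OR-able bit flags, a cache-key builder can test them with a simple mask. A sketch (the helper name and key format are hypothetical):

package example

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
)

// linkCacheKey appends the client attributes selected by the driver's
// LinkCacheMode to a base key. Illustrative only.
func linkCacheKey(base, clientIP, userAgent string, mode driver.LinkCacheMode) string {
    key := base
    if mode&driver.LinkCacheIP != 0 {
        key += "|ip=" + clientIP
    }
    if mode&driver.LinkCacheUA != 0 {
        key += "|ua=" + userAgent
    }
    return key
}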
@@ -47,11 +47,6 @@ type Getter interface {
    Get(ctx context.Context, path string) (model.Obj, error)
}

type GetObjInfo interface {
    // GetObjInfo get file info by path
    GetObjInfo(ctx context.Context, path string) (model.Obj, error)
}

//type Writer interface {
//  Mkdir
//  Move
@@ -218,3 +213,8 @@ type WithDetails interface {
type Reference interface {
    InitReference(storage Driver) error
}

type LinkCacheModeResolver interface {
    // ResolveLinkCacheMode returns the LinkCacheMode for the given path.
    ResolveLinkCacheMode(path string) LinkCacheMode
}
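A driver whose Config uses LinkCacheAuto is expected to implement this interface. A hedged sketch with a stand-in driver type (the real driver would also implement the full driver.Driver interface):

package example

import (
    "strings"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
)

// exampleDriver is a placeholder type used only to show the resolver shape.
type exampleDriver struct{}

// ResolveLinkCacheMode: here, links under /videos/ are cached per client IP,
// everything else gets the plain cache key. The path rule is illustrative.
func (d *exampleDriver) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
    if strings.HasPrefix(path, "/videos/") {
        return driver.LinkCacheIP
    }
    return driver.LinkCacheNone
}

var _ driver.LinkCacheModeResolver = (*exampleDriver)(nil)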
@@ -19,6 +19,10 @@ type Info struct {
    Config Config `json:"config"`
}

type IGetItem interface {
    GetItems() []Item
}

type IRootPath interface {
    GetRootPath() string
}
@@ -1,12 +1,11 @@
package errs

import "errors"

func UnwrapOrSelf(err error) error {
    // errors.Unwrap has no fallback mechanism
    unwrapped := errors.Unwrap(err)
    if unwrapped == nil {
    u, ok := err.(interface {
        Unwrap() error
    })
    if !ok {
        return err
    }
    return unwrapped
    return u.Unwrap()
}
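Unlike errors.Unwrap, this helper falls back to the error itself, which is what lets the downloader change later in this diff do a direct type assertion whether or not the status error was wrapped. A small self-contained illustration (statusError stands in for the real HttpStatusCodeError type):

package main

import (
    "fmt"

    "github.com/OpenListTeam/OpenList/v4/internal/errs"
)

type statusError int // stand-in for a status-code error type

func (s statusError) Error() string { return fmt.Sprintf("http status %d", int(s)) }

func main() {
    plain := statusError(404)
    wrapped := fmt.Errorf("request failed: %w", statusError(500))

    for _, err := range []error{plain, wrapped} {
        // UnwrapOrSelf returns the wrapped error if there is one, else err itself.
        if sc, ok := errs.UnwrapOrSelf(err).(statusError); ok {
            fmt.Println("status:", int(sc))
        }
    }
}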
@@ -41,6 +41,18 @@ func (t *ArchiveDownloadTask) Run() error {
    if err := t.ReinitCtx(); err != nil {
        return err
    }
    if t.SrcStorage == nil {
        if srcStorage, _, err := op.GetStorageAndActualPath(t.SrcStorageMp); err == nil {
            t.SrcStorage = srcStorage
        } else {
            return err
        }
        if dstStorage, _, err := op.GetStorageAndActualPath(t.DstStorageMp); err == nil {
            t.DstStorage = dstStorage
        } else {
            return err
        }
    }
    t.ClearEndTime()
    t.SetStartTime(time.Now())
    defer func() { t.SetEndTime(time.Now()) }()
@@ -48,6 +48,19 @@ func (t *FileTransferTask) Run() error {
    if err := t.ReinitCtx(); err != nil {
        return err
    }
    if t.SrcStorage == nil {
        if srcStorage, _, err := op.GetStorageAndActualPath(t.SrcStorageMp); err == nil {
            t.SrcStorage = srcStorage
        } else {
            return err
        }
        if dstStorage, _, err := op.GetStorageAndActualPath(t.DstStorageMp); err == nil {
            t.DstStorage = dstStorage
        } else {
            return err
        }
    }

    t.ClearEndTime()
    t.SetStartTime(time.Now())
    defer func() { t.SetEndTime(time.Now()) }()
@@ -139,7 +152,7 @@ func transfer(ctx context.Context, taskType taskType, srcObjPath, dstDirPath str
    if taskType == move {
        task_group.RefreshAndRemove(dstDirPath, task_group.SrcPathToRemove(srcObjPath))
    } else {
        op.DeleteCache(t.DstStorage, dstDirActualPath)
        op.Cache.DeleteDirectory(t.DstStorage, dstDirActualPath)
    }
}
return nil, err
@@ -173,7 +186,7 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
    dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
    if t.TaskType == copy {
        if t.Ctx().Value(conf.NoTaskKey) != nil {
            defer op.DeleteCache(t.DstStorage, dstActualPath)
            defer op.Cache.DeleteDirectory(t.DstStorage, dstActualPath)
        } else {
            task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToRefresh(dstActualPath))
        }
@@ -15,7 +15,7 @@ func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) {
    path = utils.FixAndCleanPath(path)
    // maybe a virtual file
    if path != "/" {
        virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails)
        virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails, false)
        for _, f := range virtualFiles {
            if f.GetName() == stdpath.Base(path) {
                return f, nil

@@ -15,7 +15,7 @@ import (
func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) {
    meta, _ := ctx.Value(conf.MetaKey).(*model.Meta)
    user, _ := ctx.Value(conf.UserKey).(*model.User)
    virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails)
    virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails, args.Refresh)
    storage, actualPath, err := op.GetStorageAndActualPath(path)
    if err != nil && len(virtualFiles) == 0 {
        return nil, errors.WithMessage(err, "failed get storage")
@@ -28,7 +28,6 @@ type Link struct {
    URL         string        `json:"url"`    // most common way
    Header      http.Header   `json:"header"` // needed header (for url)
    RangeReader RangeReaderIF `json:"-"`      // recommended way if can't use URL
    MFile       File          `json:"-"`      // best for local,smb... file system, which exposes MFile

    Expiration *time.Duration // local cache expire Duration

@@ -38,6 +37,8 @@ type Link struct {
    ContentLength int64 `json:"-"` // transcoded video, thumbnails

    utils.SyncClosers `json:"-"`
    // should be true if the Link becomes unusable once the resources in SyncClosers are closed
    RequireReference bool `json:"-"`
}

type OtherArgs struct {
42 internal/model/plugin.go Normal file
@@ -0,0 +1,42 @@
package model

import "time"

// PluginStatus defines the possible states of a plugin.
type PluginStatus string

const (
    // StatusActive means the plugin is loaded and running.
    StatusActive PluginStatus = "active"
    // StatusInactive means the plugin is installed but not loaded (e.g. waiting for a restart).
    StatusInactive PluginStatus = "inactive"
    // StatusError means the plugin failed to load or run.
    StatusError PluginStatus = "error"
)

type Plugin struct {
    // Unique identifier of the plugin, e.g. "com.openlist.driver.s3".
    // This is the primary key.
    ID string `gorm:"primaryKey" json:"id"`

    // --- from the plugin metadata ---
    Name        string `json:"name"`
    Version     string `json:"version"`
    Author      string `json:"author"`
    Description string `gorm:"type:text" json:"description"`
    IconURL     string `json:"icon_url"`

    // --- information needed by the manager ---
    // Download source of the plugin.
    SourceURL string `json:"source_url"`
    // Local path of the Wasm file.
    WasmPath string `json:"wasm_path"`

    // Status fields.
    Status  PluginStatus `gorm:"default:'inactive'" json:"status"`
    Message string       `gorm:"type:text" json:"message"` // used to store error messages

    // --- managed by GORM ---
    CreatedAt time.Time `json:"-"`
    UpdatedAt time.Time `json:"-"`
}
@@ -33,7 +33,7 @@ func (s *Sharing) Valid() bool {
    if len(s.Files) == 0 {
        return false
    }
    if !s.Creator.CanShare() {
    if s.Creator == nil || !s.Creator.CanShare() {
        return false
    }
    if s.Expires != nil && !s.Expires.IsZero() && s.Expires.Before(time.Now()) {
@@ -12,6 +12,7 @@ import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/rclone/rclone/lib/mmap"
@@ -403,7 +404,7 @@ var errInfiniteRetry = errors.New("infinite retry")
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
    resp, err := d.cfg.HttpClient(d.ctx, params)
    if err != nil {
        statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError)
        statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError)
        if !ok {
            return 0, err
        }
@@ -34,6 +34,20 @@ func (t *TransferTask) Run() error {
    if err := t.ReinitCtx(); err != nil {
        return err
    }
    if t.SrcStorage == nil && t.SrcStorageMp != "" {
        if srcStorage, _, err := op.GetStorageAndActualPath(t.SrcStorageMp); err == nil {
            t.SrcStorage = srcStorage
        } else {
            return err
        }
        if t.DstStorage == nil {
            if dstStorage, _, err := op.GetStorageAndActualPath(t.DstStorageMp); err == nil {
                t.DstStorage = dstStorage
            } else {
                return err
            }
        }
    }
    t.ClearEndTime()
    t.SetStartTime(time.Now())
    defer func() { t.SetEndTime(time.Now()) }()
@@ -64,9 +78,8 @@ func (t *TransferTask) Run() error {
            return op.Put(t.Ctx(), t.DstStorage, t.DstActualPath, s, t.SetProgress)
        }
        return transferStdPath(t)
    } else {
        return transferObjPath(t)
    }
    return transferObjPath(t)
}

func (t *TransferTask) GetName() string {
@@ -10,6 +10,7 @@ import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/archive/tool"
    "github.com/OpenListTeam/OpenList/v4/internal/cache"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
@@ -17,12 +18,12 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/go-cache"
    gocache "github.com/OpenListTeam/go-cache"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)

var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaCache = gocache.NewMemCache(gocache.WithShards[*model.ArchiveMetaProvider](64))
var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]

func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
@@ -37,14 +38,14 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
        return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err)
    }
    if m.Expiration != nil {
        archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
        archiveMetaCache.Set(key, m, gocache.WithEx[*model.ArchiveMetaProvider](*m.Expiration))
    }
    return m, nil
}
if storage.Config().OnlyLinkMFile {
    meta, err := fn()
    return meta, err
}
// if storage.Config().NoLinkSingleflight {
// meta, err := fn()
// return meta, err
// }
if !args.Refresh {
    if meta, ok := archiveMetaCache.Get(key); ok {
        log.Debugf("use cache when get %s archive meta", path)
@@ -158,7 +159,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
    return obj, archiveMetaProvider, err
}

var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var archiveListCache = gocache.NewMemCache(gocache.WithShards[[]model.Obj](64))
var archiveListG singleflight.Group[[]model.Obj]

func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
@@ -199,7 +200,7 @@ func ListArchive(ctx context.Context, storage driver.Driver, path string, args m
    if !storage.Config().NoCache {
        if len(files) > 0 {
            log.Debugf("set cache: %s => %+v", key, files)
            archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
            archiveListCache.Set(key, files, gocache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
        } else {
            log.Debugf("del cache: %s", key)
            archiveListCache.Del(key)
@@ -354,75 +355,50 @@ func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args mo
    return nil, nil, errors.WithStack(errs.ObjectNotFound)
}

type extractLink struct {
    *model.Link
    Obj model.Obj
type objWithLink struct {
    link *model.Link
    obj  model.Obj
}

var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16))
var extractG = singleflight.Group[*extractLink]{Remember: true}
var extractCache = cache.NewKeyedCache[*objWithLink](5 * time.Minute)
var extractG = singleflight.Group[*objWithLink]{}

func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
    if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
        return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
    }
    key := stdpath.Join(Key(storage, path), args.InnerPath)
    if link, ok := extractCache.Get(key); ok {
        return link.Link, link.Obj, nil
    if ol, ok := extractCache.Get(key); ok {
        if ol.link.Expiration != nil || ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
            return ol.link, ol.obj, nil
        }
    }

    var forget any
    var linkM *extractLink
    fn := func() (*extractLink, error) {
        link, err := driverExtract(ctx, storage, path, args)
    fn := func() (*objWithLink, error) {
        ol, err := driverExtract(ctx, storage, path, args)
        if err != nil {
            return nil, errors.Wrapf(err, "failed extract archive")
        }
        if link.MFile != nil && forget != nil {
            linkM = link
            return nil, errLinkMFileCache
        if ol.link.Expiration != nil {
            extractCache.SetWithTTL(key, ol, *ol.link.Expiration)
        } else {
            extractCache.SetWithExpirable(key, ol, &ol.link.SyncClosers)
        }
        if link.Link.Expiration != nil {
            extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
        }
        link.AddIfCloser(forget)
        return link, nil
        return ol, nil
    }

    if storage.Config().OnlyLinkMFile {
        link, err := fn()
    for {
        ol, err, _ := extractG.Do(key, fn)
        if err != nil {
            return nil, nil, err
        }
        return link.Link, link.Obj, nil
    }

    forget = utils.CloseFunc(func() error {
        if forget != nil {
            forget = nil
            linkG.Forget(key)
        if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
            return ol.link, ol.obj, nil
        }
        return nil
    })
    link, err, _ := extractG.Do(key, fn)
    for err == nil && !link.AcquireReference() {
        link, err, _ = extractG.Do(key, fn)
    }
    if err == errLinkMFileCache {
        if linkM != nil {
            return linkM.Link, linkM.Obj, nil
        }
        forget = nil
        link, err = fn()
    }

    if err != nil {
        return nil, nil, err
    }
    return link.Link, link.Obj, nil
}

func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) {
func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*objWithLink, error) {
    storageAr, ok := storage.(driver.ArchiveReader)
    if !ok {
        return nil, errs.DriverExtractNotSupported
@@ -438,7 +414,7 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args
        return nil, errors.WithStack(errs.NotFile)
    }
    link, err := storageAr.Extract(ctx, archiveFile, args)
    return &extractLink{Link: link, Obj: extracted}, err
    return &objWithLink{link: link, obj: extracted}, err
}

type streamWithParent struct {
@@ -500,16 +476,16 @@ func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstD
    if err == nil {
        if len(newObjs) > 0 {
            for _, newObj := range newObjs {
                addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
                Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
            }
        } else if !utils.IsBool(lazyCache...) {
            DeleteCache(storage, dstDirPath)
            Cache.DeleteDirectory(storage, dstDirPath)
        }
    }
case driver.ArchiveDecompress:
    err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
    if err == nil && !utils.IsBool(lazyCache...) {
        DeleteCache(storage, dstDirPath)
        Cache.DeleteDirectory(storage, dstDirPath)
    }
default:
    return errs.NotImplement
257
internal/op/cache.go
Normal file
257
internal/op/cache.go
Normal file
@@ -0,0 +1,257 @@
|
||||
package op
|
||||
|
||||
import (
|
||||
stdpath "path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/cache"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
)
|
||||
|
||||
type CacheManager struct {
|
||||
dirCache *cache.KeyedCache[*directoryCache] // Cache for directory listings
|
||||
linkCache *cache.TypedCache[*objWithLink] // Cache for file links
|
||||
userCache *cache.KeyedCache[*model.User] // Cache for user data
|
||||
settingCache *cache.KeyedCache[any] // Cache for settings
|
||||
detailCache *cache.KeyedCache[*model.StorageDetails] // Cache for storage details
|
||||
}
|
||||
|
||||
func NewCacheManager() *CacheManager {
|
||||
return &CacheManager{
|
||||
dirCache: cache.NewKeyedCache[*directoryCache](time.Minute * 5),
|
||||
linkCache: cache.NewTypedCache[*objWithLink](time.Minute * 30),
|
||||
userCache: cache.NewKeyedCache[*model.User](time.Hour),
|
||||
settingCache: cache.NewKeyedCache[any](time.Hour),
|
||||
detailCache: cache.NewKeyedCache[*model.StorageDetails](time.Minute * 30),
|
||||
}
|
||||
}
|
||||
|
||||
// global instance
|
||||
var Cache = NewCacheManager()
|
||||
|
||||
func Key(storage driver.Driver, path string) string {
|
||||
return stdpath.Join(storage.GetStorage().MountPath, path)
|
||||
}

// update object in dirCache.
// if it's a directory, remove all its children from dirCache too.
// if it's a file, remove its link from linkCache.
func (cm *CacheManager) updateDirectoryObject(storage driver.Driver, dirPath string, oldObj model.Obj, newObj model.Obj) {
	key := Key(storage, dirPath)
	if !oldObj.IsDir() {
		cm.linkCache.DeleteKey(stdpath.Join(key, oldObj.GetName()))
		cm.linkCache.DeleteKey(stdpath.Join(key, newObj.GetName()))
	}
	if storage.Config().NoCache {
		return
	}

	if cache, exist := cm.dirCache.Get(key); exist {
		if oldObj.IsDir() {
			cm.deleteDirectoryTree(stdpath.Join(key, oldObj.GetName()))
		}
		cache.UpdateObject(oldObj.GetName(), newObj)
	}
}

// add new object to dirCache
func (cm *CacheManager) addDirectoryObject(storage driver.Driver, dirPath string, newObj model.Obj) {
	if storage.Config().NoCache {
		return
	}
	cache, exist := cm.dirCache.Get(Key(storage, dirPath))
	if exist {
		cache.UpdateObject(newObj.GetName(), newObj)
	}
}

// recursively delete directory and its children from dirCache
func (cm *CacheManager) DeleteDirectoryTree(storage driver.Driver, dirPath string) {
	if storage.Config().NoCache {
		return
	}
	cm.deleteDirectoryTree(Key(storage, dirPath))
}

func (cm *CacheManager) deleteDirectoryTree(key string) {
	if dirCache, exists := cm.dirCache.Take(key); exists {
		for _, obj := range dirCache.objs {
			if obj.IsDir() {
				cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName()))
			}
		}
	}
}

// remove directory from dirCache
func (cm *CacheManager) DeleteDirectory(storage driver.Driver, dirPath string) {
	if storage.Config().NoCache {
		return
	}
	cm.dirCache.Delete(Key(storage, dirPath))
}

// remove object from dirCache.
// if it's a directory, remove all its children from dirCache too.
// if it's a file, remove its link from linkCache.
func (cm *CacheManager) removeDirectoryObject(storage driver.Driver, dirPath string, obj model.Obj) {
	key := Key(storage, dirPath)
	if !obj.IsDir() {
		cm.linkCache.DeleteKey(stdpath.Join(key, obj.GetName()))
	}

	if storage.Config().NoCache {
		return
	}
	if cache, exist := cm.dirCache.Get(key); exist {
		if obj.IsDir() {
			cm.deleteDirectoryTree(stdpath.Join(key, obj.GetName()))
		}
		cache.RemoveObject(obj.GetName())
	}
}

// cache user data
func (cm *CacheManager) SetUser(username string, user *model.User) {
	cm.userCache.Set(username, user)
}

// cached user data
func (cm *CacheManager) GetUser(username string) (*model.User, bool) {
	return cm.userCache.Get(username)
}

// remove user data from cache
func (cm *CacheManager) DeleteUser(username string) {
	cm.userCache.Delete(username)
}

// caches setting
func (cm *CacheManager) SetSetting(key string, setting *model.SettingItem) {
	cm.settingCache.Set(key, setting)
}

// cached setting
func (cm *CacheManager) GetSetting(key string) (*model.SettingItem, bool) {
	if data, exists := cm.settingCache.Get(key); exists {
		if setting, ok := data.(*model.SettingItem); ok {
			return setting, true
		}
	}
	return nil, false
}

// cache setting groups
func (cm *CacheManager) SetSettingGroup(key string, settings []model.SettingItem) {
	cm.settingCache.Set(key, settings)
}

// cached setting group
func (cm *CacheManager) GetSettingGroup(key string) ([]model.SettingItem, bool) {
	if data, exists := cm.settingCache.Get(key); exists {
		if settings, ok := data.([]model.SettingItem); ok {
			return settings, true
		}
	}
	return nil, false
}

func (cm *CacheManager) SetStorageDetails(storage driver.Driver, details *model.StorageDetails) {
	if storage.Config().NoCache {
		return
	}
	expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
	cm.detailCache.SetWithTTL(storage.GetStorage().MountPath, details, expiration)
}

func (cm *CacheManager) GetStorageDetails(storage driver.Driver) (*model.StorageDetails, bool) {
	return cm.detailCache.Get(storage.GetStorage().MountPath)
}

func (cm *CacheManager) InvalidateStorageDetails(storage driver.Driver) {
	cm.detailCache.Delete(storage.GetStorage().MountPath)
}

// clears all caches
func (cm *CacheManager) ClearAll() {
	cm.dirCache.Clear()
	cm.linkCache.Clear()
	cm.userCache.Clear()
	cm.settingCache.Clear()
	cm.detailCache.Clear()
}

type directoryCache struct {
	objs   []model.Obj
	sorted []model.Obj
	mu     sync.RWMutex

	dirtyFlags uint8
}

const (
	dirtyRemove uint8 = 1 << iota // object removed: refresh the sorted copy, but no full sort/extract needed
	dirtyUpdate                   // object updated: a full sort + extract is required
)

func newDirectoryCache(objs []model.Obj) *directoryCache {
	sorted := make([]model.Obj, len(objs))
	copy(sorted, objs)
	return &directoryCache{
		objs:   objs,
		sorted: sorted,
	}
}

func (dc *directoryCache) RemoveObject(name string) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	for i, obj := range dc.objs {
		if obj.GetName() == name {
			dc.objs = append(dc.objs[:i], dc.objs[i+1:]...)
			dc.dirtyFlags |= dirtyRemove
			break
		}
	}
}

func (dc *directoryCache) UpdateObject(oldName string, newObj model.Obj) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	if oldName != "" {
		for i, obj := range dc.objs {
			if obj.GetName() == oldName {
				dc.objs[i] = newObj
				dc.dirtyFlags |= dirtyUpdate
				return
			}
		}
	}
	dc.objs = append(dc.objs, newObj)
	dc.dirtyFlags |= dirtyUpdate
}

func (dc *directoryCache) GetSortedObjects(meta driver.Meta) []model.Obj {
	dc.mu.RLock()
	if dc.dirtyFlags == 0 {
		dc.mu.RUnlock()
		return dc.sorted
	}
	dc.mu.RUnlock()
	dc.mu.Lock()
	defer dc.mu.Unlock()

	sorted := make([]model.Obj, len(dc.objs))
	copy(sorted, dc.objs)
	dc.sorted = sorted
	if dc.dirtyFlags&dirtyUpdate != 0 {
		storage := meta.GetStorage()
		if meta.Config().LocalSort {
			model.SortFiles(sorted, storage.OrderBy, storage.OrderDirection)
		}
		model.ExtractFolder(sorted, storage.ExtractFolder)
	}
	dc.dirtyFlags = 0
	return sorted
}
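
// Example sketch of the dirty-flag contract above, assuming a cache built from an
// already-sorted listing; "old.txt" and the arguments are placeholders. A removal
// only refreshes the sorted copy, while an update forces a re-sort on the next read.
func directoryCacheSketch(objs []model.Obj, newObj model.Obj, meta driver.Meta) {
	dc := newDirectoryCache(objs)
	dc.RemoveObject("old.txt")    // sets dirtyRemove: next read refreshes the copy without re-sorting
	_ = dc.GetSortedObjects(meta) // rebuilds dc.sorted and clears the flags
	dc.UpdateObject("", newObj)   // sets dirtyUpdate: next read re-sorts (when LocalSort) and re-extracts folders
	_ = dc.GetSortedObjects(meta)
}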

@@ -15,12 +15,27 @@ type DriverConstructor func() driver.Driver
var driverMap = map[string]DriverConstructor{}
var driverInfoMap = map[string]driver.Info{}

func RegisterDriver(driver DriverConstructor) {
func RegisterDriver(driver DriverConstructor) error {
	// log.Infof("register driver: [%s]", config.Name)
	tempDriver := driver()
	if tempDriver == nil {
		return errors.New("register driver is null")
	}
	tempConfig := tempDriver.Config()

	if driverMap[tempConfig.Name] != nil {
		return errors.New("driver is registered")
	}
	registerDriverItems(tempConfig, tempDriver.GetAddition())
	driverMap[tempConfig.Name] = driver
	return nil
}
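
// Example sketch: with RegisterDriver now returning an error, a caller (typically
// a driver package's init) can surface nil or duplicate registrations instead of
// silently ignoring them. The constructor argument stands in for a real driver.
func registerSketch(constructor DriverConstructor) error {
	// Non-nil when the constructor returns nil or the driver name is already registered.
	return RegisterDriver(constructor)
}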

func UnRegisterDriver(driver DriverConstructor) {
	if tempDriver := driver(); tempDriver != nil {
		tempConfig := tempDriver.Config()
		delete(driverMap, tempConfig.Name)
	}
}

func GetDriver(name string) (DriverConstructor, error) {
@@ -45,12 +60,18 @@ func GetDriverInfoMap() map[string]driver.Info {

func registerDriverItems(config driver.Config, addition driver.Additional) {
	// log.Debugf("addition of %s: %+v", config.Name, addition)
	tAddition := reflect.TypeOf(addition)
	for tAddition.Kind() == reflect.Pointer {
		tAddition = tAddition.Elem()
	var additionalItems []driver.Item
	if v, ok := addition.(driver.IGetItem); ok {
		additionalItems = v.GetItems()
	} else {
		tAddition := reflect.TypeOf(addition)
		for tAddition.Kind() == reflect.Pointer {
			tAddition = tAddition.Elem()
		}
		additionalItems = getAdditionalItems(tAddition, config.DefaultRoot)
	}

	mainItems := getMainItems(config)
	additionalItems := getAdditionalItems(tAddition, config.DefaultRoot)
	driverInfoMap[config.Name] = driver.Info{
		Common:     mainItems,
		Additional: additionalItems,

@@ -4,115 +4,20 @@ import (
	"context"
	stderrors "errors"
	stdpath "path"
	"slices"
	"strings"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/OpenListTeam/go-cache"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

// In order to facilitate adding some other things before and after file op

var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
var listG singleflight.Group[[]model.Obj]

func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) {
	key := Key(storage, path)
	objs, ok := listCache.Get(key)
	if ok {
		for i, obj := range objs {
			if obj.GetName() == newObj.GetName() {
				objs = slices.Delete(objs, i, i+1)
				break
			}
		}
		for i, obj := range objs {
			if obj.GetName() == oldObj.GetName() {
				objs[i] = newObj
				break
			}
		}
		listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
	}
}

func delCacheObj(storage driver.Driver, path string, obj model.Obj) {
	key := Key(storage, path)
	objs, ok := listCache.Get(key)
	if ok {
		for i, oldObj := range objs {
			if oldObj.GetName() == obj.GetName() {
				objs = append(objs[:i], objs[i+1:]...)
				break
			}
		}
		listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
	}
}

var addSortDebounceMap generic_sync.MapOf[string, func(func())]

func addCacheObj(storage driver.Driver, path string, newObj model.Obj) {
	key := Key(storage, path)
	objs, ok := listCache.Get(key)
	if ok {
		for i, obj := range objs {
			if obj.GetName() == newObj.GetName() {
				objs[i] = newObj
				return
			}
		}

		// Simple separation of files and folders
		if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() {
			objs = append(objs, newObj)
		} else {
			objs = append([]model.Obj{newObj}, objs...)
		}

		if storage.Config().LocalSort {
			debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute))
			log.Debug("addCacheObj: wait start sort")
			debounce(func() {
				log.Debug("addCacheObj: start sort")
				model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
				addSortDebounceMap.Delete(key)
			})
		}

		listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
	}
}

func ClearCache(storage driver.Driver, path string) {
	objs, ok := listCache.Get(Key(storage, path))
	if ok {
		for _, obj := range objs {
			if obj.IsDir() {
				ClearCache(storage, stdpath.Join(path, obj.GetName()))
			}
		}
	}
	listCache.Del(Key(storage, path))
}

func DeleteCache(storage driver.Driver, path string) {
	listCache.Del(Key(storage, path))
}

func Key(storage driver.Driver, path string) string {
	return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
}

// List files in storage, not contains virtual file
func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs) ([]model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -122,11 +27,12 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
	log.Debugf("op.List %s", path)
	key := Key(storage, path)
	if !args.Refresh {
		if files, ok := listCache.Get(key); ok {
		if dirCache, exists := Cache.dirCache.Get(key); exists {
			log.Debugf("use cache when list %s", path)
			return files, nil
			return dirCache.GetSortedObjects(storage), nil
		}
	}

	dir, err := GetUnwrap(ctx, storage, path)
	if err != nil {
		return nil, errors.WithMessage(err, "failed get dir")
@@ -135,6 +41,7 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
	if !dir.IsDir() {
		return nil, errors.WithStack(errs.NotFolder)
	}

	objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
		files, err := storage.List(ctx, dir, args)
		if err != nil {
@@ -162,10 +69,11 @@ func List(ctx context.Context, storage driver.Driver, path string, args model.Li
		if !storage.Config().NoCache {
			if len(files) > 0 {
				log.Debugf("set cache: %s => %+v", key, files)
				listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
				ttl := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
				Cache.dirCache.SetWithTTL(key, newDirectoryCache(files), ttl)
			} else {
				log.Debugf("del cache: %s", key)
				listCache.Del(key)
				Cache.deleteDirectoryTree(key)
			}
		}
		return files, nil
@@ -252,100 +160,68 @@ func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.O
	return model.UnwrapObj(obj), err
}

var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))
var linkG = singleflight.Group[*model.Link]{Remember: true}
var errLinkMFileCache = stderrors.New("ErrLinkMFileCache")
var linkG = singleflight.Group[*objWithLink]{}

// Link get link, if is an url. should have an expiry time
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
	}
	var (
		file model.Obj
		err  error
	)
	// use cache directly
	dir, name := stdpath.Split(stdpath.Join(storage.GetStorage().MountPath, path))
	if cacheFiles, ok := listCache.Get(strings.TrimSuffix(dir, "/")); ok {
		for _, f := range cacheFiles {
			if f.GetName() == name {
				file = model.UnwrapObj(f)
				break
			}
		}
	} else {
		if g, ok := storage.(driver.GetObjInfo); ok {
			file, err = g.GetObjInfo(ctx, path)
		} else {
			file, err = GetUnwrap(ctx, storage, path)

	mode := storage.Config().LinkCacheMode
	if mode == -1 {
		mode = storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(path)
	}
	typeKey := args.Type
	if mode&driver.LinkCacheIP == 1 {
		typeKey += "/" + args.IP
	}
	if mode&driver.LinkCacheUA == 1 {
		typeKey += "/" + args.Header.Get("User-Agent")
	}
	key := Key(storage, path)
	if ol, exists := Cache.linkCache.GetType(key, typeKey); exists {
		if ol.link.Expiration != nil ||
			ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
			return ol.link, ol.obj, nil
		}
	}
	if file == nil {

	fn := func() (*objWithLink, error) {
		file, err := GetUnwrap(ctx, storage, path)
		if err != nil {
			return nil, nil, errors.WithMessage(err, "failed to get file")
			return nil, errors.WithMessage(err, "failed to get file")
		}
		if file.IsDir() {
			return nil, errors.WithStack(errs.NotFile)
		}
		return nil, nil, errors.WithStack(errs.ObjectNotFound)
	}
	if file.IsDir() {
		return nil, nil, errors.WithStack(errs.NotFile)
	}

	key := stdpath.Join(Key(storage, path), args.Type)
	if link, ok := linkCache.Get(key); ok {
		return link, file, nil
	}

	var forget any
	var linkM *model.Link
	fn := func() (*model.Link, error) {
		link, err := storage.Link(ctx, file, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed get link")
		}
		if link.MFile != nil && forget != nil {
			linkM = link
			return nil, errLinkMFileCache
		}
		ol := &objWithLink{link: link, obj: file}
		if link.Expiration != nil {
			linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
			Cache.linkCache.SetTypeWithTTL(key, typeKey, ol, *link.Expiration)
		} else {
			Cache.linkCache.SetTypeWithExpirable(key, typeKey, ol, &link.SyncClosers)
		}
		link.AddIfCloser(forget)
		return link, nil
		return ol, nil
	}

	if storage.Config().OnlyLinkMFile {
		link, err := fn()
	retry := 0
	for {
		ol, err, _ := linkG.Do(key+"/"+typeKey, fn)
		if err != nil {
			return nil, nil, err
		}
		return link, file, err
	}

	forget = utils.CloseFunc(func() error {
		if forget != nil {
			forget = nil
			linkG.Forget(key)
		if ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
			if retry > 1 {
				log.Warnf("Link retry successed after %d times: %s %s", retry, key, typeKey)
			}
			return ol.link, ol.obj, nil
		}
		return nil
	})
	link, err, _ := linkG.Do(key, fn)
	for err == nil && !link.AcquireReference() {
		link, err, _ = linkG.Do(key, fn)
		retry++
	}

	if err == errLinkMFileCache {
		if linkM != nil {
			return linkM, file, nil
		}
		forget = nil
		link, err = fn()
	}

	if err != nil {
		return nil, nil, err
	}
	return link, file, nil
}
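
// Example sketch of how the per-type link-cache key is composed above, assuming
// driver.LinkCacheIP and driver.LinkCacheUA are single-bit flags (their values are
// not shown in this diff; the code above compares "== 1", which is equivalent only
// when the flag's value is 1). A driver that opts into both makes the cached link
// vary per client IP and per User-Agent.
func typeKeySketch(storage driver.Driver, args model.LinkArgs) string {
	mode := storage.Config().LinkCacheMode
	typeKey := args.Type
	if mode&driver.LinkCacheIP != 0 {
		typeKey += "/" + args.IP
	}
	if mode&driver.LinkCacheUA != 0 {
		typeKey += "/" + args.Header.Get("User-Agent")
	}
	return typeKey
}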

// Other api
@@ -365,7 +241,7 @@ func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) (
	}
}

var mkdirG singleflight.Group[interface{}]
var mkdirG singleflight.Group[any]

func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
@@ -373,7 +249,7 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
	}
	path = utils.FixAndCleanPath(path)
	key := Key(storage, path)
	_, err, _ := mkdirG.Do(key, func() (interface{}, error) {
	_, err, _ := mkdirG.Do(key, func() (any, error) {
		// check if dir exists
		f, err := GetUnwrap(ctx, storage, path)
		if err != nil {
@@ -395,15 +271,19 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
			newObj, err = s.MakeDir(ctx, parentDir, dirName)
			if err == nil {
				if newObj != nil {
					addCacheObj(storage, parentPath, model.WrapObjName(newObj))
					if !storage.Config().NoCache {
						if dirCache, exist := Cache.dirCache.Get(Key(storage, parentPath)); exist {
							dirCache.UpdateObject("", newObj)
						}
					}
				} else if !utils.IsBool(lazyCache...) {
					DeleteCache(storage, parentPath)
					Cache.DeleteDirectory(storage, parentPath)
				}
			}
		case driver.Mkdir:
			err = s.MakeDir(ctx, parentDir, dirName)
			if err == nil && !utils.IsBool(lazyCache...) {
				DeleteCache(storage, parentPath)
				Cache.DeleteDirectory(storage, parentPath)
			}
		default:
			return nil, errs.NotImplement
@@ -427,7 +307,11 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
	}
	srcPath = utils.FixAndCleanPath(srcPath)
	srcDirPath := stdpath.Dir(srcPath)
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	if dstDirPath == srcDirPath {
		return stderrors.New("move in place")
	}
	srcRawObj, err := Get(ctx, storage, srcPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get src object")
@@ -437,26 +321,25 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
	if err != nil {
		return errors.WithMessage(err, "failed to get dst dir")
	}
	srcDirPath := stdpath.Dir(srcPath)

	switch s := storage.(type) {
	case driver.MoveResult:
		var newObj model.Obj
		newObj, err = s.Move(ctx, srcObj, dstDir)
		if err == nil {
			delCacheObj(storage, srcDirPath, srcRawObj)
			Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
			if newObj != nil {
				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
				Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				DeleteCache(storage, dstDirPath)
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	case driver.Move:
		err = s.Move(ctx, srcObj, dstDir)
		if err == nil {
			delCacheObj(storage, srcDirPath, srcRawObj)
			Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
			if !utils.IsBool(lazyCache...) {
				DeleteCache(storage, dstDirPath)
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	default:
@@ -475,28 +358,29 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string,
		return errors.WithMessage(err, "failed to get src object")
	}
	srcObj := model.UnwrapObj(srcRawObj)
	srcDirPath := stdpath.Dir(srcPath)

	switch s := storage.(type) {
	case driver.RenameResult:
		var newObj model.Obj
		newObj, err = s.Rename(ctx, srcObj, dstName)
		if err == nil {
			srcDirPath := stdpath.Dir(srcPath)
			if newObj != nil {
				updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				DeleteCache(storage, srcDirPath)
				if srcRawObj.IsDir() {
					ClearCache(storage, srcPath)
				Cache.updateDirectoryObject(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
			} else {
				Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
				if !utils.IsBool(lazyCache...) {
					Cache.DeleteDirectory(storage, srcDirPath)
				}
			}
		}
	case driver.Rename:
		err = s.Rename(ctx, srcObj, dstName)
		if err == nil && !utils.IsBool(lazyCache...) {
			DeleteCache(storage, srcDirPath)
			if srcRawObj.IsDir() {
				ClearCache(storage, srcPath)
		if err == nil {
			srcDirPath := stdpath.Dir(srcPath)
			Cache.removeDirectoryObject(storage, srcDirPath, srcRawObj)
			if !utils.IsBool(lazyCache...) {
				Cache.DeleteDirectory(storage, srcDirPath)
			}
		}
	default:
@@ -512,10 +396,14 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
	}
	srcPath = utils.FixAndCleanPath(srcPath)
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	srcObj, err := GetUnwrap(ctx, storage, srcPath)
	if dstDirPath == stdpath.Dir(srcPath) {
		return stderrors.New("copy in place")
	}
	srcRawObj, err := Get(ctx, storage, srcPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get src object")
	}
	srcObj := model.UnwrapObj(srcRawObj)
	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get dst dir")
@@ -527,15 +415,17 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
		newObj, err = s.Copy(ctx, srcObj, dstDir)
		if err == nil {
			if newObj != nil {
				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
				Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				DeleteCache(storage, dstDirPath)
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	case driver.Copy:
		err = s.Copy(ctx, srcObj, dstDir)
		if err == nil && !utils.IsBool(lazyCache...) {
			DeleteCache(storage, dstDirPath)
		if err == nil {
			if !utils.IsBool(lazyCache...) {
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	default:
		return errs.NotImplement
@@ -566,11 +456,7 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
	case driver.Remove:
		err = s.Remove(ctx, model.UnwrapObj(rawObj))
		if err == nil {
			delCacheObj(storage, dirPath, rawObj)
			// clear folder cache recursively
			if rawObj.IsDir() {
				ClearCache(storage, path)
			}
			Cache.removeDirectoryObject(storage, dirPath, rawObj)
		}
	default:
		return errs.NotImplement
@@ -640,16 +526,20 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
		var newObj model.Obj
		newObj, err = s.Put(ctx, parentDir, file, up)
		if err == nil {
			Cache.linkCache.DeleteKey(Key(storage, dstPath))
			if newObj != nil {
				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
				Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				DeleteCache(storage, dstDirPath)
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	case driver.Put:
		err = s.Put(ctx, parentDir, file, up)
		if err == nil && !utils.IsBool(lazyCache...) {
			DeleteCache(storage, dstDirPath)
		if err == nil {
			Cache.linkCache.DeleteKey(Key(storage, dstPath))
			if !utils.IsBool(lazyCache...) {
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	default:
		return errs.NotImplement
@@ -664,13 +554,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
		}
	} else {
		// upload success, remove old obj
		err := Remove(ctx, storage, tempPath)
		if err != nil {
			return err
		} else {
			key := Key(storage, stdpath.Join(dstDirPath, file.GetName()))
			linkCache.Del(key)
		}
		err = Remove(ctx, storage, tempPath)
	}
}
return errors.WithStack(err)
@@ -681,7 +565,8 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
	}
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	_, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName))
	dstPath := stdpath.Join(dstDirPath, dstName)
	_, err := GetUnwrap(ctx, storage, dstPath)
	if err == nil {
		return errors.New("obj already exists")
	}
@@ -698,16 +583,20 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
		var newObj model.Obj
		newObj, err = s.PutURL(ctx, dstDir, dstName, url)
		if err == nil {
			Cache.linkCache.DeleteKey(Key(storage, dstPath))
			if newObj != nil {
				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
				Cache.addDirectoryObject(storage, dstDirPath, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				DeleteCache(storage, dstDirPath)
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	case driver.PutURL:
		err = s.PutURL(ctx, dstDir, dstName, url)
		if err == nil && !utils.IsBool(lazyCache...) {
			DeleteCache(storage, dstDirPath)
		if err == nil {
			Cache.linkCache.DeleteKey(Key(storage, dstPath))
			if !utils.IsBool(lazyCache...) {
				Cache.DeleteDirectory(storage, dstDirPath)
			}
		}
	default:
		return errs.NotImplement

@@ -5,26 +5,21 @@ import (
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/db"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/OpenListTeam/go-cache"
	"github.com/pkg/errors"
)

var settingCache = cache.NewMemCache(cache.WithShards[*model.SettingItem](4))
var settingG singleflight.Group[*model.SettingItem]
var settingCacheF = func(item *model.SettingItem) {
	settingCache.Set(item.Key, item, cache.WithEx[*model.SettingItem](time.Hour))
	Cache.SetSetting(item.Key, item)
}

var settingGroupCache = cache.NewMemCache(cache.WithShards[[]model.SettingItem](4))
var settingGroupG singleflight.Group[[]model.SettingItem]
var settingGroupCacheF = func(key string, item []model.SettingItem) {
	settingGroupCache.Set(key, item, cache.WithEx[[]model.SettingItem](time.Hour))
var settingGroupCacheF = func(key string, items []model.SettingItem) {
	Cache.SetSettingGroup(key, items)
}

var settingChangingCallbacks = make([]func(), 0)
@@ -34,8 +29,7 @@ func RegisterSettingChangingCallback(f func()) {
}

func SettingCacheUpdate() {
	settingCache.Clear()
	settingGroupCache.Clear()
	Cache.ClearAll()
	for _, cb := range settingChangingCallbacks {
		cb()
	}
@@ -60,7 +54,7 @@ func GetSettingsMap() map[string]string {
}

func GetSettingItems() ([]model.SettingItem, error) {
	if items, ok := settingGroupCache.Get("ALL_SETTING_ITEMS"); ok {
	if items, exists := Cache.GetSettingGroup("ALL_SETTING_ITEMS"); exists {
		return items, nil
	}
	items, err, _ := settingGroupG.Do("ALL_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -75,7 +69,7 @@ func GetSettingItems() ([]model.SettingItem, error) {
}

func GetPublicSettingItems() ([]model.SettingItem, error) {
	if items, ok := settingGroupCache.Get("ALL_PUBLIC_SETTING_ITEMS"); ok {
	if items, exists := Cache.GetSettingGroup("ALL_PUBLIC_SETTING_ITEMS"); exists {
		return items, nil
	}
	items, err, _ := settingGroupG.Do("ALL_PUBLIC_SETTING_ITEMS", func() ([]model.SettingItem, error) {
@@ -90,7 +84,7 @@ func GetPublicSettingItems() ([]model.SettingItem, error) {
}

func GetSettingItemByKey(key string) (*model.SettingItem, error) {
	if item, ok := settingCache.Get(key); ok {
	if item, exists := Cache.GetSetting(key); exists {
		return item, nil
	}

@@ -118,8 +112,8 @@ func GetSettingItemInKeys(keys []string) ([]model.SettingItem, error) {
}

func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {
	key := strconv.Itoa(group)
	if items, ok := settingGroupCache.Get(key); ok {
	key := fmt.Sprintf("GROUP_%d", group)
	if items, exists := Cache.GetSettingGroup(key); exists {
		return items, nil
	}
	items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {
@@ -135,11 +129,14 @@ func GetSettingItemsByGroup(group int) ([]model.SettingItem, error) {

func GetSettingItemsInGroups(groups []int) ([]model.SettingItem, error) {
	sort.Ints(groups)
	key := strings.Join(utils.MustSliceConvert(groups, func(i int) string {
		return strconv.Itoa(i)
	}), ",")

	if items, ok := settingGroupCache.Get(key); ok {
	keyParts := make([]string, 0, len(groups))
	for _, g := range groups {
		keyParts = append(keyParts, strconv.Itoa(g))
	}
	key := "GROUPS_" + strings.Join(keyParts, "_")

	if items, exists := Cache.GetSettingGroup(key); exists {
		return items, nil
	}
	items, err, _ := settingGroupG.Do(key, func() ([]model.SettingItem, error) {
@@ -165,10 +162,10 @@ func SaveSettingItems(items []model.SettingItem) error {
		}
	}
	err := db.SaveSettingItems(items)
	if err != nil {
	if err != nil {
		return fmt.Errorf("failed save setting: %+v", err)
	}
	SettingCacheUpdate()
	SettingCacheUpdate()
	return nil
}

@@ -137,3 +137,7 @@ func DeleteSharing(sid string) error {
	sharingCache.Del(sid)
	return db.DeleteSharingById(sid)
}

func DeleteSharingsByCreatorId(creatorId uint) error {
	return db.DeleteSharingsByCreatorId(creatorId)
}

@@ -15,6 +15,7 @@ import (
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/generic_sync"
	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
@@ -239,6 +240,8 @@ func UpdateStorage(ctx context.Context, storage model.Storage) error {
	if oldStorage.MountPath != storage.MountPath {
		// mount path renamed, need to drop the storage
		storagesMap.Delete(oldStorage.MountPath)
		Cache.DeleteDirectoryTree(storageDriver, "/")
		Cache.InvalidateStorageDetails(storageDriver)
	}
	if err != nil {
		return errors.WithMessage(err, "failed get storage driver")
@@ -259,6 +262,7 @@ func DeleteStorageById(ctx context.Context, id uint) error {
	if err != nil {
		return errors.WithMessage(err, "failed get storage")
	}
	var dropErr error = nil
	if !storage.Disabled {
		storageDriver, err := GetStorageByMountPath(storage.MountPath)
		if err != nil {
@@ -266,17 +270,19 @@ func DeleteStorageById(ctx context.Context, id uint) error {
		}
		// drop the storage in the driver
		if err := storageDriver.Drop(ctx); err != nil {
			return errors.Wrapf(err, "failed drop storage")
			dropErr = errors.Wrapf(err, "failed drop storage")
		}
		// delete the storage in the memory
		storagesMap.Delete(storage.MountPath)
		Cache.DeleteDirectoryTree(storageDriver, "/")
		Cache.InvalidateStorageDetails(storageDriver)
		go callStorageHooks("del", storageDriver)
	}
	// delete the storage in the database
	if err := db.DeleteStorageById(id); err != nil {
		return errors.WithMessage(err, "failed delete storage in database")
	}
	return nil
	return dropErr
}

// MustSaveDriverStorage call from specific driver
@@ -340,8 +346,8 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
	})
}

func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj {
	if utils.IsBool(hideDetails...) {
func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails, refresh bool) []model.Obj {
	if hideDetails {
		return GetStorageVirtualFilesByPath(prefix)
	}
	return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
@@ -354,7 +360,7 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
		}
		timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
		defer cancel()
		details, err := GetStorageDetails(timeoutCtx, d)
		details, err := GetStorageDetails(timeoutCtx, d, refresh)
		if err != nil {
			if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
				log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
@@ -439,7 +445,9 @@ func GetBalancedStorage(path string) driver.Driver {
	}
}

func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) {
var detailsG singleflight.Group[*model.StorageDetails]

func GetStorageDetails(ctx context.Context, storage driver.Driver, refresh ...bool) (*model.StorageDetails, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
	}
@@ -447,5 +455,18 @@ func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.Stora
	if !ok {
		return nil, errs.NotImplement
	}
	return wd.GetDetails(ctx)
	if !utils.IsBool(refresh...) {
		if ret, ok := Cache.GetStorageDetails(storage); ok {
			return ret, nil
		}
	}
	details, err, _ := detailsG.Do(storage.GetStorage().MountPath, func() (*model.StorageDetails, error) {
		ret, err := wd.GetDetails(ctx)
		if err != nil {
			return nil, err
		}
		Cache.SetStorageDetails(storage, ret)
		return ret, nil
	})
	return details, err
}
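
// Example sketch: callers that are happy with cached disk-usage numbers omit the
// refresh flag; passing true bypasses the detail cache and re-queries the driver,
// with concurrent callers deduplicated by detailsG.
func storageDetailsSketch(ctx context.Context, d driver.Driver) {
	cached, _ := GetStorageDetails(ctx, d)      // served from Cache when present
	fresh, _ := GetStorageDetails(ctx, d, true) // forces wd.GetDetails and refreshes the cache
	_, _ = cached, fresh
}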

@@ -1,17 +1,14 @@
package op

import (
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/db"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/OpenListTeam/go-cache"
	"github.com/pkg/errors"
)

var userCache = cache.NewMemCache(cache.WithShards[*model.User](2))
var userG singleflight.Group[*model.User]
var guestUser *model.User
var adminUser *model.User
@@ -46,7 +43,7 @@ func GetUserByName(username string) (*model.User, error) {
	if username == "" {
		return nil, errs.EmptyUsername
	}
	if user, ok := userCache.Get(username); ok {
	if user, exists := Cache.GetUser(username); exists {
		return user, nil
	}
	user, err, _ := userG.Do(username, func() (*model.User, error) {
@@ -54,7 +51,7 @@ func GetUserByName(username string) (*model.User, error) {
		if err != nil {
			return nil, err
		}
		userCache.Set(username, _user, cache.WithEx[*model.User](time.Hour))
		Cache.SetUser(username, _user)
		return _user, nil
	})
	return user, err
@@ -81,7 +78,10 @@ func DeleteUserById(id uint) error {
	if old.IsAdmin() || old.IsGuest() {
		return errs.DeleteAdminOrGuest
	}
	userCache.Del(old.Username)
	Cache.DeleteUser(old.Username)
	if err := DeleteSharingsByCreatorId(id); err != nil {
		return errors.WithMessage(err, "failed to delete user's sharings")
	}
	return db.DeleteUserById(id)
}

@@ -96,7 +96,7 @@ func UpdateUser(u *model.User) error {
	if u.IsGuest() {
		guestUser = nil
	}
	userCache.Del(old.Username)
	Cache.DeleteUser(old.Username)
	u.BasePath = utils.FixAndCleanPath(u.BasePath)
	return db.UpdateUser(u)
}
@@ -125,6 +125,6 @@ func DelUserCache(username string) error {
	if user.IsGuest() {
		guestUser = nil
	}
	userCache.Del(username)
	Cache.DeleteUser(username)
	return nil
}

909 internal/plugin/driver.go Normal file
@@ -0,0 +1,909 @@
package plugin

import (
	"context"
	stderrors "errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"runtime"
	"sync/atomic"

	"github.com/OpenListTeam/OpenList/v4/internal/alloc"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	plugin_warp "github.com/OpenListTeam/OpenList/v4/internal/plugin/warp"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	log "github.com/sirupsen/logrus"

	pool "github.com/jolestar/go-commons-pool/v2"

	manager_io "github.com/OpenListTeam/wazero-wasip2/manager/io"
	io_v_0_2 "github.com/OpenListTeam/wazero-wasip2/wasip2/io/v0_2"
	witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"

	"github.com/pkg/errors"

	"github.com/tetratelabs/wazero"
	"github.com/tetratelabs/wazero/api"
	"github.com/tetratelabs/wazero/experimental"
	"github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1"
)

var PluginPrefix = "openlist:plugin-driver/exports@0.1.0#"

// DriverPlugin is the *plugin* manager (one per .wasm file).
// It owns the shared wazero resources.
type DriverPlugin struct {
	plugin         *PluginInfo
	runtime        wazero.Runtime        // shared wazero runtime
	compiledModule wazero.CompiledModule // shared compiled module
	host           *DriverHost           // registered wasi host resources; self.driver is always nil here
}

// WasmInstance represents one reusable object in the pool.
// It holds an active WASM instance together with its host/guest APIs.
type WasmInstance struct {
	instance api.Module
	exports  *DriverHost
	guest    *witgo.Host
}

// Internal helper: dynamically calls the guest to fetch the driver properties.
func (d *WasmInstance) GetProperties(ctx context.Context) (plugin_warp.DriverProps, error) {
	var propertiesResult plugin_warp.DriverProps
	err := d.guest.Call(ctx, PluginPrefix+"get-properties", &propertiesResult)
	if err != nil {
		return plugin_warp.DriverProps{}, err
	}
	return propertiesResult, nil
}

// Internal helper: dynamically calls the guest to fetch the form metadata.
func (d *WasmInstance) GetFormMeta(ctx context.Context) ([]plugin_warp.FormField, error) {
	var formMeta []plugin_warp.FormField
	err := d.guest.Call(ctx, PluginPrefix+"get-form-meta", &formMeta)
	if err != nil {
		return nil, err
	}
	return formMeta, nil
}

func (i *WasmInstance) Close() error {
	return i.instance.Close(context.Background())
	// exports borrows the WasmDriver's resources and is not destroyed here
}

// Factory that creates and manages WasmInstance objects.
type driverPoolFactory struct {
	ctx            context.Context
	driver         *WasmDriver           // points at the WasmDriver (the state holder)
	compiledModule wazero.CompiledModule // shared module
	runtime        wazero.Runtime        // shared runtime
	host           *DriverHost
}

func (f *driverPoolFactory) makeObject(ctx context.Context) (*WasmInstance, error) {
	// 1. Configure the module
	moduleConfig := wazero.NewModuleConfig().
		WithFS(os.DirFS("/")).
		WithStartFunctions("_initialize").
		WithStdout(os.Stdout).
		WithStderr(os.Stderr).
		WithStdin(os.Stdin).
		// WithSysNanosleep().
		// WithSysNanotime().
		// WithSysWalltime().
		WithOsyield(func() {
			runtime.Gosched()
		}).
		WithName(f.driver.plugin.plugin.ID)

	instanceCtx := experimental.WithMemoryAllocator(f.ctx, experimental.MemoryAllocatorFunc(alloc.NewMemory))

	// 2. Instantiate the shared compiled module
	instance, err := f.runtime.InstantiateModule(instanceCtx, f.compiledModule, moduleConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate module: %w", err)
	}

	// 3. Create the guest API
	guest, err := witgo.NewHost(instance)
	if err != nil {
		instance.Close(ctx)
		return nil, err
	}

	// 5. Assemble the WasmInstance
	wasmInstance := &WasmInstance{
		instance: instance,
		exports:  f.host,
		guest:    guest,
	}
	return wasmInstance, nil
}

// MakeObject creates a new WasmInstance and places it into the pool.
func (f *driverPoolFactory) MakeObject(ctx context.Context) (*pool.PooledObject, error) {
	wasmInstance, err := f.makeObject(ctx)
	if err != nil {
		return nil, err
	}

	// Set the host-side handle, used for host-side calls such as fetching configuration
	if err := wasmInstance.guest.Call(ctx, PluginPrefix+"set-handle", nil, uint32(f.driver.ID)); err != nil {
		// Don't propagate the raw error here, to avoid dumping large stack data
		log.Errorln(err)
		wasmInstance.Close()
		return nil, errors.New("Internal error in plugin")
	}

	// Call the instance's init method
	ctxHandle := f.host.ContextManager().Add(ctx)
	defer f.host.ContextManager().Remove(ctxHandle)

	var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]
	if err := wasmInstance.guest.Call(ctx, PluginPrefix+"init", &result, ctxHandle); err != nil {
		// Don't propagate the raw error here, to avoid dumping large stack data
		log.Errorln(err)
		wasmInstance.Close()
		return nil, errors.New("Internal error in plugin")
	}
	if result.Err != nil {
		wasmInstance.Close()
		return nil, result.Err.ToError()
	}

	return pool.NewPooledObject(wasmInstance), nil
}

// DestroyObject destroys a WasmInstance held by the pool.
func (f *driverPoolFactory) DestroyObject(ctx context.Context, object *pool.PooledObject) error {
	instance := object.Object.(*WasmInstance)
	log.Debugf("Destroying pooled WASM instance for plugin: %s", f.driver.Storage.MountPath)

	var err error
	// 4. Call the instance's drop method
	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)
	var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]
	if err = instance.guest.Call(ctx, PluginPrefix+"drop", &result, ctxHandle); err != nil {
		// Don't propagate the raw error here, to avoid dumping large stack data
		log.Errorln(err)
		err = errors.New("Internal error in plugin")
	} else if result.Err != nil {
		err = result.Err.ToError()
	}

	return stderrors.Join(err, instance.Close())
}

// ValidateObject reports whether the instance is still usable.
func (f *driverPoolFactory) ValidateObject(ctx context.Context, object *pool.PooledObject) bool {
	instance := object.Object.(*WasmInstance)
	return instance.instance != nil && !instance.instance.IsClosed()
}

// ActivateObject is called when an instance is borrowed.
func (f *driverPoolFactory) ActivateObject(ctx context.Context, object *pool.PooledObject) error {
	return nil
}

// PassivateObject is called when an instance is returned.
func (f *driverPoolFactory) PassivateObject(ctx context.Context, object *pool.PooledObject) error {
	return nil
}

// WasmDriver is the *driver* instance (one per mount point).
// It manages the pool and the *state*.
type WasmDriver struct {
	model.Storage
	flag uint32

	plugin *DriverPlugin

	host *DriverHost
	pool *pool.ObjectPool

	config     plugin_warp.DriverProps
	additional plugin_warp.Additional
}

// NewDriverPlugin
// creates the plugin manager.
func NewDriverPlugin(ctx context.Context, plugin *PluginInfo) (*DriverPlugin, error) {
	wasmBytes, err := os.ReadFile(plugin.WasmPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read wasm file '%s': %w", plugin.WasmPath, err)
	}

	// 1. Create the shared wazero runtime
	rt := wazero.NewRuntime(ctx)

	// 2. Register the wasip1/wasip2 resources
	wasi_snapshot_preview1.MustInstantiate(ctx, rt)
	host := NewDriverHost()
	if err := host.Instantiate(ctx, rt); err != nil {
		return nil, err
	}

	// 3. Compile the shared module
	compiledModule, err := rt.CompileModule(ctx, wasmBytes)
	if err != nil {
		rt.Close(ctx)
		return nil, fmt.Errorf("failed to compile wasm module for plugin '%s': %w", plugin.ID, err)
	}

	// 4. Create the DriverPlugin instance (the manager)
	driverPlugin := &DriverPlugin{
		plugin:         plugin,
		runtime:        rt,
		compiledModule: compiledModule,
		host:           host,
	}
	return driverPlugin, nil
}
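
// Example sketch: loading one .wasm plugin and deriving a mountable driver from
// it, assuming a populated *PluginInfo (ID and WasmPath). One DriverPlugin holds
// the shared runtime and compiled module; NewWasmDriver below creates the
// per-mount instances.
func pluginLoadSketch(ctx context.Context, info *PluginInfo) (driver.Driver, error) {
	dp, err := NewDriverPlugin(ctx, info)
	if err != nil {
		return nil, err
	}
	return dp.NewWasmDriver()
}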

// Close shuts down the shared wazero runtime.
func (dp *DriverPlugin) Close(ctx context.Context) error {
	log.Infof("Closing plugin runtime for: %s", dp.plugin.ID)
	if dp.runtime != nil {
		return dp.runtime.Close(ctx)
	}
	return nil
}

// NewWasmDriver
// creates a *driver instance* (one per mount).
func (dp *DriverPlugin) NewWasmDriver() (driver.Driver, error) {
	ctx := context.Background() // Factory/Pool context

	// 1. Create the WasmDriver instance (the state holder)
	driver := &WasmDriver{
		plugin: dp, // points at the manager of the shared resources
		host:   dp.host,
	}

	type WasmDirverWarp struct {
		*WasmDriver
	}
	driverWarp := &WasmDirverWarp{driver}
	runtime.SetFinalizer(driverWarp, func(driver *WasmDirverWarp) {
		dp.host.driver.Remove(uint32(driver.ID))
	})

	// 3. Create the pool factory
	factory := &driverPoolFactory{
		ctx:            ctx,
		driver:         driver,
		compiledModule: dp.compiledModule,
		runtime:        dp.runtime,
		host:           dp.host,
	}

	// 4. Configure and create the pool
	poolConfig := pool.NewDefaultPoolConfig()
	poolConfig.MaxIdle = 2
	poolConfig.MaxTotal = 8
	poolConfig.TestOnBorrow = true
	poolConfig.BlockWhenExhausted = true
	driver.pool = pool.NewObjectPool(ctx, factory, poolConfig)

	// 5. Fetch the plugin information for the first time
	initConfig := func() error {
		instance, err := factory.makeObject(ctx)
		if err != nil {
			return err
		}
		defer instance.Close()

		props, err := instance.GetProperties(ctx)
		if err != nil {
			return fmt.Errorf("failed to refresh properties: %w", err)
		}
		driver.config = props

		forms, err := instance.GetFormMeta(ctx)
		if err != nil {
			return fmt.Errorf("failed to refresh forms: %w", err)
		}
		driver.additional.Forms = forms
		return nil
	}
	if err := initConfig(); err != nil {
		driver.Close(ctx) // construction failed, close the pool
		return nil, err
	}
	return driverWarp, nil
}
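
// Example sketch of the borrow/return discipline the exported methods below
// follow: every call borrows one pooled WASM instance, registers a context
// handle on the host, and releases both when done.
func (d *WasmDriver) borrowSketch(ctx context.Context) error {
	obj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := obj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, obj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)

	// ... a guest.Call with ctxHandle goes here, as in GetRoot/Get/List/Link below.
	return nil
}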
|
||||
|
||||
// Close (在 WasmDriver 上) 关闭此*实例*的池
|
||||
func (d *WasmDriver) Close(ctx context.Context) error {
|
||||
log.Infof("Closing pool for driver: %s", d.MountPath)
|
||||
if d.pool != nil {
|
||||
d.pool.Close(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleError 处理 wasm 驱动返回的错误
|
||||
func (d *WasmDriver) handleError(errcode *plugin_warp.ErrCode) error {
|
||||
if errcode != nil {
|
||||
err := errcode.ToError()
|
||||
if errcode.Unauthorized != nil && d.Status == op.WORK {
|
||||
if atomic.CompareAndSwapUint32(&d.flag, 0, 1) {
|
||||
d.Status = err.Error()
|
||||
op.MustSaveDriverStorage(d)
|
||||
atomic.StoreUint32(&d.flag, 0)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// // 内部函数,用于动态调用 Guest 以获取属性
|
||||
// func (d *WasmDriver) getProperties(ctx context.Context) (plugin_warp.DriverProps, error) {
|
||||
// obj, err := d.pool.BorrowObject(ctx)
|
||||
// if err != nil {
|
||||
// return plugin_warp.DriverProps{}, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
// }
|
||||
// instance := obj.(*WasmInstance)
|
||||
// defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
// return instance.GetProperties(ctx)
|
||||
// }
|
||||
|
||||
// // 内部函数,用于动态调用 Guest 以获取表单
|
||||
// func (d *WasmDriver) getFormMeta(ctx context.Context) ([]plugin_warp.FormField, error) {
|
||||
// obj, err := d.pool.BorrowObject(ctx)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
// }
|
||||
// instance := obj.(*WasmInstance)
|
||||
// defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
// return instance.GetFormMeta(ctx)
|
||||
// }
|
||||
|
||||
// Config 返回缓存的配置
|
||||
func (d *WasmDriver) Config() driver.Config {
|
||||
// props, err := d.getProperties(context.Background())
|
||||
// if err != nil {
|
||||
// log.Errorf("failed to get properties: %s", err)
|
||||
// return d.config.ToConfig()
|
||||
// }
|
||||
|
||||
// d.config = props
|
||||
return d.config.ToConfig()
|
||||
}
|
||||
|
||||
func (d *WasmDriver) GetAddition() driver.Additional {
|
||||
// newFormMeta, err := d.getFormMeta(context.Background())
|
||||
// if err != nil {
|
||||
// log.Errorf("failed to get form meta: %s", err)
|
||||
// return &d.additional
|
||||
// }
|
||||
// d.additional.Forms = newFormMeta
|
||||
return &d.additional
|
||||
}
|
||||
|
||||
// Init 初始化驱动
|
||||
func (d *WasmDriver) Init(ctx context.Context) error {
|
||||
log.Debugf("Re-initializing pool for plugin %s by clearing idle.", d.MountPath)
|
||||
d.pool.Clear(ctx)
|
||||
|
||||
// 注册
|
||||
d.host.driver.Set(uint32(d.ID), d)
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to pre-warm pool after re-init: %w", err)
|
||||
}
|
||||
d.pool.ReturnObject(ctx, obj)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Drop 销毁驱动 (由 Guest 调用)
|
||||
func (d *WasmDriver) Drop(ctx context.Context) error {
|
||||
log.Infof("Guest triggered Drop, closing pool for driver: %s", d.MountPath)
|
||||
return d.Close(ctx)
|
||||
}
|
||||
|
||||
func (d *WasmDriver) GetRoot(ctx context.Context) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.ListFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
var result witgo.Result[plugin_warp.Object, plugin_warp.ErrCode]
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"get-root", &result, ctxHandle)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok, nil
|
||||
}
|
||||
|
||||
// GetFile 获取文件信息
|
||||
func (d *WasmDriver) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
if !d.config.Capabilitys.GetFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
var result witgo.Result[plugin_warp.Object, plugin_warp.ErrCode]
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"get-file", &result, ctxHandle, path)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
|
||||
return result.Ok, nil
|
||||
}
|
||||
|
||||
// List 列出文件
|
||||
func (d *WasmDriver) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
if !d.config.Capabilitys.ListFile {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
obj, err := d.pool.BorrowObject(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
|
||||
}
|
||||
instance := obj.(*WasmInstance)
|
||||
defer d.pool.ReturnObject(ctx, obj)
|
||||
|
||||
ctxHandle := instance.exports.ContextManager().Add(ctx)
|
||||
defer instance.exports.ContextManager().Remove(ctxHandle)
|
||||
|
||||
robj := dir.(*plugin_warp.Object)
|
||||
var result witgo.Result[[]plugin_warp.Object, plugin_warp.ErrCode]
|
||||
|
||||
param := struct {
|
||||
Handle plugin_warp.Context
|
||||
Obj *plugin_warp.Object
|
||||
}{ctxHandle, robj}
|
||||
err = instance.guest.Call(ctx, PluginPrefix+"list-files", &result, param)
|
||||
if err != nil {
|
||||
if errors.Is(err, witgo.ErrNotExportFunc) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
// 这里就不返回错误了,避免大量栈数据
|
||||
log.Errorln(err)
|
||||
return nil, errors.New("Internal error in plugin")
|
||||
}
|
||||
|
||||
if result.Err != nil {
|
||||
return nil, d.handleError(result.Err)
|
||||
}
|
||||
return utils.MustSliceConvert(*result.Ok, func(o plugin_warp.Object) model.Obj { return &o }), nil
|
||||
}
|
||||
|
||||
// Link returns either a direct URL for the file or a rate-limited range reader
// that streams the requested range out of the plugin.
func (d *WasmDriver) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if !d.config.Capabilitys.LinkFile {
		return nil, errs.NotImplement
	}

	// These resources are managed entirely on the host side.
	// TODO: consider binding the lifetime of the created stream to this scope so that a forgotten Close cannot leak resources.

	pobj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := pobj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, pobj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)
	headersHandle := instance.exports.HTTPManager().Fields.Add(args.Header)
	defer instance.exports.HTTPManager().Fields.Remove(headersHandle)

	obj := file.(*plugin_warp.Object)

	var result witgo.Result[plugin_warp.LinkResult, plugin_warp.ErrCode]

	param := struct {
		Handle   plugin_warp.Context
		Obj      *plugin_warp.Object
		LinkArgs plugin_warp.LinkArgs
	}{ctxHandle, obj, plugin_warp.LinkArgs{IP: args.IP, Header: headersHandle}}
	err = instance.guest.Call(ctx, PluginPrefix+"link-file", &result, param)
	if err != nil {
		if errors.Is(err, witgo.ErrNotExportFunc) {
			return nil, errs.NotImplement
		}
		// Log the raw error instead of returning it, to avoid propagating large stack payloads.
		log.Errorln(err)
		return nil, errors.New("Internal error in plugin")
	}
	if result.Err != nil {
		return nil, d.handleError(result.Err)
	}

	// Overwrite the cached object with the one returned by the plugin, if any.
	if result.Ok.File.IsSome() {
		*obj = *result.Ok.File.Some
	}

	if result.Ok.Resource.Direct != nil {
		direct := result.Ok.Resource.Direct
		header, _ := instance.exports.HTTPManager().Fields.Pop(direct.Header)
		link := &model.Link{URL: direct.Url, Header: http.Header(header)}
		if direct.Expiratcion.IsSome() {
			exp := direct.Expiratcion.Some.ToDuration()
			link.Expiration = &exp
		}
		return link, nil
	}

	if result.Ok.Resource.RangeStream != nil {
		fileSize := obj.GetSize()
		return &model.Link{
			RangeReader: stream.RateLimitRangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
				var size uint64
				if httpRange.Length < 0 || httpRange.Start+httpRange.Length > fileSize {
					size = uint64(fileSize - httpRange.Start)
				} else {
					size = uint64(httpRange.Length)
				}

				pobj, err := d.pool.BorrowObject(ctx)
				if err != nil {
					return nil, err
				}
				instance := pobj.(*WasmInstance)

				r, w := io.Pipe()
				cw := &checkWriter{W: w, N: size}
				streamHandle := instance.exports.StreamManager().Add(&manager_io.Stream{
					Writer:      cw,
					CheckWriter: cw,
				})
				ctxHandle := instance.exports.ContextManager().Add(ctx)

				type RangeSpec struct {
					Offset uint64
					Size   uint64
					Stream io_v_0_2.OutputStream
				}

				var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]
				param := struct {
					Handle    plugin_warp.Context
					Obj       *plugin_warp.Object
					LinkArgs  plugin_warp.LinkArgs
					RangeSpec RangeSpec
				}{ctxHandle, obj, plugin_warp.LinkArgs{IP: args.IP, Header: headersHandle}, RangeSpec{Offset: uint64(httpRange.Start), Size: size, Stream: streamHandle}}

				go func() {
					defer d.pool.ReturnObject(ctx, instance)
					defer instance.exports.ContextManager().Remove(ctxHandle)

					if err := instance.guest.Call(ctx, PluginPrefix+"link-range", &result, param); err != nil {
						if errors.Is(err, witgo.ErrNotExportFunc) {
							w.CloseWithError(errs.NotImplement)
							return
						}
						// Log the raw error instead of returning it, to avoid propagating large stack payloads.
						log.Errorln(err)
						w.CloseWithError(err)
						return
					}

					if result.Err != nil {
						w.CloseWithError(d.handleError(result.Err))
						return
					}
					// Signal EOF to the reader once the guest has finished writing the range.
					w.Close()
				}()

				return utils.NewReadCloser(r, func() error {
					instance.exports.StreamManager().Remove(streamHandle)
					return r.Close()
				}), nil
			}),
		}, nil
	}

	return nil, errs.NotImplement
}

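// Worked example (hypothetical numbers): for a 1000-byte object, a request of
// http_range.Range{Start: 100, Length: -1} takes the first branch and yields
// size = 1000 - 100 = 900, while Range{Start: 0, Length: 64} keeps the
// requested size = 64. The checkWriter below then caps the guest's writes at
// exactly that many bytes.
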
// checkWriter caps the total number of bytes written to W at N and reports the
// remaining budget through CheckWrite.
type checkWriter struct {
	W io.Writer
	N uint64
}

func (c *checkWriter) Write(p []byte) (n int, err error) {
	if c.N <= 0 {
		return 0, stderrors.New("write limit exceeded")
	}
	n, err = c.W.Write(p[:min(uint64(len(p)), c.N)])
	c.N -= uint64(n)
	return
}

// CheckWrite reports the remaining write budget, never returning less than 1.
func (c *checkWriter) CheckWrite() uint64 {
	return max(c.N, 1)
}

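// Minimal standalone sketch of the write-limit behaviour (assumes a 10-byte
// budget; io.Discard stands in for the pipe writer used above):
//
//	cw := &checkWriter{W: io.Discard, N: 10}
//	n, _ := cw.Write(make([]byte, 16)) // n == 10, budget exhausted
//	_, err := cw.Write([]byte("x"))    // err: "write limit exceeded"
//	_ = cw.CheckWrite()                // reports 1, never 0
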
func (d *WasmDriver) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if !d.config.Capabilitys.MkdirFile {
		return nil, errs.NotImplement
	}

	obj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := obj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, obj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)

	robj := parentDir.(*plugin_warp.Object)
	var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]

	if err := instance.guest.Call(ctx, PluginPrefix+"make-dir", &result, ctxHandle, robj, dirName); err != nil {
		if errors.Is(err, witgo.ErrNotExportFunc) {
			return nil, errs.NotImplement
		}
		// Log the raw error instead of returning it, to avoid propagating large stack payloads.
		log.Errorln(err)
		return nil, errors.New("Internal error in plugin")
	}

	if result.Err != nil {
		return nil, d.handleError(result.Err)
	}

	return result.Ok.Some, nil
}

func (d *WasmDriver) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	if !d.config.Capabilitys.RenameFile {
		return nil, errs.NotImplement
	}

	obj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := obj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, obj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)

	robj := srcObj.(*plugin_warp.Object)
	var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]

	err = instance.guest.Call(ctx, PluginPrefix+"rename-file", &result, ctxHandle, robj, newName)
	if err != nil {
		if errors.Is(err, witgo.ErrNotExportFunc) {
			return nil, errs.NotImplement
		}
		// Log the raw error instead of returning it, to avoid propagating large stack payloads.
		log.Errorln(err)
		return nil, errors.New("Internal error in plugin")
	}

	if result.Err != nil {
		return nil, d.handleError(result.Err)
	}

	return result.Ok.Some, nil
}

func (d *WasmDriver) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if !d.config.Capabilitys.MoveFile {
		return nil, errs.NotImplement
	}

	obj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := obj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, obj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)

	srcobj := srcObj.(*plugin_warp.Object)
	dstobj := dstDir.(*plugin_warp.Object)

	var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]

	err = instance.guest.Call(ctx, PluginPrefix+"move-file", &result, ctxHandle, srcobj, dstobj)
	if err != nil {
		if errors.Is(err, witgo.ErrNotExportFunc) {
			return nil, errs.NotImplement
		}
		// Log the raw error instead of returning it, to avoid propagating large stack payloads.
		log.Errorln(err)
		return nil, errors.New("Internal error in plugin")
	}

	if result.Err != nil {
		return nil, d.handleError(result.Err)
	}

	return result.Ok.Some, nil
}

func (d *WasmDriver) Remove(ctx context.Context, srcObj model.Obj) error {
	if !d.config.Capabilitys.RemoveFile {
		return errs.NotImplement
	}

	obj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := obj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, obj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)

	srcobj := srcObj.(*plugin_warp.Object)

	var result witgo.Result[witgo.Unit, plugin_warp.ErrCode]

	err = instance.guest.Call(ctx, PluginPrefix+"remove-file", &result, ctxHandle, srcobj)
	if err != nil {
		if errors.Is(err, witgo.ErrNotExportFunc) {
			return errs.NotImplement
		}
		// Log the raw error instead of returning it, to avoid propagating large stack payloads.
		log.Errorln(err)
		return errors.New("Internal error in plugin")
	}

	if result.Err != nil {
		return d.handleError(result.Err)
	}

	return nil
}

func (d *WasmDriver) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if !d.config.Capabilitys.CopyFile {
		return nil, errs.NotImplement
	}

	obj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := obj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, obj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)

	srcobj := srcObj.(*plugin_warp.Object)
	dstobj := dstDir.(*plugin_warp.Object)

	var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]

	err = instance.guest.Call(ctx, PluginPrefix+"copy-file", &result, ctxHandle, srcobj, dstobj)
	if err != nil {
		if errors.Is(err, witgo.ErrNotExportFunc) {
			return nil, errs.NotImplement
		}
		// Log the raw error instead of returning it, to avoid propagating large stack payloads.
		log.Errorln(err)
		return nil, errors.New("Internal error in plugin")
	}

	if result.Err != nil {
		return nil, d.handleError(result.Err)
	}

	return result.Ok.Some, nil
}

func (d *WasmDriver) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	if !d.config.Capabilitys.UploadFile {
		return nil, errs.NotImplement
	}

	obj, err := d.pool.BorrowObject(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to borrow wasm instance: %w", err)
	}
	instance := obj.(*WasmInstance)
	defer d.pool.ReturnObject(ctx, obj)

	ctxHandle := instance.exports.ContextManager().Add(ctx)
	defer instance.exports.ContextManager().Remove(ctxHandle)

	stream := instance.exports.uploads.Add(&plugin_warp.UploadReadableType{FileStreamer: file, UpdateProgress: up})
	defer instance.exports.uploads.Remove(stream)

	dstobj := dstDir.(*plugin_warp.Object)

	var result witgo.Result[witgo.Option[plugin_warp.Object], plugin_warp.ErrCode]

	exist := witgo.None[plugin_warp.Object]()
	if file.GetExist() != nil {
		exist = witgo.Some(plugin_warp.ConvertObjToObject(file.GetExist()))
	}

	uploadReq := &plugin_warp.UploadRequest{
		Target:  plugin_warp.ConvertObjToObject(file),
		Content: stream,
		Exist:   exist,
	}

	err = instance.guest.Call(ctx, PluginPrefix+"upload-file", &result, ctxHandle, dstobj, uploadReq)
	if err != nil {
		if errors.Is(err, witgo.ErrNotExportFunc) {
			return nil, errs.NotImplement
		}
		// Log the raw error instead of returning it, to avoid propagating large stack payloads.
		log.Errorln(err)
		return nil, errors.New("Internal error in plugin")
	}

	if result.Err != nil {
		return nil, d.handleError(result.Err)
	}

	return result.Ok.Some, nil
}

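// Upload flow (descriptive sketch): Put wraps the incoming FileStreamer in an
// UploadReadableType and hands its handle to the guest as the Content of the
// upload-request. The guest can consume it once as a plain stream
// ("readable.streams"), peek at a byte range ("readable.peek"), or split it
// into fixed-size parts ("readable.chunks" / "readable.next-chunk") and rewind
// a failed part with "readable.chunk-reset"; hashes are served on demand via
// "readable.get-hasher". The host implementations of these methods live in
// internal/plugin/host.go below.
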
var _ driver.Meta = (*WasmDriver)(nil)
var _ driver.Reader = (*WasmDriver)(nil)
var _ driver.Getter = (*WasmDriver)(nil)
var _ driver.GetRooter = (*WasmDriver)(nil)
var _ driver.MkdirResult = (*WasmDriver)(nil)
var _ driver.RenameResult = (*WasmDriver)(nil)
var _ driver.MoveResult = (*WasmDriver)(nil)
var _ driver.Remove = (*WasmDriver)(nil)
var _ driver.CopyResult = (*WasmDriver)(nil)
var _ driver.PutResult = (*WasmDriver)(nil)
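
// The blank-identifier assignments above are compile-time assertions: if
// *WasmDriver stops satisfying any of these driver interfaces, the package no
// longer builds. The same pattern works for any type/interface pair, e.g.:
//
//	var _ io.Writer = (*checkWriter)(nil)
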
internal/plugin/host.go (new file, 284 lines)
@@ -0,0 +1,284 @@
package plugin

import (
	"context"
	"io"
	"maps"

	log "github.com/sirupsen/logrus"
	"github.com/tetratelabs/wazero"

	"github.com/OpenListTeam/OpenList/v4/internal/op"
	plugin_warp "github.com/OpenListTeam/OpenList/v4/internal/plugin/warp"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"

	manager_io "github.com/OpenListTeam/wazero-wasip2/manager/io"
	"github.com/OpenListTeam/wazero-wasip2/wasip2"
	wasi_clocks "github.com/OpenListTeam/wazero-wasip2/wasip2/clocks"
	wasi_filesystem "github.com/OpenListTeam/wazero-wasip2/wasip2/filesystem"
	wasi_http "github.com/OpenListTeam/wazero-wasip2/wasip2/http"
	wasi_io "github.com/OpenListTeam/wazero-wasip2/wasip2/io"
	io_v0_2 "github.com/OpenListTeam/wazero-wasip2/wasip2/io/v0_2"
	wasi_random "github.com/OpenListTeam/wazero-wasip2/wasip2/random"
	wasi_sockets "github.com/OpenListTeam/wazero-wasip2/wasip2/sockets"
	witgo "github.com/OpenListTeam/wazero-wasip2/wit-go"
)

type DriverHost struct {
	*wasip2.Host
	contexts *plugin_warp.ContextManaget
	uploads  *plugin_warp.UploadReadableManager

	driver *witgo.ResourceManager[*WasmDriver]
}

func NewDriverHost() *DriverHost {
	wasip2Host := wasip2.NewHost(
		wasi_io.Module("0.2.2"),
		wasi_filesystem.Module("0.2.2"),
		wasi_random.Module("0.2.2"),
		wasi_clocks.Module("0.2.2"),
		wasi_sockets.Module("0.2.0"),
		wasi_http.Module("0.2.0"),
	)
	return &DriverHost{
		Host:     wasip2Host,
		contexts: plugin_warp.NewContextManager(),
		uploads:  plugin_warp.NewUploadManager(),
		driver:   witgo.NewResourceManager[*WasmDriver](nil),
	}
}

func (host *DriverHost) Instantiate(ctx context.Context, rt wazero.Runtime) error {
	if err := host.Host.Instantiate(ctx, rt); err != nil {
		return err
	}

	module := rt.NewHostModuleBuilder("openlist:plugin-driver/host@0.1.0")
	exports := witgo.NewExporter(module)

	exports.Export("log", host.Log)
	exports.Export("load-config", host.LoadConfig)
	exports.Export("save-config", host.SaveConfig)
	if _, err := exports.Instantiate(ctx); err != nil {
		return err
	}

	moduleType := rt.NewHostModuleBuilder("openlist:plugin-driver/types@0.1.0")
	exportsType := witgo.NewExporter(moduleType)
	exportsType.Export("[resource-drop]cancellable", host.DropContext)
	exportsType.Export("[method]cancellable.subscribe", host.Subscribe)

	exportsType.Export("[resource-drop]readable", host.DropReadable)
	exportsType.Export("[method]readable.streams", host.Stream)
	exportsType.Export("[method]readable.peek", host.StreamPeek)
	exportsType.Export("[method]readable.chunks", host.Chunks)
	exportsType.Export("[method]readable.next-chunk", host.NextChunk)
	exportsType.Export("[method]readable.chunk-reset", host.ChunkReset)
	exportsType.Export("[method]readable.get-hasher", host.GetHasher)
	exportsType.Export("[method]readable.update-progress", host.UpdateProgress)
	if _, err := exportsType.Instantiate(ctx); err != nil {
		return err
	}

	return nil
}

func (host *DriverHost) ContextManager() *plugin_warp.ContextManaget {
	return host.contexts
}

func (host *DriverHost) UploadManager() *plugin_warp.UploadReadableManager {
	return host.uploads
}

func (host *DriverHost) DropReadable(this plugin_warp.UploadReadable) {
	host.uploads.Remove(this)
}

func (host *DriverHost) DropContext(this plugin_warp.Context) {
	host.contexts.Remove(this)
}

// log: func(level: log-level, message: string);
func (host *DriverHost) Log(level plugin_warp.LogLevel, message string) {
	if level.Debug != nil {
		log.Debugln(message)
	} else if level.Error != nil {
		log.Errorln(message)
	} else if level.Info != nil {
		log.Infoln(message)
	} else if level.Warn != nil {
		log.Warnln(message)
	} else {
		log.Traceln(message)
	}
}

// load-config: func(driver: u32) -> result<list<u8>, string>;
func (host *DriverHost) LoadConfig(driverHandle uint32) witgo.Result[[]byte, string] {
	driver, ok := host.driver.Get(driverHandle)
	if !ok || driver == nil {
		return witgo.Err[[]byte]("host driver is nil, config requested too early")
	}
	return witgo.Ok[[]byte, string](driver.additional.Bytes())
}

// save-config: func(driver: u32, config: list<u8>) -> result<_, string>;
func (host *DriverHost) SaveConfig(driverHandle uint32, config []byte) witgo.Result[witgo.Unit, string] {
	driver, ok := host.driver.Get(driverHandle)
	if !ok || driver == nil {
		return witgo.Err[witgo.Unit]("host driver is nil, config saved too early")
	}

	driver.additional.SetBytes(config)
	op.MustSaveDriverStorage(driver)
	return witgo.Ok[witgo.Unit, string](witgo.Unit{})
}

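// Host-side sketch of the config round trip (the handle value 1 is purely
// hypothetical): the guest sees the driver's opaque Addition blob as raw bytes
// and gets an error string back if the driver handle is not registered yet.
//
//	if res := host.LoadConfig(1); res.Err != nil {
//		log.Warnln("plugin requested config before the driver was registered")
//	}
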
// streams: func() -> result<input-stream, string>;
func (host *DriverHost) Stream(this plugin_warp.UploadReadable) witgo.Result[io_v0_2.InputStream, string] {
	upload, ok := host.uploads.Get(this)
	if !ok {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::Stream: ErrorCodeBadDescriptor")
	}
	if upload.StreamConsume {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::Stream: StreamConsume")
	}

	upload.StreamConsume = true
	streamHandle := host.StreamManager().Add(&manager_io.Stream{Reader: upload, Seeker: upload.GetFile()})
	return witgo.Ok[io_v0_2.InputStream, string](streamHandle)
}

// peek: func(offset: u64, len: u64) -> result<input-stream, string>;
func (host *DriverHost) StreamPeek(this plugin_warp.UploadReadable, offset uint64, len uint64) witgo.Result[io_v0_2.InputStream, string] {
	upload, ok := host.uploads.Get(this)
	if !ok {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::StreamPeek: ErrorCodeBadDescriptor")
	}
	if upload.StreamConsume {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::StreamPeek: StreamConsume")
	}

	peekReader, err := upload.RangeRead(http_range.Range{Start: int64(offset), Length: int64(len)})
	if err != nil {
		return witgo.Err[io_v0_2.InputStream](err.Error())
	}
	seeker, _ := peekReader.(io.Seeker)
	streamHandle := host.StreamManager().Add(&manager_io.Stream{Reader: peekReader, Seeker: seeker})
	return witgo.Ok[io_v0_2.InputStream, string](streamHandle)
}

// chunks: func(len: u32) -> result<u32, string>;
func (host *DriverHost) Chunks(this plugin_warp.UploadReadable, len uint32) witgo.Result[uint32, string] {
	upload, ok := host.uploads.Get(this)
	if !ok {
		return witgo.Err[uint32]("UploadReadable::Chunks: ErrorCodeBadDescriptor")
	}
	if upload.StreamConsume {
		return witgo.Err[uint32]("UploadReadable::Chunks: StreamConsume")
	}
	if upload.SectionReader != nil {
		return witgo.Err[uint32]("UploadReadable::Chunks: chunk reader already exists")
	}

	ss, err := stream.NewStreamSectionReader(upload, int(len), &upload.UpdateProgress)
	if err != nil {
		return witgo.Err[uint32](err.Error())
	}
	chunkSize := int64(len)
	upload.SectionReader = &plugin_warp.StreamSectionReader{StreamSectionReaderIF: ss, CunketSize: chunkSize}
	return witgo.Ok[uint32, string](uint32((upload.GetSize() + chunkSize - 1) / chunkSize))
}

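// Chunk-count arithmetic (illustrative numbers): the ceiling division
// (size + chunkSize - 1) / chunkSize means a 10 MiB upload split with
// len = 4 MiB reports 3 chunks, and NextChunk below trims the final chunk to
// the 2 MiB that actually remain.
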
// next-chunk: func() -> result<input-stream, string>;
func (host *DriverHost) NextChunk(this plugin_warp.UploadReadable) witgo.Result[io_v0_2.InputStream, string] {
	upload, ok := host.uploads.Get(this)
	if !ok {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::NextChunk: ErrorCodeBadDescriptor")
	}
	if upload.SectionReader == nil {
		return witgo.Err[io_v0_2.InputStream]("UploadReadable::NextChunk: No chunk reader")
	}

	chunkSize := min(upload.SectionReader.CunketSize, upload.GetSize()-upload.SectionReader.Offset)
	sr, err := upload.SectionReader.GetSectionReader(upload.SectionReader.Offset, chunkSize)
	if err != nil {
		return witgo.Err[io_v0_2.InputStream](err.Error())
	}
	upload.SectionReader.Offset += chunkSize
	streamHandle := host.StreamManager().Add(&manager_io.Stream{Reader: sr, Seeker: sr, Closer: utils.CloseFunc(func() error {
		upload.SectionReader.FreeSectionReader(sr)
		return nil
	})})
	return witgo.Ok[io_v0_2.InputStream, string](streamHandle)
}

// chunk-reset: func(chunk: input-stream) -> result<_, string>;
func (host *DriverHost) ChunkReset(this plugin_warp.UploadReadable, chunk io_v0_2.InputStream) witgo.Result[witgo.Unit, string] {
	stream, ok := host.StreamManager().Get(chunk)
	if !ok {
		return witgo.Err[witgo.Unit]("UploadReadable::ChunkReset: ErrorCodeBadDescriptor")
	}
	if stream.Seeker == nil {
		return witgo.Err[witgo.Unit]("UploadReadable::ChunkReset: stream is not seekable")
	}
	_, err := stream.Seeker.Seek(0, io.SeekStart)
	if err != nil {
		return witgo.Err[witgo.Unit](err.Error())
	}
	return witgo.Ok[witgo.Unit, string](witgo.Unit{})
}

// get-hasher: func(hashs: list<hash-alg>) -> result<list<hash-info>, string>;
func (host *DriverHost) GetHasher(this plugin_warp.UploadReadable, hashs []plugin_warp.HashAlg) witgo.Result[[]plugin_warp.HashInfo, string] {
	upload, ok := host.uploads.Get(this)
	if !ok {
		return witgo.Err[[]plugin_warp.HashInfo]("UploadReadable: ErrorCodeBadDescriptor")
	}

	resultHashs := plugin_warp.HashInfoConvert2(upload.GetHash(), hashs)
	if resultHashs != nil {
		return witgo.Ok[[]plugin_warp.HashInfo, string](resultHashs)
	}

	if upload.StreamConsume {
		return witgo.Err[[]plugin_warp.HashInfo]("UploadReadable: StreamConsume")
	}

	// The required hashes are missing from the object, or the available set is
	// incomplete, so cache the whole file and compute them here.
	hashTypes := plugin_warp.HashAlgConverts(hashs)

	hashers := utils.NewMultiHasher(hashTypes)
	if _, err := upload.CacheFullAndWriter(&upload.UpdateProgress, hashers); err != nil {
		return witgo.Err[[]plugin_warp.HashInfo](err.Error())
	}

	maps.Copy(upload.GetHash().Export(), hashers.GetHashInfo().Export())

	return witgo.Ok[[]plugin_warp.HashInfo, string](plugin_warp.HashInfoConvert(*hashers.GetHashInfo()))
}

// update-progress: func(progress: f64);
func (host *DriverHost) UpdateProgress(this plugin_warp.UploadReadable, progress float64) {
	upload, ok := host.uploads.Get(this)
	if ok {
		upload.UpdateProgress(progress)
	}
}

// resource cancellable { subscribe: func() -> pollable; }
func (host *DriverHost) Subscribe(this plugin_warp.Context) io_v0_2.Pollable {
	poll := host.Host.PollManager()

	ctx, ok := host.contexts.Get(this)
	if !ok {
		return poll.Add(manager_io.ReadyPollable)
	}

	return poll.Add(&plugin_warp.ContextPollable{Context: ctx})
}
Some files were not shown because too many files have changed in this diff.