mirror of https://github.com/OpenListTeam/OpenList.git
synced 2025-11-25 03:15:19 +08:00

Compare commits (48 commits, SHA1 only):

6417f71527 ae93fb0479 ce3f8e36c1 33f1fbc9fb fbc4d6d3f8 834248b9e4
9235c7dff1 7b377b1d54 d312db3db1 7e1358e686 62e381a764 8f18e34da0
525f26dc23 a0fcfa3ed2 15f276537c 623a12050e ae2d2d1021 a109152a13
febbcd6027 549e60136b 14d2b8290a bbc328d589 5780db293a cdc069d8e7
fb5094f688 670e0bdc45 89235012af 2bfbad2874 4ba7696032 66645516e5
eb2ff2d2ca 4153245f2c 6fe9af7819 2edc446ced c3c7983f7b 22deb4df30
da0c734aa3 189cebe4c9 9d3da44a99 8f17d35ed5 89759b6e3b a2fc38be8d
e0414e7110 b486af0031 ea09ce4b8f d465da43e3 84ed487950 3c07144211
@@ -64,7 +64,9 @@ Thank you for your support and understanding of the OpenList project.
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -93,7 +95,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)

- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode
@@ -64,7 +64,9 @@ OpenList is an open-source project independently maintained by the OpenList team, following the AGPL-3
- [x] [UPYUN Storage Service (又拍云对象存储)](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack (分秒帧)](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun (和彩云)](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk (百度网盘)](http://pan.baidu.com)
@@ -65,6 +65,7 @@ Thank you for your support and understanding of the OpenList project.
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -93,6 +94,7 @@ Thank you for your support and understanding of the OpenList project.
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] [MediaFire](https://www.mediafire.com)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, etc.)
- [x] Image preview in gallery mode
@@ -64,7 +64,9 @@ Thank you for your support and understanding
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -17,6 +17,7 @@ func Init() {
	bootstrap.Log()
	bootstrap.InitDB()
	data.InitData()
	bootstrap.InitPlugins()
	bootstrap.InitStreamLimit()
	bootstrap.InitIndex()
	bootstrap.InitUpgradePatch()
@@ -2,6 +2,7 @@ package flags

var (
	DataDir    string
	ConfigPath string
	Debug      bool
	NoPrefix   bool
	Dev        bool
@@ -27,7 +27,8 @@ func Execute() {
}

func init() {
	RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
	RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
	RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
	RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
	RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
	RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
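The reworded --data/--config help text pins down the resolution order. A plausible helper matching that description (the actual resolution code is not part of this compare; the function name and layout are illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// configPath mirrors the documented default: an explicit --config wins,
// otherwise config.json inside the --data directory is used.
func configPath(dataDir, configFlag string) string {
	if configFlag != "" {
		return configFlag // resolved relative to the current working directory
	}
	return filepath.Join(dataDir, "config.json")
}

func main() {
	fmt.Println(configPath("data", ""))             // data/config.json
	fmt.Println(configPath("data", "/etc/ol.json")) // /etc/ol.json
}
```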
@@ -27,6 +27,8 @@ import (
	"github.com/spf13/cobra"
	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"

	"github.com/quic-go/quic-go/http3"
)

// ServerCmd represents the server command
@@ -63,6 +65,7 @@ the address is defined in config file`,
		httpHandler = h2c.NewHandler(r, &http2.Server{})
	}
	var httpSrv, httpsSrv, unixSrv *http.Server
	var quicSrv *http3.Server
	if conf.Conf.Scheme.HttpPort != -1 {
		httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
		fmt.Printf("start HTTP server @ %s\n", httpBase)
@@ -86,6 +89,24 @@ the address is defined in config file`,
				utils.Log.Fatalf("failed to start https: %s", err.Error())
			}
		}()
		if conf.Conf.Scheme.EnableH3 {
			fmt.Printf("start HTTP3 (quic) server @ %s\n", httpsBase)
			utils.Log.Infof("start HTTP3 (quic) server @ %s", httpsBase)
			r.Use(func(c *gin.Context) {
				if c.Request.TLS != nil {
					port := conf.Conf.Scheme.HttpsPort
					c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", port))
				}
				c.Next()
			})
			quicSrv = &http3.Server{Addr: httpsBase, Handler: r}
			go func() {
				err := quicSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
				if err != nil && !errors.Is(err, http.ErrServerClosed) {
					utils.Log.Fatalf("failed to start http3 (quic): %s", err.Error())
				}
			}()
		}
	}
	if conf.Conf.Scheme.UnixFile != "" {
		fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
@@ -203,6 +224,15 @@ the address is defined in config file`,
				utils.Log.Fatal("HTTPS server shutdown err: ", err)
			}
		}()
		if conf.Conf.Scheme.EnableH3 {
			wg.Add(1)
			go func() {
				defer wg.Done()
				if err := quicSrv.Shutdown(ctx); err != nil {
					utils.Log.Fatal("HTTP3 (quic) server shutdown err: ", err)
				}
			}()
		}
	}
	if conf.Conf.Scheme.UnixFile != "" {
		wg.Add(1)
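The server hunks above wire HTTP/3 in beside the existing TCP listeners: when EnableH3 is set, a gin middleware stamps an Alt-Svc header on TLS responses so clients learn about the QUIC endpoint, and an http3.Server is started on the same HTTPS address. A standalone sketch of that pattern (plain net/http instead of gin; the port and cert paths are placeholders):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/quic-go/quic-go/http3"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Clients only upgrade to HTTP/3 after seeing Alt-Svc on a TLS response.
		if r.TLS != nil {
			w.Header().Set("Alt-Svc", `h3=":8443"; ma=86400`)
		}
		fmt.Fprintf(w, "served over %s\n", r.Proto)
	})

	// TCP listener for HTTP/1.1 and HTTP/2.
	go func() {
		if err := http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", mux); err != nil {
			panic(err)
		}
	}()

	// UDP listener for HTTP/3 on the same port number.
	quicSrv := &http3.Server{Addr: ":8443", Handler: mux}
	if err := quicSrv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil {
		panic(err)
	}
}
```

The shutdown hunk mirrors this: quicSrv.Shutdown(ctx) joins the same WaitGroup the TCP servers use, so HTTP/3 drains alongside HTTPS.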
@@ -245,4 +245,17 @@ func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, delete
	return d.client.DeleteOfflineTasks(hashes, deleteFiles)
}

func (d *Pan115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	info, err := d.client.GetInfo()
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: uint64(info.SpaceInfo.AllTotal.Size),
			FreeSpace:  uint64(info.SpaceInfo.AllRemain.Size),
		},
	}, nil
}

var _ driver.Driver = (*Pan115)(nil)
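This is the first of many drivers in this compare gaining a GetDetails method. The Alias diff further down type-asserts remoteDriver.(driver.WithDetails), so the optional capability presumably looks like the sketch below (the exact definition is not shown in this compare; the stand-in StorageDetails type only mirrors the fields these diffs populate):

```go
package sketch

import "context"

// StorageDetails stands in for model.StorageDetails from the repo.
type StorageDetails struct {
	DiskUsage struct {
		TotalSpace uint64
		FreeSpace  uint64
	}
}

// WithDetails is the inferred optional interface: every driver in this
// compare adds exactly this method, and Alias checks for it by assertion.
type WithDetails interface {
	GetDetails(ctx context.Context) (*StorageDetails, error)
}
```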
@@ -15,10 +15,9 @@ type Addition struct {
}

var config = driver.Config{
	Name:        "115 Cloud",
	DefaultRoot: "0",
	// OnlyProxy: true,
	// NoOverwriteUpload: true,
	Name:          "115 Cloud",
	DefaultRoot:   "0",
	LinkCacheMode: driver.LinkCacheUA,
}

func init() {
@@ -131,23 +131,6 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
	}, nil
}

func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	resp, err := d.client.GetFolderInfoByPath(ctx, path)
	if err != nil {
		return nil, err
	}
	return &Obj{
		Fid:  resp.FileID,
		Fn:   resp.FileName,
		Fc:   resp.FileCategory,
		Sha1: resp.Sha1,
		Pc:   resp.PickCode,
	}, nil
}

func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
@@ -17,8 +17,9 @@ type Addition struct {
}

var config = driver.Config{
	Name:        "115 Open",
	DefaultRoot: "0",
	Name:          "115 Open",
	DefaultRoot:   "0",
	LinkCacheMode: driver.LinkCacheUA,
}

func init() {
@@ -74,7 +74,6 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
		"type": f.Type,
	}
	resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {

		req.SetBody(data)
	}, nil)
	if err != nil {
@@ -254,4 +253,15 @@ func (d *Pan123) APIRateLimit(ctx context.Context, api string) error {
	return limiter.Wait(ctx)
}

func (d *Pan123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	userInfo, err := d.getUserInfo(ctx)
	if err != nil {
		return nil, err
	}
	total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
	return &model.StorageDetails{
		DiskUsage: driver.DiskUsageFromUsedAndTotal(userInfo.Data.SpaceUsed, total),
	}, nil
}

var _ driver.Driver = (*Pan123)(nil)
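driver.DiskUsageFromUsedAndTotal is called here and in several later hunks, but its body is not part of this compare. A plausible implementation, guarding against a used > total underflow (the names and the guard are assumptions):

```go
package main

import "fmt"

// DiskUsage mirrors the two fields the GetDetails hunks populate.
type DiskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

// diskUsageFromUsedAndTotal derives free space from used and total,
// clamping at zero so a quota overrun cannot wrap the unsigned subtraction.
func diskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
	var free uint64
	if total > used {
		free = total - used
	}
	return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
	// SpaceUsed against SpacePermanent+SpaceTemp, as in the Pan123 hunk.
	fmt.Println(diskUsageFromUsedAndTotal(30, 100)) // {100 70}
}
```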
@@ -122,3 +122,14 @@ type S3PreSignedURLs struct {
		PreSignedUrls map[string]string `json:"presignedUrls"`
	} `json:"data"`
}

type UserInfoResp struct {
	Data struct {
		Uid            int64  `json:"UID"`
		Nickname       string `json:"Nickname"`
		SpaceUsed      uint64 `json:"SpaceUsed"`
		SpacePermanent uint64 `json:"SpacePermanent"`
		SpaceTemp      uint64 `json:"SpaceTemp"`
		FileCount      int    `json:"FileCount"`
	} `json:"data"`
}
@@ -124,7 +124,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
		if cur == chunkCount {
			curSize = lastChunkSize
		}
		var reader *stream.SectionReader
		var reader io.ReadSeeker
		var rateLimitedRd io.Reader
		threadG.GoWithLifecycle(errgroup.Lifecycle{
			Before: func(ctx context.Context) error {
@@ -43,7 +43,7 @@ const (
	S3Auth           = MainApi + "/file/s3_upload_object/auth"
	UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
	S3Complete       = MainApi + "/file/s3_complete_multipart_upload"
	//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
	// AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)

func signPath(path string, os string, version string) (k string, v string) {
@@ -282,3 +282,14 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
	}
	return res, nil
}

func (d *Pan123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
	var resp UserInfoResp
	_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}
@@ -84,7 +84,7 @@ func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
		}, nil
	}

	uid, err := d.getUID()
	uid, err := d.getUID(ctx)
	if err != nil {
		return nil, err
	}
@@ -215,7 +215,7 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}

func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	userInfo, err := d.getUserInfo()
	userInfo, err := d.getUserInfo(ctx)
	if err != nil {
		return nil, err
	}
@@ -229,5 +229,15 @@ func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error)
	}, nil
}

var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)

func (d *Open123) OfflineDownload(ctx context.Context, url string, dir model.Obj, callback string) (int, error) {
	return d.createOfflineDownloadTask(ctx, url, dir.GetID(), callback)
}

func (d *Open123) OfflineDownloadProcess(ctx context.Context, taskID int) (float64, int, error) {
	return d.queryOfflineDownloadStatus(ctx, taskID)
}

var (
	_ driver.Driver    = (*Open123)(nil)
	_ driver.PutResult = (*Open123)(nil)
)
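The getUID/getUserInfo signature changes above follow one pattern applied throughout this compare: accept a context.Context and hand it to resty via req.SetContext, so cancelling the caller aborts the underlying HTTP request. A standalone illustration with resty v2 (the URL is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-resty/resty/v2"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// SetContext ties the round trip to ctx: timeout or cancel aborts it.
	resp, err := resty.New().R().
		SetContext(ctx).
		Get("https://example.com")
	if err != nil {
		fmt.Println("request aborted:", err)
		return
	}
	fmt.Println("status:", resp.StatusCode())
}
```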
@@ -19,6 +19,7 @@ func (a *ApiInfo) Require() {
		a.token <- struct{}{}
	}
}

func (a *ApiInfo) Release() {
	if a.qps > 0 {
		time.AfterFunc(time.Second, func() {
@@ -26,13 +27,16 @@ func (a *ApiInfo) Release() {
		})
	}
}

func (a *ApiInfo) SetQPS(qps int) {
	a.qps = qps
	a.token = make(chan struct{}, qps)
}

func (a *ApiInfo) NowLen() int {
	return len(a.token)
}

func InitApiInfo(url string, qps int) *ApiInfo {
	return &ApiInfo{
		url: url,
@@ -185,3 +189,18 @@ type UploadCompleteResp struct {
		FileID int64 `json:"fileID"`
	} `json:"data"`
}

type OfflineDownloadResp struct {
	BaseResp
	Data struct {
		TaskID int `json:"taskID"`
	} `json:"data"`
}

type OfflineDownloadProcessResp struct {
	BaseResp
	Data struct {
		Process float64 `json:"process"`
		Status  int     `json:"status"`
	} `json:"data"`
}
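The ApiInfo hunks show a per-endpoint QPS gate: a buffered channel sized to the QPS acts as a token bucket, Require blocks when the bucket is full, and Release frees the slot one second later via time.AfterFunc, so at most qps calls start in any one-second window (the real type appears to skip the gate when qps is 0, which is how UploadComplete, registered with QPS 0, runs unthrottled). A minimal standalone version of the pattern:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type qpsGate struct{ token chan struct{} }

func newQPSGate(qps int) *qpsGate {
	return &qpsGate{token: make(chan struct{}, qps)}
}

// Require blocks while qps calls already occupy the current window.
func (g *qpsGate) Require() { g.token <- struct{}{} }

// Release frees the slot one second later, enforcing the per-second budget.
func (g *qpsGate) Release() {
	time.AfterFunc(time.Second, func() { <-g.token })
}

func main() {
	gate := newQPSGate(2)
	var wg sync.WaitGroup
	start := time.Now()
	for i := 0; i < 6; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			gate.Require()
			defer gate.Release()
			// Prints in bursts of two, roughly a second apart.
			fmt.Printf("call %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
		}(i)
	}
	wg.Wait()
}
```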
@@ -67,7 +67,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
		partNumber := partIndex + 1 // part numbers start from 1
		offset := partIndex * chunkSize
		size := min(chunkSize, size-offset)
		var reader *stream.SectionReader
		var reader io.ReadSeeker
		var rateLimitedRd io.Reader
		sliceMD5 := ""
		// form data
@@ -1,6 +1,7 @@
package _123_open

import (
	"context"
	"crypto/md5"
	"encoding/json"
	"errors"
@@ -18,7 +19,7 @@ import (
	log "github.com/sirupsen/logrus"
)

var ( //the AccessToken QPS limit differs by scenario; this modular layout is easy to extend
var ( // the AccessToken QPS limit differs by scenario; this modular layout is easy to extend
	Api = "https://open-api.123pan.com"

	AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
@@ -33,6 +34,9 @@ var ( // the AccessToken QPS limit differs by scenario; this modular layout is easy to extend
	Trash          = InitApiInfo(Api+"/api/v1/file/trash", 2)
	UploadCreate   = InitApiInfo(Api+"/upload/v2/file/create", 2)
	UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)

	OfflineDownload        = InitApiInfo(Api+"/api/v1/offline/download", 1)
	OfflineDownloadProcess = InitApiInfo(Api+"/api/v1/offline/download/process", 5)
)

func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@@ -82,7 +86,6 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
			return nil, errors.New(baseResp.Message)
		}
	}

}

func (d *Open123) flushAccessToken() error {
@@ -148,21 +151,23 @@ func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuratio
	return objURL.String(), nil
}

func (d *Open123) getUserInfo() (*UserInfoResp, error) {
func (d *Open123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
	var resp UserInfoResp

	if _, err := d.Request(UserInfo, http.MethodGet, nil, &resp); err != nil {
	if _, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &resp); err != nil {
		return nil, err
	}

	return &resp, nil
}

func (d *Open123) getUID() (uint64, error) {
func (d *Open123) getUID(ctx context.Context) (uint64, error) {
	if d.UID != 0 {
		return d.UID, nil
	}
	resp, err := d.getUserInfo()
	resp, err := d.getUserInfo(ctx)
	if err != nil {
		return 0, err
	}
@@ -184,7 +189,6 @@ func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*Fi
			"searchData": "",
		})
	}, &resp)

	if err != nil {
		return nil, err
	}
@@ -276,3 +280,34 @@ func (d *Open123) trash(fileId int64) error {

	return nil
}

func (d *Open123) createOfflineDownloadTask(ctx context.Context, url string, dirID, callback string) (taskID int, err error) {
	body := base.Json{
		"url":   url,
		"dirID": dirID,
	}
	if len(callback) > 0 {
		body["callBackUrl"] = callback
	}
	var resp OfflineDownloadResp
	_, err = d.Request(OfflineDownload, http.MethodPost, func(req *resty.Request) {
		req.SetBody(body)
	}, &resp)
	if err != nil {
		return 0, err
	}
	return resp.Data.TaskID, nil
}

func (d *Open123) queryOfflineDownloadStatus(ctx context.Context, taskID int) (process float64, status int, err error) {
	var resp OfflineDownloadProcessResp
	_, err = d.Request(OfflineDownloadProcess, http.MethodGet, func(req *resty.Request) {
		req.SetQueryParams(map[string]string{
			"taskID": strconv.Itoa(taskID),
		})
	}, &resp)
	if err != nil {
		return .0, 0, err
	}
	return resp.Data.Process, resp.Data.Status, nil
}
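Putting the two new entry points together, a caller would create a task and then poll it. A hedged usage sketch (the interface stands in for *Open123, whose real OfflineDownload takes a model.Obj destination rather than a string ID; the 5-second interval and the reading of status 0 as "still running" are assumptions, not documented in this compare):

```go
package sketch

import (
	"context"
	"fmt"
	"time"
)

type offlineTasker interface {
	OfflineDownload(ctx context.Context, url, dirID, callback string) (int, error)
	OfflineDownloadProcess(ctx context.Context, taskID int) (float64, int, error)
}

func waitOfflineDownload(ctx context.Context, d offlineTasker, url, dirID string) error {
	taskID, err := d.OfflineDownload(ctx, url, dirID, "") // empty callback URL
	if err != nil {
		return err
	}
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			process, status, err := d.OfflineDownloadProcess(ctx, taskID)
			if err != nil {
				return err
			}
			fmt.Printf("task %d: %.1f%% (status %d)\n", taskID, process, status)
			if status != 0 { // assumed: non-zero means finished or failed
				return nil
			}
		}
	}
}
```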
@@ -54,7 +54,8 @@ func (d *Yun139) Init(ctx context.Context) error {
		"userInfo": base.Json{
			"userType":    1,
			"accountType": 1,
			"accountName": d.Account},
			"accountName": d.Account,
		},
		"modAddrType": 1,
	}, &resp)
	if err != nil {
@@ -732,7 +733,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
		"manualRename": 2,
		"operation":    0,
		"path":         path.Join(dstDir.GetPath(), dstDir.GetID()),
		"seqNo":        random.String(32), //the sequence number must not be empty
		"seqNo":        random.String(32), // the sequence number must not be empty
		"totalSize":    reportSize,
		"uploadContentList": []base.Json{{
			"contentName": stream.GetName(),
@@ -834,4 +835,48 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{},
	}
}

func (d *Yun139) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	if d.UserDomainID == "" {
		return nil, errs.NotImplement
	}
	var total, free uint64
	if d.isFamily() {
		diskInfo, err := d.getFamilyDiskInfo(ctx)
		if err != nil {
			return nil, err
		}
		totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
		}
		usedMb, err := strconv.ParseUint(diskInfo.Data.UsedSize, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("failed convert used size into integer: %+v", err)
		}
		total = totalMb * 1024 * 1024
		free = total - (usedMb * 1024 * 1024)
	} else {
		diskInfo, err := d.getPersonalDiskInfo(ctx)
		if err != nil {
			return nil, err
		}
		totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
		}
		freeMb, err := strconv.ParseUint(diskInfo.Data.FreeDiskSize, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("failed convert free size into integer: %+v", err)
		}
		total = totalMb * 1024 * 1024
		free = freeMb * 1024 * 1024
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  free,
		},
	}, nil
}

var _ driver.Driver = (*Yun139)(nil)
@@ -11,6 +11,7 @@ type Addition struct {
	driver.RootID
	Type                 string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
	CloudID              string `json:"cloud_id"`
	UserDomainID         string `json:"user_domain_id" help:"ud_id in Cookie, fill in to show disk usage"`
	CustomUploadPartSize int64  `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
	ReportRealSize       bool   `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
	UseLargeThumbnail    bool   `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`
@@ -312,3 +312,20 @@ type RefreshTokenResp struct {
	AccessToken string `xml:"accessToken"`
	Desc        string `xml:"desc"`
}

type PersonalDiskInfoResp struct {
	BaseResp
	Data struct {
		FreeDiskSize         string `json:"freeDiskSize"`
		DiskSize             string `json:"diskSize"`
		IsInfinitePicStorage *bool  `json:"isInfinitePicStorage"`
	} `json:"data"`
}

type FamilyDiskInfoResp struct {
	BaseResp
	Data struct {
		UsedSize string `json:"usedSize"`
		DiskSize string `json:"diskSize"`
	} `json:"data"`
}
@@ -107,8 +107,7 @@ func (d *Yun139) refreshToken() error {
	return nil
}

func (d *Yun139) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	url := "https://yun.139.com" + pathname
func (d *Yun139) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()
	randStr := random.String(16)
	ts := time.Now().Format("2006-01-02 15:04:05")
@@ -219,7 +218,7 @@ func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error
}

func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
	return d.request(pathname, http.MethodPost, func(req *resty.Request) {
	return d.request("https://yun.139.com"+pathname, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data)
	}, resp)
}
@@ -268,7 +267,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
				HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
			},
			Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
			//Thumbnail: content.BigthumbnailURL,
			// Thumbnail: content.BigthumbnailURL,
		}
		files = append(files, &f)
	}
@@ -335,7 +334,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
				Path: path, // Path of the directory containing the file
			},
			Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
			//Thumbnail: content.BigthumbnailURL,
			// Thumbnail: content.BigthumbnailURL,
		}
		files = append(files, &f)
	}
@@ -390,7 +389,7 @@ func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) {
				Path: path, // Path of the directory containing the file
			},
			Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
			//Thumbnail: content.BigthumbnailURL,
			// Thumbnail: content.BigthumbnailURL,
		}
		files = append(files, &f)
	}
@@ -418,6 +417,7 @@ func (d *Yun139) getLink(contentId string) (string, error) {
	}
	return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
}

func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
	data := d.newJson(base.Json{
		"contentID": contentId,
@@ -510,6 +510,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
	}
	return res.Body(), nil
}

func (d *Yun139) personalPost(pathname string, data interface{}, resp interface{}) ([]byte, error) {
	return d.personalRequest(pathname, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data)
@@ -545,7 +546,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
		}
		nextPageCursor = resp.Data.NextPageCursor
		for _, item := range resp.Data.Items {
			var isFolder = (item.Type == "folder")
			isFolder := (item.Type == "folder")
			var f model.Obj
			if isFolder {
				f = &model.Object{
@@ -557,7 +558,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
					IsFolder: isFolder,
				}
			} else {
				var Thumbnails = item.Thumbnails
				Thumbnails := item.Thumbnails
				var ThumbnailUrl string
				if d.UseLargeThumbnail {
					for _, thumb := range Thumbnails {
@@ -600,7 +601,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
	if err != nil {
		return "", err
	}
	var cdnUrl = jsoniter.Get(res, "data", "cdnUrl").ToString()
	cdnUrl := jsoniter.Get(res, "data", "cdnUrl").ToString()
	if cdnUrl != "" {
		return cdnUrl, nil
	} else {
@@ -614,12 +615,14 @@ func (d *Yun139) getAuthorization() string {
	}
	return d.Authorization
}

func (d *Yun139) getAccount() string {
	if d.ref != nil {
		return d.ref.getAccount()
	}
	return d.Account
}

func (d *Yun139) getPersonalCloudHost() string {
	if d.ref != nil {
		return d.ref.getPersonalCloudHost()
@@ -670,3 +673,33 @@ func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo,
	}
	return nil
}

func (d *Yun139) getPersonalDiskInfo(ctx context.Context) (*PersonalDiskInfoResp, error) {
	data := map[string]interface{}{
		"userDomainId": d.UserDomainID,
	}
	var resp PersonalDiskInfoResp
	_, err := d.request("https://user-njs.yun.139.com/user/disk/getPersonalDiskInfo", http.MethodPost, func(req *resty.Request) {
		req.SetBody(data)
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}

func (d *Yun139) getFamilyDiskInfo(ctx context.Context) (*FamilyDiskInfoResp, error) {
	data := map[string]interface{}{
		"userDomainId": d.UserDomainID,
	}
	var resp FamilyDiskInfoResp
	_, err := d.request("https://user-njs.yun.139.com/user/disk/getFamilyDiskInfo", http.MethodPost, func(req *resty.Request) {
		req.SetBody(data)
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}
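Two things are worth noting in this util.go hunk set. First, request was generalized from a pathname (with a hard-coded https://yun.139.com prefix) to a full URL precisely so the new disk-info calls can reach user-njs.yun.139.com through the same signing path, while post keeps the old behavior by prepending the host itself. Second, these endpoints report sizes as decimal-MB strings, which GetDetails parses and scales to bytes; a minimal standalone version of that conversion (sample values are made up):

```go
package main

import (
	"fmt"
	"strconv"
)

// mbStringToBytes parses a decimal-MB string and scales it to bytes,
// as the Yun139 GetDetails hunk does with strconv.ParseUint.
func mbStringToBytes(s string) (uint64, error) {
	mb, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to convert %q into an integer: %w", s, err)
	}
	return mb * 1024 * 1024, nil
}

func main() {
	total, _ := mbStringToBytes("2097152") // 2 TiB quota
	used, _ := mbStringToBytes("524288")   // 512 GiB used
	fmt.Println(total-used, "bytes free")  // 1649267441664
}
```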
@@ -194,4 +194,17 @@ func (d *Cloud189) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	return d.newUpload(ctx, dstDir, stream, up)
}

func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	capacityInfo, err := d.getCapacityInfo(ctx)
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
			FreeSpace:  capacityInfo.CloudCapacityInfo.FreeSize,
		},
	}, nil
}

var _ driver.Driver = (*Cloud189)(nil)
@@ -66,3 +66,21 @@ type DownResp struct {
	ResMessage      string `json:"res_message"`
	FileDownloadUrl string `json:"downloadUrl"`
}

type CapacityResp struct {
	ResCode           int    `json:"res_code"`
	ResMessage        string `json:"res_message"`
	Account           string `json:"account"`
	CloudCapacityInfo struct {
		FreeSize     uint64 `json:"freeSize"`
		MailUsedSize uint64 `json:"mail189UsedSize"`
		TotalSize    uint64 `json:"totalSize"`
		UsedSize     uint64 `json:"usedSize"`
	} `json:"cloudCapacityInfo"`
	FamilyCapacityInfo struct {
		FreeSize  uint64 `json:"freeSize"`
		TotalSize uint64 `json:"totalSize"`
		UsedSize  uint64 `json:"usedSize"`
	} `json:"familyCapacityInfo"`
	TotalSize uint64 `json:"totalSize"`
}
@@ -157,7 +157,7 @@ func (d *Cloud189) request(url string, method string, callback base.ReqCallback,
	if err != nil {
		return nil, err
	}
	//log.Debug(res.String())
	// log.Debug(res.String())
	if e.ErrorCode != "" {
		if e.ErrorCode == "InvalidSessionKey" {
			err = d.newLogin()
@@ -186,8 +186,8 @@ func (d *Cloud189) getFiles(fileId string) ([]model.Obj, error) {
			"mediaType":  "0",
			"folderId":   fileId,
			"iconOption": "5",
			"orderBy":    "lastOpTime", //account.OrderBy
			"descending": "true",       //account.OrderDirection
			"orderBy":    "lastOpTime", // account.OrderBy
			"descending": "true",       // account.OrderDirection
		})
	}, &resp)
	if err != nil {
@@ -311,7 +311,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
	}
	d.sessionKey = sessionKey
	const DEFAULT int64 = 10485760
	var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
	count := int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))

	res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
		"parentFolderId": dstDir.GetID(),
@@ -340,10 +340,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
		if DEFAULT < byteSize {
			byteSize = DEFAULT
		}
		//log.Debugf("%d,%d", byteSize, finish)
		// log.Debugf("%d,%d", byteSize, finish)
		byteData := make([]byte, byteSize)
		n, err := io.ReadFull(file, byteData)
		//log.Debug(err, n)
		// log.Debug(err, n)
		if err != nil {
			return err
		}
@@ -395,3 +395,14 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
	}, nil)
	return err
}

func (d *Cloud189) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
	var resp CapacityResp
	_, err := d.request("https://cloud.189.cn/api/portal/getUserSizeInfo.action", http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}
@@ -69,7 +69,7 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
	// avoid logging in repeatedly
	if !y.isLogin() || y.Addition.AccessToken == "" {
		if err = y.login(); err != nil {
			return
			return err
		}
	}

@@ -83,7 +83,7 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
	y.cron = cron.NewCron(time.Minute * 5)
	y.cron.Do(y.keepAlive)

	return
	return err
}

func (y *Cloud189TV) Drop(ctx context.Context) error {
@@ -244,7 +244,6 @@ func (y *Cloud189TV) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
		FileName: srcObj.GetName(),
		IsFolder: BoolToNumber(srcObj.IsDir()),
	})

	if err != nil {
		return err
	}
@@ -278,5 +277,25 @@ func (y *Cloud189TV) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
	}

	return y.OldUpload(ctx, dstDir, stream, up, isFamily, overwrite)

}

func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	capacityInfo, err := y.getCapacityInfo(ctx)
	if err != nil {
		return nil, err
	}
	var total, free uint64
	if y.isFamily() {
		total = capacityInfo.FamilyCapacityInfo.TotalSize
		free = capacityInfo.FamilyCapacityInfo.FreeSize
	} else {
		total = capacityInfo.CloudCapacityInfo.TotalSize
		free = capacityInfo.CloudCapacityInfo.FreeSize
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  free,
		},
	}, nil
}
@@ -316,3 +316,21 @@ type BatchTaskConflictTaskInfoResp struct {
	TaskInfos []BatchTaskInfo
	TaskType  int `json:"taskType"`
}

type CapacityResp struct {
	ResCode           int    `json:"res_code"`
	ResMessage        string `json:"res_message"`
	Account           string `json:"account"`
	CloudCapacityInfo struct {
		FreeSize     uint64 `json:"freeSize"`
		MailUsedSize uint64 `json:"mail189UsedSize"`
		TotalSize    uint64 `json:"totalSize"`
		UsedSize     uint64 `json:"usedSize"`
	} `json:"cloudCapacityInfo"`
	FamilyCapacityInfo struct {
		FreeSize  uint64 `json:"freeSize"`
		TotalSize uint64 `json:"totalSize"`
		UsedSize  uint64 `json:"usedSize"`
	} `json:"familyCapacityInfo"`
	TotalSize uint64 `json:"totalSize"`
}
@@ -70,6 +70,9 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
}

func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
	if y.tokenInfo == nil {
		return nil, fmt.Errorf("login failed")
	}
	req := y.client.R().SetQueryParams(clientSuffix())

	if params != nil {
@@ -173,6 +176,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
	}
	return body, nil
}

func (y *Cloud189TV) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
	fullUrl := ApiUrl
	if isFamily {
@@ -238,9 +242,8 @@ func (y *Cloud189TV) login() (err error) {
	req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/getQrCodeUUID.action",
		http.MethodGet))
	_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/getQrCodeUUID.action")

	if err != nil {
		return
		return err
	}
	if erron.HasError() {
		return &erron
@@ -280,7 +283,7 @@ func (y *Cloud189TV) login() (err error) {
	req.SetQueryParam("uuid", y.TempUuid)
	_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
	if err != nil {
		return
		return err
	}
	if erron.HasError() {
		return &erron
@@ -300,7 +303,7 @@ func (y *Cloud189TV) login() (err error) {
	reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
	_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
	if err != nil {
		return
		return err
	}

	if erron.HasError() {
@@ -309,7 +312,7 @@ func (y *Cloud189TV) login() (err error) {

	y.tokenInfo = &tokenInfo
	op.MustSaveDriverStorage(y)
	return
	return err
}

// refreshSession tries to refresh the session using the existing AccessToken
@@ -324,7 +327,7 @@ func (y *Cloud189TV) refreshSession() (err error) {
	reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
	_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
	if err != nil {
		return
		return err
	}

	if erron.HasError() {
@@ -371,7 +374,7 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// legacy upload; the family cloud does not support overwrite
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
	fileMd5 := file.GetHash().GetHash(utils.MD5)
	var tempFile = file.GetFile()
	tempFile := file.GetFile()
	var err error
	if len(fileMd5) != utils.MD5.Width {
		tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
@@ -474,7 +477,6 @@ func (y *Cloud189TV) OldUploadCreate(ctx context.Context, parentID string, fileM
		})
	}
	}, &uploadInfo, isFamily)

	if err != nil {
		return nil, err
	}
@@ -628,3 +630,15 @@ func (y *Cloud189TV) WaitBatchTask(aType string, taskID string, t time.Duration)
		time.Sleep(t)
	}
}

func (y *Cloud189TV) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
	fullUrl := ApiUrl + "/portal/getUserSizeInfo.action"
	var resp CapacityResp
	_, err := y.get(fullUrl, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}
@@ -90,11 +90,11 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
	if y.Addition.RefreshToken != "" {
		y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
		if err = y.refreshToken(); err != nil {
			return
			return err
		}
	} else {
		if err = y.login(); err != nil {
			return
			return err
		}
	}

@@ -124,7 +124,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
			utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
		}
	})
	return
	return err
}

func (d *Cloud189PC) InitReference(storage driver.Driver) error {
@@ -305,7 +305,6 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
		FileName: srcObj.GetName(),
		IsFolder: BoolToNumber(srcObj.IsDir()),
	})

	if err != nil {
		return err
	}
@@ -411,3 +410,24 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
		return y.StreamUpload(ctx, dstDir, stream, up, isFamily, overwrite)
	}
}

func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	capacityInfo, err := y.getCapacityInfo(ctx)
	if err != nil {
		return nil, err
	}
	var total, free uint64
	if y.isFamily() {
		total = capacityInfo.FamilyCapacityInfo.TotalSize
		free = capacityInfo.FamilyCapacityInfo.FreeSize
	} else {
		total = capacityInfo.CloudCapacityInfo.TotalSize
		free = capacityInfo.CloudCapacityInfo.FreeSize
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  free,
		},
	}, nil
}
@@ -409,3 +409,21 @@ func (p Params) Encode() string {
	}
	return buf.String()
}

type CapacityResp struct {
	ResCode           int    `json:"res_code"`
	ResMessage        string `json:"res_message"`
	Account           string `json:"account"`
	CloudCapacityInfo struct {
		FreeSize     uint64 `json:"freeSize"`
		MailUsedSize uint64 `json:"mail189UsedSize"`
		TotalSize    uint64 `json:"totalSize"`
		UsedSize     uint64 `json:"usedSize"`
	} `json:"cloudCapacityInfo"`
	FamilyCapacityInfo struct {
		FreeSize  uint64 `json:"freeSize"`
		TotalSize uint64 `json:"totalSize"`
		UsedSize  uint64 `json:"usedSize"`
	} `json:"familyCapacityInfo"`
	TotalSize uint64 `json:"totalSize"`
}
@@ -90,6 +90,9 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
}

func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
	if y.getTokenInfo() == nil {
		return nil, fmt.Errorf("login failed")
	}
	req := y.getClient().R().SetQueryParams(clientSuffix())

	// set params
@@ -189,6 +192,7 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
	}
	return body, nil
}

func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
	res := make([]model.Obj, 0, 100)
	for pageNum := 1; ; pageNum++ {
@@ -342,7 +346,7 @@ func (y *Cloud189PC) loginByPassword() (err error) {
		SetQueryParam("redirectURL", loginresp.ToUrl).
		Post(API_URL + "/getSessionForPC.action")
	if err != nil {
		return
		return err
	}

	if erron.HasError() {
@@ -350,12 +354,12 @@ func (y *Cloud189PC) loginByPassword() (err error) {
	}
	if tokenInfo.ResCode != 0 {
		err = fmt.Errorf(tokenInfo.ResMessage)
		return
		return err
	}
	y.Addition.RefreshToken = tokenInfo.RefreshToken
	y.tokenInfo = &tokenInfo
	op.MustSaveDriverStorage(y)
	return
	return err
}

func (y *Cloud189PC) loginByQRCode() error {
@@ -447,7 +451,6 @@ func (y *Cloud189PC) genQRCode(text string) error {
	// Create the HTML page
	qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
	return fmt.Errorf("need verify: \n%s", qrPage)

}

func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
@@ -616,7 +619,7 @@ func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
	if y.ref != nil {
		return y.ref.refreshTokenWithRetry(retryCount)
	}

	// limit retries to avoid infinite recursion
	if retryCount >= 3 {
		if y.Addition.RefreshToken != "" {
@@ -625,7 +628,7 @@ func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
		}
		return errors.New("refresh token failed after maximum retries")
	}

	var erron RespErr
	var tokenInfo AppSessionResp
	_, err = y.client.R().
@@ -700,7 +703,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
		params.Set("familyId", y.FamilyID)
		fullUrl += "/family"
	} else {
		//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
		// params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
		fullUrl += "/person"
	}

@@ -752,7 +755,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
			partSize = lastPartSize
		}
		partInfo := ""
		var reader *stream.SectionReader
		var reader io.ReadSeeker
		var rateLimitedRd io.Reader
		threadG.GoWithLifecycle(errgroup.Lifecycle{
			Before: func(ctx context.Context) error {
@@ -876,7 +879,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
		lastSliceSize = sliceSize
	}

	//step 1: compute the required info first
	// step 1: compute the required info first
	byteSize := sliceSize
	fileMd5 := utils.MD5.NewFunc()
	sliceMd5 := utils.MD5.NewFunc()
@@ -927,14 +930,14 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
	if isFamily {
		fullUrl += "/family"
	} else {
		//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
		// params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
		fullUrl += "/person"
	}

	// try to resume progress
	uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.getTokenInfo().SessionKey, fileMd5Hex)
	if !ok {
		//step 2: pre-upload
		// step 2: pre-upload
		params := Params{
			"parentFolderId": dstDir.GetID(),
			"fileName":       url.QueryEscape(file.GetName()),
@@ -1163,7 +1166,6 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
		})
	}
	}, &uploadInfo, isFamily)

	if err != nil {
		return nil, err
	}
@@ -1473,3 +1475,15 @@ func (y *Cloud189PC) getClient() *resty.Client {
	}
	return y.client
}

func (y *Cloud189PC) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
	fullUrl := API_URL + "/portal/getUserSizeInfo.action"
	var resp CapacityResp
	_, err := y.get(fullUrl, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}
@@ -23,6 +23,7 @@ import (
type Alias struct {
	model.Storage
	Addition
	rootOrder   []string
	pathMap     map[string][]string
	autoFlatten bool
	oneKey      string
@@ -40,13 +41,18 @@ func (d *Alias) Init(ctx context.Context) error {
	if d.Paths == "" {
		return errors.New("paths is required")
	}
	paths := strings.Split(d.Paths, "\n")
	d.rootOrder = make([]string, 0, len(paths))
	d.pathMap = make(map[string][]string)
	for _, path := range strings.Split(d.Paths, "\n") {
	for _, path := range paths {
		path = strings.TrimSpace(path)
		if path == "" {
			continue
		}
		k, v := getPair(path)
		if _, ok := d.pathMap[k]; !ok {
			d.rootOrder = append(d.rootOrder, k)
		}
		d.pathMap[k] = append(d.pathMap[k], v)
	}
	if len(d.pathMap) == 1 {
@@ -62,6 +68,7 @@ func (d *Alias) Init(ctx context.Context) error {
}

func (d *Alias) Drop(ctx context.Context) error {
	d.rootOrder = nil
	d.pathMap = nil
	return nil
}
@@ -123,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	path := dir.GetPath()
	if utils.PathEqual(path, "/") && !d.autoFlatten {
		return d.listRoot(), nil
		return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil
	}
	root, sub := d.getRootAndPath(path)
	dsts, ok := d.pathMap[root]
@@ -131,27 +138,35 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
		return nil, errs.ObjectNotFound
	}
	var objs []model.Obj
	fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
	for _, dst := range dsts {
		tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
		tmp, err := fs.List(ctx, stdpath.Join(dst, sub), &fs.ListArgs{
			NoLog:              true,
			Refresh:            args.Refresh,
			WithStorageDetails: args.WithStorageDetails && d.DetailsPassThrough,
		})
		if err == nil {
			tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
				thumb, ok := model.GetThumb(obj)
				objRes := model.Object{
					Name:     obj.GetName(),
					Size:     obj.GetSize(),
					Modified: obj.ModTime(),
					IsFolder: obj.IsDir(),
				}
				if !ok {
					return &objRes, nil
				if thumb, ok := model.GetThumb(obj); ok {
					return &model.ObjThumb{
						Object: objRes,
						Thumbnail: model.Thumbnail{
							Thumbnail: thumb,
						},
					}, nil
				}
				return &model.ObjThumb{
					Object: objRes,
					Thumbnail: model.Thumbnail{
						Thumbnail: thumb,
					},
				}, nil
				if details, ok := model.GetStorageDetails(obj); ok {
					return &model.ObjStorageDetails{
						Obj:                    &objRes,
						StorageDetailsWithName: *details,
					}, nil
				}
				return &objRes, nil
			})
		}
		if err == nil {
@@ -196,9 +211,6 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
	if resultLink.ContentLength == 0 {
		resultLink.ContentLength = fi.GetSize()
	}
	if resultLink.MFile != nil {
		return &resultLink, nil
	}
	if d.DownloadConcurrency > 0 {
		resultLink.Concurrency = d.DownloadConcurrency
	}
@@ -250,7 +262,7 @@ func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string
		}
		return err
	}
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name dirs cannot make sub-dir")
	}
	return err
@@ -261,14 +273,14 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
		return errs.PermissionDenied
	}
	srcPath, err := d.getReqPath(ctx, srcObj, false)
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name files cannot be moved")
	}
	if err != nil {
		return err
	}
	dstPath, err := d.getReqPath(ctx, dstDir, true)
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name dirs cannot be moved to")
	}
	if err != nil {
@@ -296,7 +308,7 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
		}
		return err
	}
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name files cannot be Rename")
	}
	return err
@@ -307,14 +319,14 @@ func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
		return errs.PermissionDenied
	}
	srcPath, err := d.getReqPath(ctx, srcObj, false)
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name files cannot be copied")
	}
	if err != nil {
		return err
	}
	dstPath, err := d.getReqPath(ctx, dstDir, true)
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name dirs cannot be copied to")
	}
	if err != nil {
@@ -348,7 +360,7 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
		}
		return err
	}
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name files cannot be Delete")
	}
	return err
@@ -392,7 +404,7 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
			return err
		}
	}
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name dirs cannot be Put")
	}
	return err
@@ -409,7 +421,7 @@ func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string)
		}
		return err
	}
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name files cannot offline download")
	}
	return err
@@ -482,14 +494,14 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
		return errs.PermissionDenied
	}
	srcPath, err := d.getReqPath(ctx, srcObj, false)
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name files cannot be decompressed")
	}
	if err != nil {
		return err
	}
	dstPath, err := d.getReqPath(ctx, dstDir, true)
	if errs.IsNotImplement(err) {
	if errs.IsNotImplementError(err) {
		return errors.New("same-name dirs cannot be decompressed to")
	}
	if err != nil {
@@ -512,4 +524,25 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
	}
}

func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
	root, sub := d.getRootAndPath(path)
	dsts, ok := d.pathMap[root]
	if !ok {
		return 0
	}
	for _, dst := range dsts {
		storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
		if err != nil {
			continue
		}
		mode := storage.Config().LinkCacheMode
		if mode == -1 {
			return storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(actualPath)
		} else {
			return mode
		}
	}
	return 0
}

var _ driver.Driver = (*Alias)(nil)
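The rootOrder slice added in this diff exists because Go randomizes map iteration, so listing roots straight out of pathMap would shuffle them between requests; recording first-seen keys gives a stable listing order. A minimal demonstration of the same bookkeeping (toy paths, with getPair reduced to a SplitN):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	pathMap := map[string][]string{}
	var rootOrder []string
	for _, p := range []string{"docs:/mnt/a", "media:/mnt/b", "docs:/mnt/c"} {
		pair := strings.SplitN(p, ":", 2)
		k, v := pair[0], pair[1]
		if _, ok := pathMap[k]; !ok {
			rootOrder = append(rootOrder, k) // record first-seen order
		}
		pathMap[k] = append(pathMap[k], v)
	}
	fmt.Println(rootOrder) // [docs media], stable across runs
}
```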
@@ -16,6 +16,7 @@ type Addition struct {
	DownloadPartSize    int  `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
	Writable            bool `json:"writable" type:"bool" default:"false"`
	ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
	DetailsPassThrough  bool `json:"details_pass_through" type:"bool" default:"false"`
}

var config = driver.Config{
@@ -25,6 +26,7 @@ var config = driver.Config{
	NoUpload:         false,
	DefaultRoot:      "/",
	ProxyRangeOption: true,
	LinkCacheMode:    driver.LinkCacheAuto,
}

func init() {
@@ -2,8 +2,11 @@ package alias

import (
	"context"
	"errors"
	stdpath "path"
	"strings"
	"sync"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -11,24 +14,61 @@ import (
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/server/common"
	log "github.com/sirupsen/logrus"
)

func (d *Alias) listRoot() []model.Obj {
func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
	var objs []model.Obj
	for k := range d.pathMap {
	var wg sync.WaitGroup
	for _, k := range d.rootOrder {
		obj := model.Object{
			Name:     k,
			IsFolder: true,
			Modified: d.Modified,
		}
		idx := len(objs)
		objs = append(objs, &obj)
		v := d.pathMap[k]
		if !withDetails || len(v) != 1 {
			continue
		}
		remoteDriver, err := op.GetStorageByMountPath(v[0])
		if err != nil {
			continue
		}
		_, ok := remoteDriver.(driver.WithDetails)
		if !ok {
			continue
		}
		objs[idx] = &model.ObjStorageDetails{
			Obj: objs[idx],
			StorageDetailsWithName: model.StorageDetailsWithName{
				StorageDetails: nil,
				DriverName:     remoteDriver.Config().Name,
			},
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			c, cancel := context.WithTimeout(ctx, time.Second)
			defer cancel()
			details, e := op.GetStorageDetails(c, remoteDriver, refresh)
			if e != nil {
				if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
					log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
				}
				return
			}
			objs[idx].(*model.ObjStorageDetails).StorageDetails = details
		}()
	}
	wg.Wait()
	return objs
}

// do others that not defined in Driver interface
func getPair(path string) (string, string) {
	//path = strings.TrimSpace(path)
	// path = strings.TrimSpace(path)
	if strings.Contains(path, ":") {
		pair := strings.SplitN(path, ":", 2)
		if !strings.Contains(pair[0], "/") {
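listRoot now fans out one goroutine per eligible root, each bounded by its own one-second context, and fills a pre-reserved slot in the result slice so no two goroutines touch the same element; wg.Wait then joins them all. A stripped-down version of that fan-out pattern (fetch stands in for op.GetStorageDetails):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func fetch(ctx context.Context, name string) (string, error) {
	select {
	case <-time.After(300 * time.Millisecond): // pretend remote call
		return name + ": 42 GiB free", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	roots := []string{"a", "b", "c"}
	results := make([]string, len(roots))
	var wg sync.WaitGroup
	for i, r := range roots {
		wg.Add(1)
		go func(i int, r string) {
			defer wg.Done()
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			if s, err := fetch(ctx, r); err == nil {
				results[i] = s // each goroutine owns its slot, so no race
			}
		}(i, r)
	}
	wg.Wait()
	fmt.Println(results) // slow storages simply leave their slot empty
}
```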
@@ -45,7 +45,7 @@ func (d *AliDrive) GetAddition() driver.Additional {
|
||||
|
||||
func (d *AliDrive) Init(ctx context.Context) error {
|
||||
// TODO login / refresh token
|
||||
//op.MustSaveDriverStorage(d)
|
||||
// op.MustSaveDriverStorage(d)
|
||||
err := d.refreshToken()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -171,7 +171,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
||||
Mimetype: streamer.GetMimetype(),
|
||||
}
|
||||
const DEFAULT int64 = 10485760
|
||||
var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
|
||||
count := int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
|
||||
|
||||
partInfoList := make([]base.Json, 0, count)
|
||||
for i := 1; i <= count; i++ {
|
||||
@@ -327,6 +327,20 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
||||
return fmt.Errorf("%+v", resp2)
|
||||
}
|
||||
|
||||
func (d *AliDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
res, err, _ := d.request("https://api.aliyundrive.com/adrive/v1/user/driveCapacityDetails", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetContext(ctx)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
used := utils.Json.Get(res, "drive_used_size").ToUint64()
|
||||
total := utils.Json.Get(res, "drive_total_size").ToUint64()
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
|
||||
}, nil
|
||||
}
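Several hunks in this comparison replace hand-rolled model.DiskUsage literals with driver.DiskUsageFromUsedAndTotal. The helper's body is not shown in the diff; judging from its call sites (used and total as uint64, replacing TotalSpace/FreeSpace assignments), it plausibly looks like the sketch below — a reconstruction, not the repository's actual code:

package main

import "fmt"

// DiskUsage mirrors model.DiskUsage as used in the diff (fields inferred
// from the literals the helper replaces: TotalSpace and FreeSpace).
type DiskUsage struct {
    TotalSpace uint64
    FreeSpace  uint64
}

// DiskUsageFromUsedAndTotal is a plausible reconstruction of the helper this
// diff switches to; the real implementation may differ.
func DiskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
    var free uint64
    if total > used {
        free = total - used // guard against underflow when used > total
    }
    return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
    fmt.Printf("%+v\n", DiskUsageFromUsedAndTotal(30, 100)) // {TotalSpace:100 FreeSpace:70}
}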

func (d *AliDrive) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
    var resp base.Json
    var url string

@@ -299,10 +299,7 @@ func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails
    total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
    used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  total - used,
        },
        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
    }, nil
}

@@ -36,12 +36,14 @@ import (
    _ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud_open"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/local"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/mediafire"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/mega"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/misskey"
@@ -54,6 +56,7 @@ import (
    _ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/proton_drive"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc_tv"

@@ -212,7 +212,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
        lastBlockSize = sliceSize
    }

    //cal md5 for first 256k data
    // cal md5 for first 256k data
    const SliceSize int64 = 256 * utils.KB
    // cal md5
    blockList := make([]string, 0, count)
@@ -284,7 +284,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
    }
    log.Debugf("%+v", precreateResp)
    if precreateResp.ReturnType == 2 {
        //rapid upload, since got md5 match from baidu server
        // rapid upload, since got md5 match from baidu server
        // fix the timestamps; see the **Note** in the Put method's comment for the reason
        precreateResp.File.Ctime = ctime
        precreateResp.File.Mtime = mtime
@@ -365,11 +365,11 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
}

func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    du, err := d.quota()
    du, err := d.quota(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{DiskUsage: *du}, nil
    return &model.StorageDetails{DiskUsage: du}, nil
}

var _ driver.Driver = (*BaiduNetdisk)(nil)

@@ -1,6 +1,7 @@
package baidu_netdisk

import (
    "context"
    "encoding/hex"
    "errors"
    "fmt"
@@ -11,6 +12,7 @@ import (
    "unicode"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
@@ -207,7 +209,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
    if err != nil {
        return nil, err
    }
    //if res.StatusCode() == 302 {
    // if res.StatusCode() == 302 {
    u = res.Header().Get("location")
    //}

@@ -381,16 +383,15 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
    return maxSliceSize
}

func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
    var resp QuotaResp
    _, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
    _, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
        return model.DiskUsage{}, err
    }
    return &model.DiskUsage{
        TotalSpace: resp.Total,
        FreeSpace:  resp.Total - resp.Used,
    }, nil
    return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
}
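The quota change above is the recurring pattern in this commit set: a request callback binds the caller's context to the resty request, so cancelling the context aborts the HTTP round trip. A standalone sketch of that pattern with resty v2 (the endpoint is a placeholder for illustration, not OpenList's API):

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/go-resty/resty/v2"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    var out map[string]any
    // Same shape as the diff: the context is attached to the request before
    // it is sent, so cancellation propagates into the HTTP transport.
    _, err := resty.New().R().
        SetContext(ctx).
        SetResult(&out).
        Get("https://httpbin.org/json") // placeholder endpoint
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    fmt.Println("ok:", out != nil)
}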

// func encodeURIComponent(str string) string {

@@ -18,8 +18,9 @@ type Addition struct {
}

var config = driver.Config{
    Name:      "BaiduPhoto",
    LocalSort: true,
    Name:          "BaiduPhoto",
    LocalSort:     true,
    LinkCacheMode: driver.LinkCacheUA,
}

func init() {

@@ -10,6 +10,7 @@ import (
    "mime/multipart"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "time"

@@ -239,7 +240,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
    if err != nil {
        return err
    }
    err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
    err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid))
    if err != nil {
        fmt.Println("Error writing param2 to request body:", err)
        return err
@@ -260,7 +261,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
        return err
    }
    req.Header.Set("Content-Type", writer.FormDataContentType())
    req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
    req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
    resps, err := http.DefaultClient.Do(req)
    if err != nil {
        return err

@@ -258,7 +258,7 @@ type UploadDoneParam struct {
func fileToObj(f File) *model.Object {
    if len(f.Content.FolderName) > 0 {
        return &model.Object{
            ID: fmt.Sprintf("%d", f.ID),
            ID:       strconv.Itoa(f.ID),
            Name:     f.Content.FolderName,
            Size:     0,
            Modified: time.UnixMilli(f.Inserttime),

@@ -9,6 +9,7 @@ import (
    "fmt"
    "mime/multipart"
    "net/http"
    "strconv"
    "strings"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -172,7 +173,7 @@ func (d *ChaoXing) Login() (string, error) {
        return "", err
    }
    req.Header.Set("Content-Type", writer.FormDataContentType())
    req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
    req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return "", err

@@ -17,9 +17,11 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
    "github.com/avast/retry-go"
)

type Chunk struct {
@@ -39,6 +41,9 @@ func (d *Chunk) Init(ctx context.Context) error {
    if d.PartSize <= 0 {
        return errors.New("part size must be positive")
    }
    if len(d.ChunkPrefix) <= 0 {
        return errors.New("chunk folder prefix must not be empty")
    }
    d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
    return nil
}
@@ -72,13 +77,13 @@ func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
    }

    remoteActualDir, name := stdpath.Split(remoteActualPath)
    chunkName := "[openlist_chunk]" + name
    chunkName := d.ChunkPrefix + name
    chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
    if err != nil {
        return nil, err
    }
    var totalSize int64 = 0
    // chunk 0 must exist
    // chunk 0 defaults to -1 so that empty files are supported
    chunkSizes := []int64{-1}
    h := make(map[*utils.HashType]string)
    var first model.Obj
@@ -115,21 +120,6 @@ func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
            chunkSizes[idx] = o.GetSize()
        }
    }
    // check that chunk 0 is not -1, so that empty files are supported
    // with more than one chunk, the last one can never be 0,
    // so only the middle chunks are checked for a size of 0
    for i, l := 0, len(chunkSizes)-2; ; i++ {
        if i == 0 {
            if chunkSizes[i] == -1 {
                return nil, fmt.Errorf("chunk part[%d] are missing", i)
            }
        } else if chunkSizes[i] == 0 {
            return nil, fmt.Errorf("chunk part[%d] are missing", i)
        }
        if i >= l {
            break
        }
    }
    reqDir, _ := stdpath.Split(path)
    objRes := chunkObject{
        Object: model.Object{
@@ -161,67 +151,76 @@ func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
        return nil, err
    }
    result := make([]model.Obj, 0, len(remoteObjs))
    listG, listCtx := errgroup.NewGroupWithContext(ctx, d.NumListWorkers, retry.Attempts(3))
    for _, obj := range remoteObjs {
        if utils.IsCanceled(listCtx) {
            break
        }
        rawName := obj.GetName()
        if obj.IsDir() {
            if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
                chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
                    ReqPath: stdpath.Join(args.ReqPath, rawName),
                    Refresh: args.Refresh,
                })
                if err != nil {
                    return nil, err
                }
                totalSize := int64(0)
                h := make(map[*utils.HashType]string)
                first := obj
                for _, o := range chunkObjs {
                    if o.IsDir() {
                        continue
            if name, ok := strings.CutPrefix(rawName, d.ChunkPrefix); ok {
                resultIdx := len(result)
                result = append(result, nil)
                listG.Go(func(ctx context.Context) error {
                    chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
                        ReqPath: stdpath.Join(args.ReqPath, rawName),
                        Refresh: args.Refresh,
                    })
                    if err != nil {
                        return err
                    }
                    if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
                        hn, value, ok := strings.Cut(after, "_")
                        if ok {
                            ht, ok := utils.GetHashByName(hn)
                            if ok {
                                h[ht] = value
                            }
                    totalSize := int64(0)
                    h := make(map[*utils.HashType]string)
                    first := obj
                    for _, o := range chunkObjs {
                        if o.IsDir() {
                            continue
                        }
                        if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
                            hn, value, ok := strings.Cut(after, "_")
                            if ok {
                                ht, ok := utils.GetHashByName(hn)
                                if ok {
                                    h[ht] = value
                                }
                                continue
                            }
                        }
                        idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
                        if err != nil {
                            continue
                        }
                        if idx == 0 {
                            first = o
                        }
                        totalSize += o.GetSize()
                    }
                    idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
                    if err != nil {
                        continue
                    objRes := model.Object{
                        Name:     name,
                        Size:     totalSize,
                        Modified: first.ModTime(),
                        Ctime:    first.CreateTime(),
                    }
                    if idx == 0 {
                        first = o
                    if len(h) > 0 {
                        objRes.HashInfo = utils.NewHashInfoByMap(h)
                    }
                    totalSize += o.GetSize()
                }
                objRes := model.Object{
                    Name:     name,
                    Size:     totalSize,
                    Modified: first.ModTime(),
                    Ctime:    first.CreateTime(),
                }
                if len(h) > 0 {
                    objRes.HashInfo = utils.NewHashInfoByMap(h)
                }
                if !d.Thumbnail {
                    result = append(result, &objRes)
                } else {
                    thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
                    thumb := fmt.Sprintf("%s/d%s?sign=%s",
                        common.GetApiUrl(ctx),
                        utils.EncodePath(thumbPath, true),
                        sign.Sign(thumbPath))
                    result = append(result, &model.ObjThumb{
                        Object: objRes,
                        Thumbnail: model.Thumbnail{
                            Thumbnail: thumb,
                        },
                    })
                }
                    if !d.Thumbnail {
                        result[resultIdx] = &objRes
                    } else {
                        thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
                        thumb := fmt.Sprintf("%s/d%s?sign=%s",
                            common.GetApiUrl(ctx),
                            utils.EncodePath(thumbPath, true),
                            sign.Sign(thumbPath))
                        result[resultIdx] = &model.ObjThumb{
                            Object: objRes,
                            Thumbnail: model.Thumbnail{
                                Thumbnail: thumb,
                            },
                        }
                    }
                    return nil
                })
                continue
            }
        }
@@ -248,6 +247,9 @@ func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
        })
    }
    }
    if err = listG.Wait(); err != nil {
        return nil, err
    }
    return result, nil
}
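The List refactor reserves an index with result = append(result, nil) before handing each chunk folder off to a worker, so the final listing keeps the remote order even though workers finish out of order, while errgroup caps concurrency at NumListWorkers. A sketch of the same idea using the stock golang.org/x/sync/errgroup (OpenList's own pkg/errgroup wrapper adds context propagation and retry, which this sketch omits):

package main

import (
    "fmt"
    "strings"

    "golang.org/x/sync/errgroup"
)

func main() {
    dirs := []string{"alpha", "beta", "gamma", "delta"}

    // Reserve one slot per directory up front, as the diff does with
    // `result = append(result, nil)`: output order stays deterministic
    // no matter which worker finishes first.
    results := make([]string, len(dirs))

    var g errgroup.Group
    g.SetLimit(2) // analogous to NumListWorkers
    for i, d := range dirs {
        i, d := i, d // capture loop variables for the closure
        g.Go(func() error {
            results[i] = strings.ToUpper(d) // stand-in for the per-folder op.List
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        fmt.Println("list failed:", err)
        return
    }
    fmt.Println(results) // [ALPHA BETA GAMMA DELTA]
}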
@@ -267,6 +269,21 @@ func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
        resultLink.SyncClosers = utils.NewSyncClosers(l)
        return &resultLink, nil
    }
    // check that chunk 0 is not -1, so that empty files are supported
    // with more than one chunk, the last one can never be 0,
    // so only the middle chunks are checked for a size of 0
    for i, l := 0, len(chunkFile.chunkSizes)-2; ; i++ {
        if i == 0 {
            if chunkFile.chunkSizes[i] == -1 {
                return nil, fmt.Errorf("chunk part[%d] are missing", i)
            }
        } else if chunkFile.chunkSizes[i] == 0 {
            return nil, fmt.Errorf("chunk part[%d] are missing", i)
        }
        if i >= l {
            break
        }
    }
    fileSize := chunkFile.GetSize()
    mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        start := httpRange.Start
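The validation block above (moved here from Get) treats slot 0 specially: chunkSizes is pre-filled with -1, so -1 means chunk 0 never appeared, while a size of 0 is legal there to support empty files; the last chunk is exempt because a trailing chunk may legitimately be short. The same control flow extracted as a standalone function, for illustration:

package main

import "fmt"

// validateChunkSizes mirrors the loop above: index 0 defaults to -1 so an
// empty file (a single zero-size chunk) is still valid, interior chunks are
// scanned for zero-size gaps, and the final chunk is never checked.
func validateChunkSizes(sizes []int64) error {
    for i, l := 0, len(sizes)-2; ; i++ {
        if i == 0 {
            if sizes[i] == -1 {
                return fmt.Errorf("chunk part[%d] is missing", i)
            }
        } else if sizes[i] == 0 {
            return fmt.Errorf("chunk part[%d] is missing", i)
        }
        if i >= l {
            break
        }
    }
    return nil
}

func main() {
    fmt.Println(validateChunkSizes([]int64{0}))          // <nil>: empty file is valid
    fmt.Println(validateChunkSizes([]int64{-1}))         // chunk part[0] is missing
    fmt.Println(validateChunkSizes([]int64{4096, 0, 7})) // chunk part[1] is missing
}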
@@ -383,7 +400,7 @@ func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {

func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    if _, ok := srcObj.(*chunkObject); ok {
        newName = "[openlist_chunk]" + newName
        newName = d.ChunkPrefix + newName
    }
    return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
}
@@ -404,14 +421,14 @@ func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStream
    if err != nil {
        return err
    }
    if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
    if (d.Thumbnail && dstDir.GetName() == ".thumbnails") || (d.ChunkLargeFileOnly && file.GetSize() <= d.PartSize) {
        return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
    }
    upReader := &driver.ReaderUpdatingProgress{
        Reader:         file,
        UpdateProgress: up,
    }
    dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
    dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), d.ChunkPrefix+file.GetName())
    if d.StoreHash {
        for ht, value := range file.GetHash().All() {
            _ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
@@ -472,11 +489,7 @@ func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if err != nil {
        return nil, errs.NotImplement
    }
    wd, ok := remoteStorage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    remoteDetails, err := wd.GetDetails(ctx)
    remoteDetails, err := op.GetStorageDetails(ctx, remoteStorage)
    if err != nil {
        return nil, err
    }

@@ -6,10 +6,13 @@ import (
)

type Addition struct {
    RemotePath string `json:"remote_path" required:"true"`
    PartSize   int64  `json:"part_size" required:"true" type:"number" help:"bytes"`
    CustomExt  string `json:"custom_ext" type:"string"`
    StoreHash  bool   `json:"store_hash" type:"bool" default:"true"`
    RemotePath         string `json:"remote_path" required:"true"`
    PartSize           int64  `json:"part_size" required:"true" type:"number" help:"bytes"`
    ChunkLargeFileOnly bool   `json:"chunk_large_file_only" default:"false" help:"chunk only if file size > part_size"`
    ChunkPrefix        string `json:"chunk_prefix" type:"string" default:"[openlist_chunk]" help:"the prefix of chunk folder"`
    CustomExt          string `json:"custom_ext" type:"string"`
    StoreHash          bool   `json:"store_hash" type:"bool" default:"true"`
    NumListWorkers     int    `json:"num_list_workers" required:"true" type:"number" default:"5"`

    Thumbnail  bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
    ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
@@ -26,6 +29,11 @@ var config = driver.Config{

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Chunk{}
        return &Chunk{
            Addition: Addition{
                ChunkPrefix:    "[openlist_chunk]",
                NumListWorkers: 5,
            },
        }
    })
}
@@ -342,15 +342,14 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    // TODO return storage details (total space, free space, etc.)
    var r CapacityResp
    err := d.request(http.MethodGet, "/user/capacity", nil, &r)
    err := d.request(http.MethodGet, "/user/capacity", func(req *resty.Request) {
        req.SetContext(ctx)
    }, &r)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: r.Total,
            FreeSpace:  r.Total - r.Used,
        },
        DiskUsage: driver.DiskUsageFromUsedAndTotal(r.Used, r.Total),
    }, nil
}

@@ -44,7 +44,7 @@ func (d *Crypt) GetAddition() driver.Additional {
}

func (d *Crypt) Init(ctx context.Context) error {
    //obfuscate credentials if it's updated or just created
    // obfuscate credentials if it's updated or just created
    err := d.updateObfusParm(&d.Password)
    if err != nil {
        return fmt.Errorf("failed to obfuscate password: %w", err)
@@ -63,7 +63,7 @@ func (d *Crypt) Init(ctx context.Context) error {

    op.MustSaveDriverStorage(d)

    //need remote storage exist
    // need remote storage exist
    storage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
    if err != nil {
        return fmt.Errorf("can't find remote storage: %w", err)
@@ -109,8 +109,8 @@ func (d *Crypt) Drop(ctx context.Context) error {

func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    path := dir.GetPath()
    //return d.list(ctx, d.RemotePath, path)
    //remoteFull
    // return d.list(ctx, d.RemotePath, path)
    // remoteFull

    objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: args.Refresh})
    // the obj must implement the model.SetPath interface
@@ -124,7 +124,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
    if obj.IsDir() {
        name, err := d.cipher.DecryptDirName(obj.GetName())
        if err != nil {
            //filter illegal files
            // filter illegal files
            continue
        }
        if !d.ShowHidden && strings.HasPrefix(name, ".") {
@@ -143,12 +143,12 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
    thumb, ok := model.GetThumb(obj)
    size, err := d.cipher.DecryptedSize(obj.GetSize())
    if err != nil {
        //filter illegal files
        // filter illegal files
        continue
    }
    name, err := d.cipher.DecryptFileName(obj.GetName())
    if err != nil {
        //filter illegal files
        // filter illegal files
        continue
    }
    if !d.ShowHidden && strings.HasPrefix(name, ".") {
@@ -202,7 +202,7 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
    remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
    if err != nil {
        if errs.IsObjectNotFound(err) && secondTry {
            //try the opposite
            // try the opposite
            remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder)
            remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
            if err2 != nil {
@@ -240,7 +240,7 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
        IsFolder: remoteObj.IsDir(),
    }
    return obj, nil
    //return nil, errs.ObjectNotFound
    // return nil, errs.ObjectNotFound
}

// https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37
@@ -317,7 +317,8 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
            }
            return readSeeker, nil
        }),
        SyncClosers: utils.NewSyncClosers(remoteLink),
        SyncClosers:      utils.NewSyncClosers(remoteLink),
        RequireReference: remoteLink.RequireReference,
    }, nil
}
@@ -366,7 +367,6 @@ func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
        return fmt.Errorf("failed to convert path to remote path: %w", err)
    }
    return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)

}

func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
@@ -412,11 +412,7 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
}

func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    wd, ok := d.remoteStorage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    remoteDetails, err := wd.GetDetails(ctx)
    remoteDetails, err := op.GetStorageDetails(ctx, d.remoteStorage)
    if err != nil {
        return nil, err
    }

@@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
        "Authorization":  {storeInfo.Auth},
        "Content-Type":   {"application/octet-stream"},
        "Content-Crc32":  {crc32Value},
        "Content-Length": {fmt.Sprintf("%d", file.GetSize())},
        "Content-Length": {strconv.FormatInt(file.GetSize(), 10)},
        "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
    }
    res, err := base.HttpClient.Do(req)
@@ -577,7 +577,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
    if partIndex == totalParts-1 {
        size = fileSize - offset
    }
    var reader *stream.SectionReader
    var reader io.ReadSeeker
    var rateLimitedRd io.Reader
    crc32Value := ""
    threadG.GoWithLifecycle(errgroup.Lifecycle{
@@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
        "Authorization":  {storeInfo.Auth},
        "Content-Type":   {"application/octet-stream"},
        "Content-Crc32":  {crc32Value},
        "Content-Length": {fmt.Sprintf("%d", size)},
        "Content-Length": {strconv.FormatInt(size, 10)},
        "Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
    }
    res, err := base.HttpClient.Do(req)

@@ -16,9 +16,10 @@ type Addition struct {
}

var config = driver.Config{
    Name:        "FebBox",
    NoUpload:    true,
    DefaultRoot: "0",
    Name:          "FebBox",
    NoUpload:      true,
    DefaultRoot:   "0",
    LinkCacheMode: driver.LinkCacheIP,
}

func init() {

@@ -31,11 +31,11 @@ type Addition struct {
}

var config = driver.Config{
    Name:          "FTP",
    LocalSort:     true,
    OnlyLinkMFile: false,
    DefaultRoot:   "/",
    NoLinkURL:     true,
    Name:        "FTP",
    LocalSort:   true,
    OnlyProxy:   true,
    DefaultRoot: "/",
    NoLinkURL:   true,
}

func init() {

@@ -167,4 +167,30 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
    return err
}

func (d *GoogleDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if d.DisableDiskUsage {
        return nil, errs.NotImplement
    }
    about, err := d.getAbout(ctx)
    if err != nil {
        return nil, err
    }
    var total, used uint64
    if about.StorageQuota.Limit == nil {
        total = 0
    } else {
        total, err = strconv.ParseUint(*about.StorageQuota.Limit, 10, 64)
        if err != nil {
            return nil, err
        }
    }
    used, err = strconv.ParseUint(about.StorageQuota.Usage, 10, 64)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
    }, nil
}

var _ driver.Driver = (*GoogleDrive)(nil)
@@ -7,14 +7,15 @@ import (

type Addition struct {
    driver.RootID
    RefreshToken   string `json:"refresh_token" required:"true"`
    OrderBy        string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime"`
    OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
    UseOnlineAPI   bool   `json:"use_online_api" default:"true"`
    APIAddress     string `json:"api_url_address" default:"https://api.oplist.org/googleui/renewapi"`
    ClientID       string `json:"client_id"`
    ClientSecret   string `json:"client_secret"`
    ChunkSize      int64  `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
    RefreshToken     string `json:"refresh_token" required:"true"`
    OrderBy          string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime"`
    OrderDirection   string `json:"order_direction" type:"select" options:"asc,desc"`
    UseOnlineAPI     bool   `json:"use_online_api" default:"true"`
    APIAddress       string `json:"api_url_address" default:"https://api.oplist.org/googleui/renewapi"`
    ClientID         string `json:"client_id"`
    ClientSecret     string `json:"client_secret"`
    ChunkSize        int64  `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
    DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
}

var config = driver.Config{

@@ -78,3 +78,12 @@ type Error struct {
        Message string `json:"message"`
    } `json:"error"`
}

type AboutResp struct {
    StorageQuota struct {
        Limit             *string `json:"limit"`
        Usage             string  `json:"usage"`
        UsageInDrive      string  `json:"usageInDrive"`
        UsageInDriveTrash string  `json:"usageInDriveTrash"`
    }
}

@@ -28,16 +28,16 @@ import (
// do others that not defined in Driver interface

type googleDriveServiceAccount struct {
    //Type string `json:"type"`
    //ProjectID string `json:"project_id"`
    //PrivateKeyID string `json:"private_key_id"`
    // Type string `json:"type"`
    // ProjectID string `json:"project_id"`
    // PrivateKeyID string `json:"private_key_id"`
    PrivateKey  string `json:"private_key"`
    ClientEMail string `json:"client_email"`
    //ClientID string `json:"client_id"`
    //AuthURI string `json:"auth_uri"`
    // ClientID string `json:"client_id"`
    // AuthURI string `json:"auth_uri"`
    TokenURI string `json:"token_uri"`
    //AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url"`
    //ClientX509CertURL string `json:"client_x509_cert_url"`
    // AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url"`
    // ClientX509CertURL string `json:"client_x509_cert_url"`
}

func (d *GoogleDrive) refreshToken() error {
@@ -255,7 +255,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
}

func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
    var defaultChunkSize = d.ChunkSize * 1024 * 1024
    defaultChunkSize := d.ChunkSize * 1024 * 1024
    ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
    if err != nil {
        return err
@@ -315,3 +315,18 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
    }
    return nil
}

func (d *GoogleDrive) getAbout(ctx context.Context) (*AboutResp, error) {
    query := map[string]string{
        "fields": "storageQuota",
    }
    var resp AboutResp
    _, err := d.request("https://www.googleapis.com/drive/v3/about", http.MethodGet, func(req *resty.Request) {
        req.SetQueryParams(query)
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}
drivers/halalcloud_open/common.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package halalcloudopen

import (
    "sync"
    "time"

    sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
)

var (
    slicePostErrorRetryInterval = time.Second * 120
    retryTimes                  = 5
)

type halalCommon struct {
    // *AuthService // login info
    UserInfo         *sdkUser.User // user info
    refreshTokenFunc func(token string) error
    // serv *AuthService
    configs sync.Map
}

func (m *halalCommon) GetAccessToken() (string, error) {
    value, exists := m.configs.Load("access_token")
    if !exists {
        return "", nil // return an empty string if the key does not exist
    }
    return value.(string), nil // return the stored value
}

// GetRefreshToken implements ConfigStore.
func (m *halalCommon) GetRefreshToken() (string, error) {
    value, exists := m.configs.Load("refresh_token")
    if !exists {
        return "", nil // return an empty string if the key does not exist
    }
    return value.(string), nil // return the stored value
}

// SetAccessToken implements ConfigStore.
func (m *halalCommon) SetAccessToken(token string) error {
    m.configs.Store("access_token", token)
    return nil
}

// SetRefreshToken implements ConfigStore.
func (m *halalCommon) SetRefreshToken(token string) error {
    m.configs.Store("refresh_token", token)
    if m.refreshTokenFunc != nil {
        return m.refreshTokenFunc(token)
    }
    return nil
}

// SetToken implements ConfigStore.
func (m *halalCommon) SetToken(accessToken string, refreshToken string, expiresIn int64) error {
    m.configs.Store("access_token", accessToken)
    m.configs.Store("refresh_token", refreshToken)
    m.configs.Store("expires_in", expiresIn)
    if m.refreshTokenFunc != nil {
        return m.refreshTokenFunc(refreshToken)
    }
    return nil
}

// ClearConfigs implements ConfigStore.
func (m *halalCommon) ClearConfigs() error {
    m.configs = sync.Map{} // reset the map
    return nil
}

// DeleteConfig implements ConfigStore.
func (m *halalCommon) DeleteConfig(key string) error {
    _, exists := m.configs.Load(key)
    if !exists {
        return nil // nothing to do if the key does not exist
    }
    m.configs.Delete(key) // delete the given config entry
    return nil
}

// GetConfig implements ConfigStore.
func (m *halalCommon) GetConfig(key string) (string, error) {
    value, exists := m.configs.Load(key)
    if !exists {
        return "", nil // return an empty string if the key does not exist
    }
    return value.(string), nil // return the stored value
}

// ListConfigs implements ConfigStore.
func (m *halalCommon) ListConfigs() (map[string]string, error) {
    configs := make(map[string]string)
    m.configs.Range(func(key, value interface{}) bool {
        configs[key.(string)] = value.(string) // copy each entry into the result map
        return true                            // keep iterating
    })
    return configs, nil // return all config entries
}

// SetConfig implements ConfigStore.
func (m *halalCommon) SetConfig(key string, value string) error {
    m.configs.Store(key, value) // Store sets or updates the entry
    return nil                  // return nil once the entry is set
}

func NewHalalCommon() *halalCommon {
    return &halalCommon{
        configs: sync.Map{},
    }
}
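halalCommon is the SDK's config/token store backed by a sync.Map, so token reads and writes are safe across goroutines, and SetRefreshToken/SetToken additionally fire refreshTokenFunc so the driver can persist the rotated token (via op.MustSaveDriverStorage in driver_init.go below). A sketch of the round trip, written as a test in the same package; the callback is a stand-in for the driver's persistence hook:

package halalcloudopen

import "testing"

// TestHalalCommonTokenRoundTrip exercises SetToken and the two getters; the
// callback stands in for the driver's refreshTokenFunc.
func TestHalalCommonTokenRoundTrip(t *testing.T) {
    persisted := ""
    store := NewHalalCommon()
    store.refreshTokenFunc = func(token string) error {
        persisted = token // the driver would save the token into its storage here
        return nil
    }

    if err := store.SetToken("access-abc", "refresh-xyz", 3600); err != nil {
        t.Fatal(err)
    }
    access, _ := store.GetAccessToken()
    refresh, _ := store.GetRefreshToken()
    if access != "access-abc" || refresh != "refresh-xyz" || persisted != "refresh-xyz" {
        t.Fatalf("unexpected state: %q %q %q", access, refresh, persisted)
    }
}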
drivers/halalcloud_open/driver.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package halalcloudopen

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    sdkClient "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
    sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

type HalalCloudOpen struct {
    *halalCommon
    model.Storage
    Addition
    sdkClient          *sdkClient.Client
    sdkUserFileService *sdkUserFile.UserFileService
    sdkUserService     *sdkUser.UserService
    uploadThread       int
}

func (d *HalalCloudOpen) Config() driver.Config {
    return config
}

func (d *HalalCloudOpen) GetAddition() driver.Additional {
    return &d.Addition
}

var _ driver.Driver = (*HalalCloudOpen)(nil)
drivers/halalcloud_open/driver_curd_impl.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package halalcloudopen

import (
    "context"
    "strconv"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
    sdkModel "github.com/halalcloud/golang-sdk-lite/halalcloud/model"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

func (d *HalalCloudOpen) getFiles(ctx context.Context, dir model.Obj) ([]model.Obj, error) {

    files := make([]model.Obj, 0)
    limit := int64(100)
    token := ""

    for {
        result, err := d.sdkUserFileService.List(ctx, &sdkUserFile.FileListRequest{
            Parent: &sdkUserFile.File{Path: dir.GetPath()},
            ListInfo: &sdkModel.ScanListRequest{
                Limit: strconv.FormatInt(limit, 10),
                Token: token,
            },
        })
        if err != nil {
            return nil, err
        }

        for i := 0; len(result.Files) > i; i++ {
            files = append(files, NewObjFile(result.Files[i]))
        }

        if result.ListInfo == nil || result.ListInfo.Token == "" {
            break
        }
        token = result.ListInfo.Token

    }
    return files, nil
}

func (d *HalalCloudOpen) makeDir(ctx context.Context, dir model.Obj, name string) (model.Obj, error) {
    _, err := d.sdkUserFileService.Create(ctx, &sdkUserFile.File{
        Path: dir.GetPath(),
        Name: name,
    })
    return nil, err
}

func (d *HalalCloudOpen) move(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
    oldDir := obj.GetPath()
    newDir := dir.GetPath()
    _, err := d.sdkUserFileService.Move(ctx, &sdkUserFile.BatchOperationRequest{
        Source: []*sdkUserFile.File{
            {
                Path: oldDir,
            },
        },
        Dest: &sdkUserFile.File{
            Path: newDir,
        },
    })
    return nil, err
}

func (d *HalalCloudOpen) rename(ctx context.Context, obj model.Obj, name string) (model.Obj, error) {

    _, err := d.sdkUserFileService.Rename(ctx, &sdkUserFile.File{
        Path: obj.GetPath(),
        Name: name,
    })
    return nil, err
}

func (d *HalalCloudOpen) copy(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
    id := obj.GetID()
    sourcePath := obj.GetPath()
    if len(id) > 0 {
        sourcePath = ""
    }

    destID := dir.GetID()
    destPath := dir.GetPath()
    if len(destID) > 0 {
        destPath = ""
    }
    dest := &sdkUserFile.File{
        Path:     destPath,
        Identity: destID,
    }
    _, err := d.sdkUserFileService.Copy(ctx, &sdkUserFile.BatchOperationRequest{
        Source: []*sdkUserFile.File{
            {
                Path:     sourcePath,
                Identity: id,
            },
        },
        Dest: dest,
    })
    return nil, err
}

func (d *HalalCloudOpen) remove(ctx context.Context, obj model.Obj) error {
    id := obj.GetID()
    _, err := d.sdkUserFileService.Delete(ctx, &sdkUserFile.BatchOperationRequest{
        Source: []*sdkUserFile.File{
            {
                Identity: id,
                Path:     obj.GetPath(),
            },
        },
    })
    return err
}

func (d *HalalCloudOpen) details(ctx context.Context) (*model.StorageDetails, error) {
    ret, err := d.sdkUserService.GetStatisticsAndQuota(ctx)
    if err != nil {
        return nil, err
    }
    total := uint64(ret.DiskStatisticsQuota.BytesQuota)

    free := uint64(ret.DiskStatisticsQuota.BytesFree)
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  free,
        },
    }, nil
}
drivers/halalcloud_open/driver_get_link.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package halalcloudopen

import (
    "context"
    "crypto/sha1"
    "io"
    "strconv"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
    "github.com/rclone/rclone/lib/readers"
)

func (d *HalalCloudOpen) getLink(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    if args.Redirect {
        // return nil, model.ErrUnsupported
        fid := file.GetID()
        fpath := file.GetPath()
        if fid != "" {
            fpath = ""
        }
        fi, err := d.sdkUserFileService.GetDirectDownloadAddress(ctx, &sdkUserFile.DirectDownloadRequest{
            Identity: fid,
            Path:     fpath,
        })
        if err != nil {
            return nil, err
        }
        expireAt := fi.ExpireAt
        duration := time.Until(time.UnixMilli(expireAt))
        return &model.Link{
            URL:        fi.DownloadAddress,
            Expiration: &duration,
        }, nil
    }
    result, err := d.sdkUserFileService.ParseFileSlice(ctx, &sdkUserFile.File{
        Identity: file.GetID(),
        Path:     file.GetPath(),
    })
    if err != nil {
        return nil, err
    }
    fileAddrs := []*sdkUserFile.SliceDownloadInfo{}
    var addressDuration int64

    nodesNumber := len(result.RawNodes)
    nodesIndex := nodesNumber - 1
    startIndex, endIndex := 0, nodesIndex
    for nodesIndex >= 0 {
        if nodesIndex >= 200 {
            endIndex = 200
        } else {
            endIndex = nodesNumber
        }
        for ; endIndex <= nodesNumber; endIndex += 200 {
            if endIndex == 0 {
                endIndex = 1
            }
            sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
                Identity: result.RawNodes[startIndex:endIndex],
                Version:  1,
            })
            if err != nil {
                return nil, err
            }
            addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
            fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
            startIndex = endIndex
            nodesIndex -= 200
        }

    }

    size, _ := strconv.ParseInt(result.FileSize, 10, 64)
    chunks := getChunkSizes(result.Sizes)
    resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        length := httpRange.Length
        if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
            length = size - httpRange.Start
        }
        oo := &openObject{
            ctx:     ctx,
            d:       fileAddrs,
            chunk:   []byte{},
            chunks:  chunks,
            skip:    httpRange.Start,
            sha:     result.Sha1,
            shaTemp: sha1.New(),
        }

        return readers.NewLimitedReadCloser(oo, length), nil
    }

    var duration time.Duration
    if addressDuration != 0 {
        duration = time.Until(time.UnixMilli(addressDuration))
    } else {
        duration = time.Until(time.Now().Add(time.Hour))
    }

    return &model.Link{
        RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
        Expiration:  &duration,
    }, nil
}
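The address-fetching loop above walks result.RawNodes in windows of at most 200 identities per GetSliceDownloadAddress call, but the nested startIndex/endIndex/nodesIndex bookkeeping is hard to follow. A simpler sketch of the same windowing, for illustration only (not the driver's code):

package main

import "fmt"

// batch splits ids into windows of at most n entries — the same slicing the
// loop above performs over result.RawNodes, one window per address request.
func batch(ids []string, n int) [][]string {
    var out [][]string
    for start := 0; start < len(ids); start += n {
        end := start + n
        if end > len(ids) {
            end = len(ids)
        }
        out = append(out, ids[start:end])
    }
    return out
}

func main() {
    ids := make([]string, 450)
    for i := range ids {
        ids[i] = fmt.Sprintf("node-%03d", i)
    }
    for _, w := range batch(ids, 200) {
        // each window would be one GetSliceDownloadAddress request
        fmt.Println(len(w)) // 200, 200, 50
    }
}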
drivers/halalcloud_open/driver_init.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package halalcloudopen

import (
    "context"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
    sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

func (d *HalalCloudOpen) Init(ctx context.Context) error {
    if d.uploadThread < 1 || d.uploadThread > 32 {
        d.uploadThread, d.UploadThread = 3, 3
    }
    if d.halalCommon == nil {
        d.halalCommon = &halalCommon{
            UserInfo: &sdkUser.User{},
            refreshTokenFunc: func(token string) error {
                d.Addition.RefreshToken = token
                op.MustSaveDriverStorage(d)
                return nil
            },
        }
    }
    if d.Addition.RefreshToken != "" {
        d.halalCommon.SetRefreshToken(d.Addition.RefreshToken)
    }
    timeout := d.Addition.TimeOut
    if timeout <= 0 {
        timeout = 60
    }
    host := d.Addition.Host
    if host == "" {
        host = "openapi.2dland.cn"
    }

    client := apiclient.NewClient(nil, host, d.Addition.ClientID, d.Addition.ClientSecret, d.halalCommon, apiclient.WithTimeout(time.Second*time.Duration(timeout)))
    d.sdkClient = client
    d.sdkUserFileService = sdkUserFile.NewUserFileService(client)
    d.sdkUserService = sdkUser.NewUserService(client)
    userInfo, err := d.sdkUserService.Get(ctx, &sdkUser.User{})
    if err != nil {
        return err
    }
    d.halalCommon.UserInfo = userInfo
    // fetching the user info already proves the RefreshToken is valid, so no further check is needed
    return nil
}
drivers/halalcloud_open/driver_interface.go
Normal file
48
drivers/halalcloud_open/driver_interface.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package halalcloudopen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
)
|
||||
|
||||
func (d *HalalCloudOpen) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
return d.getFiles(ctx, dir)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
return d.getLink(ctx, file, args)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||
return d.makeDir(ctx, parentDir, dirName)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
return d.move(ctx, srcObj, dstDir)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||
return d.rename(ctx, srcObj, newName)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
return d.copy(ctx, srcObj, dstDir)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) Remove(ctx context.Context, obj model.Obj) error {
|
||||
return d.remove(ctx, obj)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||
return d.put(ctx, dstDir, stream, up)
|
||||
}
|
||||
|
||||
func (d *HalalCloudOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
return d.details(ctx)
|
||||
}
|
||||
drivers/halalcloud_open/halalcloud_upload.go (new file, 258 lines)
@@ -0,0 +1,258 @@
package halalcloudopen

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "strings"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
    "github.com/ipfs/go-cid"
)

func (d *HalalCloudOpen) put(ctx context.Context, dstDir model.Obj, fileStream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {

    newPath := path.Join(dstDir.GetPath(), fileStream.GetName())

    uploadTask, err := d.sdkUserFileService.CreateUploadTask(ctx, &sdkUserFile.File{
        Path: newPath,
        Size: fileStream.GetSize(),
    })
    if err != nil {
        return nil, err
    }

    if uploadTask.Created {
        return nil, nil
    }

    slicesList := make([]string, 0)
    codec := uint64(0x55)
    if uploadTask.BlockCodec > 0 {
        codec = uint64(uploadTask.BlockCodec)
    }
    blockHashType := uploadTask.BlockHashType
    mhType := uint64(0x12)
    if blockHashType > 0 {
        mhType = uint64(blockHashType)
    }
    prefix := cid.Prefix{
        Codec:    codec,
        MhLength: -1,
        MhType:   mhType,
        Version:  1,
    }
    blockSize := uploadTask.BlockSize
    useSingleUpload := true
    //
    if fileStream.GetSize() <= int64(blockSize) || d.uploadThread <= 1 {
        useSingleUpload = true
    }
    // Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.
    // read file
    if useSingleUpload {
        bufferSize := int(blockSize)
        buffer := make([]byte, bufferSize)
        reader := driver.NewLimitedUploadStream(ctx, fileStream)
        teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
        // fileStream.Seek(0, os.SEEK_SET)
        for {
            n, err := teeReader.Read(buffer)
            if n > 0 {
                data := buffer[:n]
                uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
                if err != nil {
                    return nil, err
                }
                slicesList = append(slicesList, uploadCid.String())
            }
            if err == io.EOF || n == 0 {
                break
            }
        }
    } else {
        // TODO: implement multipart upload, currently using single-threaded upload to ensure safety.
        bufferSize := int(blockSize)
        buffer := make([]byte, bufferSize)
        reader := driver.NewLimitedUploadStream(ctx, fileStream)
        teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
        for {
            n, err := teeReader.Read(buffer)
            if n > 0 {
                data := buffer[:n]
                uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
                if err != nil {
                    return nil, err
                }
                slicesList = append(slicesList, uploadCid.String())
            }
            if err == io.EOF || n == 0 {
                break
            }
        }
    }
    newFile, err := makeFile(ctx, slicesList, uploadTask.Task, uploadTask.UploadAddress, retryTimes)
    if err != nil {
        return nil, err
    }

    return NewObjFile(newFile), nil

}

func makeFile(ctx context.Context, fileSlice []string, taskID string, uploadAddress string, retry int) (*sdkUserFile.File, error) {
    var lastError error = nil
    for range retry {
        newFile, err := doMakeFile(fileSlice, taskID, uploadAddress)
        if err == nil {
            return newFile, nil
        }
        if ctx.Err() != nil {
            return nil, err
        }
        if strings.Contains(err.Error(), "not found") {
            return nil, err
        }
        lastError = err
        time.Sleep(slicePostErrorRetryInterval)
    }
    return nil, fmt.Errorf("mk file slice failed after %d times, error: %s", retry, lastError.Error())
}

func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUserFile.File, error) {
    accessUrl := uploadAddress + "/" + taskID
    getTimeOut := time.Minute * 2
    u, err := url.Parse(accessUrl)
    if err != nil {
        return nil, err
    }
    n, _ := json.Marshal(fileSlice)
    httpRequest := http.Request{
        Method: http.MethodPost,
        URL:    u,
        Header: map[string][]string{
            "Accept":       {"application/json"},
            "Content-Type": {"application/json"},
            //"Content-Length": {strconv.Itoa(len(n))},
        },
        Body: io.NopCloser(bytes.NewReader(n)),
    }
    httpClient := http.Client{
        Timeout: getTimeOut,
    }
    httpResponse, err := httpClient.Do(&httpRequest)
    if err != nil {
        return nil, err
    }
    defer httpResponse.Body.Close()
    if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
        b, _ := io.ReadAll(httpResponse.Body)
        message := string(b)
        return nil, fmt.Errorf("mk file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
    }
    b, _ := io.ReadAll(httpResponse.Body)
    var result *sdkUserFile.File
    err = json.Unmarshal(b, &result)
    if err != nil {
        return nil, err
    }
    return result, nil
}
func postFileSlice(ctx context.Context, fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix, retry int) (cid.Cid, error) {
    var lastError error = nil
    for range retry {
        newCid, err := doPostFileSlice(fileSlice, taskID, uploadAddress, preix)
        if err == nil {
            return newCid, nil
        }
        if ctx.Err() != nil {
            return cid.Undef, err
        }
        time.Sleep(slicePostErrorRetryInterval)
        lastError = err
    }
    return cid.Undef, fmt.Errorf("upload file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix) (cid.Cid, error) {
    // 1. sum file slice
    newCid, err := preix.Sum(fileSlice)
    if err != nil {
        return cid.Undef, err
    }
    // 2. post file slice
    sliceCidString := newCid.String()
    // /{taskID}/{sliceID}
    accessUrl := uploadAddress + "/" + taskID + "/" + sliceCidString
    getTimeOut := time.Second * 30
    // get {accessUrl} in {getTimeOut}
    u, err := url.Parse(accessUrl)
    if err != nil {
        return cid.Undef, err
    }
    // header: accept: application/json
    // header: content-type: application/octet-stream
    // header: content-length: {fileSlice.length}
    // header: x-content-cid: {sliceCidString}
    // header: x-task-id: {taskID}
    httpRequest := http.Request{
        Method: http.MethodGet,
        URL:    u,
        Header: map[string][]string{
            "Accept": {"application/json"},
        },
    }
    httpClient := http.Client{
        Timeout: getTimeOut,
    }
    httpResponse, err := httpClient.Do(&httpRequest)
    if err != nil {
        return cid.Undef, err
    }
    if httpResponse.StatusCode != http.StatusOK {
        return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d", httpResponse.StatusCode)
    }
    var result bool
    b, err := io.ReadAll(httpResponse.Body)
    if err != nil {
        return cid.Undef, err
    }
    err = json.Unmarshal(b, &result)
    if err != nil {
        return cid.Undef, err
    }
    if result {
        return newCid, nil
    }

    httpRequest = http.Request{
        Method: http.MethodPost,
        URL:    u,
        Header: map[string][]string{
            "Accept":       {"application/json"},
            "Content-Type": {"application/octet-stream"},
            // "Content-Length": {strconv.Itoa(len(fileSlice))},
        },
        Body: io.NopCloser(bytes.NewReader(fileSlice)),
    }
    httpResponse, err = httpClient.Do(&httpRequest)
    if err != nil {
        return cid.Undef, err
    }
    defer httpResponse.Body.Close()
    if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
        b, _ := io.ReadAll(httpResponse.Body)
        message := string(b)
        return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
    }
    //

    return newCid, nil
}
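Each slice is content-addressed before upload: prefix.Sum hashes the bytes with the multihash type the server negotiated and yields a CIDv1 that becomes the slice's upload path segment (/{taskID}/{sliceID}) and doubles as an integrity check. A minimal sketch with github.com/ipfs/go-cid using the same defaults the code falls back to (codec 0x55 raw, multihash 0x12 sha2-256):

package main

import (
    "fmt"

    "github.com/ipfs/go-cid"
    mh "github.com/multiformats/go-multihash"
)

func main() {
    // Same defaults the upload code falls back to when the task does not
    // specify BlockCodec/BlockHashType.
    prefix := cid.Prefix{
        Version:  1,
        Codec:    0x55,        // raw
        MhType:   mh.SHA2_256, // 0x12
        MhLength: -1,          // default digest length for the hash function
    }
    c, err := prefix.Sum([]byte("hello, slice"))
    if err != nil {
        panic(err)
    }
    fmt.Println(c.String()) // e.g. bafkrei... — used as the {sliceID} path segment
}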
drivers/halalcloud_open/meta.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package halalcloudopen

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
	// Usually one of two
	driver.RootPath
	// define other
	RefreshToken string `json:"refresh_token" required:"false" help:"If using a personal API approach, the RefreshToken is not required."`
	UploadThread int    `json:"upload_thread" type:"number" default:"3" help:"1 <= thread <= 32"`

	ClientID     string `json:"client_id" required:"true" default:""`
	ClientSecret string `json:"client_secret" required:"true" default:""`
	Host         string `json:"host" required:"false" default:"openapi.2dland.cn"`
	TimeOut      int    `json:"timeout" type:"number" default:"60" help:"timeout in seconds"`
}

var config = driver.Config{
	Name:        "HalalCloudOpen",
	OnlyProxy:   false,
	DefaultRoot: "/",
	NoLinkURL:   false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &HalalCloudOpen{}
	})
}
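
The init() above follows the usual Go self-registration pattern: importing the driver package (often blank-imported) runs init(), which hands a constructor to a central registry. A generic sketch of the idea under assumed names; the registry shown is illustrative, not OpenList's actual op package:

package registry

import "sync"

// Driver is whatever interface all registered implementations satisfy.
type Driver interface{ Name() string }

var (
	mu        sync.Mutex
	factories []func() Driver
)

// Register is called from each driver package's init(); importing the
// package for side effects is enough to make the driver available.
func Register(f func() Driver) {
	mu.Lock()
	defer mu.Unlock()
	factories = append(factories, f)
}

// NewAll instantiates one of each registered driver.
func NewAll() []Driver {
	mu.Lock()
	defer mu.Unlock()
	out := make([]Driver, 0, len(factories))
	for _, f := range factories {
		out = append(out, f())
	}
	return out
}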
60
drivers/halalcloud_open/obj_file.go
Normal file
@@ -0,0 +1,60 @@
package halalcloudopen

import (
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

type ObjFile struct {
	sdkFile    *sdkUserFile.File
	fileSize   int64
	modTime    time.Time
	createTime time.Time
}

func NewObjFile(f *sdkUserFile.File) model.Obj {
	ofile := &ObjFile{sdkFile: f}
	ofile.fileSize = f.Size
	modTimeTs := f.UpdateTs
	ofile.modTime = time.UnixMilli(modTimeTs)
	createTimeTs := f.CreateTs
	ofile.createTime = time.UnixMilli(createTimeTs)
	return ofile
}

func (f *ObjFile) GetSize() int64 {
	return f.fileSize
}

func (f *ObjFile) GetName() string {
	return f.sdkFile.Name
}

func (f *ObjFile) ModTime() time.Time {
	return f.modTime
}

func (f *ObjFile) IsDir() bool {
	return f.sdkFile.Dir
}

func (f *ObjFile) GetHash() utils.HashInfo {
	return utils.HashInfo{
		// TODO: support more hash types
	}
}

func (f *ObjFile) GetID() string {
	return f.sdkFile.Identity
}

func (f *ObjFile) GetPath() string {
	return f.sdkFile.Path
}

func (f *ObjFile) CreateTime() time.Time {
	return f.createTime
}
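
NewObjFile assumes the SDK reports UpdateTs/CreateTs as epoch milliseconds and converts them with time.UnixMilli. A quick illustration of that conversion (the timestamp value is made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := int64(1726000000000)       // milliseconds since the Unix epoch
	t := time.UnixMilli(ts)          // convert to a time.Time
	fmt.Println(t.UTC())             // prints the corresponding UTC time
	fmt.Println(t.UnixMilli() == ts) // true: the conversion is lossless
}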
185
drivers/halalcloud_open/utils.go
Normal file
@@ -0,0 +1,185 @@
package halalcloudopen

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"net/http"
	"sync"
	"time"

	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
	"github.com/ipfs/go-cid"
)

// get the next chunk
func (oo *openObject) getChunk(_ context.Context) (err error) {
	if oo.id >= len(oo.chunks) {
		return io.EOF
	}
	var chunk []byte
	err = utils.Retry(3, time.Second, func() (err error) {
		chunk, err = getRawFiles(oo.d[oo.id])
		return err
	})
	if err != nil {
		return err
	}
	oo.id++
	oo.chunk = chunk
	return nil
}

// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return 0, fmt.Errorf("read on closed file")
	}
	// Skip data at the start if requested
	for oo.skip > 0 {
		//size := 1024 * 1024
		_, size, err := oo.ChunkLocation(oo.id)
		if err != nil {
			return 0, err
		}
		if oo.skip < int64(size) {
			break
		}
		oo.id++
		oo.skip -= int64(size)
	}
	if len(oo.chunk) == 0 {
		err = oo.getChunk(oo.ctx)
		if err != nil {
			return 0, err
		}
		if oo.skip > 0 {
			oo.chunk = (oo.chunk)[oo.skip:]
			oo.skip = 0
		}
	}
	n = copy(p, oo.chunk)
	oo.shaTemp.Write(p[:n])
	oo.chunk = (oo.chunk)[n:]
	return n, nil
}

// Close closes the file - hash mismatch errors are reported here
func (oo *openObject) Close() (err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return nil
	}
	// verify the SHA1 checksum
	if string(oo.shaTemp.Sum(nil)) != oo.sha {
		return fmt.Errorf("failed to finish download: SHA mismatch")
	}

	oo.closed = true
	return nil
}

func GetMD5Hash(text string) string {
	tHash := md5.Sum([]byte(text))
	return hex.EncodeToString(tHash[:])
}

type chunkSize struct {
	position int64
	size     int
}

type openObject struct {
	ctx     context.Context
	mu      sync.Mutex
	d       []*sdkUserFile.SliceDownloadInfo
	id      int
	skip    int64
	chunk   []byte
	chunks  []chunkSize
	closed  bool
	sha     string
	shaTemp hash.Hash
}

func getChunkSizes(sliceSize []*sdkUserFile.SliceSize) (chunks []chunkSize) {
	chunks = make([]chunkSize, 0)
	for _, s := range sliceSize {
		// special-case the last slice
		endIndex := s.EndIndex
		startIndex := s.StartIndex
		if endIndex == 0 {
			endIndex = startIndex
		}
		for j := startIndex; j <= endIndex; j++ {
			size := s.Size
			chunks = append(chunks, chunkSize{position: j, size: int(size)})
		}
	}
	return chunks
}

func (oo *openObject) ChunkLocation(id int) (position int64, size int, err error) {
	if id < 0 || id >= len(oo.chunks) {
		return 0, 0, errors.New("invalid arguments")
	}

	return (oo.chunks)[id].position, (oo.chunks)[id].size, nil
}

func getRawFiles(addr *sdkUserFile.SliceDownloadInfo) ([]byte, error) {
	if addr == nil {
		return nil, errors.New("addr is nil")
	}

	client := http.Client{
		Timeout: 60 * time.Second, // set timeout to 60 seconds
	}
	resp, err := client.Get(addr.DownloadAddress)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("bad status: %s, body: %s", resp.Status, body)
	}

	if addr.Encrypt > 0 {
		cd := uint8(addr.Encrypt)
		for idx := 0; idx < len(body); idx++ {
			body[idx] = body[idx] ^ cd
		}
	}
	storeType := addr.StoreType
	if storeType != 10 {
		sourceCid, err := cid.Decode(addr.Identity)
		if err != nil {
			return nil, err
		}
		checkCid, err := sourceCid.Prefix().Sum(body)
		if err != nil {
			return nil, err
		}
		if !checkCid.Equals(sourceCid) {
			return nil, fmt.Errorf("bad cid: %s, body: %s", checkCid.String(), body)
		}
	}

	return body, nil
}
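
The Encrypt field in getRawFiles drives a single-byte XOR de-obfuscation: every byte of the downloaded slice is XORed with the same key byte, and since XOR is its own inverse the identical loop decodes what it encodes. A standalone illustration:

package main

import "fmt"

// xorBytes XORs every byte with a single key byte; applying it twice
// restores the original data.
func xorBytes(data []byte, key byte) {
	for i := range data {
		data[i] ^= key
	}
}

func main() {
	payload := []byte("hello slice")
	xorBytes(payload, 0x5A) // obfuscate
	fmt.Printf("%q\n", payload)
	xorBytes(payload, 0x5A) // de-obfuscate with the same key
	fmt.Printf("%q\n", payload) // "hello slice" again
}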
@@ -97,13 +97,13 @@ func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs)
	}
	obj := model.Object{
		ID: strconv.FormatInt(f.FileId, 10),
		//Path: "",
		// Path: "",
		Name:     f.FileName,
		Size:     f.FileSize * 1024,
		Modified: updTime,
		Ctime:    updTime,
		IsFolder: false,
		//HashInfo: utils.HashInfo{},
		// HashInfo: utils.HashInfo{},
	}
	if f.FileType == 2 {
		obj.IsFolder = true
@@ -185,13 +185,13 @@ func (d *ILanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
	}
	return &model.Object{
		ID: utils.Json.Get(res, "list", 0, "id").ToString(),
		//Path: "",
		// Path: "",
		Name:     dirName,
		Size:     0,
		Modified: time.Now(),
		Ctime:    time.Now(),
		IsFolder: true,
		//HashInfo: utils.HashInfo{},
		// HashInfo: utils.HashInfo{},
	}, nil
}

@@ -239,7 +239,7 @@ func (d *ILanZou) Rename(ctx context.Context, srcObj model.Obj, newName string)
	}
	return &model.Object{
		ID: srcObj.GetID(),
		//Path: "",
		// Path: "",
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: time.Now(),
@@ -392,7 +392,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
	}
	return &model.Object{
		ID: strconv.FormatInt(file.FileId, 10),
		//Path: ,
		// Path: ,
		Name:     file.FileName,
		Size:     s.GetSize(),
		Modified: s.ModTime(),
@@ -402,6 +402,22 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
	}, nil
}

func (d *ILanZou) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	res, err := d.proved("/user/account/map", http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	})
	if err != nil {
		return nil, err
	}
	totalSize := utils.Json.Get(res, "map", "totalSize").ToUint64() * 1024
	rewardSize := utils.Json.Get(res, "map", "rewardSize").ToUint64() * 1024
	total := totalSize + rewardSize
	used := utils.Json.Get(res, "map", "usedSize").ToUint64() * 1024
	return &model.StorageDetails{
		DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
	}, nil
}

//func (d *ILanZou) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

@@ -1,7 +1,8 @@
package lanzou

import (
	"bytes"
	"encoding/hex"
	"errors"
	"fmt"
	"net/http"
	"regexp"
@@ -9,8 +10,6 @@ import (
	"strings"
	"time"
	"unicode"

	log "github.com/sirupsen/logrus"
)

const DAY time.Duration = 84600000000000
@@ -122,20 +121,26 @@ var findAcwScV2Reg = regexp.MustCompile(`arg1='([0-9A-Z]+)'`)

// When a page is accessed too frequently (or in a few other cases), the server may first return an obfuscated page;
// its script computes an acw_sc__v2 value, injects it, and only re-requesting the page then yields the normal content.
// If the page is JS-obfuscated, decode it, compute acw_sc__v2, and add it to the cookie.
func CalcAcwScV2(html string) (string, error) {
	log.Debugln("acw_sc__v2", html)
	acwScV2s := findAcwScV2Reg.FindStringSubmatch(html)
	if len(acwScV2s) != 2 {
		return "", fmt.Errorf("无法匹配acw_sc__v2")
func CalcAcwScV2(htmlContent string) (string, error) {
	matches := findAcwScV2Reg.FindStringSubmatch(htmlContent)
	if len(matches) != 2 {
		return "", errors.New("无法匹配到 arg1 参数")
	}
	return HexXor(Unbox(acwScV2s[1]), "3000176000856006061501533003690027800375"), nil
	arg1 := matches[1]

	mask := "3000176000856006061501533003690027800375"
	result, err := hexXor(unbox(arg1), mask)
	if err != nil {
		return "", fmt.Errorf("hexXor 操作失败: %w", err)
	}

	return result, nil
}

func Unbox(hex string) string {
func unbox(hex string) string {
	var box = []int{6, 28, 34, 31, 33, 18, 30, 23, 9, 8, 19, 38, 17, 24, 0, 5, 32, 21, 10, 22, 25, 14, 15, 3, 16, 27, 13, 35, 2, 29, 11, 26, 4, 36, 1, 39, 37, 7, 20, 12}
	var newBox = make([]byte, len(hex))
	for i := 0; i < len(box); i++ {
		j := box[i]
	for i, j := range box {
		if len(newBox) > j {
			newBox[j] = hex[i]
		}
@@ -143,14 +148,21 @@ func Unbox(hex string) string {
	return string(newBox)
}

func HexXor(hex1, hex2 string) string {
	out := bytes.NewBuffer(make([]byte, len(hex1)))
	for i := 0; i < len(hex1) && i < len(hex2); i += 2 {
		v1, _ := strconv.ParseInt(hex1[i:i+2], 16, 64)
		v2, _ := strconv.ParseInt(hex2[i:i+2], 16, 64)
		out.WriteString(strconv.FormatInt(v1^v2, 16))
func hexXor(hex1, hex2 string) (string, error) {
	bytes1, err := hex.DecodeString(hex1)
	if err != nil {
		return "", fmt.Errorf("解码 hex1 失败: %w", err)
	}
	return out.String()
	bytes2, err := hex.DecodeString(hex2)
	if err != nil {
		return "", fmt.Errorf("解码 hex2 失败: %w", err)
	}
	minLength := min(len(bytes2), len(bytes1))
	resultBytes := make([]byte, minLength)
	for i := range minLength {
		resultBytes[i] = bytes1[i] ^ bytes2[i]
	}
	return hex.EncodeToString(resultBytes), nil
}

var findDataReg = regexp.MustCompile(`data[:\s]+({[^}]+})`) // find the JSON payload

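
To make the rewritten hexXor concrete: both hex strings are decoded to raw bytes, XORed pairwise up to the shorter length, and re-encoded. A self-contained sketch with made-up inputs (written without the min builtin so it also runs on older Go):

package main

import (
	"encoding/hex"
	"fmt"
)

func hexXor(hex1, hex2 string) (string, error) {
	b1, err := hex.DecodeString(hex1)
	if err != nil {
		return "", err
	}
	b2, err := hex.DecodeString(hex2)
	if err != nil {
		return "", err
	}
	n := len(b1)
	if len(b2) < n {
		n = len(b2)
	}
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = b1[i] ^ b2[i] // byte-wise XOR up to the shorter input
	}
	return hex.EncodeToString(out), nil
}

func main() {
	s, err := hexXor("3000", "1234")
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "2234": 0x30^0x12=0x22, 0x00^0x34=0x34
}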
@@ -3,6 +3,7 @@ package lanzou
import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"runtime"
@@ -94,36 +95,66 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
	}
}

// Fix: every request now automatically handles acw_sc__v2 validation and down_ip=1
func (d *LanZou) request(url string, method string, callback base.ReqCallback, up bool) ([]byte, error) {
	var req *resty.Request
	if up {
		once.Do(func() {
			upClient = base.NewRestyClient().SetTimeout(120 * time.Second)
	var vs string
	for retry := 0; retry < 3; retry++ {
		if up {
			once.Do(func() {
				upClient = base.NewRestyClient().SetTimeout(120 * time.Second)
			})
			req = upClient.R()
		} else {
			req = base.RestyClient.R()
		}

		req.SetHeaders(map[string]string{
			"Referer":    "https://pc.woozooo.com",
			"User-Agent": d.UserAgent,
		})
		req = upClient.R()
	} else {
		req = base.RestyClient.R()
	}

	req.SetHeaders(map[string]string{
		"Referer":    "https://pc.woozooo.com",
		"User-Agent": d.UserAgent,
	})
		// direct download links require down_ip=1
		if strings.Contains(url, "/file/") {
			cookie := d.Cookie
			if cookie != "" {
				cookie += "; "
			}
			cookie += "down_ip=1"
			if vs != "" {
				cookie += "; acw_sc__v2=" + vs
			}
			req.SetHeader("cookie", cookie)
		} else if d.Cookie != "" {
			cookie := d.Cookie
			if vs != "" {
				cookie += "; acw_sc__v2=" + vs
			}
			req.SetHeader("cookie", cookie)
		} else if vs != "" {
			req.SetHeader("cookie", "acw_sc__v2="+vs)
		}

	if d.Cookie != "" {
		req.SetHeader("cookie", d.Cookie)
	}
		if callback != nil {
			callback(req)
		}

	if callback != nil {
		callback(req)
		res, err := req.Execute(method, url)
		if err != nil {
			return nil, err
		}
		bodyStr := res.String()
		log.Debugf("lanzou request: url=>%s ,stats=>%d ,body => %s\n", res.Request.URL, res.StatusCode(), bodyStr)
		if strings.Contains(bodyStr, "acw_sc__v2") {
			vs, err = CalcAcwScV2(bodyStr)
			if err != nil {
				return nil, err
			}
			continue
		}
		return res.Body(), err
	}

	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
	}
	log.Debugf("lanzou request: url=>%s ,stats=>%d ,body => %s\n", res.Request.URL, res.StatusCode(), res.String())
	return res.Body(), err
	return nil, errors.New("acw_sc__v2 validation error")
}

func (d *LanZou) Login() ([]*http.Cookie, error) {
@@ -430,27 +461,91 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
	file.Time = timeFindReg.FindString(sharePageData)

	// follow the redirect to get the real link
	res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
		"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
	}).Get(downloadUrl)
	var (
		res *resty.Response
		err error
	)
	var vs string
	var bodyStr string
	for i := 0; i < 3; i++ {
		res, err = base.NoRedirectClient.R().SetHeaders(map[string]string{
			"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
			"Referer":         baseUrl,
		}).SetDoNotParseResponse(true).
			SetCookie(&http.Cookie{
				Name:  "acw_sc__v2",
				Value: vs,
			}).SetHeader("cookie", "down_ip=1").Get(downloadUrl)
		if err != nil {
			return nil, err
		}

		if res.StatusCode() == 302 {
			if res.RawBody() != nil {
				res.RawBody().Close()
			}
			break
		}
		bodyBytes, err := io.ReadAll(res.RawBody())
		if res.RawBody() != nil {
			res.RawBody().Close()
		}
		if err != nil {
			return nil, fmt.Errorf("读取响应体失败: %w", err)
		}
		bodyStr = string(bodyBytes)
		if strings.Contains(bodyStr, "acw_sc__v2") {
			if vs, err = CalcAcwScV2(bodyStr); err != nil {
				log.Errorf("lanzou: err => acw_sc__v2 validation error ,data => %s\n", bodyStr)
				return nil, err
			}
			continue
		}
		break
	}

	if err != nil {
		return nil, err
	}

	file.Url = res.Header().Get("location")

	// trigger the validation
	rPageData := res.String()
	// a second validation may be triggered; the acw_sc__v2 case must be handled here as well
	if res.StatusCode() != 302 {
		param, err = htmlJsonToMap(rPageData)
		param, err = htmlJsonToMap(bodyStr)
		if err != nil {
			return nil, err
		}
		param["el"] = "2"
		time.Sleep(time.Second * 2)

		// pass the validation to get the direct connection
		data, err := d.post(fmt.Sprint(baseUrl, "/ajax.php"), func(req *resty.Request) { req.SetFormData(param) }, nil)
		// pass the validation to get the direct link
		var data []byte
		for i := 0; i < 3; i++ {
			data, err = d.post(fmt.Sprint(baseUrl, "/ajax.php"), func(req *resty.Request) {
				req.SetFormData(param)
				req.SetHeader("cookie", "down_ip=1")
				if vs != "" {
					req.SetCookie(&http.Cookie{
						Name:  "acw_sc__v2",
						Value: vs,
					})
				}
			}, nil)
			if err != nil {
				return nil, err
			}
			ajaxBodyStr := string(data)
			if strings.Contains(ajaxBodyStr, "acw_sc__v2") {
				if vs, err = CalcAcwScV2(ajaxBodyStr); err != nil {
					log.Errorf("lanzou: err => acw_sc__v2 validation error ,data => %s\n", ajaxBodyStr)
					return nil, err
				}
				time.Sleep(time.Second * 2)
				continue
			}
			break
		}
		if err != nil {
			return nil, err
		}

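
The reworked request flow is a retry-on-challenge loop: send the request, and if the body contains the anti-bot marker, compute the acw_sc__v2 cookie from the challenge page and retry with it, giving up after three attempts. A stripped-down sketch of that control flow; fetch and solveChallenge are hypothetical stand-ins, not LanZou's real helpers:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// retryOnChallenge issues the request up to three times, solving the
// challenge and attaching the derived cookie whenever the marker appears.
func retryOnChallenge(fetch func(cookie string) (string, error),
	solveChallenge func(body string) (string, error)) (string, error) {
	cookie := ""
	for attempt := 0; attempt < 3; attempt++ {
		body, err := fetch(cookie)
		if err != nil {
			return "", err
		}
		if !strings.Contains(body, "acw_sc__v2") {
			return body, nil // normal page, done
		}
		// challenge page: derive the cookie and try again
		cookie, err = solveChallenge(body)
		if err != nil {
			return "", err
		}
	}
	return "", errors.New("acw_sc__v2 validation error")
}

func main() {
	calls := 0
	fetch := func(cookie string) (string, error) {
		calls++
		if cookie == "" {
			return "...acw_sc__v2 challenge page...", nil
		}
		return "normal page", nil
	}
	solve := func(string) (string, error) { return "computed-cookie", nil }
	body, err := retryOnChallenge(fetch, solve)
	fmt.Println(body, calls, err) // normal page 2 <nil>
}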
@@ -235,6 +235,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	fullPath := file.GetPath()
	link := &model.Link{}
	var MFile model.File
	if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" {
		var buf *bytes.Buffer
		var thumbPath *string
@@ -261,9 +262,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
			return nil, err
		}
		link.ContentLength = int64(stat.Size())
		link.MFile = open
		MFile = open
	} else {
		link.MFile = bytes.NewReader(buf.Bytes())
		MFile = bytes.NewReader(buf.Bytes())
		link.ContentLength = int64(buf.Len())
	}
	} else {
@@ -272,13 +273,11 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
		return nil, err
	}
	link.ContentLength = file.GetSize()
	link.MFile = open
	}
	link.AddIfCloser(link.MFile)
	if !d.Config().OnlyLinkMFile {
		link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
		link.MFile = nil
	MFile = open
	}
	link.SyncClosers.AddIfCloser(MFile)
	link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, MFile)
	link.RequireReference = link.SyncClosers.Length() > 0
	return link, nil
}

@@ -375,18 +374,26 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
			err = os.Remove(obj.GetPath())
		}
	} else {
		if !utils.Exists(d.RecycleBinPath) {
			err = os.MkdirAll(d.RecycleBinPath, 0o755)
		objPath := obj.GetPath()
		objName := obj.GetName()
		var relPath string
		relPath, err = filepath.Rel(d.GetRootPath(), filepath.Dir(objPath))
		if err != nil {
			return err
		}
		recycleBinPath := filepath.Join(d.RecycleBinPath, relPath)
		if !utils.Exists(recycleBinPath) {
			err = os.MkdirAll(recycleBinPath, 0o755)
			if err != nil {
				return err
			}
		}

		dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
		dstPath := filepath.Join(recycleBinPath, objName)
		if utils.Exists(dstPath) {
			dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
			dstPath = filepath.Join(recycleBinPath, objName+"_"+time.Now().Format("20060102150405"))
		}
		err = os.Rename(obj.GetPath(), dstPath)
		err = os.Rename(objPath, dstPath)
	}
	if err != nil {
		return err

@@ -18,12 +18,12 @@ type Addition struct {
}

var config = driver.Config{
	Name:          "Local",
	OnlyLinkMFile: false,
	LocalSort:     true,
	NoCache:       true,
	DefaultRoot:   "/",
	NoLinkURL:     true,
	Name:        "Local",
	LocalSort:   true,
	OnlyProxy:   true,
	NoCache:     true,
	DefaultRoot: "/",
	NoLinkURL:   true,
}

func init() {
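
The Remove change preserves the object's directory structure inside the recycle bin by taking its parent path relative to the storage root and re-rooting it under the bin. A small illustration of that path arithmetic (all paths are made up):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	root := "/data/storage"
	recycleBin := "/data/.trash"
	objPath := "/data/storage/photos/2024/img.jpg"

	// Path of the object's parent directory, relative to the storage root.
	rel, err := filepath.Rel(root, filepath.Dir(objPath))
	if err != nil {
		panic(err)
	}
	// Re-root it under the recycle bin so the hierarchy is preserved.
	dst := filepath.Join(recycleBin, rel, filepath.Base(objPath))
	fmt.Println(dst) // /data/.trash/photos/2024/img.jpg
}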
431
drivers/mediafire/driver.go
Normal file
@@ -0,0 +1,431 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking

Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21

Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/

import (
	"context"
	"fmt"
	"math/rand"
	"net/http"
	"strconv"
	"time"

	"github.com/OpenListTeam/OpenList/v4/drivers/base"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/cron"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"golang.org/x/time/rate"
)

type Mediafire struct {
	model.Storage
	Addition

	cron *cron.Cron

	actionToken string
	limiter     *rate.Limiter

	appBase    string
	apiBase    string
	hostBase   string
	maxRetries int

	secChUa         string
	secChUaPlatform string
	userAgent       string
}

func (d *Mediafire) Config() driver.Config {
	return config
}

func (d *Mediafire) GetAddition() driver.Additional {
	return &d.Addition
}

// Init initializes the MediaFire driver with session token and cookie validation
func (d *Mediafire) Init(ctx context.Context) error {
	if d.SessionToken == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
	}

	if d.Cookie == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
	}
	// Set up the rate limiter if a rate limit is configured
	if d.LimitRate > 0 {
		d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
	}
	// Validate and refresh the session token if needed
	if _, err := d.getSessionToken(ctx); err != nil {
		d.renewToken(ctx)

		// Stay ahead of the ~10-minute token expiry by renewing every 6-9 minutes
		num := rand.Intn(4) + 6

		d.cron = cron.NewCron(time.Minute * time.Duration(num))
		d.cron.Do(func() {
			// Crazy, but working way to refresh the session token
			d.renewToken(ctx)
		})
	}

	return nil
}

// Drop cleans up driver resources
func (d *Mediafire) Drop(ctx context.Context) error {
	// Clear cached resources
	d.actionToken = ""
	if d.cron != nil {
		d.cron.Stop()
		d.cron = nil
	}
	return nil
}

// List retrieves files and folders from the specified directory
func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.getFiles(ctx, dir.GetID())
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
		return d.fileToObj(src), nil
	})
}

// Link generates a direct download link for the specified file
func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
	if err != nil {
		return nil, err
	}

	res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Head(downloadUrl)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = res.RawBody().Close()
	}()

	if res.StatusCode() == 302 {
		downloadUrl = res.Header().Get("location")
	}

	return &model.Link{
		URL: downloadUrl,
		Header: http.Header{
			"Origin":             []string{d.appBase},
			"Referer":            []string{d.appBase + "/"},
			"sec-ch-ua":          []string{d.secChUa},
			"sec-ch-ua-platform": []string{d.secChUaPlatform},
			"User-Agent":         []string{d.userAgent},
		},
	}, nil
}

// MakeDir creates a new folder in the specified parent directory
func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"parent_key":      parentDir.GetID(),
		"foldername":      dirName,
	}

	var resp MediafireFolderCreateResponse
	_, err := d.postForm(ctx, "/folder/create.php", data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)

	return &model.Object{
		ID:       resp.Response.FolderKey,
		Name:     resp.Response.Name,
		Size:     0,
		Modified: created,
		Ctime:    created,
		IsFolder: true,
	}, nil
}

// Move relocates a file or folder to a different parent directory
func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {
		endpoint = "/folder/move.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	} else {
		endpoint = "/file/move.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"folder_key":      dstDir.GetID(),
		}
	}

	var resp MediafireMoveResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return srcObj, nil
}

// Rename changes the name of a file or folder
func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {
		endpoint = "/folder/update.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      srcObj.GetID(),
			"foldername":      newName,
		}
	} else {
		endpoint = "/file/update.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"filename":        newName,
		}
	}

	var resp MediafireRenameResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       srcObj.GetID(),
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		Ctime:    srcObj.CreateTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

// Copy creates a duplicate of a file or folder in the specified destination directory
func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {
		endpoint = "/folder/copy.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	} else {
		endpoint = "/file/copy.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"folder_key":      dstDir.GetID(),
		}
	}

	var resp MediafireCopyResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	var newID string
	if srcObj.IsDir() {
		if len(resp.Response.NewFolderKeys) > 0 {
			newID = resp.Response.NewFolderKeys[0]
		}
	} else {
		if len(resp.Response.NewQuickKeys) > 0 {
			newID = resp.Response.NewQuickKeys[0]
		}
	}

	return &model.Object{
		ID:       newID,
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		Ctime:    srcObj.CreateTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

// Remove deletes a file or folder permanently
func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
	var data map[string]string
	var endpoint string

	if obj.IsDir() {
		endpoint = "/folder/delete.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      obj.GetID(),
		}
	} else {
		endpoint = "/file/delete.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       obj.GetID(),
		}
	}

	var resp MediafireRemoveResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return err
	}

	return checkAPIResult(resp.Response.Result)
}

// Put uploads a file to the specified directory, with support for resumable and quick upload
func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	fileHash := file.GetHash().GetHash(utils.SHA256)
	var err error

	// Try to use the existing hash first; cache only if necessary
	if len(fileHash) != utils.SHA256.Width {
		_, fileHash, err = stream.CacheFullAndHash(file, &up, utils.SHA256)
		if err != nil {
			return nil, err
		}
	}

	checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
	if err != nil {
		return nil, err
	}

	if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
		up(100.0)
		existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
		if err == nil && existingFile != nil {
			// The file already exists, return the existing file info
			return &model.Object{
				ID:   existingFile.GetID(),
				Name: file.GetName(),
				Size: file.GetSize(),
			}, nil
		}
		// If getExistingFileInfo fails, log and continue with a normal upload.
		// This ensures the upload doesn't fail due to search issues.
	}

	var pollKey string

	if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {
		pollKey, err = d.uploadUnits(ctx, file, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
		if err != nil {
			return nil, err
		}
	} else {
		pollKey = checkResp.Response.ResumableUpload.UploadKey
		up(100.0)
	}

	pollResp, err := d.pollUpload(ctx, pollKey)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:   pollResp.Response.Doupload.QuickKey,
		Name: file.GetName(),
		Size: file.GetSize(),
	}, nil
}

func (d *Mediafire) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
	}
	var resp MediafireUserInfoResponse
	_, err := d.postForm(ctx, "/user/get_info.php", data, &resp)
	if err != nil {
		return nil, err
	}
	used, err := strconv.ParseUint(resp.Response.UserInfo.UsedStorageSize, 10, 64)
	if err != nil {
		return nil, err
	}
	total, err := strconv.ParseUint(resp.Response.UserInfo.StorageLimit, 10, 64)
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  total - used,
		},
	}, nil
}

var _ driver.Driver = (*Mediafire)(nil)
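
The driver throttles API calls with golang.org/x/time/rate: rate.NewLimiter(rate.Limit(n), 1) allows roughly n events per second with a burst of one, and each call blocks in limiter.Wait(ctx) until a token is free. A minimal standalone sketch:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 2 requests per second, burst of 1 (mirrors the driver's default LimitRate of 2).
	limiter := rate.NewLimiter(rate.Limit(2), 1)
	ctx := context.Background()

	start := time.Now()
	for i := 0; i < 5; i++ {
		// Wait blocks until the next token is available or ctx is cancelled.
		if err := limiter.Wait(ctx); err != nil {
			panic(err)
		}
		fmt.Printf("request %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
}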
61
drivers/mediafire/meta.go
Normal file
@@ -0,0 +1,61 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking

Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21

Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
	driver.RootPath
	//driver.RootID

	SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
	Cookie       string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`

	OrderBy        string  `json:"order_by" type:"select" options:"name,time,size" default:"name"`
	OrderDirection string  `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	ChunkSize      int64   `json:"chunk_size" type:"number" default:"100"`
	UploadThreads  int     `json:"upload_threads" type:"number" default:"3" help:"concurrent upload threads"`
	LimitRate      float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
}

var config = driver.Config{
	Name:              "MediaFire",
	LocalSort:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Mediafire{
			appBase:         "https://app.mediafire.com",
			apiBase:         "https://www.mediafire.com/api/1.5",
			hostBase:        "https://www.mediafire.com",
			maxRetries:      3,
			secChUa:         "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
			secChUaPlatform: "Windows",
			userAgent:       "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
		}
	})
}
246
drivers/mediafire/types.go
Normal file
@@ -0,0 +1,246 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking
*/

type MediafireRenewTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		SessionToken      string `json:"session_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireResponse struct {
	Response struct {
		Action        string `json:"action"`
		FolderContent struct {
			ChunkSize   string            `json:"chunk_size"`
			ContentType string            `json:"content_type"`
			ChunkNumber string            `json:"chunk_number"`
			FolderKey   string            `json:"folderkey"`
			Folders     []MediafireFolder `json:"folders,omitempty"`
			Files       []MediafireFile   `json:"files,omitempty"`
			MoreChunks  string            `json:"more_chunks"`
		} `json:"folder_content"`
		Result string `json:"result"`
	} `json:"response"`
}

type MediafireFolder struct {
	FolderKey  string `json:"folderkey"`
	Name       string `json:"name"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
}

type MediafireFile struct {
	QuickKey   string `json:"quickkey"`
	Filename   string `json:"filename"`
	Size       string `json:"size"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
	MimeType   string `json:"mimetype"`
}

type File struct {
	ID         string
	Name       string
	Size       int64
	CreatedUTC string
	IsFolder   bool
}

type FolderContentResponse struct {
	Folders    []MediafireFolder
	Files      []MediafireFile
	MoreChunks bool
}

type MediafireLinksResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			View           string `json:"view"`
			NormalDownload string `json:"normal_download"`
			OneTime        struct {
				Download string `json:"download"`
				View     string `json:"view"`
			} `json:"one_time"`
		} `json:"links"`
		OneTimeKeyRequestCount    string `json:"one_time_key_request_count"`
		OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
		Result                    string `json:"result"`
		CurrentAPIVersion         string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireDirectDownloadResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			DirectDownload string `json:"direct_download"`
		} `json:"links"`
		DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
		Result                      string `json:"result"`
		CurrentAPIVersion           string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireFolderCreateResponse struct {
	Response struct {
		Action            string `json:"action"`
		FolderKey         string `json:"folder_key"`
		UploadKey         string `json:"upload_key"`
		ParentFolderKey   string `json:"parent_folderkey"`
		Name              string `json:"name"`
		Description       string `json:"description"`
		Created           string `json:"created"`
		CreatedUTC        string `json:"created_utc"`
		Privacy           string `json:"privacy"`
		FileCount         string `json:"file_count"`
		FolderCount       string `json:"folder_count"`
		Revision          string `json:"revision"`
		DropboxEnabled    string `json:"dropbox_enabled"`
		Flag              string `json:"flag"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireMoveResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewNames          []string `json:"new_names"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireRenameResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireCopyResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewQuickKeys      []string `json:"new_quickkeys,omitempty"`
		NewFolderKeys     []string `json:"new_folderkeys,omitempty"`
		SkippedCount      string   `json:"skipped_count,omitempty"`
		OtherCount        string   `json:"other_count,omitempty"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireRemoveResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireCheckResponse struct {
	Response struct {
		Action          string `json:"action"`
		HashExists      string `json:"hash_exists"`
		InAccount       string `json:"in_account"`
		InFolder        string `json:"in_folder"`
		FileExists      string `json:"file_exists"`
		ResumableUpload struct {
			AllUnitsReady string `json:"all_units_ready"`
			NumberOfUnits string `json:"number_of_units"`
			UnitSize      string `json:"unit_size"`
			Bitmap        struct {
				Count string   `json:"count"`
				Words []string `json:"words"`
			} `json:"bitmap"`
			UploadKey string `json:"upload_key"`
		} `json:"resumable_upload"`
		AvailableSpace       string `json:"available_space"`
		UsedStorageSize      string `json:"used_storage_size"`
		StorageLimit         string `json:"storage_limit"`
		StorageLimitExceeded string `json:"storage_limit_exceeded"`
		UploadURL            struct {
			Simple            string `json:"simple"`
			SimpleFallback    string `json:"simple_fallback"`
			Resumable         string `json:"resumable"`
			ResumableFallback string `json:"resumable_fallback"`
		} `json:"upload_url"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireActionTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		ActionToken       string `json:"action_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafirePollResponse struct {
	Response struct {
		Action   string `json:"action"`
		Doupload struct {
			Result      string `json:"result"`
			Status      string `json:"status"`
			Description string `json:"description"`
			QuickKey    string `json:"quickkey"`
			Hash        string `json:"hash"`
			Filename    string `json:"filename"`
			Size        string `json:"size"`
			Created     string `json:"created"`
			CreatedUTC  string `json:"created_utc"`
			Revision    string `json:"revision"`
		} `json:"doupload"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireFileSearchResponse struct {
	Response struct {
		Action            string `json:"action"`
		FileInfo          []File `json:"file_info"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireUserInfoResponse struct {
	Response struct {
		Action   string `json:"action"`
		UserInfo struct {
			Email           string `json:"string"`
			DisplayName     string `json:"display_name"`
			UsedStorageSize string `json:"used_storage_size"`
			StorageLimit    string `json:"storage_limit"`
		} `json:"user_info"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
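
All of these response types share one envelope: a single response object carrying action, result, and action-specific fields, with most numeric values delivered as strings. A hedged sketch of decoding that shape; the sample JSON is invented:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

type folderCreateResp struct {
	Response struct {
		Action    string `json:"action"`
		FolderKey string `json:"folder_key"`
		FileCount string `json:"file_count"` // numeric value, but sent as a string
		Result    string `json:"result"`
	} `json:"response"`
}

func main() {
	raw := []byte(`{"response":{"action":"folder/create","folder_key":"abc123","file_count":"7","result":"Success"}}`)
	var r folderCreateResp
	if err := json.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	if r.Response.Result != "Success" {
		panic("API error: " + r.Response.Result)
	}
	// String-typed numbers have to be parsed explicitly.
	n, _ := strconv.Atoi(r.Response.FileCount)
	fmt.Println(r.Response.FolderKey, n)
}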
729
drivers/mediafire/util.go
Normal file
@@ -0,0 +1,729 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
|
||||
Date: 2025-09-21
|
||||
|
||||
Date: 2025-09-26
|
||||
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
// checkAPIResult validates MediaFire API response result and returns error if not successful
|
||||
func checkAPIResult(result string) error {
|
||||
if result != "Success" {
|
||||
return fmt.Errorf("MediaFire API error: %s", result)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getSessionToken retrieves and validates session token from MediaFire
|
||||
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
|
||||
if d.limiter != nil {
|
||||
if err := d.limiter.Wait(ctx); err != nil {
|
||||
return "", fmt.Errorf("rate limit wait failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
tokenURL := d.hostBase + "/application/get_session_token.php"
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
req.Header.Set("Accept", "*/*")
|
||||
req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
|
||||
req.Header.Set("Accept-Language", "en-US,en;q=0.9")
|
||||
req.Header.Set("Content-Length", "0")
|
||||
req.Header.Set("Cookie", d.Cookie)
|
||||
req.Header.Set("DNT", "1")
|
||||
req.Header.Set("Origin", d.hostBase)
|
||||
req.Header.Set("Priority", "u=1, i")
|
||||
req.Header.Set("Referer", (d.hostBase + "/"))
|
||||
req.Header.Set("Sec-Ch-Ua", d.secChUa)
|
||||
req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
|
||||
req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
|
||||
req.Header.Set("Sec-Fetch-Dest", "empty")
|
||||
req.Header.Set("Sec-Fetch-Mode", "cors")
|
||||
req.Header.Set("Sec-Fetch-Site", "same-site")
|
||||
req.Header.Set("User-Agent", d.userAgent)
|
||||
// req.Header.Set("Connection", "keep-alive")
|
||||
|
||||
resp, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// fmt.Printf("getSessionToken :: Raw response: %s\n", string(body))
|
||||
// fmt.Printf("getSessionToken :: Parsed response: %+v\n", resp)
|
||||
|
||||
var tokenResp struct {
|
||||
Response struct {
|
||||
SessionToken string `json:"session_token"`
|
||||
} `json:"response"`
|
||||
}
|
||||
|
||||
if resp.StatusCode == 200 {
|
||||
if err := json.Unmarshal(body, &tokenResp); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if tokenResp.Response.SessionToken == "" {
|
||||
return "", fmt.Errorf("empty session token received")
|
||||
}
|
||||
|
||||
cookieMap := make(map[string]string)
|
||||
for _, cookie := range resp.Cookies() {
|
||||
cookieMap[cookie.Name] = cookie.Value
|
||||
}
|
||||
|
||||
if len(cookieMap) > 0 {
|
||||
|
||||
var cookies []string
|
||||
for name, value := range cookieMap {
|
||||
cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
|
||||
}
|
||||
d.Cookie = strings.Join(cookies, "; ")
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
// fmt.Printf("getSessionToken :: Captured cookies: %s\n", d.Cookie)
|
||||
}
|
||||
|
||||
} else {
|
||||
return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
d.SessionToken = tokenResp.Response.SessionToken
|
||||
|
||||
// fmt.Printf("Init :: Obtain Session Token %v", d.SessionToken)
|
||||
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
return d.SessionToken, nil
|
||||
}
|
||||
|
||||
// renewToken refreshes the current session token when expired
|
||||
func (d *Mediafire) renewToken(ctx context.Context) error {
|
||||
query := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
}
|
||||
|
||||
var resp MediafireRenewTokenResponse
|
||||
_, err := d.postForm(ctx, "/user/renew_session_token.php", query, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to renew token: %w", err)
|
||||
}
|
||||
|
||||
// fmt.Printf("getInfo :: Raw response: %s\n", string(body))
|
||||
// fmt.Printf("getInfo :: Parsed response: %+v\n", resp)
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
d.SessionToken = resp.Response.SessionToken
|
||||
|
||||
// fmt.Printf("Init :: Renew Session Token: %s", resp.Response.Result)
|
||||
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
|
||||
// Pre-allocate slice with reasonable capacity to reduce memory allocations
|
||||
files := make([]File, 0, d.ChunkSize*2) // Estimate: ChunkSize for files + folders
|
||||
hasMore := true
|
||||
chunkNumber := 1
|
||||
|
||||
for hasMore {
|
||||
resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Process folders and files in single loop to improve cache locality
|
||||
totalItems := len(resp.Folders) + len(resp.Files)
|
||||
if cap(files)-len(files) < totalItems {
|
||||
// Grow slice if needed
|
||||
newFiles := make([]File, len(files), len(files)+totalItems+int(d.ChunkSize))
|
||||
copy(newFiles, files)
|
||||
files = newFiles
|
||||
}
|
||||
|
||||
for _, folder := range resp.Folders {
|
||||
files = append(files, File{
|
||||
ID: folder.FolderKey,
|
||||
Name: folder.Name,
|
||||
Size: 0,
|
||||
CreatedUTC: folder.CreatedUTC,
|
||||
IsFolder: true,
|
||||
})
|
||||
}
|
||||
|
||||
for _, file := range resp.Files {
|
||||
size, _ := strconv.ParseInt(file.Size, 10, 64)
|
||||
files = append(files, File{
|
||||
ID: file.QuickKey,
|
||||
Name: file.Filename,
|
||||
Size: size,
|
||||
CreatedUTC: file.CreatedUTC,
|
||||
IsFolder: false,
|
||||
})
|
||||
}
|
||||
|
||||
hasMore = resp.MoreChunks
|
||||
chunkNumber++
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
|
||||
foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &FolderContentResponse{
|
||||
Folders: foldersResp.Response.FolderContent.Folders,
|
||||
Files: filesResp.Response.FolderContent.Files,
|
||||
MoreChunks: foldersResp.Response.FolderContent.MoreChunks == "yes" || filesResp.Response.FolderContent.MoreChunks == "yes",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFolderContentByType(ctx context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key": folderKey,
|
||||
"content_type": contentType,
|
||||
"chunk": strconv.Itoa(chunkNumber),
|
||||
"chunk_size": strconv.FormatInt(d.ChunkSize, 10),
|
||||
"details": "yes",
|
||||
"order_direction": d.OrderDirection,
|
||||
"order_by": d.OrderBy,
|
||||
"filter": "",
|
||||
}
|
||||
|
||||
var resp MediafireResponse
|
||||
_, err := d.postForm(ctx, "/folder/get_content.php", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// fileToObj converts MediaFire file data to model.ObjThumb with thumbnail support
|
||||
func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
|
||||
created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)
|
||||
|
||||
var thumbnailURL string
|
||||
if !f.IsFolder && f.ID != "" {
|
||||
thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
|
||||
}
|
||||
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: f.ID,
|
||||
// Path: "",
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Modified: created,
|
||||
Ctime: created,
|
||||
IsFolder: f.IsFolder,
|
||||
},
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumbnailURL,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Mediafire) setCommonHeaders(req *resty.Request) {
|
||||
req.SetHeaders(map[string]string{
|
||||
"Cookie": d.Cookie,
|
||||
"User-Agent": d.userAgent,
|
||||
"Origin": d.appBase,
|
||||
"Referer": d.appBase + "/",
|
||||
})
|
||||
}
|
||||
|
||||
// apiRequest performs HTTP request to MediaFire API with rate limiting and common headers
|
||||
func (d *Mediafire) apiRequest(ctx context.Context, method, endpoint string, queryParams, formData map[string]string, resp interface{}) ([]byte, error) {
|
||||
if d.limiter != nil {
|
||||
if err := d.limiter.Wait(ctx); err != nil {
|
||||
return nil, fmt.Errorf("rate limit wait failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
req := base.RestyClient.R()
|
||||
req.SetContext(ctx)
|
||||
d.setCommonHeaders(req)
|
||||
|
||||
// Set query parameters for GET requests
|
||||
if queryParams != nil {
|
||||
req.SetQueryParams(queryParams)
|
||||
}
|
||||
|
||||
// Set form data for POST requests
|
||||
if formData != nil {
|
||||
req.SetFormData(formData)
|
||||
req.SetHeader("Content-Type", "application/x-www-form-urlencoded")
|
||||
}
|
||||
|
||||
// Set response object if provided
|
||||
if resp != nil {
|
||||
req.SetResult(resp)
|
||||
}
|
||||
|
||||
var res *resty.Response
|
||||
var err error
|
||||
|
||||
// Execute request based on method
|
||||
switch method {
|
||||
case "GET":
|
||||
res, err = req.Get(d.apiBase + endpoint)
|
||||
case "POST":
|
||||
res, err = req.Post(d.apiBase + endpoint)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported HTTP method: %s", method)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Body(), nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getForm(ctx context.Context, endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
|
||||
return d.apiRequest(ctx, "GET", endpoint, query, nil, resp)
|
||||
}
|
||||
|
||||
func (d *Mediafire) postForm(ctx context.Context, endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
|
||||
return d.apiRequest(ctx, "POST", endpoint, nil, data, resp)
|
||||
}

func (d *Mediafire) getDirectDownloadLink(ctx context.Context, fileID string) (string, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"quick_key":       fileID,
		"link_type":       "direct_download",
		"response_format": "json",
	}

	var resp MediafireDirectDownloadResponse
	_, err := d.getForm(ctx, "/file/get_links.php", data, &resp)
	if err != nil {
		return "", err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return "", err
	}

	if len(resp.Response.Links) == 0 {
		return "", fmt.Errorf("no download links found")
	}

	return resp.Response.Links[0].DirectDownload, nil
}

func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	query := map[string]string{
		"session_token":   actionToken, /* d.SessionToken */
		"filename":        filename,
		"size":            strconv.FormatInt(filesize, 10),
		"hash":            filehash,
		"folder_key":      folderKey,
		"resumable":       "yes",
		"response_format": "json",
	}

	var resp MediafireCheckResponse
	_, err = d.postForm(ctx, "/upload/check.php", query, &resp)
	if err != nil {
		return nil, err
	}

	// fmt.Printf("uploadCheck :: Raw response: %s\n", string(body))
	// fmt.Printf("uploadCheck :: Parsed response: %+v\n", resp)

	// fmt.Printf("uploadCheck :: ResumableUpload section: %+v\n", resp.Response.ResumableUpload)
	// fmt.Printf("uploadCheck :: Upload key specifically: '%s'\n", resp.Response.ResumableUpload.UploadKey)

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return &resp, nil
}

func (d *Mediafire) uploadUnits(ctx context.Context, file model.FileStreamer, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
	unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
	numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
	uploadKey := checkResp.Response.ResumableUpload.UploadKey

	stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
	intWords := make([]int, 0, len(stringWords))
	for _, word := range stringWords {
		if intWord, err := strconv.Atoi(word); err == nil {
			intWords = append(intWords, intWord)
		}
	}

	// Intelligent buffer sizing for large files
	bufferSize := int(unitSize)
	fileSize := file.GetSize()

	// Split into chunks
	if fileSize > d.ChunkSize*1024*1024 {
		// Large file: cap the buffer at ChunkSize (default = 100MB)
		bufferSize = min(int(fileSize), int(d.ChunkSize)*1024*1024)
	} else if fileSize > 10*1024*1024 {
		// Medium file: buffer the full file size for concurrent access
		bufferSize = int(fileSize)
	}
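
	// Worked example of the sizing above (illustrative; assumes the default
	// ChunkSize of 100): a 1 GiB file exceeds 100 MB, so bufferSize is capped
	// at min(1 GiB, 100 MiB); a 50 MB file takes the medium branch and buffers
	// the whole file; a 1 MB file keeps the server-suggested unitSize.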

	// Create stream section reader for efficient chunking
	ss, err := stream.NewStreamSectionReader(file, bufferSize, &up)
	if err != nil {
		return "", err
	}

	// Calculate the parallel upload thread count, letting MediaFire's resumable
	// upload suggestion override the custom value when it is lower.
	// For big files this will usually respect d.UploadThreads rather than
	// MediaFire's suggestion (i.e. 5 threads).
	thread := min(numUnits, d.UploadThreads)

	// Create ordered group for sequential upload processing with retry logic
	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))

	var finalUploadKey string
	var keyMutex sync.Mutex

	fileSize = file.GetSize()
	for unitID := range numUnits {
		if utils.IsCanceled(uploadCtx) {
			break
		}

		start := int64(unitID) * unitSize
		size := unitSize
		if start+size > fileSize {
			size = fileSize - start
		}

		var reader io.ReadSeeker
		var rateLimitedRd io.Reader
		var unitHash string

		// Use lifecycle pattern for proper resource management
		threadG.GoWithLifecycle(errgroup.Lifecycle{
			Before: func(ctx context.Context) error {
				// Skip already uploaded units
				if d.isUnitUploaded(intWords, unitID) {
					return ss.DiscardSection(start, size)
				}

				var err error
				reader, err = ss.GetSectionReader(start, size)
				if err != nil {
					return err
				}
				rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
				return nil
			},
			Do: func(ctx context.Context) error {
				if reader == nil {
					return nil // Skip if reader is not initialized (already uploaded)
				}

				if unitHash == "" {
					reader.Seek(0, io.SeekStart)
					var err error
					unitHash, err = utils.HashReader(utils.SHA256, reader)
					if err != nil {
						return err
					}
				}
				reader.Seek(0, io.SeekStart)

				// Perform upload

				actionToken, err := d.getActionToken(ctx)
				if err != nil {
					return err
				}
				if d.limiter != nil {
					if err := d.limiter.Wait(ctx); err != nil {
						return fmt.Errorf("rate limit wait failed: %w", err)
					}
				}

				url := d.apiBase + "/upload/resumable.php"
				req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, rateLimitedRd)
				if err != nil {
					return err
				}

				q := req.URL.Query()
				q.Add("folder_key", folderKey)
				q.Add("response_format", "json")
				q.Add("session_token", actionToken)
				q.Add("key", uploadKey)
				req.URL.RawQuery = q.Encode()

				req.Header.Set("x-filehash", fileHash)
				req.Header.Set("x-filesize", strconv.FormatInt(fileSize, 10))
				req.Header.Set("x-unit-id", strconv.Itoa(unitID))
				req.Header.Set("x-unit-size", strconv.FormatInt(size, 10))
				req.Header.Set("x-unit-hash", unitHash)
				req.Header.Set("x-filename", filename)
				req.Header.Set("Content-Type", "application/octet-stream")
				req.ContentLength = size

				/* fmt.Printf("Debug resumable upload request:\n")
				fmt.Printf("  URL: %s\n", req.URL.String())
				fmt.Printf("  Headers: %+v\n", req.Header)
				fmt.Printf("  Unit ID: %d\n", unitID)
				fmt.Printf("  Unit Size: %d\n", len(unitData))
				fmt.Printf("  Upload Key: %s\n", uploadKey)
				fmt.Printf("  Action Token: %s\n", actionToken) */

				res, err := base.HttpClient.Do(req)
				if err != nil {
					return err
				}
				defer res.Body.Close()

				body, err := io.ReadAll(res.Body)
				if err != nil {
					return fmt.Errorf("failed to read response body: %v", err)
				}

				// fmt.Printf("MediaFire resumable upload response (status %d): %s\n", res.StatusCode, string(body))

				var uploadResp struct {
					Response struct {
						Doupload struct {
							Key string `json:"key"`
						} `json:"doupload"`
						Result string `json:"result"`
					} `json:"response"`
				}

				if err := json.Unmarshal(body, &uploadResp); err != nil {
					return fmt.Errorf("failed to parse response: %v", err)
				}

				if res.StatusCode != 200 {
					return fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
				}

				// Thread-safe update of final upload key
				keyMutex.Lock()
				finalUploadKey = uploadResp.Response.Doupload.Key
				keyMutex.Unlock()

				return nil
			},
			After: func(err error) {
				up(float64(threadG.Success()) * 100 / float64(numUnits))
				if reader != nil {
					// Clean up resources
					ss.FreeSectionReader(reader)
				}
			},
		})
	}

	if err := threadG.Wait(); err != nil {
		return "", err
	}

	return finalUploadKey, nil
}

/*func (d *Mediafire) uploadSingleUnit(ctx context.Context, file model.FileStreamer, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string, fileSize int64) (string, error) {
	start := int64(unitID) * unitSize
	size := unitSize

	if start+size > fileSize {
		size = fileSize - start
	}

	unitData := make([]byte, size)
	_, err := file.Read(unitData)
	if err != nil {
		return "", err
	}

	return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
}*/

func (d *Mediafire) getActionToken(ctx context.Context) (string, error) {
	if d.actionToken != "" {
		return d.actionToken, nil
	}

	data := map[string]string{
		"type":            "upload",
		"lifespan":        "1440",
		"response_format": "json",
		"session_token":   d.SessionToken,
	}

	var resp MediafireActionTokenResponse
	_, err := d.postForm(ctx, "/user/get_action_token.php", data, &resp)
	if err != nil {
		return "", err
	}

	if resp.Response.Result != "Success" {
		return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
	}

	return resp.Response.ActionToken, nil
}

func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	// fmt.Printf("Debug Key: %+v\n", key)

	query := map[string]string{
		"key":             key,
		"response_format": "json",
		"session_token":   actionToken, /* d.SessionToken */
	}

	var resp MediafirePollResponse
	_, err = d.postForm(ctx, "/upload/poll_upload.php", query, &resp)
	if err != nil {
		return nil, err
	}

	// fmt.Printf("pollUpload :: Raw response: %s\n", string(body))
	// fmt.Printf("pollUpload :: Parsed response: %+v\n", resp)

	// fmt.Printf("pollUpload :: Debug Result: %+v\n", resp.Response.Result)

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return &resp, nil
}

func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
	wordIndex := unitID / 16
	bitIndex := unitID % 16
	if wordIndex >= len(words) {
		return false
	}
	return (words[wordIndex]>>bitIndex)&1 == 1
}
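
// Example (illustrative, matching the packing used above): MediaFire's
// resumable-upload bitmap packs unit states into 16-bit words, so unit 17
// lives in word 1, bit 1. With words = []int{0, 2}:
//
//	d.isUnitUploaded([]int{0, 2}, 17) // true  (words[1]>>1&1 == 1)
//	d.isUnitUploaded([]int{0, 2}, 16) // false (words[1]>>0&1 == 0)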

func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
	// First try to find by hash directly (most efficient)
	if fileInfo, err := d.getFileByHash(ctx, fileHash); err == nil && fileInfo != nil {
		return fileInfo, nil
	}

	// If hash search fails, search in the target folder
	// This is a fallback method in case the file exists but hash search doesn't work
	files, err := d.getFiles(ctx, folderKey)
	if err != nil {
		return nil, err
	}

	for _, file := range files {
		if file.Name == filename && !file.IsFolder {
			return d.fileToObj(file), nil
		}
	}

	return nil, fmt.Errorf("existing file not found")
}

func (d *Mediafire) getFileByHash(ctx context.Context, hash string) (*model.ObjThumb, error) {
	query := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"hash":            hash,
	}

	var resp MediafireFileSearchResponse
	_, err := d.postForm(ctx, "/file/get_info.php", query, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire file search failed: %s", resp.Response.Result)
	}

	if len(resp.Response.FileInfo) == 0 {
		return nil, fmt.Errorf("file not found by hash")
	}

	file := resp.Response.FileInfo[0]
	return d.fileToObj(file), nil
}

@@ -22,6 +22,7 @@ type Onedrive struct {
	AccessToken string
	root        *Object
	mutex       sync.Mutex
	ref         *Onedrive
}

func (d *Onedrive) Config() driver.Config {
@@ -36,10 +37,22 @@ func (d *Onedrive) Init(ctx context.Context) error {
	if d.ChunkSize < 1 {
		d.ChunkSize = 5
	}
	if d.ref != nil {
		return nil
	}
	return d.refreshToken()
}

func (d *Onedrive) InitReference(refStorage driver.Driver) error {
	if ref, ok := refStorage.(*Onedrive); ok {
		d.ref = ref
		return nil
	}
	return errs.NotSupport
}

func (d *Onedrive) Drop(ctx context.Context) error {
	d.ref = nil
	return nil
}

@@ -207,4 +220,20 @@ func (d *Onedrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	return err
}

func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	if d.DisableDiskUsage {
		return nil, errs.NotImplement
	}
	drive, err := d.getDrive(ctx)
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: drive.Quota.Total,
			FreeSpace:  drive.Quota.Remaining,
		},
	}, nil
}

var _ driver.Driver = (*Onedrive)(nil)

@@ -7,17 +7,18 @@ import (

type Addition struct {
	driver.RootPath
	Region       string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
	IsSharepoint bool   `json:"is_sharepoint"`
	UseOnlineAPI bool   `json:"use_online_api" default:"true"`
	APIAddress   string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
	ClientID     string `json:"client_id"`
	ClientSecret string `json:"client_secret"`
	RedirectUri  string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
	RefreshToken string `json:"refresh_token" required:"true"`
	SiteId       string `json:"site_id"`
	ChunkSize    int64  `json:"chunk_size" type:"number" default:"5"`
	CustomHost   string `json:"custom_host" help:"Custom host for onedrive download link"`
	Region           string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
	IsSharepoint     bool   `json:"is_sharepoint"`
	UseOnlineAPI     bool   `json:"use_online_api" default:"true"`
	APIAddress       string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
	ClientID         string `json:"client_id"`
	ClientSecret     string `json:"client_secret"`
	RedirectUri      string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
	RefreshToken     string `json:"refresh_token" required:"true"`
	SiteId           string `json:"site_id"`
	ChunkSize        int64  `json:"chunk_size" type:"number" default:"5"`
	CustomHost       string `json:"custom_host" help:"Custom host for onedrive download link"`
	DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
}

var config = driver.Config{

@@ -89,3 +89,15 @@ type FileSystemInfoFacet struct {
	CreatedDateTime      time.Time `json:"createdDateTime,omitempty"`      // The UTC date and time the file was created on a client.
	LastModifiedDateTime time.Time `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client.
}

type DriveResp struct {
	ID        string `json:"id"`
	DriveType string `json:"driveType"`
	Quota     struct {
		Deleted   uint64 `json:"deleted"`
		Remaining uint64 `json:"remaining"`
		State     string `json:"state"`
		Total     uint64 `json:"total"`
		Used      uint64 `json:"used"`
	} `json:"quota"`
}

@@ -134,6 +134,9 @@ func (d *Onedrive) _refreshToken() error {
}

func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	if d.ref != nil {
		return d.ref.Request(url, method, callback, resp)
	}
	req := base.RestyClient.R()
	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
	if callback != nil {
@@ -295,3 +298,21 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
	}
	return nil
}

func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
	var api string
	host, _ := onedriveHostMap[d.Region]
	if d.IsSharepoint {
		api = fmt.Sprintf("%s/v1.0/sites/%s/drive", host.Api, d.SiteId)
	} else {
		api = fmt.Sprintf("%s/v1.0/me/drive", host.Api)
	}
	var resp DriveResp
	_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}
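
// For reference (shape per Microsoft Graph's drive resource; values
// hypothetical): GET /v1.0/me/drive returns the quota consumed above, e.g.
//
//	{"id":"...","driveType":"personal",
//	 "quota":{"deleted":0,"remaining":1099217391104,"state":"normal",
//	          "total":1099511627776,"used":294236672}}
//
// which resty unmarshals into DriveResp via the json tags on its Quota struct.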

@@ -206,4 +206,20 @@ func (d *OnedriveAPP) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
	return err
}

func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	if d.DisableDiskUsage {
		return nil, errs.NotImplement
	}
	drive, err := d.getDrive(ctx)
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: drive.Quota.Total,
			FreeSpace:  drive.Quota.Remaining,
		},
	}, nil
}

var _ driver.Driver = (*OnedriveAPP)(nil)

@@ -7,13 +7,14 @@ import (

type Addition struct {
	driver.RootPath
	Region       string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
	ClientID     string `json:"client_id" required:"true"`
	ClientSecret string `json:"client_secret" required:"true"`
	TenantID     string `json:"tenant_id"`
	Email        string `json:"email"`
	ChunkSize    int64  `json:"chunk_size" type:"number" default:"5"`
	CustomHost   string `json:"custom_host" help:"Custom host for onedrive download link"`
	Region           string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
	ClientID         string `json:"client_id" required:"true"`
	ClientSecret     string `json:"client_secret" required:"true"`
	TenantID         string `json:"tenant_id"`
	Email            string `json:"email"`
	ChunkSize        int64  `json:"chunk_size" type:"number" default:"5"`
	CustomHost       string `json:"custom_host" help:"Custom host for onedrive download link"`
	DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
}

var config = driver.Config{

@@ -72,3 +72,15 @@ type Files struct {
	Value    []File `json:"value"`
	NextLink string `json:"@odata.nextLink"`
}

type DriveResp struct {
	ID        string `json:"id"`
	DriveType string `json:"driveType"`
	Quota     struct {
		Deleted   uint64 `json:"deleted"`
		Remaining uint64 `json:"remaining"`
		State     string `json:"state"`
		Total     uint64 `json:"total"`
		Used      uint64 `json:"used"`
	} `json:"quota"`
}

@@ -209,3 +209,16 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
	}
	return nil
}

func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
	host, _ := onedriveHostMap[d.Region]
	api := fmt.Sprintf("%s/v1.0/users/%s/drive", host.Api, d.Email)
	var resp DriveResp
	_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}

@@ -110,19 +110,29 @@ func (d *OpenList) List(ctx context.Context, dir model.Obj, args model.ListArgs)

func (d *OpenList) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	var resp common.Resp[FsGetResp]
	headers := map[string]string{
		"User-Agent": base.UserAgent,
	}
	// if PassUAToUpsteam is true, then pass the user-agent to the upstream
	userAgent := base.UserAgent
	if d.PassUAToUpsteam {
		userAgent = args.Header.Get("user-agent")
		if userAgent == "" {
			userAgent = base.UserAgent
		userAgent := args.Header.Get("user-agent")
		if userAgent != "" {
			headers["User-Agent"] = userAgent
		}
	}
	// if PassIPToUpsteam is true, then pass the ip address to the upstream
	if d.PassIPToUpsteam {
		ip := args.IP
		if ip != "" {
			headers["X-Forwarded-For"] = ip
			headers["X-Real-Ip"] = ip
		}
	}
	_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
		req.SetResult(&resp).SetBody(FsGetReq{
			Path:     file.GetPath(),
			Password: d.MetaPassword,
		}).SetHeader("user-agent", userAgent)
		}).SetHeaders(headers)
	})
	if err != nil {
		return nil, err
@@ -355,8 +365,15 @@ func (d *OpenList) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O
	return err
}

//func (d *OpenList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}
func (d *OpenList) ResolveLinkCacheMode(_ string) driver.LinkCacheMode {
	var mode driver.LinkCacheMode
	if d.PassIPToUpsteam {
		mode |= driver.LinkCacheIP
	}
	if d.PassUAToUpsteam {
		mode |= driver.LinkCacheUA
	}
	return mode
}
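
// Illustrative note: LinkCacheMode is used as a bit set here, so with both
// pass-through options enabled the returned mode is
// driver.LinkCacheIP|driver.LinkCacheUA, and with neither it stays at the zero
// value (no per-IP/per-UA cache keying).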

var _ driver.Driver = (*OpenList)(nil)

@@ -12,6 +12,7 @@ type Addition struct {
	Username          string `json:"username"`
	Password          string `json:"password"`
	Token             string `json:"token"`
	PassIPToUpsteam   bool   `json:"pass_ip_to_upsteam" default:"true"`
	PassUAToUpsteam   bool   `json:"pass_ua_to_upsteam" default:"true"`
	ForwardArchiveReq bool   `json:"forward_archive_requests" default:"true"`
}
@@ -22,6 +23,7 @@ var config = driver.Config{
	DefaultRoot:      "/",
	CheckStatus:      true,
	ProxyRangeOption: true,
	LinkCacheMode:    driver.LinkCacheAuto,
}

func init() {

@@ -36,7 +36,6 @@ func (d *PikPak) GetAddition() driver.Additional {
}

func (d *PikPak) Init(ctx context.Context) (err error) {

	if d.Common == nil {
		d.Common = &Common{
			client: base.NewRestyClient(),
@@ -247,7 +246,7 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
	}

	params := resp.Resumable.Params
	//endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".")
	// endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".")
	// Web uploads return the endpoint `mypikpak.net`; Android uploads return one like `vip-lixian-07.mypikpak.net`
	if d.Addition.Platform == "android" {
		params.Endpoint = "mypikpak.net"
@@ -260,6 +259,27 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
	return d.UploadByMultipart(ctx, &params, stream.GetSize(), stream, up)
}

func (d *PikPak) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	var about AboutResponse
	_, err := d.request("https://api-drive.mypikpak.com/drive/v1/about", http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, &about)
	if err != nil {
		return nil, err
	}
	total, err := strconv.ParseUint(about.Quota.Limit, 10, 64)
	if err != nil {
		return nil, err
	}
	used, err := strconv.ParseUint(about.Quota.Usage, 10, 64)
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
	}, nil
}
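
// Illustrative only: the quota fields arrive as decimal strings, so a sample
// /drive/v1/about payload (values hypothetical) such as
//
//	{"quota":{"limit":"10995116277760","usage":"1073741824"}}
//
// parses to total=10995116277760 and used=1073741824 before being folded into
// DiskUsage by driver.DiskUsageFromUsedAndTotal.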

// Offline download of a file
func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir model.Obj, fileName string) (*OfflineTask, error) {
	requestBody := base.Json{
@@ -278,7 +298,6 @@ func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir
		req.SetContext(ctx).
			SetBody(requestBody)
	}, &resp)

	if err != nil {
		return nil, err
	}
@@ -325,7 +344,6 @@ func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase []
		req.SetContext(ctx).
			SetQueryParams(params)
	}, &resp)

	if err != nil {
		return nil, fmt.Errorf("failed to get offline list: %w", err)
	}

@@ -78,7 +78,7 @@ type Media struct {

type UploadTaskData struct {
	UploadType string `json:"upload_type"`
	//UPLOAD_TYPE_RESUMABLE
	// UPLOAD_TYPE_RESUMABLE
	Resumable *struct {
		Kind   string   `json:"kind"`
		Params S3Params `json:"params"`
@@ -195,3 +195,15 @@ type CaptchaTokenResponse struct {
	ExpiresIn int64  `json:"expires_in"`
	Url       string `json:"url"`
}

type AboutResponse struct {
	Quota struct {
		Limit         string `json:"limit"`
		Usage         string `json:"usage"`
		UsageInTrash  string `json:"usage_in_trash"`
		IsUnlimited   bool   `json:"is_unlimited"`
		Complimentary string `json:"complimentary"`
	} `json:"quota"`
	ExpiresAt string `json:"expires_at"`
	UserType  int    `json:"user_type"`
}

290	drivers/proton_drive/driver.go	Normal file
@@ -0,0 +1,290 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/internal/setting"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/ProtonMail/gopenpgp/v2/crypto"
	proton_api_bridge "github.com/henrybear327/Proton-API-Bridge"
	"github.com/henrybear327/Proton-API-Bridge/common"
	"github.com/henrybear327/go-proton-api"
)

type ProtonDrive struct {
	model.Storage
	Addition

	protonDrive *proton_api_bridge.ProtonDrive

	apiBase    string
	appVersion string
	protonJson string
	userAgent  string
	sdkVersion string
	webDriveAV string

	c *proton.Client

	// userKR *crypto.KeyRing
	addrKRs  map[string]*crypto.KeyRing
	addrData map[string]proton.Address

	MainShare *proton.Share

	DefaultAddrKR *crypto.KeyRing
	MainShareKR   *crypto.KeyRing
}

func (d *ProtonDrive) Config() driver.Config {
	return config
}

func (d *ProtonDrive) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *ProtonDrive) Init(ctx context.Context) (err error) {
	defer func() {
		if r := recover(); err == nil && r != nil {
			err = fmt.Errorf("ProtonDrive initialization panic: %v", r)
		}
	}()

	if d.Email == "" {
		return fmt.Errorf("email is required")
	}
	if d.Password == "" {
		return fmt.Errorf("password is required")
	}

	config := &common.Config{
		AppVersion: d.appVersion,
		UserAgent:  d.userAgent,
		FirstLoginCredential: &common.FirstLoginCredentialData{
			Username: d.Email,
			Password: d.Password,
			TwoFA:    d.TwoFACode,
		},
		EnableCaching:              true,
		ConcurrentBlockUploadCount: setting.GetInt(conf.TaskUploadThreadsNum, conf.Conf.Tasks.Upload.Workers),
		//ConcurrentFileCryptoCount: 2,
		UseReusableLogin:     d.UseReusableLogin && d.ReusableCredential != (common.ReusableCredentialData{}),
		ReplaceExistingDraft: true,
		ReusableCredential:   &d.ReusableCredential,
	}

	protonDrive, _, err := proton_api_bridge.NewProtonDrive(
		ctx,
		config,
		d.authHandler,
		func() {},
	)

	if err != nil && config.UseReusableLogin {
		config.UseReusableLogin = false
		protonDrive, _, err = proton_api_bridge.NewProtonDrive(ctx,
			config,
			d.authHandler,
			func() {},
		)
		if err == nil {
			op.MustSaveDriverStorage(d)
		}
	}

	if err != nil {
		return fmt.Errorf("failed to initialize ProtonDrive: %w", err)
	}

	if err := d.initClient(ctx); err != nil {
		return err
	}

	d.protonDrive = protonDrive
	d.MainShare = protonDrive.MainShare
	if d.RootFolderID == "root" || d.RootFolderID == "" {
		d.RootFolderID = protonDrive.RootLink.LinkID
	}
	d.MainShareKR = protonDrive.MainShareKR
	d.DefaultAddrKR = protonDrive.DefaultAddrKR

	return nil
}

func (d *ProtonDrive) Drop(ctx context.Context) error {
	return nil
}

func (d *ProtonDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	entries, err := d.protonDrive.ListDirectory(ctx, dir.GetID())
	if err != nil {
		return nil, fmt.Errorf("failed to list directory: %w", err)
	}

	objects := make([]model.Obj, 0, len(entries))
	for _, entry := range entries {
		obj := &model.Object{
			ID:       entry.Link.LinkID,
			Name:     entry.Name,
			Size:     entry.Link.Size,
			Modified: time.Unix(entry.Link.ModifyTime, 0),
			IsFolder: entry.IsFolder,
		}
		objects = append(objects, obj)
	}

	return objects, nil
}

func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	link, err := d.getLink(ctx, file.GetID())
	if err != nil {
		return nil, fmt.Errorf("failed to get file link: %+v", err)
	}
	fileSystemAttrs, err := d.protonDrive.GetActiveRevisionAttrs(ctx, link)
	if err != nil {
		return nil, fmt.Errorf("failed to get file revision: %+v", err)
	}
	// Size of the file after decryption
	size := fileSystemAttrs.Size

	rangeReaderFunc := func(rangeCtx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
		length := httpRange.Length
		if length < 0 || httpRange.Start+length > size {
			length = size - httpRange.Start
		}
		reader, _, _, err := d.protonDrive.DownloadFile(rangeCtx, link, httpRange.Start)
		if err != nil {
			return nil, fmt.Errorf("failed to start download: %+v", err)
		}
		return utils.ReadCloser{
			Reader: io.LimitReader(reader, length),
			Closer: reader,
		}, nil
	}

	expiration := time.Minute
	return &model.Link{
		RangeReader: &model.FileRangeReader{
			RangeReaderIF: stream.RateLimitRangeReaderFunc(rangeReaderFunc),
		},
		ContentLength: size,
		Expiration:    &expiration,
	}, nil
}
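
// Worked example for the range clamping above (illustrative): with a decrypted
// size of 1000 bytes, a request for Start=900 with Length=-1 (or any length
// crossing EOF) is clamped to length=100; DownloadFile starts at offset 900 and
// io.LimitReader cuts the stream off after the clamped length.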

func (d *ProtonDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	id, err := d.protonDrive.CreateNewFolderByID(ctx, parentDir.GetID(), dirName)
	if err != nil {
		return nil, fmt.Errorf("failed to create directory: %w", err)
	}

	newDir := &model.Object{
		ID:       id,
		Name:     dirName,
		IsFolder: true,
		Modified: time.Now(),
	}
	return newDir, nil
}

func (d *ProtonDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return d.DirectMove(ctx, srcObj, dstDir)
}

func (d *ProtonDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	if d.protonDrive == nil {
		return nil, fmt.Errorf("protonDrive bridge is nil")
	}

	return d.DirectRename(ctx, srcObj, newName)
}

func (d *ProtonDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if srcObj.IsDir() {
		return nil, fmt.Errorf("directory copy not supported")
	}

	srcLink, err := d.getLink(ctx, srcObj.GetID())
	if err != nil {
		return nil, err
	}

	reader, linkSize, fileSystemAttrs, err := d.protonDrive.DownloadFile(ctx, srcLink, 0)
	if err != nil {
		return nil, fmt.Errorf("failed to download source file: %w", err)
	}
	defer reader.Close()

	actualSize := linkSize
	if fileSystemAttrs != nil && fileSystemAttrs.Size > 0 {
		actualSize = fileSystemAttrs.Size
	}

	file := &stream.FileStream{
		Ctx: ctx,
		Obj: &model.Object{
			Name: srcObj.GetName(),
			// Use the accurate, real (decrypted) size
			Size:     actualSize,
			Modified: srcObj.ModTime(),
		},
		Reader: reader,
	}
	defer file.Close()
	return d.Put(ctx, dstDir, file, func(percentage float64) {})
}

func (d *ProtonDrive) Remove(ctx context.Context, obj model.Obj) error {
	if obj.IsDir() {
		return d.protonDrive.MoveFolderToTrashByID(ctx, obj.GetID(), false)
	} else {
		return d.protonDrive.MoveFileToTrashByID(ctx, obj.GetID())
	}
}

func (d *ProtonDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	return d.uploadFile(ctx, dstDir.GetID(), file, up)
}

func (d *ProtonDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	about, err := d.protonDrive.About(ctx)
	if err != nil {
		return nil, err
	}
	total := uint64(about.MaxSpace)
	free := total - uint64(about.UsedSpace)
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  free,
		},
	}, nil
}

var _ driver.Driver = (*ProtonDrive)(nil)

56	drivers/proton_drive/meta.go	Normal file
@@ -0,0 +1,56 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/henrybear327/Proton-API-Bridge/common"
)

type Addition struct {
	driver.RootID
	Email              string `json:"email" required:"true" type:"string"`
	Password           string `json:"password" required:"true" type:"string"`
	TwoFACode          string `json:"two_fa_code" type:"string"`
	ChunkSize          int64  `json:"chunk_size" type:"number" default:"100"`
	UseReusableLogin   bool   `json:"use_reusable_login" type:"bool" default:"true" help:"Use reusable login credentials instead of username/password"`
	ReusableCredential common.ReusableCredentialData
}

var config = driver.Config{
	Name:        "ProtonDrive",
	LocalSort:   true,
	OnlyProxy:   true,
	DefaultRoot: "root",
	NoLinkURL:   true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &ProtonDrive{
			Addition: Addition{
				UseReusableLogin: true,
			},
			apiBase:    "https://drive.proton.me/api",
			appVersion: "windows-drive@1.11.3+rclone+proton",
			protonJson: "application/vnd.protonmail.v1+json",
			sdkVersion: "js@0.3.0",
			userAgent:  "ProtonDrive/v1.70.0 (Windows NT 10.0.22000; Win64; x64)",
			webDriveAV: "web-drive@5.2.0+0f69f7a8",
		}
	})
}

38	drivers/proton_drive/types.go	Normal file
@@ -0,0 +1,38 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

type MoveRequest struct {
	ParentLinkID            string  `json:"ParentLinkID"`
	NodePassphrase          string  `json:"NodePassphrase"`
	NodePassphraseSignature *string `json:"NodePassphraseSignature"`
	Name                    string  `json:"Name"`
	NameSignatureEmail      string  `json:"NameSignatureEmail"`
	Hash                    string  `json:"Hash"`
	OriginalHash            string  `json:"OriginalHash"`
	ContentHash             *string `json:"ContentHash"` // May be null
}

type RenameRequest struct {
	Name               string `json:"Name"`               // PGP-encrypted name
	NameSignatureEmail string `json:"NameSignatureEmail"` // User's signature email
	Hash               string `json:"Hash"`               // New name hash
	OriginalHash       string `json:"OriginalHash"`       // Current name hash
}

type RenameResponse struct {
	Code int `json:"Code"`
}

670	drivers/proton_drive/util.go	Normal file
@@ -0,0 +1,670 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

import (
	"bufio"
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"

	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/ProtonMail/gopenpgp/v2/crypto"
	"github.com/henrybear327/go-proton-api"
)

func (d *ProtonDrive) uploadFile(ctx context.Context, parentLinkID string, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	_, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent link: %w", err)
	}

	var reader io.Reader
	// Use a buffered reader with a larger buffer for better performance
	var bufferSize int

	// File > 100MB (default)
	if file.GetSize() > d.ChunkSize*1024*1024 {
		// 256KB for large files
		bufferSize = 256 * 1024
		// File > 10MB
	} else if file.GetSize() > 10*1024*1024 {
		// 128KB for medium files
		bufferSize = 128 * 1024
	} else {
		// 64KB for small files
		bufferSize = 64 * 1024
	}
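
	// Worked example (illustrative; assumes the default ChunkSize of 100):
	// a 500 MB stream exceeds 100 MB and gets the 256 KiB buffer, a 50 MB
	// stream gets 128 KiB, and anything up to 10 MB reads through 64 KiB.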

	// reader = bufio.NewReader(file)
	reader = bufio.NewReaderSize(file, bufferSize)
	reader = &driver.ReaderUpdatingProgress{
		Reader: &stream.SimpleReaderWithSize{
			Reader: reader,
			Size:   file.GetSize(),
		},
		UpdateProgress: up,
	}
	reader = driver.NewLimitedUploadStream(ctx, reader)

	id, _, err := d.protonDrive.UploadFileByReader(ctx, parentLinkID, file.GetName(), file.ModTime(), reader, 0)
	if err != nil {
		return nil, fmt.Errorf("failed to upload file: %w", err)
	}

	return &model.Object{
		ID:       id,
		Name:     file.GetName(),
		Size:     file.GetSize(),
		Modified: file.ModTime(),
		IsFolder: false,
	}, nil
}

func (d *ProtonDrive) encryptFileName(ctx context.Context, name string, parentLinkID string) (string, error) {
	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	// Temporary file (request)
	tempReq := proton.CreateFileReq{
		SignatureAddress: d.MainShare.Creator,
	}

	// Encrypt the filename
	err = tempReq.SetName(name, d.DefaultAddrKR, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to encrypt filename: %w", err)
	}

	return tempReq.Name, nil
}

func (d *ProtonDrive) generateFileNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
	}

	parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
	if err != nil {
		return "", fmt.Errorf("failed to get parent hash key: %w", err)
	}

	nameHash, err := proton.GetNameHash(name, parentHashKey)
	if err != nil {
		return "", fmt.Errorf("failed to generate name hash: %w", err)
	}

	return nameHash, nil
}

func (d *ProtonDrive) getOriginalNameHash(link *proton.Link) (string, error) {
	if link == nil {
		return "", fmt.Errorf("link cannot be nil")
	}

	if link.Hash == "" {
		return "", fmt.Errorf("link hash is empty")
	}

	return link.Hash, nil
}

func (d *ProtonDrive) getLink(ctx context.Context, linkID string) (*proton.Link, error) {
	if linkID == "" {
		return nil, fmt.Errorf("linkID cannot be empty")
	}

	link, err := d.c.GetLink(ctx, d.MainShare.ShareID, linkID)
	if err != nil {
		return nil, err
	}

	return &link, nil
}

func (d *ProtonDrive) getLinkKR(ctx context.Context, link *proton.Link) (*crypto.KeyRing, error) {
	if link == nil {
		return nil, fmt.Errorf("link cannot be nil")
	}

	// Root Link or Root Dir
	if link.ParentLinkID == "" {
		signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
		if err != nil {
			return nil, err
		}
		return link.GetKeyRing(d.MainShareKR, signatureVerificationKR)
	}

	// Get parent keyring recursively
	parentLink, err := d.getLink(ctx, link.ParentLinkID)
	if err != nil {
		return nil, err
	}

	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return nil, err
	}

	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
	if err != nil {
		return nil, err
	}

	return link.GetKeyRing(parentNodeKR, signatureVerificationKR)
}

var (
	ErrKeyPassOrSaltedKeyPassMustBeNotNil = errors.New("either keyPass or saltedKeyPass must be not nil")
	ErrFailedToUnlockUserKeys             = errors.New("failed to unlock user keys")
)

func getAccountKRs(ctx context.Context, c *proton.Client, keyPass, saltedKeyPass []byte) (*crypto.KeyRing, map[string]*crypto.KeyRing, map[string]proton.Address, []byte, error) {
	user, err := c.GetUser(ctx)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// fmt.Printf("user %#v", user)

	addrsArr, err := c.GetAddresses(ctx)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// fmt.Printf("addr %#v", addr)

	if saltedKeyPass == nil {
		if keyPass == nil {
			return nil, nil, nil, nil, ErrKeyPassOrSaltedKeyPassMustBeNotNil
		}

		// Due to limitations, salts are stored using cacheCredentialToFile
		salts, err := c.GetSalts(ctx)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		// fmt.Printf("salts %#v", salts)

		saltedKeyPass, err = salts.SaltForKey(keyPass, user.Keys.Primary().ID)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		// fmt.Printf("saltedKeyPass ok")
	}

	userKR, addrKRs, err := proton.Unlock(user, addrsArr, saltedKeyPass, nil)
	if err != nil {
		return nil, nil, nil, nil, err
	} else if userKR.CountDecryptionEntities() == 0 {
		return nil, nil, nil, nil, ErrFailedToUnlockUserKeys
	}

	addrs := make(map[string]proton.Address)
	for _, addr := range addrsArr {
		addrs[addr.Email] = addr
	}

	return userKR, addrKRs, addrs, saltedKeyPass, nil
}

func (d *ProtonDrive) getSignatureVerificationKeyring(emailAddresses []string, verificationAddrKRs ...*crypto.KeyRing) (*crypto.KeyRing, error) {
	ret, err := crypto.NewKeyRing(nil)
	if err != nil {
		return nil, err
	}

	for _, emailAddress := range emailAddresses {
		if addr, ok := d.addrData[emailAddress]; ok {
			if addrKR, exists := d.addrKRs[addr.ID]; exists {
				err = d.addKeysFromKR(ret, addrKR)
				if err != nil {
					return nil, err
				}
			}
		}
	}

	for _, kr := range verificationAddrKRs {
		err = d.addKeysFromKR(ret, kr)
		if err != nil {
			return nil, err
		}
	}

	if ret.CountEntities() == 0 {
		return nil, fmt.Errorf("no keyring for signature verification")
	}

	return ret, nil
}

func (d *ProtonDrive) addKeysFromKR(kr *crypto.KeyRing, newKRs ...*crypto.KeyRing) error {
	for i := range newKRs {
		for _, key := range newKRs[i].GetKeys() {
			err := kr.AddKey(key)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func (d *ProtonDrive) DirectRename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	// fmt.Printf("DEBUG DirectRename: path=%s, newName=%s", srcObj.GetPath(), newName)

	if d.MainShare == nil || d.DefaultAddrKR == nil {
		return nil, fmt.Errorf("missing required fields: MainShare=%v, DefaultAddrKR=%v",
			d.MainShare != nil, d.DefaultAddrKR != nil)
	}

	if d.protonDrive == nil {
		return nil, fmt.Errorf("protonDrive bridge is nil")
	}

	srcLink, err := d.getLink(ctx, srcObj.GetID())
	if err != nil {
		return nil, fmt.Errorf("failed to find source: %w", err)
	}

	parentLinkID := srcLink.ParentLinkID
	if parentLinkID == "" {
		return nil, fmt.Errorf("cannot rename root folder")
	}

	encryptedName, err := d.encryptFileName(ctx, newName, parentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt filename: %w", err)
	}

	newHash, err := d.generateFileNameHash(ctx, newName, parentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to generate new hash: %w", err)
	}

	originalHash, err := d.getOriginalNameHash(srcLink)
	if err != nil {
		return nil, fmt.Errorf("failed to get original hash: %w", err)
	}

	renameReq := RenameRequest{
		Name:               encryptedName,
		NameSignatureEmail: d.MainShare.Creator,
		Hash:               newHash,
		OriginalHash:       originalHash,
	}

	err = d.executeRenameAPI(ctx, srcLink.LinkID, renameReq)
	if err != nil {
		return nil, fmt.Errorf("rename API call failed: %w", err)
	}

	return &model.Object{
		ID:       srcLink.LinkID,
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *ProtonDrive) executeRenameAPI(ctx context.Context, linkID string, req RenameRequest) error {
	renameURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/rename",
		d.MainShare.VolumeID, linkID)

	reqBody, err := json.Marshal(req)
	if err != nil {
		return fmt.Errorf("failed to marshal rename request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "PUT", renameURL, bytes.NewReader(reqBody))
	if err != nil {
		return fmt.Errorf("failed to create HTTP request: %w", err)
	}

	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Accept", d.protonJson)
	httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
	httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
	httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
	httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)

	client := &http.Client{}
	resp, err := client.Do(httpReq)
	if err != nil {
		return fmt.Errorf("failed to execute rename request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("rename failed with status %d", resp.StatusCode)
	}

	var renameResp RenameResponse
	if err := json.NewDecoder(resp.Body).Decode(&renameResp); err != nil {
		return fmt.Errorf("failed to decode rename response: %w", err)
	}

	if renameResp.Code != 1000 {
		return fmt.Errorf("rename failed with code %d", renameResp.Code)
	}

	return nil
}
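
// For reference (field names from the RenameRequest type in types.go; values
// hypothetical): the PUT body marshalled above looks like
//
//	{"Name":"<PGP-encrypted name>","NameSignatureEmail":"user@proton.me",
//	 "Hash":"<new name hash>","OriginalHash":"<current name hash>"}
//
// and, as the checks above show, success is signalled by Code 1000 in the JSON
// body alongside the HTTP 200 status.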
|
||||
|
||||
func (d *ProtonDrive) executeMoveAPI(ctx context.Context, linkID string, req MoveRequest) error {
|
||||
// fmt.Printf("DEBUG Move Request - Name: %s\n", req.Name)
|
||||
// fmt.Printf("DEBUG Move Request - Hash: %s\n", req.Hash)
|
||||
// fmt.Printf("DEBUG Move Request - OriginalHash: %s\n", req.OriginalHash)
|
||||
// fmt.Printf("DEBUG Move Request - ParentLinkID: %s\n", req.ParentLinkID)
|
||||
|
||||
// fmt.Printf("DEBUG Move Request - Name length: %d\n", len(req.Name))
|
||||
// fmt.Printf("DEBUG Move Request - NameSignatureEmail: %s\n", req.NameSignatureEmail)
|
||||
// fmt.Printf("DEBUG Move Request - ContentHash: %v\n", req.ContentHash)
|
||||
// fmt.Printf("DEBUG Move Request - NodePassphrase length: %d\n", len(req.NodePassphrase))
|
||||
// fmt.Printf("DEBUG Move Request - NodePassphraseSignature length: %d\n", len(req.NodePassphraseSignature))
|
||||
|
||||
// fmt.Printf("DEBUG Move Request - SrcLinkID: %s\n", linkID)
|
||||
// fmt.Printf("DEBUG Move Request - DstParentLinkID: %s\n", req.ParentLinkID)
|
||||
// fmt.Printf("DEBUG Move Request - ShareID: %s\n", d.MainShare.ShareID)
|
||||
|
||||
srcLink, _ := d.getLink(ctx, linkID)
|
||||
if srcLink != nil && srcLink.ParentLinkID == req.ParentLinkID {
|
||||
return fmt.Errorf("cannot move to same parent directory")
|
||||
}
|
||||
|
||||
moveURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/move",
|
||||
d.MainShare.VolumeID, linkID)
|
||||
|
||||
reqBody, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal move request: %w", err)
|
||||
}
|
||||
|
||||
httpReq, err := http.NewRequestWithContext(ctx, "PUT", moveURL, bytes.NewReader(reqBody))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create HTTP request: %w", err)
|
||||
}
|
||||
|
||||
httpReq.Header.Set("Authorization", "Bearer "+d.ReusableCredential.AccessToken)
|
||||
httpReq.Header.Set("Accept", d.protonJson)
|
||||
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
|
||||
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
|
||||
httpReq.Header.Set("X-Pm-Uid", d.ReusableCredential.UID)
|
||||
httpReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(httpReq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute move request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var moveResp RenameResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&moveResp); err != nil {
|
||||
return fmt.Errorf("failed to decode move response: %w", err)
|
||||
}
|
||||
|
||||
if moveResp.Code != 1000 {
|
||||
return fmt.Errorf("move operation failed with code: %d", moveResp.Code)
|
||||
}
|
||||
|
||||
return nil
|
||||
}

// DirectMove moves srcObj under dstDir with a single move call: the name is
// re-encrypted and the node passphrase re-wrapped for the destination parent,
// so no file content is transferred.
func (d *ProtonDrive) DirectMove(ctx context.Context, srcObj model.Obj, dstDir model.Obj) (model.Obj, error) {
	// fmt.Printf("DEBUG DirectMove: srcPath=%s, dstPath=%s", srcObj.GetPath(), dstDir.GetPath())

	srcLink, err := d.getLink(ctx, srcObj.GetID())
	if err != nil {
		return nil, fmt.Errorf("failed to find source: %w", err)
	}

	dstParentLinkID := dstDir.GetID()

	if srcObj.IsDir() {
		// Check if destination is a descendant of source
		if err := d.checkCircularMove(ctx, srcLink.LinkID, dstParentLinkID); err != nil {
			return nil, err
		}
	}

	// Encrypt the filename for the new location
	encryptedName, err := d.encryptFileName(ctx, srcObj.GetName(), dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt filename: %w", err)
	}

	newHash, err := d.generateNameHash(ctx, srcObj.GetName(), dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to generate new hash: %w", err)
	}

	originalHash, err := d.getOriginalNameHash(srcLink)
	if err != nil {
		return nil, fmt.Errorf("failed to get original hash: %w", err)
	}

	// Re-encrypt node passphrase for new parent context
	reencryptedPassphrase, err := d.reencryptNodePassphrase(ctx, srcLink, dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to re-encrypt node passphrase: %w", err)
	}

	moveReq := MoveRequest{
		ParentLinkID:       dstParentLinkID,
		NodePassphrase:     reencryptedPassphrase,
		Name:               encryptedName,
		NameSignatureEmail: d.MainShare.Creator,
		Hash:               newHash,
		OriginalHash:       originalHash,
		ContentHash:        nil,

		// *** Causes rejection ***
		/* NodePassphraseSignature: srcLink.NodePassphraseSignature, */
	}

	// fmt.Printf("DEBUG MoveRequest validation:\n")
	// fmt.Printf("  Name length: %d\n", len(moveReq.Name))
	// fmt.Printf("  Hash: %s\n", moveReq.Hash)
	// fmt.Printf("  OriginalHash: %s\n", moveReq.OriginalHash)
	// fmt.Printf("  NodePassphrase length: %d\n", len(moveReq.NodePassphrase))
	/* fmt.Printf("  NodePassphraseSignature length: %d\n", len(moveReq.NodePassphraseSignature)) */
	// fmt.Printf("  NameSignatureEmail: %s\n", moveReq.NameSignatureEmail)

	err = d.executeMoveAPI(ctx, srcLink.LinkID, moveReq)
	if err != nil {
		return nil, fmt.Errorf("move API call failed: %w", err)
	}

	return &model.Object{
		ID:       srcLink.LinkID,
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

// reencryptNodePassphrase re-wraps srcLink's node passphrase from the source
// parent's keyring to the destination parent's keyring.
func (d *ProtonDrive) reencryptNodePassphrase(ctx context.Context, srcLink *proton.Link, dstParentLinkID string) (string, error) {
	// Get source parent link with metadata
	srcParentLink, err := d.getLink(ctx, srcLink.ParentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get source parent link: %w", err)
	}

	// Get source parent keyring using link object
	srcParentKR, err := d.getLinkKR(ctx, srcParentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get source parent keyring: %w", err)
	}

	// Get destination parent link with metadata
	dstParentLink, err := d.getLink(ctx, dstParentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get destination parent link: %w", err)
	}

	// Get destination parent keyring using link object
	dstParentKR, err := d.getLinkKR(ctx, dstParentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get destination parent keyring: %w", err)
	}

	// Re-encrypt the node passphrase from source parent context to destination parent context
	reencryptedPassphrase, err := reencryptKeyPacket(srcParentKR, dstParentKR, d.DefaultAddrKR, srcLink.NodePassphrase)
	if err != nil {
		return "", fmt.Errorf("failed to re-encrypt key packet: %w", err)
	}

	return reencryptedPassphrase, nil
}

// generateNameHash computes the keyed name hash of name under the given
// parent, using the parent's hash key.
func (d *ProtonDrive) generateNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	// Get signature verification keyring
	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
	}

	parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
	if err != nil {
		return "", fmt.Errorf("failed to get parent hash key: %w", err)
	}

	nameHash, err := proton.GetNameHash(name, parentHashKey)
	if err != nil {
		return "", fmt.Errorf("failed to generate name hash: %w", err)
	}

	return nameHash, nil
}

// reencryptKeyPacket swaps the asymmetric key packet of an armored PGP message
// so that dstKR can decrypt it; the symmetric data packet is reused unchanged.
func reencryptKeyPacket(srcKR, dstKR, _ *crypto.KeyRing, passphrase string) (string, error) { // addrKR (3)
	oldSplitMessage, err := crypto.NewPGPSplitMessageFromArmored(passphrase)
	if err != nil {
		return "", err
	}

	sessionKey, err := srcKR.DecryptSessionKey(oldSplitMessage.KeyPacket)
	if err != nil {
		return "", err
	}

	newKeyPacket, err := dstKR.EncryptSessionKey(sessionKey)
	if err != nil {
		return "", err
	}

	newSplitMessage := crypto.NewPGPSplitMessage(newKeyPacket, oldSplitMessage.DataPacket)

	return newSplitMessage.GetArmored()
}
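
A note on what reencryptKeyPacket touches: an armored PGP message splits into a key packet (the session key wrapped for a recipient's asymmetric key) and a data packet (the payload encrypted under that session key). The move re-wraps only the key packet for the destination parent's keyring; the session key and the encrypted passphrase bytes are reused verbatim, so no content is ever re-encrypted. A standalone sketch with the same gopenpgp v2 package the function already uses (describe is illustrative, not part of the driver):

package sketch

import (
	"fmt"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
)

// describe prints the sizes of the two halves of an armored split message,
// showing which half reencryptKeyPacket replaces (KeyPacket) and which it
// keeps (DataPacket).
func describe(armoredPassphrase string) error {
	msg, err := crypto.NewPGPSplitMessageFromArmored(armoredPassphrase)
	if err != nil {
		return err
	}
	fmt.Printf("key packet: %d bytes (re-wrapped on move), data packet: %d bytes (unchanged)\n",
		len(msg.KeyPacket), len(msg.DataPacket))
	return nil
}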

// checkCircularMove walks up the parent chain from the destination and rejects
// the move if it passes through the source link, i.e. if the destination lies
// inside the subtree being moved.
func (d *ProtonDrive) checkCircularMove(ctx context.Context, srcLinkID, dstParentLinkID string) error {
	currentLinkID := dstParentLinkID

	for currentLinkID != "" && currentLinkID != d.RootFolderID {
		if currentLinkID == srcLinkID {
			return fmt.Errorf("cannot move folder into itself or its subfolder")
		}

		currentLink, err := d.getLink(ctx, currentLinkID)
		if err != nil {
			return err
		}
		currentLinkID = currentLink.ParentLinkID
	}

	return nil
}
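
Each iteration above costs one getLink call, so the guard is O(depth) in the destination's nesting. The same walk over a plain child-to-parent map, for illustration only (hypothetical IDs; the driver resolves parents through getLink):

package sketch

// wouldBeCircular reports whether moving srcID under dstParentID would place
// a folder inside its own subtree, given a child -> parent map.
func wouldBeCircular(parents map[string]string, srcID, dstParentID, rootID string) bool {
	for cur := dstParentID; cur != "" && cur != rootID; cur = parents[cur] {
		if cur == srcID {
			return true
		}
	}
	return false
}

// With parents = {"b": "a", "c": "b"} and root "a": moving "b" under "c" is
// circular (c lies inside b), while moving "c" under "a" is not.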

// authHandler persists refreshed Proton credentials and reinitializes the
// client whenever the access or refresh token changes.
func (d *ProtonDrive) authHandler(auth proton.Auth) {
	if auth.AccessToken != d.ReusableCredential.AccessToken || auth.RefreshToken != d.ReusableCredential.RefreshToken {
		d.ReusableCredential.UID = auth.UID
		d.ReusableCredential.AccessToken = auth.AccessToken
		d.ReusableCredential.RefreshToken = auth.RefreshToken

		if err := d.initClient(context.Background()); err != nil {
			fmt.Printf("ProtonDrive: failed to reinitialize client after auth refresh: %v\n", err)
		}

		op.MustSaveDriverStorage(d)
	}
}

// initClient rebuilds the Proton client from the stored credentials and
// reloads the account's address keyrings.
func (d *ProtonDrive) initClient(ctx context.Context) error {
	clientOptions := []proton.Option{
		proton.WithAppVersion(d.appVersion),
		proton.WithUserAgent(d.userAgent),
	}
	manager := proton.New(clientOptions...)
	d.c = manager.NewClient(d.ReusableCredential.UID, d.ReusableCredential.AccessToken, d.ReusableCredential.RefreshToken)

	saltedKeyPassBytes, err := base64.StdEncoding.DecodeString(d.ReusableCredential.SaltedKeyPass)
	if err != nil {
		return fmt.Errorf("failed to decode salted key pass: %w", err)
	}

	_, addrKRs, addrs, _, err := getAccountKRs(ctx, d.c, nil, saltedKeyPassBytes)
	if err != nil {
		return fmt.Errorf("failed to get account keyrings: %w", err)
	}

	d.addrKRs = addrKRs
	d.addrData = addrs

	return nil
}

@@ -194,7 +194,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 		log.Debugf("left: %d", left)
 		reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part))
 		m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader)
-		//m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
+		// m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
 		if err != nil {
 			return err
 		}
@@ -212,4 +212,17 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 	return d.upFinish(pre)
 }
 
+func (d *QuarkOrUC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
+	memberInfo, err := d.memberInfo(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &model.StorageDetails{
+		DiskUsage: model.DiskUsage{
+			TotalSpace: memberInfo.Data.TotalCapacity,
+			FreeSpace:  memberInfo.Data.TotalCapacity - memberInfo.Data.UseCapacity,
+		},
+	}, nil
+}
+
 var _ driver.Driver = (*QuarkOrUC)(nil)
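
One caveat in the GetDetails hunk above: TotalCapacity and UseCapacity are uint64 fields (declared in MemberResp further down), so a server that ever reported usage above capacity would make the subtraction wrap around to an enormous FreeSpace. A guarded variant, as a sketch only (freeSpace is hypothetical, not part of the change):

// freeSpace is a hypothetical guarded form of the FreeSpace computation in
// GetDetails. Unsigned subtraction wraps on underflow, so compare first.
func freeSpace(total, used uint64) uint64 {
	if total > used {
		return total - used
	}
	return 0
}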

@@ -12,8 +12,8 @@ type Resp struct {
 	Status  int    `json:"status"`
 	Code    int    `json:"code"`
 	Message string `json:"message"`
-	//ReqId     string `json:"req_id"`
-	//Timestamp int    `json:"timestamp"`
+	// ReqId     string `json:"req_id"`
+	// Timestamp int    `json:"timestamp"`
 }
 
 var _ model.Obj = (*File)(nil)
@@ -21,27 +21,27 @@ var _ model.Obj = (*File)(nil)
 type File struct {
 	Fid      string `json:"fid"`
 	FileName string `json:"file_name"`
-	//PdirFid  string `json:"pdir_fid"`
+	// PdirFid  string `json:"pdir_fid"`
 	Category int    `json:"category"`
-	//FileType int    `json:"file_type"`
+	// FileType int    `json:"file_type"`
 	Size int64 `json:"size"`
-	//FormatType string `json:"format_type"`
-	//Status     int    `json:"status"`
-	//Tags       string `json:"tags,omitempty"`
+	// FormatType string `json:"format_type"`
+	// Status     int    `json:"status"`
+	// Tags       string `json:"tags,omitempty"`
 	LCreatedAt int64 `json:"l_created_at"`
 	LUpdatedAt int64 `json:"l_updated_at"`
-	//NameSpace    int    `json:"name_space"`
-	//IncludeItems int    `json:"include_items,omitempty"`
-	//RiskType     int    `json:"risk_type"`
-	//BackupSign   int    `json:"backup_sign"`
-	//Duration     int    `json:"duration"`
-	//FileSource   string `json:"file_source"`
+	// NameSpace    int    `json:"name_space"`
+	// IncludeItems int    `json:"include_items,omitempty"`
+	// RiskType     int    `json:"risk_type"`
+	// BackupSign   int    `json:"backup_sign"`
+	// Duration     int    `json:"duration"`
+	// FileSource   string `json:"file_source"`
 	File      bool  `json:"file"`
 	CreatedAt int64 `json:"created_at"`
 	UpdatedAt int64 `json:"updated_at"`
-	//PrivateExtra struct {} `json:"_private_extra"`
-	//ObjCategory  string    `json:"obj_category,omitempty"`
-	//Thumbnail    string    `json:"thumbnail,omitempty"`
+	// PrivateExtra struct {} `json:"_private_extra"`
+	// ObjCategory  string    `json:"obj_category,omitempty"`
+	// Thumbnail    string    `json:"thumbnail,omitempty"`
 }
 
 func fileToObj(f File) *model.Object {
@@ -104,19 +104,19 @@ type SortResp struct {
 type DownResp struct {
 	Resp
 	Data []struct {
-		//Fid        string `json:"fid"`
-		//FileName   string `json:"file_name"`
-		//PdirFid    string `json:"pdir_fid"`
-		//Category   int    `json:"category"`
-		//FileType   int    `json:"file_type"`
-		//Size       int    `json:"size"`
-		//FormatType string `json:"format_type"`
-		//Status     int    `json:"status"`
-		//Tags       string `json:"tags"`
-		//LCreatedAt int64  `json:"l_created_at"`
-		//LUpdatedAt int64  `json:"l_updated_at"`
-		//NameSpace  int    `json:"name_space"`
-		//Thumbnail  string `json:"thumbnail"`
+		// Fid        string `json:"fid"`
+		// FileName   string `json:"file_name"`
+		// PdirFid    string `json:"pdir_fid"`
+		// Category   int    `json:"category"`
+		// FileType   int    `json:"file_type"`
+		// Size       int    `json:"size"`
+		// FormatType string `json:"format_type"`
+		// Status     int    `json:"status"`
+		// Tags       string `json:"tags"`
+		// LCreatedAt int64  `json:"l_created_at"`
+		// LUpdatedAt int64  `json:"l_updated_at"`
+		// NameSpace  int    `json:"name_space"`
+		// Thumbnail  string `json:"thumbnail"`
 		DownloadUrl string `json:"download_url"`
 		//Md5      string `json:"md5"`
 		//RiskType int    `json:"risk_type"`
@@ -168,14 +168,14 @@ type TranscodingResp struct {
 			Resoultion string `json:"resoultion"`
 			Success    bool   `json:"success"`
 		} `json:"video_info,omitempty"`
-		//Right          string `json:"right"`
-		//MemberRight    string `json:"member_right"`
-		//TransStatus    string `json:"trans_status"`
-		//Accessable     bool   `json:"accessable"`
-		//SupportsFormat string `json:"supports_format"`
-		//VideoFuncType  string `json:"video_func_type,omitempty"`
+		// Right          string `json:"right"`
+		// MemberRight    string `json:"member_right"`
+		// TransStatus    string `json:"trans_status"`
+		// Accessable     bool   `json:"accessable"`
+		// SupportsFormat string `json:"supports_format"`
+		// VideoFuncType  string `json:"video_func_type,omitempty"`
 	} `json:"video_list"`
-	//AudioList []interface{} `json:"audio_list"`
+	// AudioList []interface{} `json:"audio_list"`
 	FileName  string `json:"file_name"`
 	NameSpace int    `json:"name_space"`
 	Size      int64  `json:"size"`
@@ -247,8 +247,7 @@ type HashResp struct {
 		Thumbnail  string `json:"thumbnail"`
 		FormatType string `json:"format_type"`
 	} `json:"data"`
-	Metadata struct {
-	} `json:"metadata"`
+	Metadata struct{} `json:"metadata"`
 }
 
 type UpAuthResp struct {
@@ -258,6 +257,28 @@ type UpAuthResp struct {
 		Speed   int           `json:"speed"`
 		Headers []interface{} `json:"headers"`
 	} `json:"data"`
 	Metadata struct{} `json:"metadata"`
 }
 
+type MemberResp struct {
+	Resp
+	Data struct {
+		MemberType        string `json:"member_type"`
+		CreatedAt         uint64 `json:"created_at"`
+		SecretUseCapacity uint64 `json:"secret_use_capacity"`
+		UseCapacity       uint64 `json:"use_capacity"`
+		IsNewUser         bool   `json:"is_new_user"`
+		MemberStatus      struct {
+			Vip      string `json:"VIP"`
+			ZVip     string `json:"Z_VIP"`
+			MiniVip  string `json:"MINI_VIP"`
+			SuperVip string `json:"SUPER_VIP"`
+		} `json:"member_status"`
+		SecretTotalCapacity uint64 `json:"secret_total_capacity"`
+		TotalCapacity       uint64 `json:"total_capacity"`
+	} `json:"data"`
+	Metadata struct {
+		RangeSize     int    `json:"range_size"`
+		ServerCurTime uint64 `json:"server_cur_time"`
+	} `json:"metadata"`
+}

@@ -198,7 +198,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) {
 }
 
 func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) {
-	//func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
+	// func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
 	timeStr := time.Now().UTC().Format(http.TimeFormat)
 	data := base.Json{
 		"auth_info": pre.Data.AuthInfo,
@@ -334,3 +334,20 @@ func (d *QuarkOrUC) upFinish(pre UpPreResp) error {
 	time.Sleep(time.Second)
 	return nil
 }
+
+func (d *QuarkOrUC) memberInfo(ctx context.Context) (*MemberResp, error) {
+	var resp MemberResp
+	query := map[string]string{
+		"fetch_subscribe": "false",
+		"_ch":             "home",
+		"fetch_identity":  "false",
+	}
+	_, err := d.request("/member", http.MethodGet, func(req *resty.Request) {
+		req.SetQueryParams(query)
+		req.SetContext(ctx)
+	}, &resp)
+	if err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}

@@ -95,14 +95,22 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs
 	files := make([]model.Obj, 0)
 	pageIndex := int64(0)
 	pageSize := int64(100)
+	desc := "1"
+	orderBy := "3"
+	if d.OrderDirection == "asc" {
+		desc = "0"
+	}
+	if d.OrderBy == "file_name" {
+		orderBy = "1"
+	}
 	for {
 		var filesData FilesData
 		_, err := d.request(ctx, "/file", http.MethodGet, func(req *resty.Request) {
 			req.SetQueryParams(map[string]string{
 				"method":     "list",
 				"parent_fid": dir.GetID(),
-				"order_by":   "3",
-				"desc":       "1",
+				"order_by":   orderBy,
+				"desc":       desc,
 				"category":   "",
 				"source":     "",
 				"ex_source":  "",

@@ -8,6 +8,8 @@ import (
 type Addition struct {
 	// Usually one of two
 	driver.RootID
+	OrderBy        string `json:"order_by" type:"select" options:"file_name,updated_at" default:"updated_at"`
+	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"desc"`
 	// define other
 	RefreshToken string `json:"refresh_token" required:"false" default:""`
 	// 必要且影响登录,由签名决定 (required and affects login; determined by the signature)

@@ -69,15 +69,10 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
 		Limiter: stream.ServerDownloadLimit,
 		Ctx:     ctx,
 	}
-	if !d.Config().OnlyLinkMFile {
-		return &model.Link{
-			RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
-			SyncClosers: utils.NewSyncClosers(remoteFile),
-		}, nil
-	}
 	return &model.Link{
-		MFile:       mFile,
-		SyncClosers: utils.NewSyncClosers(remoteFile),
+		RangeReader:      stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
+		SyncClosers:      utils.NewSyncClosers(remoteFile),
+		RequireReference: true,
 	}, nil
 }

@@ -16,12 +16,12 @@ type Addition struct {
 }
 
 var config = driver.Config{
-	Name:          "SFTP",
-	LocalSort:     true,
-	OnlyLinkMFile: false,
-	DefaultRoot:   "/",
-	CheckStatus:   true,
-	NoLinkURL:     true,
+	Name:        "SFTP",
+	LocalSort:   true,
+	OnlyProxy:   true,
+	DefaultRoot: "/",
+	CheckStatus: true,
+	NoLinkURL:   true,
 }
 
 func init() {

@@ -11,7 +11,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/stream"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
 
-	"github.com/hirochachacha/go-smb2"
+	"github.com/cloudsoda/go-smb2"
 )
 
 type SMB struct {
@@ -33,7 +33,7 @@ func (d *SMB) Init(ctx context.Context) error {
 	if !strings.Contains(d.Addition.Address, ":") {
 		d.Addition.Address = d.Addition.Address + ":445"
 	}
-	return d._initFS()
+	return d._initFS(ctx)
 }
 
 func (d *SMB) Drop(ctx context.Context) error {
@@ -44,7 +44,7 @@ func (d *SMB) Drop(ctx context.Context) error {
 }
 
 func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return nil, err
 	}
 	fullPath := dir.GetPath()
@@ -71,7 +71,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
 }
 
 func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return nil, err
 	}
 	fullPath := file.GetPath()
@@ -86,20 +86,15 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 		Limiter: stream.ServerDownloadLimit,
 		Ctx:     ctx,
 	}
-	if !d.Config().OnlyLinkMFile {
-		return &model.Link{
-			RangeReader: stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
-			SyncClosers: utils.NewSyncClosers(remoteFile),
-		}, nil
-	}
 	return &model.Link{
-		MFile:       mFile,
-		SyncClosers: utils.NewSyncClosers(remoteFile),
+		RangeReader:      stream.GetRangeReaderFromMFile(file.GetSize(), mFile),
+		SyncClosers:      utils.NewSyncClosers(remoteFile),
+		RequireReference: true,
	}, nil
 }
 
 func (d *SMB) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return err
 	}
 	fullPath := filepath.Join(parentDir.GetPath(), dirName)
@@ -113,7 +108,7 @@ func (d *SMB) MakeDir(ctx context.Context, parentDir model.Obj, dirName string)
 }
 
 func (d *SMB) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return err
 	}
 	srcPath := srcObj.GetPath()
@@ -128,7 +123,7 @@ func (d *SMB) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 }
 
 func (d *SMB) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return err
 	}
 	srcPath := srcObj.GetPath()
@@ -143,7 +138,7 @@ func (d *SMB) Rename(ctx context.Context, srcObj model.Obj, newName string) erro
 }
 
 func (d *SMB) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return err
 	}
 	srcPath := srcObj.GetPath()
@@ -163,7 +158,7 @@ func (d *SMB) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 }
 
 func (d *SMB) Remove(ctx context.Context, obj model.Obj) error {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return err
 	}
 	var err error
@@ -182,7 +177,7 @@ func (d *SMB) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return err
 	}
 	fullPath := filepath.Join(dstDir.GetPath(), stream.GetName())
@@ -206,7 +201,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
 }
 
 func (d *SMB) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
-	if err := d.checkConn(); err != nil {
+	if err := d.checkConn(ctx); err != nil {
 		return nil, err
 	}
 	stat, err := d.fs.Statfs(d.RootFolderPath)

@@ -14,12 +14,12 @@ type Addition struct {
 }
 
 var config = driver.Config{
-	Name:          "SMB",
-	LocalSort:     true,
-	OnlyLinkMFile: false,
-	DefaultRoot:   ".",
-	NoCache:       true,
-	NoLinkURL:     true,
+	Name:        "SMB",
+	LocalSort:   true,
+	OnlyProxy:   true,
+	DefaultRoot: ".",
+	NoCache:     true,
+	NoLinkURL:   true,
 }
 
 func init() {