Mirror of https://github.com/OpenListTeam/OpenList.git (synced 2025-11-25 03:15:19 +08:00)

Commit: Merge branch 'main' into plugin
@@ -65,6 +65,7 @@ Thank you for your support and understanding of the OpenList project.
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

@@ -93,7 +94,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)

- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode

@@ -65,6 +65,7 @@ OpenList is an open-source project independently maintained by the OpenList Team, following the AGPL-3
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [分秒帧 (Mediatrack)](https://www.mediatrack.cn)
- [x] [MediaFire](https://www.mediafire.com)
- [x] [和彩云 (139yun)](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [百度网盘 (BaiduNetdisk)](http://pan.baidu.com)

@@ -93,6 +93,7 @@ Thank you for your support and understanding of the OpenList project.
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] [MediaFire](https://www.mediafire.com)
- [x] Easy to deploy and ready to use out of the box
- [x] File preview (PDF, markdown, code, plain text, etc.)
- [x] Image preview in gallery mode

@@ -64,6 +64,7 @@ Thank you for your support and understanding
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)

@@ -27,6 +27,8 @@ import (
    "github.com/spf13/cobra"
    "golang.org/x/net/http2"
    "golang.org/x/net/http2/h2c"

    "github.com/quic-go/quic-go/http3"
)

// ServerCmd represents the server command
@@ -63,6 +65,7 @@ the address is defined in config file`,
    httpHandler = h2c.NewHandler(r, &http2.Server{})
}
var httpSrv, httpsSrv, unixSrv *http.Server
var quicSrv *http3.Server
if conf.Conf.Scheme.HttpPort != -1 {
    httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
    fmt.Printf("start HTTP server @ %s\n", httpBase)
@@ -86,6 +89,24 @@ the address is defined in config file`,
            utils.Log.Fatalf("failed to start https: %s", err.Error())
        }
    }()
    if conf.Conf.Scheme.EnableH3 {
        fmt.Printf("start HTTP3 (quic) server @ %s\n", httpsBase)
        utils.Log.Infof("start HTTP3 (quic) server @ %s", httpsBase)
        r.Use(func(c *gin.Context) {
            if c.Request.TLS != nil {
                port := conf.Conf.Scheme.HttpsPort
                c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", port))
            }
            c.Next()
        })
        quicSrv = &http3.Server{Addr: httpsBase, Handler: r}
        go func() {
            err := quicSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
            if err != nil && !errors.Is(err, http.ErrServerClosed) {
                utils.Log.Fatalf("failed to start http3 (quic): %s", err.Error())
            }
        }()
    }
}
if conf.Conf.Scheme.UnixFile != "" {
    fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
@@ -203,6 +224,15 @@ the address is defined in config file`,
            utils.Log.Fatal("HTTPS server shutdown err: ", err)
        }
    }()
    if conf.Conf.Scheme.EnableH3 {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if err := quicSrv.Shutdown(ctx); err != nil {
                utils.Log.Fatal("HTTP3 (quic) server shutdown err: ", err)
            }
        }()
    }
}
if conf.Conf.Scheme.UnixFile != "" {
    wg.Add(1)

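For readers following the new EnableH3 path above: the pattern is to keep serving HTTPS over TCP while starting a parallel quic-go HTTP/3 listener on the same address, and to advertise it to browsers with an Alt-Svc header. A minimal, self-contained sketch of that wiring (plain net/http instead of the project's gin router; the port and certificate paths are placeholders):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/quic-go/quic-go/http3"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Only TLS responses can usefully advertise an HTTP/3 endpoint.
		if r.TLS != nil {
			w.Header().Set("Alt-Svc", `h3=":8443"; ma=86400`)
		}
		fmt.Fprintln(w, "served via", r.Proto)
	})

	// HTTP/3 listens on UDP :8443; the TCP server below shares the port number.
	quicSrv := &http3.Server{Addr: ":8443", Handler: mux}
	go func() {
		if err := quicSrv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil {
			fmt.Println("http3:", err)
		}
	}()
	if err := http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", mux); err != nil && err != http.ErrServerClosed {
		fmt.Println("https:", err)
	}
}
```

Clients connect over TCP first, see Alt-Svc, and may switch later requests to QUIC, which is why the middleware in the diff only sets the header when c.Request.TLS != nil.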
@@ -245,4 +245,17 @@ func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, delete
    return d.client.DeleteOfflineTasks(hashes, deleteFiles)
}

func (d *Pan115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    info, err := d.client.GetInfo()
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: uint64(info.SpaceInfo.AllTotal.Size),
            FreeSpace:  uint64(info.SpaceInfo.AllRemain.Size),
        },
    }, nil
}

var _ driver.Driver = (*Pan115)(nil)

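Most drivers in this commit implement the new GetDetails hook the same way: fetch a quota/size payload from the provider and map it onto model.StorageDetails. Where the provider reports only used and total, the diff calls driver.DiskUsageFromUsedAndTotal; a sketch of what that helper plausibly does (an assumption, not the repo's source), with a local stand-in for model.DiskUsage:

```go
package main

import "fmt"

// diskUsage mirrors the TotalSpace/FreeSpace shape used by model.DiskUsage.
type diskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

// fromUsedAndTotal derives free space from used/total, clamping at zero so a
// provider reporting used > total cannot underflow the unsigned subtraction.
func fromUsedAndTotal(used, total uint64) diskUsage {
	var free uint64
	if total > used {
		free = total - used
	}
	return diskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
	fmt.Printf("%+v\n", fromUsedAndTotal(30<<30, 100<<30)) // 30 GiB used of 100 GiB
}
```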
@@ -74,7 +74,6 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
    "type": f.Type,
}
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {

    req.SetBody(data)
}, nil)
if err != nil {
@@ -254,4 +253,15 @@ func (d *Pan123) APIRateLimit(ctx context.Context, api string) error {
    return limiter.Wait(ctx)
}

func (d *Pan123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    userInfo, err := d.getUserInfo(ctx)
    if err != nil {
        return nil, err
    }
    total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
    return &model.StorageDetails{
        DiskUsage: driver.DiskUsageFromUsedAndTotal(userInfo.Data.SpaceUsed, total),
    }, nil
}

var _ driver.Driver = (*Pan123)(nil)

@@ -122,3 +122,14 @@ type S3PreSignedURLs struct {
        PreSignedUrls map[string]string `json:"presignedUrls"`
    } `json:"data"`
}

type UserInfoResp struct {
    Data struct {
        Uid            int64  `json:"UID"`
        Nickname       string `json:"Nickname"`
        SpaceUsed      uint64 `json:"SpaceUsed"`
        SpacePermanent uint64 `json:"SpacePermanent"`
        SpaceTemp      uint64 `json:"SpaceTemp"`
        FileCount      int    `json:"FileCount"`
    } `json:"data"`
}

@@ -124,7 +124,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
    if cur == chunkCount {
        curSize = lastChunkSize
    }
    var reader *stream.SectionReader
    var reader io.ReadSeeker
    var rateLimitedRd io.Reader
    threadG.GoWithLifecycle(errgroup.Lifecycle{
        Before: func(ctx context.Context) error {

@@ -43,7 +43,7 @@ const (
    S3Auth           = MainApi + "/file/s3_upload_object/auth"
    UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
    S3Complete       = MainApi + "/file/s3_complete_multipart_upload"
    //AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
    // AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)

func signPath(path string, os string, version string) (k string, v string) {
@@ -282,3 +282,14 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
    }
    return res, nil
}

func (d *Pan123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
    var resp UserInfoResp
    _, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

@@ -84,7 +84,7 @@ func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
    }, nil
}

uid, err := d.getUID()
uid, err := d.getUID(ctx)
if err != nil {
    return nil, err
}
@@ -215,7 +215,7 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}

func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    userInfo, err := d.getUserInfo()
    userInfo, err := d.getUserInfo(ctx)
    if err != nil {
        return nil, err
    }
@@ -229,5 +229,15 @@ func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error)
    }, nil
}

var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)
func (d *Open123) OfflineDownload(ctx context.Context, url string, dir model.Obj, callback string) (int, error) {
    return d.createOfflineDownloadTask(ctx, url, dir.GetID(), callback)
}

func (d *Open123) OfflineDownloadProcess(ctx context.Context, taskID int) (float64, int, error) {
    return d.queryOfflineDownloadStatus(ctx, taskID)
}

var (
    _ driver.Driver    = (*Open123)(nil)
    _ driver.PutResult = (*Open123)(nil)
)

@@ -19,6 +19,7 @@ func (a *ApiInfo) Require() {
        a.token <- struct{}{}
    }
}

func (a *ApiInfo) Release() {
    if a.qps > 0 {
        time.AfterFunc(time.Second, func() {
@@ -26,13 +27,16 @@ func (a *ApiInfo) Release() {
        })
    }
}

func (a *ApiInfo) SetQPS(qps int) {
    a.qps = qps
    a.token = make(chan struct{}, qps)
}

func (a *ApiInfo) NowLen() int {
    return len(a.token)
}

func InitApiInfo(url string, qps int) *ApiInfo {
    return &ApiInfo{
        url: url,
@@ -185,3 +189,18 @@ type UploadCompleteResp struct {
        FileID int64 `json:"fileID"`
    } `json:"data"`
}

type OfflineDownloadResp struct {
    BaseResp
    Data struct {
        TaskID int `json:"taskID"`
    } `json:"data"`
}

type OfflineDownloadProcessResp struct {
    BaseResp
    Data struct {
        Process float64 `json:"process"`
        Status  int     `json:"status"`
    } `json:"data"`
}

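The ApiInfo type above is effectively a per-endpoint QPS gate: Require pushes a token into a buffered channel sized to the QPS limit (blocking while the one-second window is full), and Release drains one token a second later. A runnable sketch of the same mechanism outside the driver:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// qpsGate reproduces the ApiInfo mechanism: a buffered channel sized to the
// QPS limit holds one token per call started in the current one-second window.
type qpsGate struct {
	qps   int
	token chan struct{}
}

func (g *qpsGate) Require() {
	if g.qps > 0 {
		g.token <- struct{}{} // blocks while the window is full
	}
}

func (g *qpsGate) Release() {
	if g.qps > 0 {
		// Free the slot one second later, enforcing calls-per-second
		// rather than simple concurrency.
		time.AfterFunc(time.Second, func() { <-g.token })
	}
}

func main() {
	g := &qpsGate{qps: 2, token: make(chan struct{}, 2)}
	var wg sync.WaitGroup
	start := time.Now()
	for i := 0; i < 6; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			g.Require()
			defer g.Release()
			fmt.Printf("call %d after %v\n", i, time.Since(start).Round(time.Second))
		}(i)
	}
	wg.Wait()
	time.Sleep(time.Second) // let the pending AfterFunc releases fire
}
```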
@@ -67,7 +67,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
    partNumber := partIndex + 1 // part numbers start from 1
    offset := partIndex * chunkSize
    size := min(chunkSize, size-offset)
    var reader *stream.SectionReader
    var reader io.ReadSeeker
    var rateLimitedRd io.Reader
    sliceMD5 := ""
    // form fields

@@ -1,6 +1,7 @@
package _123_open

import (
    "context"
    "crypto/md5"
    "encoding/json"
    "errors"
@@ -18,7 +19,7 @@ import (
    log "github.com/sirupsen/logrus"
)

var ( //the AccessToken QPS limits differ by scenario; modularized as below for easy extension
var ( // the AccessToken QPS limits differ by scenario; modularized as below for easy extension
    Api = "https://open-api.123pan.com"

    AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
@@ -33,6 +34,9 @@ var ( //the AccessToken QPS limits differ by scenario; modularized as below for easy extension
    Trash          = InitApiInfo(Api+"/api/v1/file/trash", 2)
    UploadCreate   = InitApiInfo(Api+"/upload/v2/file/create", 2)
    UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)

    OfflineDownload        = InitApiInfo(Api+"/api/v1/offline/download", 1)
    OfflineDownloadProcess = InitApiInfo(Api+"/api/v1/offline/download/process", 5)
)

func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@@ -82,7 +86,6 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
            return nil, errors.New(baseResp.Message)
        }
    }

}

func (d *Open123) flushAccessToken() error {
@@ -148,21 +151,23 @@ func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuratio
    return objURL.String(), nil
}

func (d *Open123) getUserInfo() (*UserInfoResp, error) {
func (d *Open123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
    var resp UserInfoResp

    if _, err := d.Request(UserInfo, http.MethodGet, nil, &resp); err != nil {
    if _, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp); err != nil {
        return nil, err
    }

    return &resp, nil
}

func (d *Open123) getUID() (uint64, error) {
func (d *Open123) getUID(ctx context.Context) (uint64, error) {
    if d.UID != 0 {
        return d.UID, nil
    }
    resp, err := d.getUserInfo()
    resp, err := d.getUserInfo(ctx)
    if err != nil {
        return 0, err
    }
@@ -184,7 +189,6 @@ func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*Fi
            "searchData": "",
        })
    }, &resp)

    if err != nil {
        return nil, err
    }
@@ -276,3 +280,34 @@ func (d *Open123) trash(fileId int64) error {

    return nil
}

func (d *Open123) createOfflineDownloadTask(ctx context.Context, url string, dirID, callback string) (taskID int, err error) {
    body := base.Json{
        "url":   url,
        "dirID": dirID,
    }
    if len(callback) > 0 {
        body["callBackUrl"] = callback
    }
    var resp OfflineDownloadResp
    _, err = d.Request(OfflineDownload, http.MethodPost, func(req *resty.Request) {
        req.SetBody(body)
    }, &resp)
    if err != nil {
        return 0, err
    }
    return resp.Data.TaskID, nil
}

func (d *Open123) queryOfflineDownloadStatus(ctx context.Context, taskID int) (process float64, status int, err error) {
    var resp OfflineDownloadProcessResp
    _, err = d.Request(OfflineDownloadProcess, http.MethodGet, func(req *resty.Request) {
        req.SetQueryParams(map[string]string{
            "taskID": strconv.Itoa(taskID),
        })
    }, &resp)
    if err != nil {
        return .0, 0, err
    }
    return resp.Data.Process, resp.Data.Status, nil
}

@@ -54,7 +54,8 @@ func (d *Yun139) Init(ctx context.Context) error {
    "userInfo": base.Json{
        "userType":    1,
        "accountType": 1,
        "accountName": d.Account},
        "accountName": d.Account,
    },
    "modAddrType": 1,
}, &resp)
if err != nil {
@@ -732,7 +733,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
    "manualRename": 2,
    "operation":    0,
    "path":         path.Join(dstDir.GetPath(), dstDir.GetID()),
    "seqNo":        random.String(32), //the sequence number must not be empty
    "seqNo":        random.String(32), // the sequence number must not be empty
    "totalSize":    reportSize,
    "uploadContentList": []base.Json{{
        "contentName": stream.GetName(),
@@ -834,4 +835,48 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{},
    }
}

func (d *Yun139) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if d.UserDomainID == "" {
        return nil, errs.NotImplement
    }
    var total, free uint64
    if d.isFamily() {
        diskInfo, err := d.getFamilyDiskInfo(ctx)
        if err != nil {
            return nil, err
        }
        totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
        if err != nil {
            return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
        }
        usedMb, err := strconv.ParseUint(diskInfo.Data.UsedSize, 10, 64)
        if err != nil {
            return nil, fmt.Errorf("failed convert used size into integer: %+v", err)
        }
        total = totalMb * 1024 * 1024
        free = total - (usedMb * 1024 * 1024)
    } else {
        diskInfo, err := d.getPersonalDiskInfo(ctx)
        if err != nil {
            return nil, err
        }
        totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
        if err != nil {
            return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
        }
        freeMb, err := strconv.ParseUint(diskInfo.Data.FreeDiskSize, 10, 64)
        if err != nil {
            return nil, fmt.Errorf("failed convert free size into integer: %+v", err)
        }
        total = totalMb * 1024 * 1024
        free = freeMb * 1024 * 1024
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  free,
        },
    }, nil
}

var _ driver.Driver = (*Yun139)(nil)

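The 139 disk-info endpoints report DiskSize/UsedSize/FreeDiskSize as decimal strings in megabytes, which is why GetDetails above parses with strconv.ParseUint and multiplies by 1024*1024. The conversion in isolation, as a small sketch (field semantics inferred from the diff):

```go
package main

import (
	"fmt"
	"strconv"
)

// mbStringToBytes converts a decimal-string megabyte count, as returned by
// the 139 disk-info endpoints, into bytes.
func mbStringToBytes(s string) (uint64, error) {
	mb, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to convert size %q into an integer: %w", s, err)
	}
	return mb * 1024 * 1024, nil
}

func main() {
	b, _ := mbStringToBytes("10240")
	fmt.Println(b) // 10737418240, i.e. 10 GiB
}
```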
@@ -11,6 +11,7 @@ type Addition struct {
    driver.RootID
    Type                 string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
    CloudID              string `json:"cloud_id"`
    UserDomainID         string `json:"user_domain_id" help:"ud_id in Cookie, fill in to show disk usage"`
    CustomUploadPartSize int64  `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
    ReportRealSize       bool   `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
    UseLargeThumbnail    bool   `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`

@@ -312,3 +312,20 @@ type RefreshTokenResp struct {
    AccessToken string `xml:"accessToken"`
    Desc        string `xml:"desc"`
}

type PersonalDiskInfoResp struct {
    BaseResp
    Data struct {
        FreeDiskSize         string `json:"freeDiskSize"`
        DiskSize             string `json:"diskSize"`
        IsInfinitePicStorage *bool  `json:"isInfinitePicStorage"`
    } `json:"data"`
}

type FamilyDiskInfoResp struct {
    BaseResp
    Data struct {
        UsedSize string `json:"usedSize"`
        DiskSize string `json:"diskSize"`
    } `json:"data"`
}

@@ -107,8 +107,7 @@ func (d *Yun139) refreshToken() error {
    return nil
}

func (d *Yun139) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
    url := "https://yun.139.com" + pathname
func (d *Yun139) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
    req := base.RestyClient.R()
    randStr := random.String(16)
    ts := time.Now().Format("2006-01-02 15:04:05")
@@ -219,7 +218,7 @@ func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error
}

func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
    return d.request(pathname, http.MethodPost, func(req *resty.Request) {
    return d.request("https://yun.139.com"+pathname, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data)
    }, resp)
}
@@ -268,7 +267,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
        HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
    },
    Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
    //Thumbnail: content.BigthumbnailURL,
    // Thumbnail: content.BigthumbnailURL,
}
files = append(files, &f)
}
@@ -335,7 +334,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
        Path: path, // Path of the directory containing the file
    },
    Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
    //Thumbnail: content.BigthumbnailURL,
    // Thumbnail: content.BigthumbnailURL,
}
files = append(files, &f)
}
@@ -390,7 +389,7 @@ func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) {
        Path: path, // Path of the directory containing the file
    },
    Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
    //Thumbnail: content.BigthumbnailURL,
    // Thumbnail: content.BigthumbnailURL,
}
files = append(files, &f)
}
@@ -418,6 +417,7 @@ func (d *Yun139) getLink(contentId string) (string, error) {
    }
    return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
}

func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
    data := d.newJson(base.Json{
        "contentID": contentId,
@@ -510,6 +510,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
    }
    return res.Body(), nil
}

func (d *Yun139) personalPost(pathname string, data interface{}, resp interface{}) ([]byte, error) {
    return d.personalRequest(pathname, http.MethodPost, func(req *resty.Request) {
        req.SetBody(data)
@@ -545,7 +546,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
    }
    nextPageCursor = resp.Data.NextPageCursor
    for _, item := range resp.Data.Items {
        var isFolder = (item.Type == "folder")
        isFolder := (item.Type == "folder")
        var f model.Obj
        if isFolder {
            f = &model.Object{
@@ -557,7 +558,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
            IsFolder: isFolder,
        }
    } else {
        var Thumbnails = item.Thumbnails
        Thumbnails := item.Thumbnails
        var ThumbnailUrl string
        if d.UseLargeThumbnail {
            for _, thumb := range Thumbnails {
@@ -600,7 +601,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
    if err != nil {
        return "", err
    }
    var cdnUrl = jsoniter.Get(res, "data", "cdnUrl").ToString()
    cdnUrl := jsoniter.Get(res, "data", "cdnUrl").ToString()
    if cdnUrl != "" {
        return cdnUrl, nil
    } else {
@@ -614,12 +615,14 @@ func (d *Yun139) getAuthorization() string {
    }
    return d.Authorization
}

func (d *Yun139) getAccount() string {
    if d.ref != nil {
        return d.ref.getAccount()
    }
    return d.Account
}

func (d *Yun139) getPersonalCloudHost() string {
    if d.ref != nil {
        return d.ref.getPersonalCloudHost()
@@ -670,3 +673,33 @@ func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo,
    }
    return nil
}

func (d *Yun139) getPersonalDiskInfo(ctx context.Context) (*PersonalDiskInfoResp, error) {
    data := map[string]interface{}{
        "userDomainId": d.UserDomainID,
    }
    var resp PersonalDiskInfoResp
    _, err := d.request("https://user-njs.yun.139.com/user/disk/getPersonalDiskInfo", http.MethodPost, func(req *resty.Request) {
        req.SetBody(data)
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

func (d *Yun139) getFamilyDiskInfo(ctx context.Context) (*FamilyDiskInfoResp, error) {
    data := map[string]interface{}{
        "userDomainId": d.UserDomainID,
    }
    var resp FamilyDiskInfoResp
    _, err := d.request("https://user-njs.yun.139.com/user/disk/getFamilyDiskInfo", http.MethodPost, func(req *resty.Request) {
        req.SetBody(data)
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

@@ -194,4 +194,17 @@ func (d *Cloud189) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
    return d.newUpload(ctx, dstDir, stream, up)
}

func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    capacityInfo, err := d.getCapacityInfo(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
            FreeSpace:  capacityInfo.CloudCapacityInfo.FreeSize,
        },
    }, nil
}

var _ driver.Driver = (*Cloud189)(nil)

@@ -66,3 +66,21 @@ type DownResp struct {
    ResMessage      string `json:"res_message"`
    FileDownloadUrl string `json:"downloadUrl"`
}

type CapacityResp struct {
    ResCode           int    `json:"res_code"`
    ResMessage        string `json:"res_message"`
    Account           string `json:"account"`
    CloudCapacityInfo struct {
        FreeSize     uint64 `json:"freeSize"`
        MailUsedSize uint64 `json:"mail189UsedSize"`
        TotalSize    uint64 `json:"totalSize"`
        UsedSize     uint64 `json:"usedSize"`
    } `json:"cloudCapacityInfo"`
    FamilyCapacityInfo struct {
        FreeSize  uint64 `json:"freeSize"`
        TotalSize uint64 `json:"totalSize"`
        UsedSize  uint64 `json:"usedSize"`
    } `json:"familyCapacityInfo"`
    TotalSize uint64 `json:"totalSize"`
}

@@ -157,7 +157,7 @@ func (d *Cloud189) request(url string, method string, callback base.ReqCallback,
    if err != nil {
        return nil, err
    }
    //log.Debug(res.String())
    // log.Debug(res.String())
    if e.ErrorCode != "" {
        if e.ErrorCode == "InvalidSessionKey" {
            err = d.newLogin()
@@ -186,8 +186,8 @@ func (d *Cloud189) getFiles(fileId string) ([]model.Obj, error) {
    "mediaType":  "0",
    "folderId":   fileId,
    "iconOption": "5",
    "orderBy":    "lastOpTime", //account.OrderBy
    "descending": "true",       //account.OrderDirection
    "orderBy":    "lastOpTime", // account.OrderBy
    "descending": "true",       // account.OrderDirection
    })
}, &resp)
if err != nil {
@@ -311,7 +311,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
    }
    d.sessionKey = sessionKey
    const DEFAULT int64 = 10485760
    var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
    count := int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))

    res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
        "parentFolderId": dstDir.GetID(),
@@ -340,10 +340,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
    if DEFAULT < byteSize {
        byteSize = DEFAULT
    }
    //log.Debugf("%d,%d", byteSize, finish)
    // log.Debugf("%d,%d", byteSize, finish)
    byteData := make([]byte, byteSize)
    n, err := io.ReadFull(file, byteData)
    //log.Debug(err, n)
    // log.Debug(err, n)
    if err != nil {
        return err
    }
@@ -395,3 +395,14 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
    }, nil)
    return err
}

func (d *Cloud189) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
    var resp CapacityResp
    _, err := d.request("https://cloud.189.cn/api/portal/getUserSizeInfo.action", http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

@@ -69,7 +69,7 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
// avoid logging in again
if !y.isLogin() || y.Addition.AccessToken == "" {
    if err = y.login(); err != nil {
        return
        return err
    }
}

@@ -83,7 +83,7 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
y.cron = cron.NewCron(time.Minute * 5)
y.cron.Do(y.keepAlive)

return
return err
}

func (y *Cloud189TV) Drop(ctx context.Context) error {
@@ -244,7 +244,6 @@ func (y *Cloud189TV) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    FileName: srcObj.GetName(),
    IsFolder: BoolToNumber(srcObj.IsDir()),
})

if err != nil {
    return err
}
@@ -278,5 +277,25 @@ func (y *Cloud189TV) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
}

return y.OldUpload(ctx, dstDir, stream, up, isFamily, overwrite)

}

func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    capacityInfo, err := y.getCapacityInfo(ctx)
    if err != nil {
        return nil, err
    }
    var total, free uint64
    if y.isFamily() {
        total = capacityInfo.FamilyCapacityInfo.TotalSize
        free = capacityInfo.FamilyCapacityInfo.FreeSize
    } else {
        total = capacityInfo.CloudCapacityInfo.TotalSize
        free = capacityInfo.CloudCapacityInfo.FreeSize
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  free,
        },
    }, nil
}

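A recurring cleanup across the 189 drivers in this commit is replacing bare `return` statements in functions with a named `err` result by an explicit `return err`. Behavior is identical; the explicit form just keeps the returned value visible at each return site. A contrived sketch of the two styles (step1 is a placeholder helper, not the repo's code):

```go
package main

import "fmt"

func step1() error { return nil } // placeholder for a login step

// loginNaked relies on the named result: a bare `return` hands back whatever
// `err` happens to hold at that point, which is easy to break in refactors.
func loginNaked() (err error) {
	if err = step1(); err != nil {
		return
	}
	return
}

// loginExplicit is the style the diff moves to.
func loginExplicit() error {
	if err := step1(); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(loginNaked(), loginExplicit())
}
```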
@@ -316,3 +316,21 @@ type BatchTaskConflictTaskInfoResp struct {
    TaskInfos []BatchTaskInfo
    TaskType  int `json:"taskType"`
}

type CapacityResp struct {
    ResCode           int    `json:"res_code"`
    ResMessage        string `json:"res_message"`
    Account           string `json:"account"`
    CloudCapacityInfo struct {
        FreeSize     uint64 `json:"freeSize"`
        MailUsedSize uint64 `json:"mail189UsedSize"`
        TotalSize    uint64 `json:"totalSize"`
        UsedSize     uint64 `json:"usedSize"`
    } `json:"cloudCapacityInfo"`
    FamilyCapacityInfo struct {
        FreeSize  uint64 `json:"freeSize"`
        TotalSize uint64 `json:"totalSize"`
        UsedSize  uint64 `json:"usedSize"`
    } `json:"familyCapacityInfo"`
    TotalSize uint64 `json:"totalSize"`
}

@@ -70,6 +70,9 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
}

func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
    if y.tokenInfo == nil {
        return nil, fmt.Errorf("login failed")
    }
    req := y.client.R().SetQueryParams(clientSuffix())

    if params != nil {
@@ -173,6 +176,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
    }
    return body, nil
}

func (y *Cloud189TV) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
    fullUrl := ApiUrl
    if isFamily {
@@ -238,9 +242,8 @@ func (y *Cloud189TV) login() (err error) {
    req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/getQrCodeUUID.action",
        http.MethodGet))
    _, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/getQrCodeUUID.action")

    if err != nil {
        return
        return err
    }
    if erron.HasError() {
        return &erron
@@ -280,7 +283,7 @@ func (y *Cloud189TV) login() (err error) {
    req.SetQueryParam("uuid", y.TempUuid)
    _, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
    if err != nil {
        return
        return err
    }
    if erron.HasError() {
        return &erron
@@ -300,7 +303,7 @@ func (y *Cloud189TV) login() (err error) {
    reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
    _, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
    if err != nil {
        return
        return err
    }

    if erron.HasError() {
@@ -309,7 +312,7 @@ func (y *Cloud189TV) login() (err error) {

    y.tokenInfo = &tokenInfo
    op.MustSaveDriverStorage(y)
    return
    return err
}

// refreshSession tries to refresh the session using the existing AccessToken
@@ -324,7 +327,7 @@ func (y *Cloud189TV) refreshSession() (err error) {
    reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
    _, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
    if err != nil {
        return
        return err
    }

    if erron.HasError() {
@@ -371,7 +374,7 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// legacy upload; the family cloud does not support overwrite
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
    fileMd5 := file.GetHash().GetHash(utils.MD5)
    var tempFile = file.GetFile()
    tempFile := file.GetFile()
    var err error
    if len(fileMd5) != utils.MD5.Width {
        tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
@@ -474,7 +477,6 @@ func (y *Cloud189TV) OldUploadCreate(ctx context.Context, parentID string, fileM
        })
    }
}, &uploadInfo, isFamily)

if err != nil {
    return nil, err
}
@@ -628,3 +630,15 @@ func (y *Cloud189TV) WaitBatchTask(aType string, taskID string, t time.Duration)
        time.Sleep(t)
    }
}

func (y *Cloud189TV) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
    fullUrl := ApiUrl + "/portal/getUserSizeInfo.action"
    var resp CapacityResp
    _, err := y.get(fullUrl, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

@@ -90,11 +90,11 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
if y.Addition.RefreshToken != "" {
    y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
    if err = y.refreshToken(); err != nil {
        return
        return err
    }
} else {
    if err = y.login(); err != nil {
        return
        return err
    }
}

@@ -124,7 +124,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
        utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
    }
})
return
return err
}

func (d *Cloud189PC) InitReference(storage driver.Driver) error {
@@ -305,7 +305,6 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    FileName: srcObj.GetName(),
    IsFolder: BoolToNumber(srcObj.IsDir()),
})

if err != nil {
    return err
}
@@ -411,3 +410,24 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
        return y.StreamUpload(ctx, dstDir, stream, up, isFamily, overwrite)
    }
}

func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    capacityInfo, err := y.getCapacityInfo(ctx)
    if err != nil {
        return nil, err
    }
    var total, free uint64
    if y.isFamily() {
        total = capacityInfo.FamilyCapacityInfo.TotalSize
        free = capacityInfo.FamilyCapacityInfo.FreeSize
    } else {
        total = capacityInfo.CloudCapacityInfo.TotalSize
        free = capacityInfo.CloudCapacityInfo.FreeSize
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  free,
        },
    }, nil
}

@@ -409,3 +409,21 @@ func (p Params) Encode() string {
    }
    return buf.String()
}

type CapacityResp struct {
    ResCode           int    `json:"res_code"`
    ResMessage        string `json:"res_message"`
    Account           string `json:"account"`
    CloudCapacityInfo struct {
        FreeSize     uint64 `json:"freeSize"`
        MailUsedSize uint64 `json:"mail189UsedSize"`
        TotalSize    uint64 `json:"totalSize"`
        UsedSize     uint64 `json:"usedSize"`
    } `json:"cloudCapacityInfo"`
    FamilyCapacityInfo struct {
        FreeSize  uint64 `json:"freeSize"`
        TotalSize uint64 `json:"totalSize"`
        UsedSize  uint64 `json:"usedSize"`
    } `json:"familyCapacityInfo"`
    TotalSize uint64 `json:"totalSize"`
}

@@ -90,6 +90,9 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
}

func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
    if y.getTokenInfo() == nil {
        return nil, fmt.Errorf("login failed")
    }
    req := y.getClient().R().SetQueryParams(clientSuffix())

    // set params
@@ -189,6 +192,7 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
    }
    return body, nil
}

func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
    res := make([]model.Obj, 0, 100)
    for pageNum := 1; ; pageNum++ {
@@ -342,7 +346,7 @@ func (y *Cloud189PC) loginByPassword() (err error) {
    SetQueryParam("redirectURL", loginresp.ToUrl).
    Post(API_URL + "/getSessionForPC.action")
if err != nil {
    return
    return err
}

if erron.HasError() {
@@ -350,12 +354,12 @@ func (y *Cloud189PC) loginByPassword() (err error) {
}
if tokenInfo.ResCode != 0 {
    err = fmt.Errorf(tokenInfo.ResMessage)
    return
    return err
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return
return err
}

func (y *Cloud189PC) loginByQRCode() error {
@@ -447,7 +451,6 @@ func (y *Cloud189PC) genQRCode(text string) error {
// Create the HTML page
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
return fmt.Errorf("need verify: \n%s", qrPage)

}

func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
@@ -616,7 +619,7 @@ func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
if y.ref != nil {
    return y.ref.refreshTokenWithRetry(retryCount)
}

// limit the retry count to avoid infinite recursion
if retryCount >= 3 {
    if y.Addition.RefreshToken != "" {
@@ -625,7 +628,7 @@ func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
    }
    return errors.New("refresh token failed after maximum retries")
}

var erron RespErr
var tokenInfo AppSessionResp
_, err = y.client.R().
@@ -700,7 +703,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
    params.Set("familyId", y.FamilyID)
    fullUrl += "/family"
} else {
    //params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
    // params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
    fullUrl += "/person"
}

@@ -752,7 +755,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
    partSize = lastPartSize
}
partInfo := ""
var reader *stream.SectionReader
var reader io.ReadSeeker
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
    Before: func(ctx context.Context) error {
@@ -876,7 +879,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
    lastSliceSize = sliceSize
}

//step.1 compute the required info first
// step.1 compute the required info first
byteSize := sliceSize
fileMd5 := utils.MD5.NewFunc()
sliceMd5 := utils.MD5.NewFunc()
@@ -927,14 +930,14 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
if isFamily {
    fullUrl += "/family"
} else {
    //params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
    // params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
    fullUrl += "/person"
}

// try to resume progress
uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.getTokenInfo().SessionKey, fileMd5Hex)
if !ok {
    //step.2 pre-upload
    // step.2 pre-upload
    params := Params{
        "parentFolderId": dstDir.GetID(),
        "fileName":       url.QueryEscape(file.GetName()),
@@ -1163,7 +1166,6 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
        })
    }
}, &uploadInfo, isFamily)

if err != nil {
    return nil, err
}
@@ -1473,3 +1475,15 @@ func (y *Cloud189PC) getClient() *resty.Client {
    }
    return y.client
}

func (y *Cloud189PC) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
    fullUrl := API_URL + "/portal/getUserSizeInfo.action"
    var resp CapacityResp
    _, err := y.get(fullUrl, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

@@ -23,6 +23,7 @@ import (
type Alias struct {
    model.Storage
    Addition
    rootOrder   []string
    pathMap     map[string][]string
    autoFlatten bool
    oneKey      string
@@ -40,13 +41,18 @@ func (d *Alias) Init(ctx context.Context) error {
if d.Paths == "" {
    return errors.New("paths is required")
}
paths := strings.Split(d.Paths, "\n")
d.rootOrder = make([]string, 0, len(paths))
d.pathMap = make(map[string][]string)
for _, path := range strings.Split(d.Paths, "\n") {
for _, path := range paths {
    path = strings.TrimSpace(path)
    if path == "" {
        continue
    }
    k, v := getPair(path)
    if _, ok := d.pathMap[k]; !ok {
        d.rootOrder = append(d.rootOrder, k)
    }
    d.pathMap[k] = append(d.pathMap[k], v)
}
if len(d.pathMap) == 1 {
@@ -62,6 +68,7 @@ func (d *Alias) Init(ctx context.Context) error {
}

func (d *Alias) Drop(ctx context.Context) error {
    d.rootOrder = nil
    d.pathMap = nil
    return nil
}
@@ -123,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    path := dir.GetPath()
    if utils.PathEqual(path, "/") && !d.autoFlatten {
        return d.listRoot(), nil
        return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough), nil
    }
    root, sub := d.getRootAndPath(path)
    dsts, ok := d.pathMap[root]
@@ -131,27 +138,35 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
    return nil, errs.ObjectNotFound
}
var objs []model.Obj
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
for _, dst := range dsts {
    tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
    tmp, err := fs.List(ctx, stdpath.Join(dst, sub), &fs.ListArgs{
        NoLog:              true,
        Refresh:            args.Refresh,
        WithStorageDetails: args.WithStorageDetails && d.DetailsPassThrough,
    })
    if err == nil {
        tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
            thumb, ok := model.GetThumb(obj)
            objRes := model.Object{
                Name:     obj.GetName(),
                Size:     obj.GetSize(),
                Modified: obj.ModTime(),
                IsFolder: obj.IsDir(),
            }
            if !ok {
                return &objRes, nil
            if thumb, ok := model.GetThumb(obj); ok {
                return &model.ObjThumb{
                    Object: objRes,
                    Thumbnail: model.Thumbnail{
                        Thumbnail: thumb,
                    },
                }, nil
            }
            return &model.ObjThumb{
                Object: objRes,
                Thumbnail: model.Thumbnail{
                    Thumbnail: thumb,
                },
            }, nil
            if details, ok := model.GetStorageDetails(obj); ok {
                return &model.ObjStorageDetails{
                    Obj:                    &objRes,
                    StorageDetailsWithName: *details,
                }, nil
            }
            return &objRes, nil
        })
    }
    if err == nil {
@@ -250,7 +265,7 @@ func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string
    }
    return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name dirs cannot make sub-dir")
}
return err
@@ -261,14 +276,14 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
    return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name files cannot be moved")
}
if err != nil {
    return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name dirs cannot be moved to")
}
if err != nil {
@@ -296,7 +311,7 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
    }
    return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name files cannot be Rename")
}
return err
@@ -307,14 +322,14 @@ func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name files cannot be copied")
}
if err != nil {
    return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name dirs cannot be copied to")
}
if err != nil {
@@ -348,7 +363,7 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
    }
    return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name files cannot be Delete")
}
return err
@@ -392,7 +407,7 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
        return err
    }
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name dirs cannot be Put")
}
return err
@@ -409,7 +424,7 @@ func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string)
    }
    return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name files cannot offline download")
}
return err
@@ -482,14 +497,14 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
    return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name files cannot be decompressed")
}
if err != nil {
    return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
    return errors.New("same-name dirs cannot be decompressed to")
}
if err != nil {

@@ -16,6 +16,7 @@ type Addition struct {
    DownloadPartSize    int  `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
    Writable            bool `json:"writable" type:"bool" default:"false"`
    ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
    DetailsPassThrough  bool `json:"details_pass_through" type:"bool" default:"false"`
}

var config = driver.Config{

@@ -2,8 +2,11 @@ package alias

import (
    "context"
    "errors"
    stdpath "path"
    "strings"
    "sync"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -11,24 +14,61 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/server/common"
    log "github.com/sirupsen/logrus"
)

func (d *Alias) listRoot() []model.Obj {
func (d *Alias) listRoot(ctx context.Context, withDetails bool) []model.Obj {
    var objs []model.Obj
    for k := range d.pathMap {
    var wg sync.WaitGroup
    for _, k := range d.rootOrder {
        obj := model.Object{
            Name:     k,
            IsFolder: true,
            Modified: d.Modified,
        }
        idx := len(objs)
        objs = append(objs, &obj)
        v := d.pathMap[k]
        if !withDetails || len(v) != 1 {
            continue
        }
        remoteDriver, err := op.GetStorageByMountPath(v[0])
        if err != nil {
            continue
        }
        _, ok := remoteDriver.(driver.WithDetails)
        if !ok {
            continue
        }
        objs[idx] = &model.ObjStorageDetails{
            Obj: objs[idx],
            StorageDetailsWithName: model.StorageDetailsWithName{
                StorageDetails: nil,
                DriverName:     remoteDriver.Config().Name,
            },
        }
        wg.Add(1)
        go func() {
            defer wg.Done()
            c, cancel := context.WithTimeout(ctx, time.Second)
            defer cancel()
            details, e := op.GetStorageDetails(c, remoteDriver)
            if e != nil {
                if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
                    log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
                }
                return
            }
            objs[idx].(*model.ObjStorageDetails).StorageDetails = details
        }()
    }
    wg.Wait()
    return objs
}

// do others that not defined in Driver interface
func getPair(path string) (string, string) {
    //path = strings.TrimSpace(path)
    // path = strings.TrimSpace(path)
    if strings.Contains(path, ":") {
        pair := strings.SplitN(path, ":", 2)
        if !strings.Contains(pair[0], "/") {

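listRoot now fans out one goroutine per single-target mount, each with a one-second budget, writing into a slot reserved before the goroutine starts, so root order is preserved and no locking is needed. The same shape in a self-contained sketch (fetchDetails stands in for op.GetStorageDetails):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// fetchDetails stands in for op.GetStorageDetails: a remote call that may be
// slow, hence the per-mount timeout in the caller.
func fetchDetails(ctx context.Context, mount string) (string, error) {
	select {
	case <-time.After(300 * time.Millisecond): // simulated provider latency
		return "details for " + mount, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	mounts := []string{"/a", "/b", "/c"}
	results := make([]string, len(mounts)) // one slot per mount keeps the order stable
	var wg sync.WaitGroup
	for i, m := range mounts {
		wg.Add(1)
		go func(i int, m string) {
			defer wg.Done()
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			if d, err := fetchDetails(ctx, m); err == nil {
				results[i] = d // each goroutine writes only its own index
			}
		}(i, m)
	}
	wg.Wait()
	fmt.Println(results)
}
```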
@@ -45,7 +45,7 @@ func (d *AliDrive) GetAddition() driver.Additional {

func (d *AliDrive) Init(ctx context.Context) error {
    // TODO login / refresh token
    //op.MustSaveDriverStorage(d)
    // op.MustSaveDriverStorage(d)
    err := d.refreshToken()
    if err != nil {
        return err
@@ -171,7 +171,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
    Mimetype: streamer.GetMimetype(),
}
const DEFAULT int64 = 10485760
var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
count := int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))

partInfoList := make([]base.Json, 0, count)
for i := 1; i <= count; i++ {
@@ -327,6 +327,20 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
    return fmt.Errorf("%+v", resp2)
}

func (d *AliDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    res, err, _ := d.request("https://api.aliyundrive.com/adrive/v1/user/driveCapacityDetails", http.MethodPost, func(req *resty.Request) {
        req.SetContext(ctx)
    }, nil)
    if err != nil {
        return nil, err
    }
    used := utils.Json.Get(res, "drive_used_size").ToUint64()
    total := utils.Json.Get(res, "drive_total_size").ToUint64()
    return &model.StorageDetails{
        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
    }, nil
}

func (d *AliDrive) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
    var resp base.Json
    var url string

@@ -36,12 +36,14 @@ import (
    _ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud_open"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/local"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/mediafire"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/mega"
    _ "github.com/OpenListTeam/OpenList/v4/drivers/misskey"

@@ -212,7 +212,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
    lastBlockSize = sliceSize
}

//cal md5 for first 256k data
// cal md5 for first 256k data
const SliceSize int64 = 256 * utils.KB
// cal md5
blockList := make([]string, 0, count)
@@ -284,7 +284,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
log.Debugf("%+v", precreateResp)
if precreateResp.ReturnType == 2 {
    //rapid upload, since got md5 match from baidu server
    // rapid upload, since got md5 match from baidu server
    // fix the timestamps; see the **Note** in the Put method's comment for the reason
    precreateResp.File.Ctime = ctime
    precreateResp.File.Mtime = mtime
@@ -365,11 +365,11 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
}

func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    du, err := d.quota()
    du, err := d.quota(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{DiskUsage: *du}, nil
    return &model.StorageDetails{DiskUsage: du}, nil
}

var _ driver.Driver = (*BaiduNetdisk)(nil)

@@ -1,6 +1,7 @@
package baidu_netdisk

import (
    "context"
    "encoding/hex"
    "errors"
    "fmt"
@@ -11,6 +12,7 @@ import (
    "unicode"

    "github.com/OpenListTeam/OpenList/v4/drivers/base"
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
@@ -207,7 +209,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
    if err != nil {
        return nil, err
    }
    //if res.StatusCode() == 302 {
    // if res.StatusCode() == 302 {
    u = res.Header().Get("location")
    //}

@@ -381,16 +383,15 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
    return maxSliceSize
}

func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
    var resp QuotaResp
    _, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
    _, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
        return model.DiskUsage{}, err
    }
    return &model.DiskUsage{
        TotalSpace: resp.Total,
        FreeSpace:  resp.Total - resp.Used,
    }, nil
    return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
}

// func encodeURIComponent(str string) string {

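The other change repeated across drivers in this commit is threading ctx into every HTTP helper via resty's SetContext, so a cancelled incoming request also cancels the upstream call. The shape, reduced to a sketch (the quota URL appears in the diff above; quotaResp is an abbreviated local stand-in, and the unauthenticated call will fail — the point is the wiring):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-resty/resty/v2"
)

type quotaResp struct {
	Total uint64 `json:"total"`
	Used  uint64 `json:"used"`
}

// fetchQuota mirrors the quota(ctx) signature change: the context rides on
// the request, so cancellation and timeouts propagate to the HTTP layer.
func fetchQuota(ctx context.Context) (quotaResp, error) {
	var out quotaResp
	_, err := resty.New().R().
		SetContext(ctx).
		SetResult(&out).
		Get("https://pan.baidu.com/api/quota")
	return out, err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	q, err := fetchQuota(ctx)
	fmt.Println(q, err)
}
```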
@@ -17,9 +17,11 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/sign"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    "github.com/OpenListTeam/OpenList/v4/server/common"
    "github.com/avast/retry-go"
)

type Chunk struct {
@@ -39,6 +41,9 @@ func (d *Chunk) Init(ctx context.Context) error {
    if d.PartSize <= 0 {
        return errors.New("part size must be positive")
    }
    if len(d.ChunkPrefix) <= 0 {
        return errors.New("chunk folder prefix must not be empty")
    }
    d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
    return nil
}
@@ -72,13 +77,13 @@ func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
}

remoteActualDir, name := stdpath.Split(remoteActualPath)
chunkName := "[openlist_chunk]" + name
chunkName := d.ChunkPrefix + name
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
if err != nil {
    return nil, err
}
var totalSize int64 = 0
// chunk 0 must exist
// chunk 0 defaults to -1 to support empty files
chunkSizes := []int64{-1}
h := make(map[*utils.HashType]string)
var first model.Obj
@@ -115,21 +120,6 @@ func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
        chunkSizes[idx] = o.GetSize()
    }
}
// check that chunk 0 is not -1, to support empty files
// if there is more than one chunk, the last one cannot be 0
// so only check the middle chunks for 0
for i, l := 0, len(chunkSizes)-2; ; i++ {
    if i == 0 {
        if chunkSizes[i] == -1 {
            return nil, fmt.Errorf("chunk part[%d] are missing", i)
        }
    } else if chunkSizes[i] == 0 {
        return nil, fmt.Errorf("chunk part[%d] are missing", i)
    }
    if i >= l {
        break
    }
}
reqDir, _ := stdpath.Split(path)
objRes := chunkObject{
    Object: model.Object{
@@ -161,67 +151,76 @@ func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
    return nil, err
}
result := make([]model.Obj, 0, len(remoteObjs))
listG, listCtx := errgroup.NewGroupWithContext(ctx, d.NumListWorkers, retry.Attempts(3))
for _, obj := range remoteObjs {
    if utils.IsCanceled(listCtx) {
        break
    }
    rawName := obj.GetName()
    if obj.IsDir() {
        if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
            chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
                ReqPath: stdpath.Join(args.ReqPath, rawName),
                Refresh: args.Refresh,
            })
            if err != nil {
                return nil, err
            }
            totalSize := int64(0)
            h := make(map[*utils.HashType]string)
            first := obj
            for _, o := range chunkObjs {
                if o.IsDir() {
                    continue
        if name, ok := strings.CutPrefix(rawName, d.ChunkPrefix); ok {
            resultIdx := len(result)
            result = append(result, nil)
            listG.Go(func(ctx context.Context) error {
                chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
                    ReqPath: stdpath.Join(args.ReqPath, rawName),
                    Refresh: args.Refresh,
                })
                if err != nil {
                    return err
                }
                if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
                    hn, value, ok := strings.Cut(after, "_")
                    if ok {
                        ht, ok := utils.GetHashByName(hn)
                        if ok {
                            h[ht] = value
                        }
                totalSize := int64(0)
                h := make(map[*utils.HashType]string)
                first := obj
                for _, o := range chunkObjs {
                    if o.IsDir() {
                        continue
                    }
                    if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
                        hn, value, ok := strings.Cut(after, "_")
                        if ok {
                            ht, ok := utils.GetHashByName(hn
|
||||
if ok {
|
||||
h[ht] = value
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if idx == 0 {
|
||||
first = o
|
||||
}
|
||||
totalSize += o.GetSize()
|
||||
}
|
||||
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||
if err != nil {
|
||||
continue
|
||||
objRes := model.Object{
|
||||
Name: name,
|
||||
Size: totalSize,
|
||||
Modified: first.ModTime(),
|
||||
Ctime: first.CreateTime(),
|
||||
}
|
||||
if idx == 0 {
|
||||
first = o
|
||||
if len(h) > 0 {
|
||||
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||
}
|
||||
totalSize += o.GetSize()
|
||||
}
|
||||
objRes := model.Object{
|
||||
Name: name,
|
||||
Size: totalSize,
|
||||
Modified: first.ModTime(),
|
||||
Ctime: first.CreateTime(),
|
||||
}
|
||||
if len(h) > 0 {
|
||||
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||
}
|
||||
if !d.Thumbnail {
|
||||
result = append(result, &objRes)
|
||||
} else {
|
||||
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
|
||||
thumb := fmt.Sprintf("%s/d%s?sign=%s",
|
||||
common.GetApiUrl(ctx),
|
||||
utils.EncodePath(thumbPath, true),
|
||||
sign.Sign(thumbPath))
|
||||
result = append(result, &model.ObjThumb{
|
||||
Object: objRes,
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumb,
|
||||
},
|
||||
})
|
||||
}
|
||||
if !d.Thumbnail {
|
||||
result[resultIdx] = &objRes
|
||||
} else {
|
||||
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
|
||||
thumb := fmt.Sprintf("%s/d%s?sign=%s",
|
||||
common.GetApiUrl(ctx),
|
||||
utils.EncodePath(thumbPath, true),
|
||||
sign.Sign(thumbPath))
|
||||
result[resultIdx] = &model.ObjThumb{
|
||||
Object: objRes,
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumb,
|
||||
},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -248,6 +247,9 @@ func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
||||
})
|
||||
}
|
||||
}
|
||||
if err = listG.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
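
The rewritten List preallocates a nil slot per chunk directory and lets each worker fill result[resultIdx], which keeps the output order stable no matter which goroutine finishes first. A stripped-down sketch of that pattern with golang.org/x/sync/errgroup (the OpenList errgroup wrapper is assumed to behave similarly; requires Go 1.22+ for per-iteration loop variables):

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

// expand resolves one slot per input concurrently; writing by index instead
// of appending preserves the input order under concurrency.
func expand(ctx context.Context, names []string) ([]string, error) {
    g, ctx := errgroup.WithContext(ctx)
    g.SetLimit(5) // cap the workers, like NumListWorkers
    out := make([]string, len(names))
    for i, n := range names {
        g.Go(func() error {
            if err := ctx.Err(); err != nil { // stop early if a sibling failed
                return err
            }
            out[i] = "resolved:" + n // stand-in for the per-directory op.List call
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        return nil, err
    }
    return out, nil
}

func main() {
    res, _ := expand(context.Background(), []string{"a", "b", "c"})
    fmt.Println(res) // [resolved:a resolved:b resolved:c]
}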

@@ -267,6 +269,21 @@ func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
        resultLink.SyncClosers = utils.NewSyncClosers(l)
        return &resultLink, nil
    }
    // check that chunk 0 is not -1, so that empty files are supported
    // if there is more than one chunk, the last chunk can never be 0
    // only check the middle chunks for a size of 0
    for i, l := 0, len(chunkFile.chunkSizes)-2; ; i++ {
        if i == 0 {
            if chunkFile.chunkSizes[i] == -1 {
                return nil, fmt.Errorf("chunk part[%d] are missing", i)
            }
        } else if chunkFile.chunkSizes[i] == 0 {
            return nil, fmt.Errorf("chunk part[%d] are missing", i)
        }
        if i >= l {
            break
        }
    }
    fileSize := chunkFile.GetSize()
    mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        start := httpRange.Start
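
The validation loop reads tersely because one loop enforces three rules: part 0 must exist (its sentinel is -1), middle parts must be non-empty, and the final part is exempt. The same logic as a standalone sketch with a few worked cases:

package main

import "fmt"

// validateChunkSizes mirrors the loop above: sizes[0] is seeded with -1 and
// must have been overwritten, and every part except the last must be
// non-empty; a single part of size 0 is allowed, which is how empty files
// pass the check.
func validateChunkSizes(sizes []int64) error {
    for i, l := 0, len(sizes)-2; ; i++ {
        if i == 0 {
            if sizes[i] == -1 {
                return fmt.Errorf("chunk part[%d] is missing", i)
            }
        } else if sizes[i] == 0 {
            return fmt.Errorf("chunk part[%d] is missing", i)
        }
        if i >= l {
            break
        }
    }
    return nil
}

func main() {
    fmt.Println(validateChunkSizes([]int64{0}))       // nil: empty file is allowed
    fmt.Println(validateChunkSizes([]int64{-1}))      // error: part 0 missing
    fmt.Println(validateChunkSizes([]int64{4, 0, 2})) // error: middle part missing
}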

@@ -383,7 +400,7 @@ func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {

func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    if _, ok := srcObj.(*chunkObject); ok {
        newName = "[openlist_chunk]" + newName
        newName = d.ChunkPrefix + newName
    }
    return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
}
@@ -404,14 +421,14 @@ func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStream
    if err != nil {
        return err
    }
    if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
    if (d.Thumbnail && dstDir.GetName() == ".thumbnails") || (d.ChunkLargeFileOnly && file.GetSize() <= d.PartSize) {
        return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
    }
    upReader := &driver.ReaderUpdatingProgress{
        Reader:         file,
        UpdateProgress: up,
    }
    dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
    dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), d.ChunkPrefix+file.GetName())
    if d.StoreHash {
        for ht, value := range file.GetHash().All() {
            _ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
@@ -472,11 +489,7 @@ func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if err != nil {
        return nil, errs.NotImplement
    }
    wd, ok := remoteStorage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    remoteDetails, err := wd.GetDetails(ctx)
    remoteDetails, err := op.GetStorageDetails(ctx, remoteStorage)
    if err != nil {
        return nil, err
    }
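
op.GetStorageDetails is new in this commit and replaces the WithDetails type-assertion boilerplate that recurs in the Chunk and Crypt drivers. Its body is not in this diff; a plausible sketch reconstructed from the inline code it replaces (the errs.NotImplement fallback is carried over from that code):

package op

import (
    "context"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
)

// GetStorageDetails asks a storage driver for its details if it implements
// driver.WithDetails, mirroring the inline checks this commit removes.
func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) {
    wd, ok := storage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    return wd.GetDetails(ctx)
}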

@@ -6,10 +6,13 @@ import (
)

type Addition struct {
    RemotePath string `json:"remote_path" required:"true"`
    PartSize   int64  `json:"part_size" required:"true" type:"number" help:"bytes"`
    CustomExt  string `json:"custom_ext" type:"string"`
    StoreHash  bool   `json:"store_hash" type:"bool" default:"true"`
    RemotePath         string `json:"remote_path" required:"true"`
    PartSize           int64  `json:"part_size" required:"true" type:"number" help:"bytes"`
    ChunkLargeFileOnly bool   `json:"chunk_large_file_only" default:"false" help:"chunk only if file size > part_size"`
    ChunkPrefix        string `json:"chunk_prefix" type:"string" default:"[openlist_chunk]" help:"the prefix of chunk folder"`
    CustomExt          string `json:"custom_ext" type:"string"`
    StoreHash          bool   `json:"store_hash" type:"bool" default:"true"`
    NumListWorkers     int    `json:"num_list_workers" required:"true" type:"number" default:"5"`

    Thumbnail  bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
    ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
@@ -26,6 +29,11 @@ var config = driver.Config{

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Chunk{}
        return &Chunk{
            Addition: Addition{
                ChunkPrefix:    "[openlist_chunk]",
                NumListWorkers: 5,
            },
        }
    })
}

@@ -342,15 +342,14 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    // TODO return storage details (total space, free space, etc.)
    var r CapacityResp
    err := d.request(http.MethodGet, "/user/capacity", nil, &r)
    err := d.request(http.MethodGet, "/user/capacity", func(req *resty.Request) {
        req.SetContext(ctx)
    }, &r)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: r.Total,
            FreeSpace:  r.Total - r.Used,
        },
        DiskUsage: driver.DiskUsageFromUsedAndTotal(r.Used, r.Total),
    }, nil
}

@@ -44,7 +44,7 @@ func (d *Crypt) GetAddition() driver.Additional {
}

func (d *Crypt) Init(ctx context.Context) error {
    //obfuscate credentials if it's updated or just created
    // obfuscate credentials if it's updated or just created
    err := d.updateObfusParm(&d.Password)
    if err != nil {
        return fmt.Errorf("failed to obfuscate password: %w", err)
@@ -63,7 +63,7 @@ func (d *Crypt) Init(ctx context.Context) error {

    op.MustSaveDriverStorage(d)

    //need remote storage exist
    // need remote storage exist
    storage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
    if err != nil {
        return fmt.Errorf("can't find remote storage: %w", err)
@@ -109,8 +109,8 @@ func (d *Crypt) Drop(ctx context.Context) error {

func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    path := dir.GetPath()
    //return d.list(ctx, d.RemotePath, path)
    //remoteFull
    // return d.list(ctx, d.RemotePath, path)
    // remoteFull

    objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: args.Refresh})
    // the obj must implement the model.SetPath interface
@@ -124,7 +124,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
        if obj.IsDir() {
            name, err := d.cipher.DecryptDirName(obj.GetName())
            if err != nil {
                //filter illegal files
                // filter illegal files
                continue
            }
            if !d.ShowHidden && strings.HasPrefix(name, ".") {
@@ -143,12 +143,12 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
            thumb, ok := model.GetThumb(obj)
            size, err := d.cipher.DecryptedSize(obj.GetSize())
            if err != nil {
                //filter illegal files
                // filter illegal files
                continue
            }
            name, err := d.cipher.DecryptFileName(obj.GetName())
            if err != nil {
                //filter illegal files
                // filter illegal files
                continue
            }
            if !d.ShowHidden && strings.HasPrefix(name, ".") {
@@ -202,7 +202,7 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
    remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
    if err != nil {
        if errs.IsObjectNotFound(err) && secondTry {
            //try the opposite
            // try the opposite
            remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder)
            remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
            if err2 != nil {
@@ -240,7 +240,7 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
        IsFolder: remoteObj.IsDir(),
    }
    return obj, nil
    //return nil, errs.ObjectNotFound
    // return nil, errs.ObjectNotFound
}

// https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37
@@ -366,7 +366,6 @@ func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
        return fmt.Errorf("failed to convert path to remote path: %w", err)
    }
    return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)

}

func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
@@ -412,11 +411,7 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
}

func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    wd, ok := d.remoteStorage.(driver.WithDetails)
    if !ok {
        return nil, errs.NotImplement
    }
    remoteDetails, err := wd.GetDetails(ctx)
    remoteDetails, err := op.GetStorageDetails(ctx, d.remoteStorage)
    if err != nil {
        return nil, err
    }

@@ -577,7 +577,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
    if partIndex == totalParts-1 {
        size = fileSize - offset
    }
    var reader *stream.SectionReader
    var reader io.ReadSeeker
    var rateLimitedRd io.Reader
    crc32Value := ""
    threadG.GoWithLifecycle(errgroup.Lifecycle{

@@ -167,4 +167,30 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
    return err
}

func (d *GoogleDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if d.DisableDiskUsage {
        return nil, errs.NotImplement
    }
    about, err := d.getAbout(ctx)
    if err != nil {
        return nil, err
    }
    var total, used uint64
    if about.StorageQuota.Limit == nil {
        total = 0
    } else {
        total, err = strconv.ParseUint(*about.StorageQuota.Limit, 10, 64)
        if err != nil {
            return nil, err
        }
    }
    used, err = strconv.ParseUint(about.StorageQuota.Usage, 10, 64)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
    }, nil
}

var _ driver.Driver = (*GoogleDrive)(nil)
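
GetDetails maps a nil storageQuota.limit to total = 0 because the Drive About API omits the limit field for accounts with unlimited storage, and both quota values arrive as decimal byte-count strings. A self-contained sketch of the same parsing (values in main are illustrative):

package main

import (
    "fmt"
    "strconv"
)

// quotaBytes parses the Drive About quota fields the way the driver does:
// a nil limit (unlimited account) maps to total = 0, and both fields are
// decimal strings of bytes.
func quotaBytes(limit *string, usage string) (total, used uint64, err error) {
    if limit != nil {
        if total, err = strconv.ParseUint(*limit, 10, 64); err != nil {
            return 0, 0, err
        }
    }
    if used, err = strconv.ParseUint(usage, 10, 64); err != nil {
        return 0, 0, err
    }
    return total, used, nil
}

func main() {
    lim := "16106127360" // 15 GiB, a typical free-tier limit
    total, used, _ := quotaBytes(&lim, "1073741824")
    fmt.Println(total, used) // 16106127360 1073741824
    total, used, _ = quotaBytes(nil, "1073741824")
    fmt.Println(total, used) // 0 1073741824 (unlimited)
}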

@@ -7,14 +7,15 @@ import (

type Addition struct {
    driver.RootID
    RefreshToken   string `json:"refresh_token" required:"true"`
    OrderBy        string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime"`
    OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
    UseOnlineAPI   bool   `json:"use_online_api" default:"true"`
    APIAddress     string `json:"api_url_address" default:"https://api.oplist.org/googleui/renewapi"`
    ClientID       string `json:"client_id"`
    ClientSecret   string `json:"client_secret"`
    ChunkSize      int64  `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
    RefreshToken     string `json:"refresh_token" required:"true"`
    OrderBy          string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime"`
    OrderDirection   string `json:"order_direction" type:"select" options:"asc,desc"`
    UseOnlineAPI     bool   `json:"use_online_api" default:"true"`
    APIAddress       string `json:"api_url_address" default:"https://api.oplist.org/googleui/renewapi"`
    ClientID         string `json:"client_id"`
    ClientSecret     string `json:"client_secret"`
    ChunkSize        int64  `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
    DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
}

var config = driver.Config{

@@ -78,3 +78,12 @@ type Error struct {
        Message string `json:"message"`
    } `json:"error"`
}

type AboutResp struct {
    StorageQuota struct {
        Limit             *string `json:"limit"`
        Usage             string  `json:"usage"`
        UsageInDrive      string  `json:"usageInDrive"`
        UsageInDriveTrash string  `json:"usageInDriveTrash"`
    }
}

@@ -28,16 +28,16 @@ import (
// do others that not defined in Driver interface

type googleDriveServiceAccount struct {
    //Type         string `json:"type"`
    //ProjectID    string `json:"project_id"`
    //PrivateKeyID string `json:"private_key_id"`
    // Type         string `json:"type"`
    // ProjectID    string `json:"project_id"`
    // PrivateKeyID string `json:"private_key_id"`
    PrivateKey  string `json:"private_key"`
    ClientEMail string `json:"client_email"`
    //ClientID string `json:"client_id"`
    //AuthURI  string `json:"auth_uri"`
    // ClientID string `json:"client_id"`
    // AuthURI  string `json:"auth_uri"`
    TokenURI string `json:"token_uri"`
    //AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url"`
    //ClientX509CertURL       string `json:"client_x509_cert_url"`
    // AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url"`
    // ClientX509CertURL       string `json:"client_x509_cert_url"`
}

func (d *GoogleDrive) refreshToken() error {
@@ -255,7 +255,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
}

func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
    var defaultChunkSize = d.ChunkSize * 1024 * 1024
    defaultChunkSize := d.ChunkSize * 1024 * 1024
    ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
    if err != nil {
        return err
@@ -315,3 +315,18 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
    }
    return nil
}

func (d *GoogleDrive) getAbout(ctx context.Context) (*AboutResp, error) {
    query := map[string]string{
        "fields": "storageQuota",
    }
    var resp AboutResp
    _, err := d.request("https://www.googleapis.com/drive/v3/about", http.MethodGet, func(req *resty.Request) {
        req.SetQueryParams(query)
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

drivers/halalcloud_open/common.go (new file, 111 lines)
@@ -0,0 +1,111 @@
package halalcloudopen

import (
    "sync"
    "time"

    sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
)

var (
    slicePostErrorRetryInterval = time.Second * 120
    retryTimes                  = 5
)

type halalCommon struct {
    // *AuthService // login info
    UserInfo         *sdkUser.User // user info
    refreshTokenFunc func(token string) error
    // serv *AuthService
    configs sync.Map
}

func (m *halalCommon) GetAccessToken() (string, error) {
    value, exists := m.configs.Load("access_token")
    if !exists {
        return "", nil // return an empty string if the key does not exist
    }
    return value.(string), nil // return the config value
}

// GetRefreshToken implements ConfigStore.
func (m *halalCommon) GetRefreshToken() (string, error) {
    value, exists := m.configs.Load("refresh_token")
    if !exists {
        return "", nil // return an empty string if the key does not exist
    }
    return value.(string), nil // return the config value
}

// SetAccessToken implements ConfigStore.
func (m *halalCommon) SetAccessToken(token string) error {
    m.configs.Store("access_token", token)
    return nil
}

// SetRefreshToken implements ConfigStore.
func (m *halalCommon) SetRefreshToken(token string) error {
    m.configs.Store("refresh_token", token)
    if m.refreshTokenFunc != nil {
        return m.refreshTokenFunc(token)
    }
    return nil
}

// SetToken implements ConfigStore.
func (m *halalCommon) SetToken(accessToken string, refreshToken string, expiresIn int64) error {
    m.configs.Store("access_token", accessToken)
    m.configs.Store("refresh_token", refreshToken)
    m.configs.Store("expires_in", expiresIn)
    if m.refreshTokenFunc != nil {
        return m.refreshTokenFunc(refreshToken)
    }
    return nil
}

// ClearConfigs implements ConfigStore.
func (m *halalCommon) ClearConfigs() error {
    m.configs = sync.Map{} // reset the map
    return nil
}

// DeleteConfig implements ConfigStore.
func (m *halalCommon) DeleteConfig(key string) error {
    _, exists := m.configs.Load(key)
    if !exists {
        return nil // nothing to do if the key does not exist
    }
    m.configs.Delete(key) // delete the given config entry
    return nil
}

// GetConfig implements ConfigStore.
func (m *halalCommon) GetConfig(key string) (string, error) {
    value, exists := m.configs.Load(key)
    if !exists {
        return "", nil // return an empty string if the key does not exist
    }
    return value.(string), nil // return the config value
}

// ListConfigs implements ConfigStore.
func (m *halalCommon) ListConfigs() (map[string]string, error) {
    configs := make(map[string]string)
    m.configs.Range(func(key, value interface{}) bool {
        configs[key.(string)] = value.(string) // copy each entry into the map
        return true                            // keep iterating
    })
    return configs, nil // return all config entries
}

// SetConfig implements ConfigStore.
func (m *halalCommon) SetConfig(key string, value string) error {
    m.configs.Store(key, value) // Store sets or updates the entry
    return nil                  // nil on success
}

func NewHalalCommon() *halalCommon {
    return &halalCommon{
        configs: sync.Map{},
    }
}
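
A small usage sketch of the store above. One caveat worth flagging: SetToken stores expires_in as an int64, so the unchecked value.(string) assertion in ListConfigs would panic once SetToken has run; the sketch therefore reads the raw map with the comma-ok form instead.

package halalcloudopen

import "fmt"

// exampleConfigStore exercises the sync.Map-backed store; the persistence
// callback fires whenever the refresh token changes.
func exampleConfigStore() {
    c := NewHalalCommon()
    c.refreshTokenFunc = func(token string) error {
        fmt.Println("persist:", token) // stand-in for op.MustSaveDriverStorage
        return nil
    }
    _ = c.SetToken("at-123", "rt-456", 3600)
    at, _ := c.GetAccessToken()
    fmt.Println("access:", at)
    // Read expires_in with a checked assertion, since it is stored as int64.
    if v, ok := c.configs.Load("expires_in"); ok {
        if n, ok := v.(int64); ok {
            fmt.Println("expires in:", n)
        }
    }
}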
drivers/halalcloud_open/driver.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package halalcloudopen

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    sdkClient "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
    sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

type HalalCloudOpen struct {
    *halalCommon
    model.Storage
    Addition
    sdkClient          *sdkClient.Client
    sdkUserFileService *sdkUserFile.UserFileService
    sdkUserService     *sdkUser.UserService
    uploadThread       int
}

func (d *HalalCloudOpen) Config() driver.Config {
    return config
}

func (d *HalalCloudOpen) GetAddition() driver.Additional {
    return &d.Addition
}

var _ driver.Driver = (*HalalCloudOpen)(nil)

drivers/halalcloud_open/driver_curd_impl.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package halalcloudopen

import (
    "context"
    "strconv"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
    sdkModel "github.com/halalcloud/golang-sdk-lite/halalcloud/model"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

func (d *HalalCloudOpen) getFiles(ctx context.Context, dir model.Obj) ([]model.Obj, error) {

    files := make([]model.Obj, 0)
    limit := int64(100)
    token := ""

    for {
        result, err := d.sdkUserFileService.List(ctx, &sdkUserFile.FileListRequest{
            Parent: &sdkUserFile.File{Path: dir.GetPath()},
            ListInfo: &sdkModel.ScanListRequest{
                Limit: strconv.FormatInt(limit, 10),
                Token: token,
            },
        })
        if err != nil {
            return nil, err
        }

        for i := 0; len(result.Files) > i; i++ {
            files = append(files, NewObjFile(result.Files[i]))
        }

        if result.ListInfo == nil || result.ListInfo.Token == "" {
            break
        }
        token = result.ListInfo.Token

    }
    return files, nil
}
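
getFiles follows the SDK's cursor convention: request with a token, append the batch, and stop when the response carries no continuation token. The same loop shape in isolation (the page type and fetch callback are illustrative stand-ins for the SDK response):

package main

import "fmt"

// page stands in for the SDK's FileListResponse: one batch of items plus the
// cursor for the next call ("" when the listing is exhausted).
type page struct {
    items []string
    next  string
}

// listAll drains a cursor-paginated API: start with an empty token, append
// each batch, stop when no token comes back.
func listAll(fetch func(token string) page) []string {
    var all []string
    token := ""
    for {
        p := fetch(token)
        all = append(all, p.items...)
        if p.next == "" {
            break
        }
        token = p.next
    }
    return all
}

func main() {
    pages := map[string]page{
        "":   {items: []string{"a", "b"}, next: "t1"},
        "t1": {items: []string{"c"}, next: ""},
    }
    fmt.Println(listAll(func(t string) page { return pages[t] })) // [a b c]
}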

func (d *HalalCloudOpen) makeDir(ctx context.Context, dir model.Obj, name string) (model.Obj, error) {
    _, err := d.sdkUserFileService.Create(ctx, &sdkUserFile.File{
        Path: dir.GetPath(),
        Name: name,
    })
    return nil, err
}

func (d *HalalCloudOpen) move(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
    oldDir := obj.GetPath()
    newDir := dir.GetPath()
    _, err := d.sdkUserFileService.Move(ctx, &sdkUserFile.BatchOperationRequest{
        Source: []*sdkUserFile.File{
            {
                Path: oldDir,
            },
        },
        Dest: &sdkUserFile.File{
            Path: newDir,
        },
    })
    return nil, err
}

func (d *HalalCloudOpen) rename(ctx context.Context, obj model.Obj, name string) (model.Obj, error) {

    _, err := d.sdkUserFileService.Rename(ctx, &sdkUserFile.File{
        Path: obj.GetPath(),
        Name: name,
    })
    return nil, err
}

func (d *HalalCloudOpen) copy(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
    id := obj.GetID()
    sourcePath := obj.GetPath()
    if len(id) > 0 {
        sourcePath = ""
    }

    destID := dir.GetID()
    destPath := dir.GetPath()
    if len(destID) > 0 {
        destPath = ""
    }
    dest := &sdkUserFile.File{
        Path:     destPath,
        Identity: destID,
    }
    _, err := d.sdkUserFileService.Copy(ctx, &sdkUserFile.BatchOperationRequest{
        Source: []*sdkUserFile.File{
            {
                Path:     sourcePath,
                Identity: id,
            },
        },
        Dest: dest,
    })
    return nil, err
}

func (d *HalalCloudOpen) remove(ctx context.Context, obj model.Obj) error {
    id := obj.GetID()
    _, err := d.sdkUserFileService.Delete(ctx, &sdkUserFile.BatchOperationRequest{
        Source: []*sdkUserFile.File{
            {
                Identity: id,
                Path:     obj.GetPath(),
            },
        },
    })
    return err
}

func (d *HalalCloudOpen) details(ctx context.Context) (*model.StorageDetails, error) {
    ret, err := d.sdkUserService.GetStatisticsAndQuota(ctx)
    if err != nil {
        return nil, err
    }
    total := uint64(ret.DiskStatisticsQuota.BytesQuota)

    free := uint64(ret.DiskStatisticsQuota.BytesFree)
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: total,
            FreeSpace:  free,
        },
    }, nil
}
drivers/halalcloud_open/driver_get_link.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package halalcloudopen

import (
    "context"
    "crypto/sha1"
    "io"
    "strconv"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
    "github.com/rclone/rclone/lib/readers"
)

func (d *HalalCloudOpen) getLink(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    if args.Redirect {
        // return nil, model.ErrUnsupported
        fid := file.GetID()
        fpath := file.GetPath()
        if fid != "" {
            fpath = ""
        }
        fi, err := d.sdkUserFileService.GetDirectDownloadAddress(ctx, &sdkUserFile.DirectDownloadRequest{
            Identity: fid,
            Path:     fpath,
        })
        if err != nil {
            return nil, err
        }
        expireAt := fi.ExpireAt
        duration := time.Until(time.UnixMilli(expireAt))
        return &model.Link{
            URL:        fi.DownloadAddress,
            Expiration: &duration,
        }, nil
    }
    result, err := d.sdkUserFileService.ParseFileSlice(ctx, &sdkUserFile.File{
        Identity: file.GetID(),
        Path:     file.GetPath(),
    })
    if err != nil {
        return nil, err
    }
    fileAddrs := []*sdkUserFile.SliceDownloadInfo{}
    var addressDuration int64

    nodesNumber := len(result.RawNodes)
    nodesIndex := nodesNumber - 1
    startIndex, endIndex := 0, nodesIndex
    for nodesIndex >= 0 {
        if nodesIndex >= 200 {
            endIndex = 200
        } else {
            endIndex = nodesNumber
        }
        for ; endIndex <= nodesNumber; endIndex += 200 {
            if endIndex == 0 {
                endIndex = 1
            }
            sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
                Identity: result.RawNodes[startIndex:endIndex],
                Version:  1,
            })
            if err != nil {
                return nil, err
            }
            addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
            fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
            startIndex = endIndex
            nodesIndex -= 200
        }

    }

    size, _ := strconv.ParseInt(result.FileSize, 10, 64)
    chunks := getChunkSizes(result.Sizes)
    resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        length := httpRange.Length
        if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
            length = size - httpRange.Start
        }
        oo := &openObject{
            ctx:     ctx,
            d:       fileAddrs,
            chunk:   []byte{},
            chunks:  chunks,
            skip:    httpRange.Start,
            sha:     result.Sha1,
            shaTemp: sha1.New(),
        }

        return readers.NewLimitedReadCloser(oo, length), nil
    }

    var duration time.Duration
    if addressDuration != 0 {
        duration = time.Until(time.UnixMilli(addressDuration))
    } else {
        duration = time.Until(time.Now().Add(time.Hour))
    }

    return &model.Link{
        RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
        Expiration:  &duration,
    }, nil
}
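
The slice-address loop groups result.RawNodes into GetSliceDownloadAddress requests of at most 200 identities; the nested index bookkeeping is hard to follow, so here is the batching idea in isolation (a simplification for illustration, not a drop-in replacement):

package main

import "fmt"

// batches splits ids into consecutive windows of at most n elements, the
// grouping the 200-identity slice-address requests need.
func batches(ids []string, n int) [][]string {
    var out [][]string
    for start := 0; start < len(ids); start += n {
        end := start + n
        if end > len(ids) {
            end = len(ids)
        }
        out = append(out, ids[start:end])
    }
    return out
}

func main() {
    ids := make([]string, 450)
    for i := range ids {
        ids[i] = fmt.Sprintf("node-%d", i)
    }
    for _, b := range batches(ids, 200) {
        fmt.Println(len(b)) // 200, 200, 50
    }
}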

drivers/halalcloud_open/driver_init.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package halalcloudopen

import (
    "context"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
    sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

func (d *HalalCloudOpen) Init(ctx context.Context) error {
    if d.uploadThread < 1 || d.uploadThread > 32 {
        d.uploadThread, d.UploadThread = 3, 3
    }
    if d.halalCommon == nil {
        d.halalCommon = &halalCommon{
            UserInfo: &sdkUser.User{},
            refreshTokenFunc: func(token string) error {
                d.Addition.RefreshToken = token
                op.MustSaveDriverStorage(d)
                return nil
            },
        }
    }
    if d.Addition.RefreshToken != "" {
        d.halalCommon.SetRefreshToken(d.Addition.RefreshToken)
    }
    timeout := d.Addition.TimeOut
    if timeout <= 0 {
        timeout = 60
    }
    host := d.Addition.Host
    if host == "" {
        host = "openapi.2dland.cn"
    }

    client := apiclient.NewClient(nil, host, d.Addition.ClientID, d.Addition.ClientSecret, d.halalCommon, apiclient.WithTimeout(time.Second*time.Duration(timeout)))
    d.sdkClient = client
    d.sdkUserFileService = sdkUserFile.NewUserFileService(client)
    d.sdkUserService = sdkUser.NewUserService(client)
    userInfo, err := d.sdkUserService.Get(ctx, &sdkUser.User{})
    if err != nil {
        return err
    }
    d.halalCommon.UserInfo = userInfo
    // fetching the user info already validates the RefreshToken, so no second check is needed
    return nil
}
drivers/halalcloud_open/driver_interface.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package halalcloudopen

import (
    "context"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
)

func (d *HalalCloudOpen) Drop(ctx context.Context) error {
    return nil
}

func (d *HalalCloudOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    return d.getFiles(ctx, dir)
}

func (d *HalalCloudOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    return d.getLink(ctx, file, args)
}

func (d *HalalCloudOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    return d.makeDir(ctx, parentDir, dirName)
}

func (d *HalalCloudOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    return d.move(ctx, srcObj, dstDir)
}

func (d *HalalCloudOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    return d.rename(ctx, srcObj, newName)
}

func (d *HalalCloudOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    return d.copy(ctx, srcObj, dstDir)
}

func (d *HalalCloudOpen) Remove(ctx context.Context, obj model.Obj) error {
    return d.remove(ctx, obj)
}

func (d *HalalCloudOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    return d.put(ctx, dstDir, stream, up)
}

func (d *HalalCloudOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    return d.details(ctx)
}
drivers/halalcloud_open/halalcloud_upload.go (new file, 258 lines)
@@ -0,0 +1,258 @@
package halalcloudopen

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "strings"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
    "github.com/ipfs/go-cid"
)

func (d *HalalCloudOpen) put(ctx context.Context, dstDir model.Obj, fileStream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {

    newPath := path.Join(dstDir.GetPath(), fileStream.GetName())

    uploadTask, err := d.sdkUserFileService.CreateUploadTask(ctx, &sdkUserFile.File{
        Path: newPath,
        Size: fileStream.GetSize(),
    })
    if err != nil {
        return nil, err
    }

    if uploadTask.Created {
        return nil, nil
    }

    slicesList := make([]string, 0)
    codec := uint64(0x55)
    if uploadTask.BlockCodec > 0 {
        codec = uint64(uploadTask.BlockCodec)
    }
    blockHashType := uploadTask.BlockHashType
    mhType := uint64(0x12)
    if blockHashType > 0 {
        mhType = uint64(blockHashType)
    }
    prefix := cid.Prefix{
        Codec:    codec,
        MhLength: -1,
        MhType:   mhType,
        Version:  1,
    }
    blockSize := uploadTask.BlockSize
    useSingleUpload := true
    //
    if fileStream.GetSize() <= int64(blockSize) || d.uploadThread <= 1 {
        useSingleUpload = true
    }
    // Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.
    // read file
    if useSingleUpload {
        bufferSize := int(blockSize)
        buffer := make([]byte, bufferSize)
        reader := driver.NewLimitedUploadStream(ctx, fileStream)
        teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
        // fileStream.Seek(0, os.SEEK_SET)
        for {
            n, err := teeReader.Read(buffer)
            if n > 0 {
                data := buffer[:n]
                uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
                if err != nil {
                    return nil, err
                }
                slicesList = append(slicesList, uploadCid.String())
            }
            if err == io.EOF || n == 0 {
                break
            }
        }
    } else {
        // TODO: implement multipart upload, currently using single-threaded upload to ensure safety.
        bufferSize := int(blockSize)
        buffer := make([]byte, bufferSize)
        reader := driver.NewLimitedUploadStream(ctx, fileStream)
        teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
        for {
            n, err := teeReader.Read(buffer)
            if n > 0 {
                data := buffer[:n]
                uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
                if err != nil {
                    return nil, err
                }
                slicesList = append(slicesList, uploadCid.String())
            }
            if err == io.EOF || n == 0 {
                break
            }
        }
    }
    newFile, err := makeFile(ctx, slicesList, uploadTask.Task, uploadTask.UploadAddress, retryTimes)
    if err != nil {
        return nil, err
    }

    return NewObjFile(newFile), nil

}

func makeFile(ctx context.Context, fileSlice []string, taskID string, uploadAddress string, retry int) (*sdkUserFile.File, error) {
    var lastError error = nil
    for range retry {
        newFile, err := doMakeFile(fileSlice, taskID, uploadAddress)
        if err == nil {
            return newFile, nil
        }
        if ctx.Err() != nil {
            return nil, err
        }
        if strings.Contains(err.Error(), "not found") {
            return nil, err
        }
        lastError = err
        time.Sleep(slicePostErrorRetryInterval)
    }
    return nil, fmt.Errorf("mk file slice failed after %d times, error: %s", retry, lastError.Error())
}

func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUserFile.File, error) {
    accessUrl := uploadAddress + "/" + taskID
    getTimeOut := time.Minute * 2
    u, err := url.Parse(accessUrl)
    if err != nil {
        return nil, err
    }
    n, _ := json.Marshal(fileSlice)
    httpRequest := http.Request{
        Method: http.MethodPost,
        URL:    u,
        Header: map[string][]string{
            "Accept":       {"application/json"},
            "Content-Type": {"application/json"},
            //"Content-Length": {fmt.Sprintf("%d", len(n))},
        },
        Body: io.NopCloser(bytes.NewReader(n)),
    }
    httpClient := http.Client{
        Timeout: getTimeOut,
    }
    httpResponse, err := httpClient.Do(&httpRequest)
    if err != nil {
        return nil, err
    }
    defer httpResponse.Body.Close()
    if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
        b, _ := io.ReadAll(httpResponse.Body)
        message := string(b)
        return nil, fmt.Errorf("mk file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
    }
    b, _ := io.ReadAll(httpResponse.Body)
    var result *sdkUserFile.File
    err = json.Unmarshal(b, &result)
    if err != nil {
        return nil, err
    }
    return result, nil
}
func postFileSlice(ctx context.Context, fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix, retry int) (cid.Cid, error) {
    var lastError error = nil
    for range retry {
        newCid, err := doPostFileSlice(fileSlice, taskID, uploadAddress, preix)
        if err == nil {
            return newCid, nil
        }
        if ctx.Err() != nil {
            return cid.Undef, err
        }
        time.Sleep(slicePostErrorRetryInterval)
        lastError = err
    }
    return cid.Undef, fmt.Errorf("upload file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix) (cid.Cid, error) {
    // 1. sum file slice
    newCid, err := preix.Sum(fileSlice)
    if err != nil {
        return cid.Undef, err
    }
    // 2. post file slice
    sliceCidString := newCid.String()
    // /{taskID}/{sliceID}
    accessUrl := uploadAddress + "/" + taskID + "/" + sliceCidString
    getTimeOut := time.Second * 30
    // get {accessUrl} in {getTimeOut}
    u, err := url.Parse(accessUrl)
    if err != nil {
        return cid.Undef, err
    }
    // header: accept: application/json
    // header: content-type: application/octet-stream
    // header: content-length: {fileSlice.length}
    // header: x-content-cid: {sliceCidString}
    // header: x-task-id: {taskID}
    httpRequest := http.Request{
        Method: http.MethodGet,
        URL:    u,
        Header: map[string][]string{
            "Accept": {"application/json"},
        },
    }
    httpClient := http.Client{
        Timeout: getTimeOut,
    }
    httpResponse, err := httpClient.Do(&httpRequest)
    if err != nil {
        return cid.Undef, err
    }
    if httpResponse.StatusCode != http.StatusOK {
        return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d", httpResponse.StatusCode)
    }
    var result bool
    b, err := io.ReadAll(httpResponse.Body)
    if err != nil {
        return cid.Undef, err
    }
    err = json.Unmarshal(b, &result)
    if err != nil {
        return cid.Undef, err
    }
    if result {
        return newCid, nil
    }

    httpRequest = http.Request{
        Method: http.MethodPost,
        URL:    u,
        Header: map[string][]string{
            "Accept":       {"application/json"},
            "Content-Type": {"application/octet-stream"},
            // "Content-Length": {fmt.Sprintf("%d", len(fileSlice))},
        },
        Body: io.NopCloser(bytes.NewReader(fileSlice)),
    }
    httpResponse, err = httpClient.Do(&httpRequest)
    if err != nil {
        return cid.Undef, err
    }
    defer httpResponse.Body.Close()
    if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
        b, _ := io.ReadAll(httpResponse.Body)
        message := string(b)
        return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
    }
    //

    return newCid, nil
}
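
Slices here are content-addressed: the upload task hands back a codec and multihash type, the client builds a cid.Prefix from them, and Prefix.Sum(data) yields the slice's identity before any bytes are sent, which is what makes the GET-before-POST existence probe in doPostFileSlice possible. A minimal sketch with go-cid, using the same fallback constants as above (0x55 = raw codec, 0x12 = sha2-256):

package main

import (
    "fmt"

    "github.com/ipfs/go-cid"
)

func main() {
    // Same defaults as the upload task fallback: raw codec, sha2-256.
    prefix := cid.Prefix{
        Version:  1,
        Codec:    0x55, // raw
        MhType:   0x12, // sha2-256
        MhLength: -1,   // default digest length for the hash
    }
    data := []byte("hello halalcloud")
    id, err := prefix.Sum(data)
    if err != nil {
        panic(err)
    }
    // The CID string becomes the slice's upload path segment: /{taskID}/{cid}.
    fmt.Println(id.String())
}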

drivers/halalcloud_open/meta.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package halalcloudopen

import (
    "github.com/OpenListTeam/OpenList/v4/internal/driver"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
    // Usually one of two
    driver.RootPath
    // define other
    RefreshToken string `json:"refresh_token" required:"false" help:"If using a personal API approach, the RefreshToken is not required."`
    UploadThread int    `json:"upload_thread" type:"number" default:"3" help:"1 <= thread <= 32"`

    ClientID     string `json:"client_id" required:"true" default:""`
    ClientSecret string `json:"client_secret" required:"true" default:""`
    Host         string `json:"host" required:"false" default:"openapi.2dland.cn"`
    TimeOut      int    `json:"timeout" type:"number" default:"60" help:"timeout in seconds"`
}

var config = driver.Config{
    Name:        "HalalCloudOpen",
    OnlyProxy:   false,
    DefaultRoot: "/",
    NoLinkURL:   false,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &HalalCloudOpen{}
    })
}

drivers/halalcloud_open/obj_file.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package halalcloudopen

import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)

type ObjFile struct {
    sdkFile    *sdkUserFile.File
    fileSize   int64
    modTime    time.Time
    createTime time.Time
}

func NewObjFile(f *sdkUserFile.File) model.Obj {
    ofile := &ObjFile{sdkFile: f}
    ofile.fileSize = f.Size
    modTimeTs := f.UpdateTs
    ofile.modTime = time.UnixMilli(modTimeTs)
    createTimeTs := f.CreateTs
    ofile.createTime = time.UnixMilli(createTimeTs)
    return ofile
}

func (f *ObjFile) GetSize() int64 {
    return f.fileSize
}

func (f *ObjFile) GetName() string {
    return f.sdkFile.Name
}

func (f *ObjFile) ModTime() time.Time {
    return f.modTime
}

func (f *ObjFile) IsDir() bool {
    return f.sdkFile.Dir
}

func (f *ObjFile) GetHash() utils.HashInfo {
    return utils.HashInfo{
        // TODO: support more hash types
    }
}

func (f *ObjFile) GetID() string {
    return f.sdkFile.Identity
}

func (f *ObjFile) GetPath() string {
    return f.sdkFile.Path
}

func (f *ObjFile) CreateTime() time.Time {
    return f.createTime
}

drivers/halalcloud_open/utils.go (new file, 185 lines)
@@ -0,0 +1,185 @@
package halalcloudopen

import (
    "context"
    "crypto/md5"
    "encoding/hex"
    "errors"
    "fmt"
    "hash"
    "io"
    "net/http"
    "sync"
    "time"

    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
    sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
    "github.com/ipfs/go-cid"
)

// get the next chunk
func (oo *openObject) getChunk(_ context.Context) (err error) {
    if oo.id >= len(oo.chunks) {
        return io.EOF
    }
    var chunk []byte
    err = utils.Retry(3, time.Second, func() (err error) {
        chunk, err = getRawFiles(oo.d[oo.id])
        return err
    })
    if err != nil {
        return err
    }
    oo.id++
    oo.chunk = chunk
    return nil
}

// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
    oo.mu.Lock()
    defer oo.mu.Unlock()
    if oo.closed {
        return 0, fmt.Errorf("read on closed file")
    }
    // Skip data at the start if requested
    for oo.skip > 0 {
        //size := 1024 * 1024
        _, size, err := oo.ChunkLocation(oo.id)
        if err != nil {
            return 0, err
        }
        if oo.skip < int64(size) {
            break
        }
        oo.id++
        oo.skip -= int64(size)
    }
    if len(oo.chunk) == 0 {
        err = oo.getChunk(oo.ctx)
        if err != nil {
            return 0, err
        }
        if oo.skip > 0 {
            oo.chunk = (oo.chunk)[oo.skip:]
            oo.skip = 0
        }
    }
    n = copy(p, oo.chunk)
    oo.shaTemp.Write(p[:n])
    oo.chunk = (oo.chunk)[n:]
    return n, nil
}

// Close closes the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
    oo.mu.Lock()
    defer oo.mu.Unlock()
    if oo.closed {
        return nil
    }
    // verify the SHA-1
    if string(oo.shaTemp.Sum(nil)) != oo.sha {
        return fmt.Errorf("failed to finish download: SHA mismatch")
    }

    oo.closed = true
    return nil
}

func GetMD5Hash(text string) string {
    tHash := md5.Sum([]byte(text))
    return hex.EncodeToString(tHash[:])
}

type chunkSize struct {
    position int64
    size     int
}

type openObject struct {
    ctx     context.Context
    mu      sync.Mutex
    d       []*sdkUserFile.SliceDownloadInfo
    id      int
    skip    int64
    chunk   []byte
    chunks  []chunkSize
    closed  bool
    sha     string
    shaTemp hash.Hash
}

func getChunkSizes(sliceSize []*sdkUserFile.SliceSize) (chunks []chunkSize) {
    chunks = make([]chunkSize, 0)
    for _, s := range sliceSize {
        // special-case the last slice
        endIndex := s.EndIndex
        startIndex := s.StartIndex
        if endIndex == 0 {
            endIndex = startIndex
        }
        for j := startIndex; j <= endIndex; j++ {
            size := s.Size
            chunks = append(chunks, chunkSize{position: j, size: int(size)})
        }
    }
    return chunks
}
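
getChunkSizes expands each SliceSize run (StartIndex through EndIndex, all slices of one size) into per-chunk entries, with EndIndex == 0 acting as shorthand for a single slice at StartIndex. A worked example with hypothetical run values:

package main

import "fmt"

type sliceSize struct{ StartIndex, EndIndex, Size int64 }

type chunkSize struct {
    position int64
    size     int
}

// expand mirrors getChunkSizes: runs are inclusive on both ends, and an
// EndIndex of 0 means "just StartIndex".
func expand(runs []sliceSize) []chunkSize {
    var chunks []chunkSize
    for _, s := range runs {
        end := s.EndIndex
        if end == 0 {
            end = s.StartIndex
        }
        for j := s.StartIndex; j <= end; j++ {
            chunks = append(chunks, chunkSize{position: j, size: int(s.Size)})
        }
    }
    return chunks
}

func main() {
    // Two runs: chunks 0-2 of 4 MiB each, then a final chunk 3 of 1 MiB.
    fmt.Println(expand([]sliceSize{
        {StartIndex: 0, EndIndex: 2, Size: 4 << 20},
        {StartIndex: 3, Size: 1 << 20},
    }))
}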

func (oo *openObject) ChunkLocation(id int) (position int64, size int, err error) {
    if id < 0 || id >= len(oo.chunks) {
        return 0, 0, errors.New("invalid arguments")
    }

    return (oo.chunks)[id].position, (oo.chunks)[id].size, nil
}

func getRawFiles(addr *sdkUserFile.SliceDownloadInfo) ([]byte, error) {

    if addr == nil {
        return nil, errors.New("addr is nil")
    }

    client := http.Client{
        Timeout: time.Duration(60 * time.Second), // Set timeout to 60 seconds
    }
    resp, err := client.Get(addr.DownloadAddress)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("bad status: %s, body: %s", resp.Status, body)
    }

    if addr.Encrypt > 0 {
        cd := uint8(addr.Encrypt)
        for idx := 0; idx < len(body); idx++ {
            body[idx] = body[idx] ^ cd
        }
    }
    storeType := addr.StoreType
    if storeType != 10 {

        sourceCid, err := cid.Decode(addr.Identity)
        if err != nil {
            return nil, err
        }
        checkCid, err := sourceCid.Prefix().Sum(body)
        if err != nil {
            return nil, err
        }
        if !checkCid.Equals(sourceCid) {
            return nil, fmt.Errorf("bad cid: %s, body: %s", checkCid.String(), body)
        }
    }

    return body, nil

}

@@ -97,13 +97,13 @@ func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs)
    }
    obj := model.Object{
        ID: strconv.FormatInt(f.FileId, 10),
        //Path: "",
        // Path: "",
        Name:     f.FileName,
        Size:     f.FileSize * 1024,
        Modified: updTime,
        Ctime:    updTime,
        IsFolder: false,
        //HashInfo: utils.HashInfo{},
        // HashInfo: utils.HashInfo{},
    }
    if f.FileType == 2 {
        obj.IsFolder = true
@@ -185,13 +185,13 @@ func (d *ILanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
    }
    return &model.Object{
        ID: utils.Json.Get(res, "list", 0, "id").ToString(),
        //Path: "",
        // Path: "",
        Name:     dirName,
        Size:     0,
        Modified: time.Now(),
        Ctime:    time.Now(),
        IsFolder: true,
        //HashInfo: utils.HashInfo{},
        // HashInfo: utils.HashInfo{},
    }, nil
}

@@ -239,7 +239,7 @@ func (d *ILanZou) Rename(ctx context.Context, srcObj model.Obj, newName string)
    }
    return &model.Object{
        ID: srcObj.GetID(),
        //Path: "",
        // Path: "",
        Name:     newName,
        Size:     srcObj.GetSize(),
        Modified: time.Now(),
@@ -392,7 +392,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
    }
    return &model.Object{
        ID: strconv.FormatInt(file.FileId, 10),
        //Path: ,
        // Path: ,
        Name:     file.FileName,
        Size:     s.GetSize(),
        Modified: s.ModTime(),
@@ -402,6 +402,22 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
    }, nil
}

func (d *ILanZou) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    res, err := d.proved("/user/account/map", http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    })
    if err != nil {
        return nil, err
    }
    totalSize := utils.Json.Get(res, "map", "totalSize").ToUint64() * 1024
    rewardSize := utils.Json.Get(res, "map", "rewardSize").ToUint64() * 1024
    total := totalSize + rewardSize
    used := utils.Json.Get(res, "map", "usedSize").ToUint64() * 1024
    return &model.StorageDetails{
        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
    }, nil
}

//func (d *ILanZou) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

@@ -1,7 +1,8 @@
|
||||
package lanzou
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
@@ -9,8 +10,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const DAY time.Duration = 84600000000000
|
||||
@@ -122,20 +121,26 @@ var findAcwScV2Reg = regexp.MustCompile(`arg1='([0-9A-Z]+)'`)
|
||||
|
||||
// 在页面被过多访问或其他情况下,有时候会先返回一个加密的页面,其执行计算出一个acw_sc__v2后放入页面后再重新访问页面才能获得正常页面
|
||||
// 若该页面进行了js加密,则进行解密,计算acw_sc__v2,并加入cookie
|
||||
func CalcAcwScV2(html string) (string, error) {
|
||||
log.Debugln("acw_sc__v2", html)
|
||||
acwScV2s := findAcwScV2Reg.FindStringSubmatch(html)
|
||||
if len(acwScV2s) != 2 {
|
||||
return "", fmt.Errorf("无法匹配acw_sc__v2")
|
||||
func CalcAcwScV2(htmlContent string) (string, error) {
|
||||
matches := findAcwScV2Reg.FindStringSubmatch(htmlContent)
|
||||
if len(matches) != 2 {
|
||||
return "", errors.New("无法匹配到 arg1 参数")
|
||||
}
|
||||
return HexXor(Unbox(acwScV2s[1]), "3000176000856006061501533003690027800375"), nil
|
||||
arg1 := matches[1]
|
||||
|
||||
mask := "3000176000856006061501533003690027800375"
|
||||
result, err := hexXor(unbox(arg1), mask)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("hexXor 操作失败: %w", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||

func Unbox(hex string) string {
func unbox(hex string) string {
	var box = []int{6, 28, 34, 31, 33, 18, 30, 23, 9, 8, 19, 38, 17, 24, 0, 5, 32, 21, 10, 22, 25, 14, 15, 3, 16, 27, 13, 35, 2, 29, 11, 26, 4, 36, 1, 39, 37, 7, 20, 12}
	var newBox = make([]byte, len(hex))
	for i := 0; i < len(box); i++ {
		j := box[i]
	for i, j := range box {
		if len(newBox) > j {
			newBox[j] = hex[i]
		}
@@ -143,14 +148,21 @@ func Unbox(hex string) string {
	return string(newBox)
}

func HexXor(hex1, hex2 string) string {
	out := bytes.NewBuffer(make([]byte, len(hex1)))
	for i := 0; i < len(hex1) && i < len(hex2); i += 2 {
		v1, _ := strconv.ParseInt(hex1[i:i+2], 16, 64)
		v2, _ := strconv.ParseInt(hex2[i:i+2], 16, 64)
		out.WriteString(strconv.FormatInt(v1^v2, 16))
func hexXor(hex1, hex2 string) (string, error) {
	bytes1, err := hex.DecodeString(hex1)
	if err != nil {
		return "", fmt.Errorf("failed to decode hex1: %w", err)
	}
	return out.String()
	bytes2, err := hex.DecodeString(hex2)
	if err != nil {
		return "", fmt.Errorf("failed to decode hex2: %w", err)
	}
	minLength := min(len(bytes2), len(bytes1))
	resultBytes := make([]byte, minLength)
	for i := range minLength {
		resultBytes[i] = bytes1[i] ^ bytes2[i]
	}
	return hex.EncodeToString(resultBytes), nil
}
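
To see how these two helpers combine, here is a minimal sketch of one challenge round; the arg1 token is hypothetical, the mask is the constant hard-coded in CalcAcwScV2 above (a fragment, not a standalone program):

// Hypothetical 40-character arg1 token as scraped from the challenge page.
arg1 := "3E1FA0943C8B07250A6D1E8F4C2B9D0371A5C6E8"
page := "<script>var arg1='" + arg1 + "';</script>"
// unbox permutes the token's characters; hexXor then XORs the permuted
// string byte-wise with the fixed mask, yielding the cookie value.
vs, err := CalcAcwScV2(page)
if err == nil {
	_ = vs // retry the original request with "acw_sc__v2=" + vs in the cookie header
}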

var findDataReg = regexp.MustCompile(`data[:\s]+({[^}]+})`) // locate the JSON payload

@@ -3,6 +3,7 @@ package lanzou
import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"runtime"
@@ -94,36 +95,66 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
	}
}

// Fix: every request now automatically handles acw_sc__v2 validation and the down_ip=1 cookie
func (d *LanZou) request(url string, method string, callback base.ReqCallback, up bool) ([]byte, error) {
	var req *resty.Request
	if up {
		once.Do(func() {
			upClient = base.NewRestyClient().SetTimeout(120 * time.Second)
	var vs string
	for retry := 0; retry < 3; retry++ {
		if up {
			once.Do(func() {
				upClient = base.NewRestyClient().SetTimeout(120 * time.Second)
			})
			req = upClient.R()
		} else {
			req = base.RestyClient.R()
		}

		req.SetHeaders(map[string]string{
			"Referer":    "https://pc.woozooo.com",
			"User-Agent": d.UserAgent,
		})
		req = upClient.R()
	} else {
		req = base.RestyClient.R()
	}

	req.SetHeaders(map[string]string{
		"Referer":    "https://pc.woozooo.com",
		"User-Agent": d.UserAgent,
	})
		// Direct file download links additionally need the down_ip=1 cookie
		if strings.Contains(url, "/file/") {
			cookie := d.Cookie
			if cookie != "" {
				cookie += "; "
			}
			cookie += "down_ip=1"
			if vs != "" {
				cookie += "; acw_sc__v2=" + vs
			}
			req.SetHeader("cookie", cookie)
		} else if d.Cookie != "" {
			cookie := d.Cookie
			if vs != "" {
				cookie += "; acw_sc__v2=" + vs
			}
			req.SetHeader("cookie", cookie)
		} else if vs != "" {
			req.SetHeader("cookie", "acw_sc__v2="+vs)
		}

	if d.Cookie != "" {
		req.SetHeader("cookie", d.Cookie)
	}
	if callback != nil {
		callback(req)
	}

		if callback != nil {
			callback(req)
		res, err := req.Execute(method, url)
		if err != nil {
			return nil, err
		}
		bodyStr := res.String()
		log.Debugf("lanzou request: url=>%s ,stats=>%d ,body => %s\n", res.Request.URL, res.StatusCode(), bodyStr)
		if strings.Contains(bodyStr, "acw_sc__v2") {
			vs, err = CalcAcwScV2(bodyStr)
			if err != nil {
				return nil, err
			}
			continue
		}
		return res.Body(), err
	}

	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
	}
	log.Debugf("lanzou request: url=>%s ,stats=>%d ,body => %s\n", res.Request.URL, res.StatusCode(), res.String())
	return res.Body(), err
	return nil, errors.New("acw_sc__v2 validation error")
}

func (d *LanZou) Login() ([]*http.Cookie, error) {
@@ -430,27 +461,91 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
	file.Time = timeFindReg.FindString(sharePageData)

	// Follow the redirect to obtain the real link
	res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
		"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
	}).Get(downloadUrl)
	var (
		res *resty.Response
		err error
	)
	var vs string
	var bodyStr string
	for i := 0; i < 3; i++ {
		res, err = base.NoRedirectClient.R().SetHeaders(map[string]string{
			"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
			"Referer":         baseUrl,
		}).SetDoNotParseResponse(true).
			SetCookie(&http.Cookie{
				Name:  "acw_sc__v2",
				Value: vs,
			}).SetHeader("cookie", "down_ip=1").Get(downloadUrl)
		if err != nil {
			return nil, err
		}

		if res.StatusCode() == 302 {
			if res.RawBody() != nil {
				res.RawBody().Close()
			}
			break
		}
		bodyBytes, err := io.ReadAll(res.RawBody())
		if res.RawBody() != nil {
			res.RawBody().Close()
		}
		if err != nil {
			return nil, fmt.Errorf("failed to read response body: %w", err)
		}
		bodyStr = string(bodyBytes)
		if strings.Contains(bodyStr, "acw_sc__v2") {
			if vs, err = CalcAcwScV2(bodyStr); err != nil {
				log.Errorf("lanzou: err => acw_sc__v2 validation error ,data => %s\n", bodyStr)
				return nil, err
			}
			continue
		}
		break
	}

	if err != nil {
		return nil, err
	}

	file.Url = res.Header().Get("location")

	// Trigger verification
	rPageData := res.String()
	// The second verification step may itself trigger acw_sc__v2, so handle that case too
	if res.StatusCode() != 302 {
		param, err = htmlJsonToMap(rPageData)
		param, err = htmlJsonToMap(bodyStr)
		if err != nil {
			return nil, err
		}
		param["el"] = "2"
		time.Sleep(time.Second * 2)

		// Pass verification to obtain the direct link
		data, err := d.post(fmt.Sprint(baseUrl, "/ajax.php"), func(req *resty.Request) { req.SetFormData(param) }, nil)
		// Pass verification to obtain the direct link
		var data []byte
		for i := 0; i < 3; i++ {
			data, err = d.post(fmt.Sprint(baseUrl, "/ajax.php"), func(req *resty.Request) {
				req.SetFormData(param)
				req.SetHeader("cookie", "down_ip=1")
				if vs != "" {
					req.SetCookie(&http.Cookie{
						Name:  "acw_sc__v2",
						Value: vs,
					})
				}
			}, nil)
			if err != nil {
				return nil, err
			}
			ajaxBodyStr := string(data)
			if strings.Contains(ajaxBodyStr, "acw_sc__v2") {
				if vs, err = CalcAcwScV2(ajaxBodyStr); err != nil {
					log.Errorf("lanzou: err => acw_sc__v2 validation error ,data => %s\n", ajaxBodyStr)
					return nil, err
				}
				time.Sleep(time.Second * 2)
				continue
			}
			break
		}
		if err != nil {
			return nil, err
		}

drivers/mediafire/driver.go (new file, 431 lines)
@@ -0,0 +1,431 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking

Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21

Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/

import (
	"context"
	"fmt"
	"math/rand"
	"net/http"
	"strconv"
	"time"

	"github.com/OpenListTeam/OpenList/v4/drivers/base"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/cron"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"golang.org/x/time/rate"
)

type Mediafire struct {
	model.Storage
	Addition

	cron *cron.Cron

	actionToken string
	limiter     *rate.Limiter

	appBase    string
	apiBase    string
	hostBase   string
	maxRetries int

	secChUa         string
	secChUaPlatform string
	userAgent       string
}

func (d *Mediafire) Config() driver.Config {
	return config
}

func (d *Mediafire) GetAddition() driver.Additional {
	return &d.Addition
}

// Init initializes the MediaFire driver with session token and cookie validation
func (d *Mediafire) Init(ctx context.Context) error {
	if d.SessionToken == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
	}

	if d.Cookie == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
	}
	// Setup rate limiter if rate limit is configured
	if d.LimitRate > 0 {
		d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
	}
	// Validate and refresh session token if needed
	if _, err := d.getSessionToken(ctx); err != nil {

		d.renewToken(ctx)

		// Renew ahead of the ~10 min token expiry, at a random 6-9 minute interval
		num := rand.Intn(4) + 6

		d.cron = cron.NewCron(time.Minute * time.Duration(num))
		d.cron.Do(func() {
			// Crazy, but working way to refresh session token
			d.renewToken(ctx)
		})

	}

	return nil
}
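
For reference, the x/time/rate limiter created in Init has burst 1, so with the default LimitRate of 2 every API call is spaced roughly 500 ms apart. A standalone sketch of that behavior:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Burst 1 at 2 req/s: Wait blocks until the next token, ~500 ms apart.
	limiter := rate.NewLimiter(rate.Limit(2), 1)
	start := time.Now()
	for i := 0; i < 3; i++ {
		_ = limiter.Wait(context.Background())
		fmt.Printf("call %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
}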

// Drop cleans up driver resources
func (d *Mediafire) Drop(ctx context.Context) error {
	// Clear cached resources
	d.actionToken = ""
	if d.cron != nil {
		d.cron.Stop()
		d.cron = nil
	}
	return nil
}

// List retrieves files and folders from the specified directory
func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.getFiles(ctx, dir.GetID())
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
		return d.fileToObj(src), nil
	})
}

// Link generates a direct download link for the specified file
func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
	if err != nil {
		return nil, err
	}

	res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Head(downloadUrl)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = res.RawBody().Close()
	}()

	if res.StatusCode() == 302 {
		downloadUrl = res.Header().Get("location")
	}

	return &model.Link{
		URL: downloadUrl,
		Header: http.Header{
			"Origin":             []string{d.appBase},
			"Referer":            []string{d.appBase + "/"},
			"sec-ch-ua":          []string{d.secChUa},
			"sec-ch-ua-platform": []string{d.secChUaPlatform},
			"User-Agent":         []string{d.userAgent},
		},
	}, nil
}

// MakeDir creates a new folder in the specified parent directory
func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"parent_key":      parentDir.GetID(),
		"foldername":      dirName,
	}

	var resp MediafireFolderCreateResponse
	_, err := d.postForm(ctx, "/folder/create.php", data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)

	return &model.Object{
		ID:       resp.Response.FolderKey,
		Name:     resp.Response.Name,
		Size:     0,
		Modified: created,
		Ctime:    created,
		IsFolder: true,
	}, nil
}

// Move relocates a file or folder to a different parent directory
func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {

		endpoint = "/folder/move.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	} else {

		endpoint = "/file/move.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"folder_key":      dstDir.GetID(),
		}
	}

	var resp MediafireMoveResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return srcObj, nil
}

// Rename changes the name of a file or folder
func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {

		endpoint = "/folder/update.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      srcObj.GetID(),
			"foldername":      newName,
		}
	} else {

		endpoint = "/file/update.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"filename":        newName,
		}
	}

	var resp MediafireRenameResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       srcObj.GetID(),
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		Ctime:    srcObj.CreateTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

// Copy creates a duplicate of a file or folder in the specified destination directory
func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {

		endpoint = "/folder/copy.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	} else {

		endpoint = "/file/copy.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"folder_key":      dstDir.GetID(),
		}
	}

	var resp MediafireCopyResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	var newID string
	if srcObj.IsDir() {
		if len(resp.Response.NewFolderKeys) > 0 {
			newID = resp.Response.NewFolderKeys[0]
		}
	} else {
		if len(resp.Response.NewQuickKeys) > 0 {
			newID = resp.Response.NewQuickKeys[0]
		}
	}

	return &model.Object{
		ID:       newID,
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		Ctime:    srcObj.CreateTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

// Remove deletes a file or folder permanently
func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
	var data map[string]string
	var endpoint string

	if obj.IsDir() {

		endpoint = "/folder/delete.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      obj.GetID(),
		}
	} else {

		endpoint = "/file/delete.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       obj.GetID(),
		}
	}

	var resp MediafireRemoveResponse
	_, err := d.postForm(ctx, endpoint, data, &resp)
	if err != nil {
		return err
	}

	return checkAPIResult(resp.Response.Result)
}

// Put uploads a file to the specified directory with support for resumable upload and quick upload
func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	fileHash := file.GetHash().GetHash(utils.SHA256)
	var err error

	// Try to use existing hash first, cache only if necessary
	if len(fileHash) != utils.SHA256.Width {
		_, fileHash, err = stream.CacheFullAndHash(file, &up, utils.SHA256)
		if err != nil {
			return nil, err
		}
	}

	checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
	if err != nil {
		return nil, err
	}

	if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
		up(100.0)
		existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
		if err == nil && existingFile != nil {
			// File exists, return existing file info
			return &model.Object{
				ID:   existingFile.GetID(),
				Name: file.GetName(),
				Size: file.GetSize(),
			}, nil
		}
		// If getExistingFileInfo fails, log and continue with normal upload
		// This ensures upload doesn't fail due to search issues
	}

	var pollKey string

	if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {
		pollKey, err = d.uploadUnits(ctx, file, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
		if err != nil {
			return nil, err
		}
	} else {
		pollKey = checkResp.Response.ResumableUpload.UploadKey
		up(100.0)
	}

	pollResp, err := d.pollUpload(ctx, pollKey)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:   pollResp.Response.Doupload.QuickKey,
		Name: file.GetName(),
		Size: file.GetSize(),
	}, nil
}
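
A note on the `utils.SHA256.Width` guard in Put: judging by how it is used, it compares against the hex digest length, which for SHA-256 is 64 characters (an assumption drawn from this code, not a documented API contract). A standalone check:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("hello"))
	digest := hex.EncodeToString(sum[:])
	fmt.Println(len(digest)) // 64: the width Put expects before it re-hashes the stream
}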

func (d *Mediafire) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
	}
	var resp MediafireUserInfoResponse
	_, err := d.postForm(ctx, "/user/get_info.php", data, &resp)
	if err != nil {
		return nil, err
	}
	used, err := strconv.ParseUint(resp.Response.UserInfo.UsedStorageSize, 10, 64)
	if err != nil {
		return nil, err
	}
	total, err := strconv.ParseUint(resp.Response.UserInfo.StorageLimit, 10, 64)
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: total,
			FreeSpace:  total - used,
		},
	}, nil
}

var _ driver.Driver = (*Mediafire)(nil)
drivers/mediafire/meta.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking

Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21

Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
)

type Addition struct {
	driver.RootPath
	//driver.RootID

	SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
	Cookie       string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`

	OrderBy        string  `json:"order_by" type:"select" options:"name,time,size" default:"name"`
	OrderDirection string  `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	ChunkSize      int64   `json:"chunk_size" type:"number" default:"100"`
	UploadThreads  int     `json:"upload_threads" type:"number" default:"3" help:"concurrent upload threads"`
	LimitRate      float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
}

var config = driver.Config{
	Name:              "MediaFire",
	LocalSort:         false,
	OnlyLinkMFile:     false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Mediafire{
			appBase:         "https://app.mediafire.com",
			apiBase:         "https://www.mediafire.com/api/1.5",
			hostBase:        "https://www.mediafire.com",
			maxRetries:      3,
			secChUa:         "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
			secChUaPlatform: "Windows",
			userAgent:       "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
		}
	})
}
drivers/mediafire/types.go (new file, 246 lines)
@@ -0,0 +1,246 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking
*/

type MediafireRenewTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		SessionToken      string `json:"session_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireResponse struct {
	Response struct {
		Action        string `json:"action"`
		FolderContent struct {
			ChunkSize   string            `json:"chunk_size"`
			ContentType string            `json:"content_type"`
			ChunkNumber string            `json:"chunk_number"`
			FolderKey   string            `json:"folderkey"`
			Folders     []MediafireFolder `json:"folders,omitempty"`
			Files       []MediafireFile   `json:"files,omitempty"`
			MoreChunks  string            `json:"more_chunks"`
		} `json:"folder_content"`
		Result string `json:"result"`
	} `json:"response"`
}

type MediafireFolder struct {
	FolderKey  string `json:"folderkey"`
	Name       string `json:"name"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
}

type MediafireFile struct {
	QuickKey   string `json:"quickkey"`
	Filename   string `json:"filename"`
	Size       string `json:"size"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
	MimeType   string `json:"mimetype"`
}

type File struct {
	ID         string
	Name       string
	Size       int64
	CreatedUTC string
	IsFolder   bool
}

type FolderContentResponse struct {
	Folders    []MediafireFolder
	Files      []MediafireFile
	MoreChunks bool
}

type MediafireLinksResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			View           string `json:"view"`
			NormalDownload string `json:"normal_download"`
			OneTime        struct {
				Download string `json:"download"`
				View     string `json:"view"`
			} `json:"one_time"`
		} `json:"links"`
		OneTimeKeyRequestCount    string `json:"one_time_key_request_count"`
		OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
		Result                    string `json:"result"`
		CurrentAPIVersion         string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireDirectDownloadResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			DirectDownload string `json:"direct_download"`
		} `json:"links"`
		DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
		Result                      string `json:"result"`
		CurrentAPIVersion           string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireFolderCreateResponse struct {
	Response struct {
		Action            string `json:"action"`
		FolderKey         string `json:"folder_key"`
		UploadKey         string `json:"upload_key"`
		ParentFolderKey   string `json:"parent_folderkey"`
		Name              string `json:"name"`
		Description       string `json:"description"`
		Created           string `json:"created"`
		CreatedUTC        string `json:"created_utc"`
		Privacy           string `json:"privacy"`
		FileCount         string `json:"file_count"`
		FolderCount       string `json:"folder_count"`
		Revision          string `json:"revision"`
		DropboxEnabled    string `json:"dropbox_enabled"`
		Flag              string `json:"flag"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireMoveResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewNames          []string `json:"new_names"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireRenameResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireCopyResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewQuickKeys      []string `json:"new_quickkeys,omitempty"`
		NewFolderKeys     []string `json:"new_folderkeys,omitempty"`
		SkippedCount      string   `json:"skipped_count,omitempty"`
		OtherCount        string   `json:"other_count,omitempty"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireRemoveResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireCheckResponse struct {
	Response struct {
		Action          string `json:"action"`
		HashExists      string `json:"hash_exists"`
		InAccount       string `json:"in_account"`
		InFolder        string `json:"in_folder"`
		FileExists      string `json:"file_exists"`
		ResumableUpload struct {
			AllUnitsReady string `json:"all_units_ready"`
			NumberOfUnits string `json:"number_of_units"`
			UnitSize      string `json:"unit_size"`
			Bitmap        struct {
				Count string   `json:"count"`
				Words []string `json:"words"`
			} `json:"bitmap"`
			UploadKey string `json:"upload_key"`
		} `json:"resumable_upload"`
		AvailableSpace       string `json:"available_space"`
		UsedStorageSize      string `json:"used_storage_size"`
		StorageLimit         string `json:"storage_limit"`
		StorageLimitExceeded string `json:"storage_limit_exceeded"`
		UploadURL            struct {
			Simple            string `json:"simple"`
			SimpleFallback    string `json:"simple_fallback"`
			Resumable         string `json:"resumable"`
			ResumableFallback string `json:"resumable_fallback"`
		} `json:"upload_url"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
type MediafireActionTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		ActionToken       string `json:"action_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafirePollResponse struct {
	Response struct {
		Action   string `json:"action"`
		Doupload struct {
			Result      string `json:"result"`
			Status      string `json:"status"`
			Description string `json:"description"`
			QuickKey    string `json:"quickkey"`
			Hash        string `json:"hash"`
			Filename    string `json:"filename"`
			Size        string `json:"size"`
			Created     string `json:"created"`
			CreatedUTC  string `json:"created_utc"`
			Revision    string `json:"revision"`
		} `json:"doupload"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireFileSearchResponse struct {
	Response struct {
		Action            string `json:"action"`
		FileInfo          []File `json:"file_info"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireUserInfoResponse struct {
	Response struct {
		Action   string `json:"action"`
		UserInfo struct {
			Email           string `json:"email"`
			DisplayName     string `json:"display_name"`
			UsedStorageSize string `json:"used_storage_size"`
			StorageLimit    string `json:"storage_limit"`
		} `json:"user_info"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
drivers/mediafire/util.go (new file, 729 lines)
@@ -0,0 +1,729 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking

Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21

Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/OpenListTeam/OpenList/v4/drivers/base"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/avast/retry-go"
	"github.com/go-resty/resty/v2"
)

// checkAPIResult validates MediaFire API response result and returns error if not successful
func checkAPIResult(result string) error {
	if result != "Success" {
		return fmt.Errorf("MediaFire API error: %s", result)
	}
	return nil
}

// getSessionToken retrieves and validates session token from MediaFire
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
	if d.limiter != nil {
		if err := d.limiter.Wait(ctx); err != nil {
			return "", fmt.Errorf("rate limit wait failed: %w", err)
		}
	}

	tokenURL := d.hostBase + "/application/get_session_token.php"

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
	if err != nil {
		return "", err
	}

	req.Header.Set("Accept", "*/*")
	req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
	req.Header.Set("Accept-Language", "en-US,en;q=0.9")
	req.Header.Set("Content-Length", "0")
	req.Header.Set("Cookie", d.Cookie)
	req.Header.Set("DNT", "1")
	req.Header.Set("Origin", d.hostBase)
	req.Header.Set("Priority", "u=1, i")
	req.Header.Set("Referer", (d.hostBase + "/"))
	req.Header.Set("Sec-Ch-Ua", d.secChUa)
	req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
	req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
	req.Header.Set("Sec-Fetch-Dest", "empty")
	req.Header.Set("Sec-Fetch-Mode", "cors")
	req.Header.Set("Sec-Fetch-Site", "same-site")
	req.Header.Set("User-Agent", d.userAgent)
	// req.Header.Set("Connection", "keep-alive")

	resp, err := base.HttpClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	// fmt.Printf("getSessionToken :: Raw response: %s\n", string(body))
	// fmt.Printf("getSessionToken :: Parsed response: %+v\n", resp)

	var tokenResp struct {
		Response struct {
			SessionToken string `json:"session_token"`
		} `json:"response"`
	}

	if resp.StatusCode == 200 {
		if err := json.Unmarshal(body, &tokenResp); err != nil {
			return "", err
		}

		if tokenResp.Response.SessionToken == "" {
			return "", fmt.Errorf("empty session token received")
		}

		cookieMap := make(map[string]string)
		for _, cookie := range resp.Cookies() {
			cookieMap[cookie.Name] = cookie.Value
		}

		if len(cookieMap) > 0 {

			var cookies []string
			for name, value := range cookieMap {
				cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
			}
			d.Cookie = strings.Join(cookies, "; ")
			op.MustSaveDriverStorage(d)

			// fmt.Printf("getSessionToken :: Captured cookies: %s\n", d.Cookie)
		}

	} else {
		return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
	}

	d.SessionToken = tokenResp.Response.SessionToken

	// fmt.Printf("Init :: Obtain Session Token %v", d.SessionToken)

	op.MustSaveDriverStorage(d)

	return d.SessionToken, nil
}

// renewToken refreshes the current session token when expired
func (d *Mediafire) renewToken(ctx context.Context) error {
	query := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
	}

	var resp MediafireRenewTokenResponse
	_, err := d.postForm(ctx, "/user/renew_session_token.php", query, &resp)
	if err != nil {
		return fmt.Errorf("failed to renew token: %w", err)
	}

	// fmt.Printf("getInfo :: Raw response: %s\n", string(body))
	// fmt.Printf("getInfo :: Parsed response: %+v\n", resp)

	if resp.Response.Result != "Success" {
		return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
	}

	d.SessionToken = resp.Response.SessionToken

	// fmt.Printf("Init :: Renew Session Token: %s", resp.Response.Result)

	op.MustSaveDriverStorage(d)

	return nil
}

func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
	// Pre-allocate slice with reasonable capacity to reduce memory allocations
	files := make([]File, 0, d.ChunkSize*2) // Estimate: ChunkSize for files + folders
	hasMore := true
	chunkNumber := 1

	for hasMore {
		resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
		if err != nil {
			return nil, err
		}

		// Process folders and files in single loop to improve cache locality
		totalItems := len(resp.Folders) + len(resp.Files)
		if cap(files)-len(files) < totalItems {
			// Grow slice if needed
			newFiles := make([]File, len(files), len(files)+totalItems+int(d.ChunkSize))
			copy(newFiles, files)
			files = newFiles
		}

		for _, folder := range resp.Folders {
			files = append(files, File{
				ID:         folder.FolderKey,
				Name:       folder.Name,
				Size:       0,
				CreatedUTC: folder.CreatedUTC,
				IsFolder:   true,
			})
		}

		for _, file := range resp.Files {
			size, _ := strconv.ParseInt(file.Size, 10, 64)
			files = append(files, File{
				ID:         file.QuickKey,
				Name:       file.Filename,
				Size:       size,
				CreatedUTC: file.CreatedUTC,
				IsFolder:   false,
			})
		}

		hasMore = resp.MoreChunks
		chunkNumber++
	}

	return files, nil
}

func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
	foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
	if err != nil {
		return nil, err
	}

	filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
	if err != nil {
		return nil, err
	}

	return &FolderContentResponse{
		Folders:    foldersResp.Response.FolderContent.Folders,
		Files:      filesResp.Response.FolderContent.Files,
		MoreChunks: foldersResp.Response.FolderContent.MoreChunks == "yes" || filesResp.Response.FolderContent.MoreChunks == "yes",
	}, nil
}

func (d *Mediafire) getFolderContentByType(ctx context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"folder_key":      folderKey,
		"content_type":    contentType,
		"chunk":           strconv.Itoa(chunkNumber),
		"chunk_size":      strconv.FormatInt(d.ChunkSize, 10),
		"details":         "yes",
		"order_direction": d.OrderDirection,
		"order_by":        d.OrderBy,
		"filter":          "",
	}

	var resp MediafireResponse
	_, err := d.postForm(ctx, "/folder/get_content.php", data, &resp)
	if err != nil {
		return nil, err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return &resp, nil
}

// fileToObj converts MediaFire file data to model.ObjThumb with thumbnail support
func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
	created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)

	var thumbnailURL string
	if !f.IsFolder && f.ID != "" {
		thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
	}

	return &model.ObjThumb{
		Object: model.Object{
			ID: f.ID,
			// Path: "",
			Name:     f.Name,
			Size:     f.Size,
			Modified: created,
			Ctime:    created,
			IsFolder: f.IsFolder,
		},
		Thumbnail: model.Thumbnail{
			Thumbnail: thumbnailURL,
		},
	}
}

func (d *Mediafire) setCommonHeaders(req *resty.Request) {
	req.SetHeaders(map[string]string{
		"Cookie":     d.Cookie,
		"User-Agent": d.userAgent,
		"Origin":     d.appBase,
		"Referer":    d.appBase + "/",
	})
}

// apiRequest performs HTTP request to MediaFire API with rate limiting and common headers
func (d *Mediafire) apiRequest(ctx context.Context, method, endpoint string, queryParams, formData map[string]string, resp interface{}) ([]byte, error) {
	if d.limiter != nil {
		if err := d.limiter.Wait(ctx); err != nil {
			return nil, fmt.Errorf("rate limit wait failed: %w", err)
		}
	}

	req := base.RestyClient.R()
	req.SetContext(ctx)
	d.setCommonHeaders(req)

	// Set query parameters for GET requests
	if queryParams != nil {
		req.SetQueryParams(queryParams)
	}

	// Set form data for POST requests
	if formData != nil {
		req.SetFormData(formData)
		req.SetHeader("Content-Type", "application/x-www-form-urlencoded")
	}

	// Set response object if provided
	if resp != nil {
		req.SetResult(resp)
	}

	var res *resty.Response
	var err error

	// Execute request based on method
	switch method {
	case "GET":
		res, err = req.Get(d.apiBase + endpoint)
	case "POST":
		res, err = req.Post(d.apiBase + endpoint)
	default:
		return nil, fmt.Errorf("unsupported HTTP method: %s", method)
	}

	if err != nil {
		return nil, err
	}

	return res.Body(), nil
}

func (d *Mediafire) getForm(ctx context.Context, endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
	return d.apiRequest(ctx, "GET", endpoint, query, nil, resp)
}

func (d *Mediafire) postForm(ctx context.Context, endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
	return d.apiRequest(ctx, "POST", endpoint, nil, data, resp)
}

func (d *Mediafire) getDirectDownloadLink(ctx context.Context, fileID string) (string, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"quick_key":       fileID,
		"link_type":       "direct_download",
		"response_format": "json",
	}

	var resp MediafireDirectDownloadResponse
	_, err := d.getForm(ctx, "/file/get_links.php", data, &resp)
	if err != nil {
		return "", err
	}

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return "", err
	}

	if len(resp.Response.Links) == 0 {
		return "", fmt.Errorf("no download links found")
	}

	return resp.Response.Links[0].DirectDownload, nil
}

func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	query := map[string]string{
		"session_token":   actionToken, /* d.SessionToken */
		"filename":        filename,
		"size":            strconv.FormatInt(filesize, 10),
		"hash":            filehash,
		"folder_key":      folderKey,
		"resumable":       "yes",
		"response_format": "json",
	}

	var resp MediafireCheckResponse
	_, err = d.postForm(ctx, "/upload/check.php", query, &resp)
	if err != nil {
		return nil, err
	}

	// fmt.Printf("uploadCheck :: Raw response: %s\n", string(body))
	// fmt.Printf("uploadCheck :: Parsed response: %+v\n", resp)

	// fmt.Printf("uploadCheck :: ResumableUpload section: %+v\n", resp.Response.ResumableUpload)
	// fmt.Printf("uploadCheck :: Upload key specifically: '%s'\n", resp.Response.ResumableUpload.UploadKey)

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return &resp, nil
}

func (d *Mediafire) uploadUnits(ctx context.Context, file model.FileStreamer, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
	unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
	numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
	uploadKey := checkResp.Response.ResumableUpload.UploadKey

	stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
	intWords := make([]int, 0, len(stringWords))
	for _, word := range stringWords {
		if intWord, err := strconv.Atoi(word); err == nil {
			intWords = append(intWords, intWord)
		}
	}

	// Intelligent buffer sizing for large files
	bufferSize := int(unitSize)
	fileSize := file.GetSize()

	// Split in chunks
	if fileSize > d.ChunkSize*1024*1024 {

		// Large, use ChunkSize (default = 100MB)
		bufferSize = min(int(fileSize), int(d.ChunkSize)*1024*1024)
	} else if fileSize > 10*1024*1024 {
		// Medium, use full file size for concurrent access
		bufferSize = int(fileSize)
	}

	// Create stream section reader for efficient chunking
	ss, err := stream.NewStreamSectionReader(file, bufferSize, &up)
	if err != nil {
		return "", err
	}

	// Use the smaller of MediaFire's suggested unit count and the configured thread count;
	// for large files this effectively lets d.UploadThreads override MediaFire's suggestion (e.g. 5 threads)
	thread := min(numUnits, d.UploadThreads)

	// Create ordered group for sequential upload processing with retry logic
	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))

	var finalUploadKey string
	var keyMutex sync.Mutex

	fileSize = file.GetSize()
	for unitID := range numUnits {
		if utils.IsCanceled(uploadCtx) {
			break
		}

		start := int64(unitID) * unitSize
		size := unitSize
		if start+size > fileSize {
			size = fileSize - start
		}

		var reader io.ReadSeeker
		var rateLimitedRd io.Reader
		var unitHash string

		// Use lifecycle pattern for proper resource management
		threadG.GoWithLifecycle(errgroup.Lifecycle{
			Before: func(ctx context.Context) error {
				// Skip already uploaded units
				if d.isUnitUploaded(intWords, unitID) {
					return ss.DiscardSection(start, size)
				}

				var err error
				reader, err = ss.GetSectionReader(start, size)
				if err != nil {
					return err
				}
				rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
				return nil
			},
			Do: func(ctx context.Context) error {
				if reader == nil {
					return nil // Skip if reader is not initialized (already uploaded)
				}

				if unitHash == "" {
					reader.Seek(0, io.SeekStart)
					var err error
					unitHash, err = utils.HashReader(utils.SHA256, reader)
					if err != nil {
						return err
					}
				}
				reader.Seek(0, io.SeekStart)

				// Perform upload

				actionToken, err := d.getActionToken(ctx)
				if err != nil {
					return err
				}
				if d.limiter != nil {
					if err := d.limiter.Wait(ctx); err != nil {
						return fmt.Errorf("rate limit wait failed: %w", err)
					}
				}

				url := d.apiBase + "/upload/resumable.php"
				req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, rateLimitedRd)
				if err != nil {
					return err
				}

				q := req.URL.Query()
				q.Add("folder_key", folderKey)
				q.Add("response_format", "json")
				q.Add("session_token", actionToken)
				q.Add("key", uploadKey)
				req.URL.RawQuery = q.Encode()

				req.Header.Set("x-filehash", fileHash)
				req.Header.Set("x-filesize", strconv.FormatInt(fileSize, 10))
				req.Header.Set("x-unit-id", strconv.Itoa(unitID))
				req.Header.Set("x-unit-size", strconv.FormatInt(size, 10))
				req.Header.Set("x-unit-hash", unitHash)
				req.Header.Set("x-filename", filename)
				req.Header.Set("Content-Type", "application/octet-stream")
				req.ContentLength = size

				/* fmt.Printf("Debug resumable upload request:\n")
				fmt.Printf("  URL: %s\n", req.URL.String())
				fmt.Printf("  Headers: %+v\n", req.Header)
				fmt.Printf("  Unit ID: %d\n", unitID)
				fmt.Printf("  Unit Size: %d\n", len(unitData))
				fmt.Printf("  Upload Key: %s\n", uploadKey)
				fmt.Printf("  Action Token: %s\n", actionToken) */

				res, err := base.HttpClient.Do(req)
				if err != nil {
					return err
				}
				defer res.Body.Close()

				body, err := io.ReadAll(res.Body)
				if err != nil {
					return fmt.Errorf("failed to read response body: %v", err)
				}

				// fmt.Printf("MediaFire resumable upload response (status %d): %s\n", res.StatusCode, string(body))

				var uploadResp struct {
					Response struct {
						Doupload struct {
							Key string `json:"key"`
						} `json:"doupload"`
						Result string `json:"result"`
					} `json:"response"`
				}

				if err := json.Unmarshal(body, &uploadResp); err != nil {
					return fmt.Errorf("failed to parse response: %v", err)
				}

				if res.StatusCode != 200 {
					return fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
				}

				// Thread-safe update of final upload key
				keyMutex.Lock()
				finalUploadKey = uploadResp.Response.Doupload.Key
				keyMutex.Unlock()

				return nil
			},
			After: func(err error) {
				up(float64(threadG.Success()) * 100 / float64(numUnits))
				if reader != nil {
					// Cleanup resources
					ss.FreeSectionReader(reader)
				}
			},
		})
	}

	if err := threadG.Wait(); err != nil {
		return "", err
	}

	return finalUploadKey, nil
}
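
The per-unit boundary arithmetic above deserves a worked example: with a 25 MiB file and a 10 MiB unit_size from upload/check.php, the loop produces three sections of 10, 10, and 5 MiB. A standalone sketch:

package main

import "fmt"

func main() {
	fileSize := int64(25 << 20) // 25 MiB file
	unitSize := int64(10 << 20) // 10 MiB unit_size suggested by upload/check.php
	numUnits := int((fileSize + unitSize - 1) / unitSize)
	for unitID := 0; unitID < numUnits; unitID++ {
		start := int64(unitID) * unitSize
		size := unitSize
		if start+size > fileSize {
			size = fileSize - start // final unit shrinks to the 5 MiB remainder
		}
		fmt.Printf("unit %d: offset %d, length %d\n", unitID, start, size)
	}
}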

/*func (d *Mediafire) uploadSingleUnit(ctx context.Context, file model.FileStreamer, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string, fileSize int64) (string, error) {
	start := int64(unitID) * unitSize
	size := unitSize

	if start+size > fileSize {
		size = fileSize - start
	}

	unitData := make([]byte, size)
	_, err := file.Read(unitData)
	if err != nil {
		return "", err
	}

	return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
}*/

func (d *Mediafire) getActionToken(ctx context.Context) (string, error) {
	if d.actionToken != "" {
		return d.actionToken, nil
	}

	data := map[string]string{
		"type":            "upload",
		"lifespan":        "1440",
		"response_format": "json",
		"session_token":   d.SessionToken,
	}

	var resp MediafireActionTokenResponse
	_, err := d.postForm(ctx, "/user/get_action_token.php", data, &resp)
	if err != nil {
		return "", err
	}

	if resp.Response.Result != "Success" {
		return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
	}

	return resp.Response.ActionToken, nil
}

func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	// fmt.Printf("Debug Key: %+v\n", key)

	query := map[string]string{
		"key":             key,
		"response_format": "json",
		"session_token":   actionToken, /* d.SessionToken */
	}

	var resp MediafirePollResponse
	_, err = d.postForm(ctx, "/upload/poll_upload.php", query, &resp)
	if err != nil {
		return nil, err
	}

	// fmt.Printf("pollUpload :: Raw response: %s\n", string(body))
	// fmt.Printf("pollUpload :: Parsed response: %+v\n", resp)

	// fmt.Printf("pollUpload :: Debug Result: %+v\n", resp.Response.Result)

	if err := checkAPIResult(resp.Response.Result); err != nil {
		return nil, err
	}

	return &resp, nil
}

func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
	wordIndex := unitID / 16
	bitIndex := unitID % 16
	if wordIndex >= len(words) {
		return false
	}
	return (words[wordIndex]>>bitIndex)&1 == 1
}
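
The bitmap returned by upload/check.php packs unit completion flags 16 per word; a quick standalone illustration of the decode above:

package main

import "fmt"

// Mirrors isUnitUploaded: 16 completion bits per bitmap word.
func isUnitUploaded(words []int, unitID int) bool {
	wordIndex, bitIndex := unitID/16, unitID%16
	if wordIndex >= len(words) {
		return false
	}
	return (words[wordIndex]>>bitIndex)&1 == 1
}

func main() {
	words := []int{5} // binary 101: units 0 and 2 done, unit 1 missing
	for unit := 0; unit < 3; unit++ {
		fmt.Printf("unit %d uploaded: %v\n", unit, isUnitUploaded(words, unit))
	}
}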

func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
	// First try to find by hash directly (most efficient)
	if fileInfo, err := d.getFileByHash(ctx, fileHash); err == nil && fileInfo != nil {
		return fileInfo, nil
	}

	// If hash search fails, search in the target folder
	// This is a fallback method in case the file exists but hash search doesn't work
	files, err := d.getFiles(ctx, folderKey)
	if err != nil {
		return nil, err
	}

	for _, file := range files {
		if file.Name == filename && !file.IsFolder {
			return d.fileToObj(file), nil
		}
	}

	return nil, fmt.Errorf("existing file not found")
}

func (d *Mediafire) getFileByHash(ctx context.Context, hash string) (*model.ObjThumb, error) {
	query := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"hash":            hash,
	}

	var resp MediafireFileSearchResponse
	_, err := d.postForm(ctx, "/file/get_info.php", query, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire file search failed: %s", resp.Response.Result)
	}

	if len(resp.Response.FileInfo) == 0 {
		return nil, fmt.Errorf("file not found by hash")
	}

	file := resp.Response.FileInfo[0]
	return d.fileToObj(file), nil
}

@@ -207,4 +207,20 @@ func (d *Onedrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	return err
}

func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	if d.DisableDiskUsage {
		return nil, errs.NotImplement
	}
	drive, err := d.getDrive(ctx)
	if err != nil {
		return nil, err
	}
	return &model.StorageDetails{
		DiskUsage: model.DiskUsage{
			TotalSpace: drive.Quota.Total,
			FreeSpace:  drive.Quota.Remaining,
		},
	}, nil
}

var _ driver.Driver = (*Onedrive)(nil)

@@ -7,17 +7,18 @@ import (
|
||||
|
||||
type Addition struct {
|
||||
driver.RootPath
|
||||
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
|
||||
IsSharepoint bool `json:"is_sharepoint"`
|
||||
UseOnlineAPI bool `json:"use_online_api" default:"true"`
|
||||
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
SiteId string `json:"site_id"`
|
||||
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
|
||||
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
|
||||
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
|
||||
IsSharepoint bool `json:"is_sharepoint"`
|
||||
UseOnlineAPI bool `json:"use_online_api" default:"true"`
|
||||
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
SiteId string `json:"site_id"`
|
||||
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
|
||||
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
|
||||
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
|
||||
}
|
||||
|
||||
var config = driver.Config{

@@ -89,3 +89,15 @@ type FileSystemInfoFacet struct {
    CreatedDateTime      time.Time `json:"createdDateTime,omitempty"`      // The UTC date and time the file was created on a client.
    LastModifiedDateTime time.Time `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client.
}

type DriveResp struct {
    ID        string `json:"id"`
    DriveType string `json:"driveType"`
    Quota     struct {
        Deleted   uint64 `json:"deleted"`
        Remaining uint64 `json:"remaining"`
        State     string `json:"state"`
        Total     uint64 `json:"total"`
        Used      uint64 `json:"used"`
    } `json:"quota"`
}

@@ -295,3 +295,21 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
    }
    return nil
}

func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
    var api string
    host, _ := onedriveHostMap[d.Region]
    if d.IsSharepoint {
        api = fmt.Sprintf("%s/v1.0/sites/%s/drive", host.Api, d.SiteId)
    } else {
        api = fmt.Sprintf("%s/v1.0/me/drive", host.Api)
    }
    var resp DriveResp
    _, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}
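
getDrive targets the Microsoft Graph drive endpoints (`/me/drive`, or `/sites/{id}/drive` for SharePoint). A standalone sketch of the same call using only net/http, to show where Quota.Total and Quota.Remaining come from; the bearer token is a placeholder for what the refresh-token flow would produce, and error handling is reduced to panics.

// Sketch: fetching OneDrive quota straight from the Graph API.
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    req, _ := http.NewRequest(http.MethodGet, "https://graph.microsoft.com/v1.0/me/drive", nil)
    req.Header.Set("Authorization", "Bearer <access-token>") // placeholder token
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    var body struct {
        Quota struct {
            Total     uint64 `json:"total"`
            Used      uint64 `json:"used"`
            Remaining uint64 `json:"remaining"`
        } `json:"quota"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
        panic(err)
    }
    fmt.Printf("total=%d used=%d free=%d\n", body.Quota.Total, body.Quota.Used, body.Quota.Remaining)
}
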

@@ -206,4 +206,20 @@ func (d *OnedriveAPP) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
    return err
}

func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if d.DisableDiskUsage {
        return nil, errs.NotImplement
    }
    drive, err := d.getDrive(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: drive.Quota.Total,
            FreeSpace:  drive.Quota.Remaining,
        },
    }, nil
}

var _ driver.Driver = (*OnedriveAPP)(nil)

@@ -7,13 +7,14 @@ import (

type Addition struct {
    driver.RootPath
    Region       string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
    ClientID     string `json:"client_id" required:"true"`
    ClientSecret string `json:"client_secret" required:"true"`
    TenantID     string `json:"tenant_id"`
    Email        string `json:"email"`
    ChunkSize    int64  `json:"chunk_size" type:"number" default:"5"`
    CustomHost   string `json:"custom_host" help:"Custom host for onedrive download link"`
    Region           string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
    ClientID         string `json:"client_id" required:"true"`
    ClientSecret     string `json:"client_secret" required:"true"`
    TenantID         string `json:"tenant_id"`
    Email            string `json:"email"`
    ChunkSize        int64  `json:"chunk_size" type:"number" default:"5"`
    CustomHost       string `json:"custom_host" help:"Custom host for onedrive download link"`
    DisableDiskUsage bool   `json:"disable_disk_usage" default:"false"`
}

var config = driver.Config{

@@ -72,3 +72,15 @@ type Files struct {
    Value    []File `json:"value"`
    NextLink string `json:"@odata.nextLink"`
}

type DriveResp struct {
    ID        string `json:"id"`
    DriveType string `json:"driveType"`
    Quota     struct {
        Deleted   uint64 `json:"deleted"`
        Remaining uint64 `json:"remaining"`
        State     string `json:"state"`
        Total     uint64 `json:"total"`
        Used      uint64 `json:"used"`
    } `json:"quota"`
}

@@ -209,3 +209,16 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
    }
    return nil
}

func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
    host, _ := onedriveHostMap[d.Region]
    api := fmt.Sprintf("%s/v1.0/users/%s/drive", host.Api, d.Email)
    var resp DriveResp
    _, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

@@ -36,7 +36,6 @@ func (d *PikPak) GetAddition() driver.Additional {
}

func (d *PikPak) Init(ctx context.Context) (err error) {

    if d.Common == nil {
        d.Common = &Common{
            client: base.NewRestyClient(),
@@ -247,7 +246,7 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
    }

    params := resp.Resumable.Params
    //endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".")
    // endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".")
    // uploads from the web client return the endpoint `mypikpak.net`; uploads from the android client return `vip-lixian-07.mypikpak.net`
    if d.Addition.Platform == "android" {
        params.Endpoint = "mypikpak.net"
@@ -260,6 +259,27 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
    return d.UploadByMultipart(ctx, &params, stream.GetSize(), stream, up)
}

func (d *PikPak) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    var about AboutResponse
    _, err := d.request("https://api-drive.mypikpak.com/drive/v1/about", http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, &about)
    if err != nil {
        return nil, err
    }
    total, err := strconv.ParseUint(about.Quota.Limit, 10, 64)
    if err != nil {
        return nil, err
    }
    used, err := strconv.ParseUint(about.Quota.Usage, 10, 64)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
    }, nil
}

// offline download a file
func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir model.Obj, fileName string) (*OfflineTask, error) {
    requestBody := base.Json{
@@ -278,7 +298,6 @@ func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir
        req.SetContext(ctx).
            SetBody(requestBody)
    }, &resp)

    if err != nil {
        return nil, err
    }
@@ -325,7 +344,6 @@ func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase []
        req.SetContext(ctx).
            SetQueryParams(params)
    }, &resp)

    if err != nil {
        return nil, fmt.Errorf("failed to get offline list: %w", err)
    }
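
Note that PikPak's about endpoint reports quota values as decimal strings, which is why GetDetails runs them through strconv.ParseUint before handing them to driver.DiskUsageFromUsedAndTotal. A tiny self-contained sketch with illustrative values:

// Sketch: parsing string quotas as returned by PikPak's /drive/v1/about.
package main

import (
    "fmt"
    "strconv"
)

func main() {
    limit, usage := "6597069766656", "1099511627776" // 6 TiB and 1 TiB, as strings
    total, err := strconv.ParseUint(limit, 10, 64)
    if err != nil {
        panic(err)
    }
    used, err := strconv.ParseUint(usage, 10, 64)
    if err != nil {
        panic(err)
    }
    fmt.Printf("used %d of %d bytes (%.1f%%)\n", used, total, float64(used)/float64(total)*100)
}
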

@@ -78,7 +78,7 @@ type Media struct {

type UploadTaskData struct {
    UploadType string `json:"upload_type"`
    //UPLOAD_TYPE_RESUMABLE
    // UPLOAD_TYPE_RESUMABLE
    Resumable *struct {
        Kind   string   `json:"kind"`
        Params S3Params `json:"params"`
@@ -195,3 +195,15 @@ type CaptchaTokenResponse struct {
    ExpiresIn int64  `json:"expires_in"`
    Url       string `json:"url"`
}

type AboutResponse struct {
    Quota struct {
        Limit         string `json:"limit"`
        Usage         string `json:"usage"`
        UsageInTrash  string `json:"usage_in_trash"`
        IsUnlimited   bool   `json:"is_unlimited"`
        Complimentary string `json:"complimentary"`
    } `json:"quota"`
    ExpiresAt string `json:"expires_at"`
    UserType  int    `json:"user_type"`
}

@@ -194,7 +194,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
        log.Debugf("left: %d", left)
        reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part))
        m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader)
        //m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
        // m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
        if err != nil {
            return err
        }
@@ -212,4 +212,17 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
    return d.upFinish(pre)
}

func (d *QuarkOrUC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    memberInfo, err := d.memberInfo(ctx)
    if err != nil {
        return nil, err
    }
    return &model.StorageDetails{
        DiskUsage: model.DiskUsage{
            TotalSpace: memberInfo.Data.TotalCapacity,
            FreeSpace:  memberInfo.Data.TotalCapacity - memberInfo.Data.UseCapacity,
        },
    }, nil
}

var _ driver.Driver = (*QuarkOrUC)(nil)

@@ -12,8 +12,8 @@ type Resp struct {
    Status  int    `json:"status"`
    Code    int    `json:"code"`
    Message string `json:"message"`
    //ReqId string `json:"req_id"`
    //Timestamp int `json:"timestamp"`
    // ReqId string `json:"req_id"`
    // Timestamp int `json:"timestamp"`
}

var _ model.Obj = (*File)(nil)
@@ -21,27 +21,27 @@ var _ model.Obj = (*File)(nil)
type File struct {
    Fid      string `json:"fid"`
    FileName string `json:"file_name"`
    //PdirFid string `json:"pdir_fid"`
    // PdirFid string `json:"pdir_fid"`
    Category int `json:"category"`
    //FileType int `json:"file_type"`
    // FileType int `json:"file_type"`
    Size int64 `json:"size"`
    //FormatType string `json:"format_type"`
    //Status int `json:"status"`
    //Tags string `json:"tags,omitempty"`
    // FormatType string `json:"format_type"`
    // Status int `json:"status"`
    // Tags string `json:"tags,omitempty"`
    LCreatedAt int64 `json:"l_created_at"`
    LUpdatedAt int64 `json:"l_updated_at"`
    //NameSpace int `json:"name_space"`
    //IncludeItems int `json:"include_items,omitempty"`
    //RiskType int `json:"risk_type"`
    //BackupSign int `json:"backup_sign"`
    //Duration int `json:"duration"`
    //FileSource string `json:"file_source"`
    // NameSpace int `json:"name_space"`
    // IncludeItems int `json:"include_items,omitempty"`
    // RiskType int `json:"risk_type"`
    // BackupSign int `json:"backup_sign"`
    // Duration int `json:"duration"`
    // FileSource string `json:"file_source"`
    File      bool  `json:"file"`
    CreatedAt int64 `json:"created_at"`
    UpdatedAt int64 `json:"updated_at"`
    //PrivateExtra struct {} `json:"_private_extra"`
    //ObjCategory string `json:"obj_category,omitempty"`
    //Thumbnail string `json:"thumbnail,omitempty"`
    // PrivateExtra struct {} `json:"_private_extra"`
    // ObjCategory string `json:"obj_category,omitempty"`
    // Thumbnail string `json:"thumbnail,omitempty"`
}

func fileToObj(f File) *model.Object {
@@ -104,19 +104,19 @@ type SortResp struct {
type DownResp struct {
    Resp
    Data []struct {
        //Fid string `json:"fid"`
        //FileName string `json:"file_name"`
        //PdirFid string `json:"pdir_fid"`
        //Category int `json:"category"`
        //FileType int `json:"file_type"`
        //Size int `json:"size"`
        //FormatType string `json:"format_type"`
        //Status int `json:"status"`
        //Tags string `json:"tags"`
        //LCreatedAt int64 `json:"l_created_at"`
        //LUpdatedAt int64 `json:"l_updated_at"`
        //NameSpace int `json:"name_space"`
        //Thumbnail string `json:"thumbnail"`
        // Fid string `json:"fid"`
        // FileName string `json:"file_name"`
        // PdirFid string `json:"pdir_fid"`
        // Category int `json:"category"`
        // FileType int `json:"file_type"`
        // Size int `json:"size"`
        // FormatType string `json:"format_type"`
        // Status int `json:"status"`
        // Tags string `json:"tags"`
        // LCreatedAt int64 `json:"l_created_at"`
        // LUpdatedAt int64 `json:"l_updated_at"`
        // NameSpace int `json:"name_space"`
        // Thumbnail string `json:"thumbnail"`
        DownloadUrl string `json:"download_url"`
        //Md5 string `json:"md5"`
        //RiskType int `json:"risk_type"`
@@ -168,14 +168,14 @@ type TranscodingResp struct {
            Resoultion string `json:"resoultion"`
            Success    bool   `json:"success"`
        } `json:"video_info,omitempty"`
        //Right string `json:"right"`
        //MemberRight string `json:"member_right"`
        //TransStatus string `json:"trans_status"`
        //Accessable bool `json:"accessable"`
        //SupportsFormat string `json:"supports_format"`
        //VideoFuncType string `json:"video_func_type,omitempty"`
        // Right string `json:"right"`
        // MemberRight string `json:"member_right"`
        // TransStatus string `json:"trans_status"`
        // Accessable bool `json:"accessable"`
        // SupportsFormat string `json:"supports_format"`
        // VideoFuncType string `json:"video_func_type,omitempty"`
    } `json:"video_list"`
    //AudioList []interface{} `json:"audio_list"`
    // AudioList []interface{} `json:"audio_list"`
    FileName  string `json:"file_name"`
    NameSpace int    `json:"name_space"`
    Size      int64  `json:"size"`
@@ -247,8 +247,7 @@ type HashResp struct {
        Thumbnail  string `json:"thumbnail"`
        FormatType string `json:"format_type"`
    } `json:"data"`
    Metadata struct {
    } `json:"metadata"`
    Metadata struct{} `json:"metadata"`
}

type UpAuthResp struct {
@@ -258,6 +257,28 @@ type UpAuthResp struct {
        Speed   int           `json:"speed"`
        Headers []interface{} `json:"headers"`
    } `json:"data"`
    Metadata struct{} `json:"metadata"`
}

type MemberResp struct {
    Resp
    Data struct {
        MemberType        string `json:"member_type"`
        CreatedAt         uint64 `json:"created_at"`
        SecretUseCapacity uint64 `json:"secret_use_capacity"`
        UseCapacity       uint64 `json:"use_capacity"`
        IsNewUser         bool   `json:"is_new_user"`
        MemberStatus      struct {
            Vip      string `json:"VIP"`
            ZVip     string `json:"Z_VIP"`
            MiniVip  string `json:"MINI_VIP"`
            SuperVip string `json:"SUPER_VIP"`
        } `json:"member_status"`
        SecretTotalCapacity uint64 `json:"secret_total_capacity"`
        TotalCapacity       uint64 `json:"total_capacity"`
    } `json:"data"`
    Metadata struct {
        RangeSize     int    `json:"range_size"`
        ServerCurTime uint64 `json:"server_cur_time"`
    } `json:"metadata"`
}

@@ -198,7 +198,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) {
}

func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) {
    //func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
    // func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
    timeStr := time.Now().UTC().Format(http.TimeFormat)
    data := base.Json{
        "auth_info": pre.Data.AuthInfo,
@@ -334,3 +334,20 @@ func (d *QuarkOrUC) upFinish(pre UpPreResp) error {
    time.Sleep(time.Second)
    return nil
}

func (d *QuarkOrUC) memberInfo(ctx context.Context) (*MemberResp, error) {
    var resp MemberResp
    query := map[string]string{
        "fetch_subscribe": "false",
        "_ch":             "home",
        "fetch_identity":  "false",
    }
    _, err := d.request("/member", http.MethodGet, func(req *resty.Request) {
        req.SetQueryParams(query)
        req.SetContext(ctx)
    }, &resp)
    if err != nil {
        return nil, err
    }
    return &resp, nil
}

@@ -11,7 +11,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/internal/stream"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"

    "github.com/hirochachacha/go-smb2"
    "github.com/cloudsoda/go-smb2"
)

type SMB struct {
@@ -33,7 +33,7 @@ func (d *SMB) Init(ctx context.Context) error {
    if !strings.Contains(d.Addition.Address, ":") {
        d.Addition.Address = d.Addition.Address + ":445"
    }
    return d._initFS()
    return d._initFS(ctx)
}

func (d *SMB) Drop(ctx context.Context) error {
@@ -44,7 +44,7 @@ func (d *SMB) Drop(ctx context.Context) error {
}

func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return nil, err
    }
    fullPath := dir.GetPath()
@@ -71,7 +71,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
}

func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return nil, err
    }
    fullPath := file.GetPath()
@@ -99,7 +99,7 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
}

func (d *SMB) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return err
    }
    fullPath := filepath.Join(parentDir.GetPath(), dirName)
@@ -113,7 +113,7 @@ func (d *SMB) MakeDir(ctx context.Context, parentDir model.Obj, dirName string)
}

func (d *SMB) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return err
    }
    srcPath := srcObj.GetPath()
@@ -128,7 +128,7 @@ func (d *SMB) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}

func (d *SMB) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return err
    }
    srcPath := srcObj.GetPath()
@@ -143,7 +143,7 @@ func (d *SMB) Rename(ctx context.Context, srcObj model.Obj, newName string) erro
}

func (d *SMB) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return err
    }
    srcPath := srcObj.GetPath()
@@ -163,7 +163,7 @@ func (d *SMB) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
}

func (d *SMB) Remove(ctx context.Context, obj model.Obj) error {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return err
    }
    var err error
@@ -182,7 +182,7 @@ func (d *SMB) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return err
    }
    fullPath := filepath.Join(dstDir.GetPath(), stream.GetName())
@@ -206,7 +206,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
}

func (d *SMB) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
    if err := d.checkConn(); err != nil {
    if err := d.checkConn(ctx); err != nil {
        return nil, err
    }
    stat, err := d.fs.Statfs(d.RootFolderPath)

@@ -1,9 +1,9 @@
package smb

import (
    "context"
    "fmt"
    "io/fs"
    "net"
    "os"
    "path/filepath"
    "sync/atomic"
@@ -12,7 +12,7 @@ import (
    "github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"

    "github.com/hirochachacha/go-smb2"
    "github.com/cloudsoda/go-smb2"
)

func (d *SMB) updateLastConnTime() {
@@ -27,24 +27,20 @@ func (d *SMB) getLastConnTime() time.Time {
    return time.Unix(atomic.LoadInt64(&d.lastConnTime), 0)
}

func (d *SMB) initFS() error {
func (d *SMB) initFS(ctx context.Context) error {
    _, err, _ := singleflight.AnyGroup.Do(fmt.Sprintf("SMB.initFS:%p", d), func() (any, error) {
        return nil, d._initFS()
        return nil, d._initFS(ctx)
    })
    return err
}
func (d *SMB) _initFS() error {
    conn, err := net.Dial("tcp", d.Address)
    if err != nil {
        return err
    }
func (d *SMB) _initFS(ctx context.Context) error {
    dialer := &smb2.Dialer{
        Initiator: &smb2.NTLMInitiator{
            User:     d.Username,
            Password: d.Password,
        },
    }
    s, err := dialer.Dial(conn)
    s, err := dialer.Dial(ctx, d.Address)
    if err != nil {
        return err
    }
@@ -56,14 +52,14 @@ func (d *SMB) _initFS() error {
    return err
}

func (d *SMB) checkConn() error {
func (d *SMB) checkConn(ctx context.Context) error {
    if time.Since(d.getLastConnTime()) < 5*time.Minute {
        return nil
    }
    if d.fs != nil {
        _ = d.fs.Umount()
    }
    return d.initFS()
    return d.initFS(ctx)
}

// CopyFile File copies a single file from src to dst
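
The hunk above is the substance of the hirochachacha → cloudsoda go-smb2 migration: the fork dials TCP itself and accepts a context, so the explicit net.Dial step disappears and connection setup becomes cancellable. A minimal connection sketch follows; host, share, and credentials are placeholders, and the Mount/Logoff/Umount calls are assumed to match the upstream go-smb2 API, which the fork appears to keep.

// Sketch: context-aware SMB connection with cloudsoda/go-smb2.
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/cloudsoda/go-smb2"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    dialer := &smb2.Dialer{
        Initiator: &smb2.NTLMInitiator{
            User:     "guest", // placeholder credentials
            Password: "",
        },
    }
    // One call replaces net.Dial + dialer.Dial(conn); the context bounds it.
    session, err := dialer.Dial(ctx, "127.0.0.1:445")
    if err != nil {
        panic(err)
    }
    defer session.Logoff()

    share, err := session.Mount("public") // placeholder share name
    if err != nil {
        panic(err)
    }
    defer share.Umount()
    fmt.Println("mounted share")
}
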

@@ -108,7 +108,7 @@ func (d *Strm) Get(ctx context.Context, path string) (model.Obj, error) {
        if err != nil {
            continue
        }
        // fs.Get returned no error, so this is not a strm-generated path; return it directly
        // fs.Get returned no error, so this is not a path mapped by the strm driver; return it directly
        size := int64(0)
        if !obj.IsDir() {
            size = obj.GetSize()
@@ -123,6 +123,11 @@ func (d *Strm) Get(ctx context.Context, path string) (model.Obj, error) {
            HashInfo: obj.GetHash(),
        }, nil
    }
    if strings.HasSuffix(path, ".strm") {
        // none of the fs.Get calls above found the path and it ends with .strm;
        // return errs.NotSupport so op.Get falls back to searching via op.List
        return nil, errs.NotSupport
    }
    return nil, errs.ObjectNotFound
}

@@ -2,6 +2,7 @@ package teldrive

import (
    "context"
    "io"
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -50,8 +51,8 @@ type chunkTask struct {
    chunkIdx  int
    fileName  string
    chunkSize int64
    reader *stream.SectionReader
    ss     *stream.StreamSectionReader
    reader io.ReadSeeker
    ss     stream.StreamSectionReaderIF
}

type CopyManager struct {

go.mod
@@ -22,6 +22,7 @@ require (
    github.com/charmbracelet/bubbletea v1.3.6
    github.com/charmbracelet/lipgloss v1.1.0
    github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e
    github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc
    github.com/coreos/go-oidc v2.3.0+incompatible
    github.com/deckarep/golang-set/v2 v2.8.0
    github.com/dhowden/tag v0.0.0-20240417053706-3d75831295e8
@@ -38,8 +39,8 @@ require (
    github.com/golang-jwt/jwt/v4 v4.5.2
    github.com/google/uuid v1.6.0
    github.com/gorilla/websocket v1.5.3
    github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499
    github.com/hekmon/transmissionrpc/v3 v3.0.0
    github.com/hirochachacha/go-smb2 v1.1.0
    github.com/ipfs/go-ipfs-api v0.7.0
    github.com/itsHenry35/gofakes3 v0.0.8
    github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
@@ -53,6 +54,7 @@ require (
    github.com/pkg/errors v0.9.1
    github.com/pkg/sftp v1.13.9
    github.com/pquerna/otp v1.5.0
    github.com/quic-go/quic-go v0.54.1
    github.com/rclone/rclone v1.70.3
    github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
    github.com/shirou/gopsutil/v4 v4.25.5
@@ -87,16 +89,27 @@ require (
    github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect
    github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
    github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
    github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/ebitengine/purego v0.8.4 // indirect
    github.com/geoffgarside/ber v1.2.0 // indirect
    github.com/hashicorp/go-uuid v1.0.3 // indirect
    github.com/jcmturner/aescts/v2 v2.0.0 // indirect
    github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
    github.com/jcmturner/gofork v1.7.6 // indirect
    github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
    github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
    github.com/jcmturner/rpc/v2 v2.0.3 // indirect
    github.com/lanrat/extsort v1.0.2 // indirect
    github.com/mikelolasagasti/xz v1.0.1 // indirect
    github.com/minio/minlz v1.0.0 // indirect
    github.com/minio/xxml v0.0.3 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/otiai10/mint v1.6.3 // indirect
    github.com/quic-go/qpack v0.5.1 // indirect
    github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
    go.uber.org/mock v0.5.0 // indirect
    golang.org/x/mod v0.27.0 // indirect
    gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)

@@ -172,7 +185,6 @@ require (
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
    github.com/fxamacker/cbor/v2 v2.9.0 // indirect
    github.com/gabriel-vasile/mimetype v1.4.9 // indirect
    github.com/geoffgarside/ber v1.2.0 // indirect
    github.com/gin-contrib/sse v1.1.0 // indirect
    github.com/go-chi/chi/v5 v5.2.2 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
@@ -262,7 +274,8 @@ require (
    golang.org/x/sys v0.34.0
    golang.org/x/term v0.33.0 // indirect
    golang.org/x/text v0.27.0
    golang.org/x/tools v0.34.0 // indirect
    golang.org/x/tools v0.35.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
    google.golang.org/grpc v1.73.0
    google.golang.org/protobuf v1.36.6 // indirect
    gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect

go.sum
@@ -202,6 +202,10 @@ github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg=
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc/go.mod h1:CgWpFCFWzzEA5hVkhAc6DZZzGd3czx+BblvOzjmg6KA=
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc h1:0xCWmFKBmarCqqqLeM7jFBSw/Or81UEElFqO8MY+GDs=
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc/go.mod h1:uvR42Hb/t52HQd7x5/ZLzZEK8oihrFpgnodIJ1vte2E=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
@@ -256,7 +260,6 @@ github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sa
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/geoffgarside/ber v1.2.0 h1:/loowoRcs/MWLYmGX9QtIAbA+V/FrnVLsMMPhwiRm64=
github.com/geoffgarside/ber v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/gin-contrib/cors v1.7.6 h1:3gQ8GMzs1Ylpf70y8bMw4fVpycXIeX1ZemuSQIsnQQY=
@@ -357,8 +360,16 @@ github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261 h1:47L8SHM80cXszQydLrpp9MhVkFLLWCvrU9XmJ6XtRu0=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006100629-ba7a40dce261/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499 h1:4ovnBdiGDFi8putQGxhipuuhXItAgh4/YnzufPYkZkQ=
github.com/halalcloud/golang-sdk-lite v0.0.0-20251006164234-3c629727c499/go.mod h1:8x1h4rm3s8xMcTyJrq848sQ6BJnKzl57mDY4CNshdPM=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -366,6 +377,9 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -377,8 +391,6 @@ github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -398,6 +410,18 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -567,6 +591,10 @@ github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQP
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg=
github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
github.com/rclone/rclone v1.70.3 h1:rg/WNh4DmSVZyKP2tHZ4lAaWEyMi7h/F0r7smOMA3IE=
github.com/rclone/rclone v1.70.3/go.mod h1:nLyN+hpxAsQn9Rgt5kM774lcRDad82x/KqQeBZ83cMo=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
@@ -686,6 +714,8 @@ go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5J
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
@@ -697,8 +727,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
@@ -740,6 +770,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -752,6 +784,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -878,8 +911,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -114,7 +114,9 @@ func InitialSettings() []model.SettingItem {
    {Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE},
    {Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE},
    {Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE},
    {Key: conf.HideStorageDetails, Value: "false", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE},
    {Key: conf.HideStorageDetails, Value: "true", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE},
    {Key: conf.HideStorageDetailsInManagePage, Value: "true", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE},
    {Key: "show_disk_usage_in_plain_text", Value: "false", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PUBLIC},
    // preview settings
    {Key: conf.TextTypes, Value: "txt,htm,html,xml,java,properties,sql,js,md,json,conf,ini,vue,php,py,bat,gitignore,yml,go,sh,c,cpp,h,hpp,tsx,vtt,srt,ass,rs,lrc,strm", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
    {Key: conf.AudioTypes, Value: "mp3,flac,ogg,m4a,wav,opus,wma", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
@@ -146,7 +148,10 @@ func InitialSettings() []model.SettingItem {
    {Key: "audio_cover", Value: "https://res.oplist.org/logo/logo.svg", MigrationValue: "https://cdn.oplist.org/gh/OpenListTeam/Logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW},
    {Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.PreviewDownloadByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.SharePreviewDownloadByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.SharePreviewArchivesByDefault, Value: "false", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    {Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
    // global settings
@@ -156,10 +161,12 @@ func InitialSettings() []model.SettingItem {
    {Key: conf.CustomizeBody, Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PRIVATE},
    {Key: conf.LinkExpiration, Value: "0", Type: conf.TypeNumber, Group: model.GLOBAL, Flag: model.PRIVATE},
    {Key: conf.SignAll, Value: "true", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PRIVATE},
    {Key: conf.PrivacyRegs, Value: `(?:(?:\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.){3}(?:\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])
    {
        Key: conf.PrivacyRegs, Value: `(?:(?:\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.){3}(?:\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])
([[:xdigit:]]{1,4}(?::[[:xdigit:]]{1,4}){7}|::|:(?::[[:xdigit:]]{1,4}){1,6}|[[:xdigit:]]{1,4}:(?::[[:xdigit:]]{1,4}){1,5}|(?:[[:xdigit:]]{1,4}:){2}(?::[[:xdigit:]]{1,4}){1,4}|(?:[[:xdigit:]]{1,4}:){3}(?::[[:xdigit:]]{1,4}){1,3}|(?:[[:xdigit:]]{1,4}:){4}(?::[[:xdigit:]]{1,4}){1,2}|(?:[[:xdigit:]]{1,4}:){5}:[[:xdigit:]]{1,4}|(?:[[:xdigit:]]{1,4}:){1,6}:)
(?U)access_token=(.*)&`,
        Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PRIVATE},
        Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PRIVATE,
    },
    {Key: conf.OcrApi, Value: "https://openlistteam-ocr-api-server.hf.space/ocr/file/json", MigrationValue: "https://api.example.com/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL}, // TODO: This can be replaced by a community-hosted endpoint, see https://github.com/OpenListTeam/ocr_api_server
    {Key: conf.FilenameCharMapping, Value: `{"/": "|"}`, Type: conf.TypeText, Group: model.GLOBAL},
    {Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL},
@@ -213,12 +220,11 @@ func InitialSettings() []model.SettingItem {
    // ftp settings
    {Key: conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
    {Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE},
    {Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " +
        "Chrome/87.0.4280.88 Safari/537.36", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
    {Key: conf.FTPMandatoryTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
    {Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
    {Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
    {Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
    {Key: conf.SFTPDisablePasswordLogin, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},

    // traffic settings
    {Key: conf.TaskOfflineDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Download.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},

@@ -35,6 +35,7 @@ type Scheme struct {
    UnixFile     string `json:"unix_file" env:"UNIX_FILE"`
    UnixFilePerm string `json:"unix_file_perm" env:"UNIX_FILE_PERM"`
    EnableH2c    bool   `json:"enable_h2c" env:"ENABLE_H2C"`
    EnableH3     bool   `json:"enable_h3" env:"ENABLE_H3"`
}

type LogConfig struct {

@@ -17,23 +17,27 @@ const (
    AllowMounted = "allow_mounted"
    RobotsTxt    = "robots_txt"

    Logo = "logo" // multi-lines text, L1: light, EOL: dark
    Favicon = "favicon"
    MainColor = "main_color"
    HideStorageDetails = "hide_storage_details"
    Logo                           = "logo" // multi-lines text, L1: light, EOL: dark
    Favicon                        = "favicon"
    MainColor                      = "main_color"
    HideStorageDetails             = "hide_storage_details"
    HideStorageDetailsInManagePage = "hide_storage_details_in_manage_page"

    // preview
    TextTypes = "text_types"
    AudioTypes = "audio_types"
    VideoTypes = "video_types"
    ImageTypes = "image_types"
    ProxyTypes = "proxy_types"
    ProxyIgnoreHeaders = "proxy_ignore_headers"
    AudioAutoplay = "audio_autoplay"
    VideoAutoplay = "video_autoplay"
    PreviewArchivesByDefault = "preview_archives_by_default"
    ReadMeAutoRender = "readme_autorender"
    FilterReadMeScripts = "filter_readme_scripts"
    TextTypes                     = "text_types"
    AudioTypes                    = "audio_types"
    VideoTypes                    = "video_types"
    ImageTypes                    = "image_types"
    ProxyTypes                    = "proxy_types"
    ProxyIgnoreHeaders            = "proxy_ignore_headers"
    AudioAutoplay                 = "audio_autoplay"
    VideoAutoplay                 = "video_autoplay"
    PreviewDownloadByDefault      = "preview_download_by_default"
    PreviewArchivesByDefault      = "preview_archives_by_default"
    SharePreviewDownloadByDefault = "share_preview_download_by_default"
    SharePreviewArchivesByDefault = "share_preview_archives_by_default"
    ReadMeAutoRender              = "readme_autorender"
    FilterReadMeScripts           = "filter_readme_scripts"

    // global
    HideFiles = "hide_files"
@@ -124,14 +128,18 @@ const (
    QbittorrentUrl      = "qbittorrent_url"
    QbittorrentSeedtime = "qbittorrent_seedtime"

    // 123 open offline download
    Pan123OpenOfflineDownloadCallbackUrl = "123_open_callback_url"
    Pan123OpenTempDir                    = "123_open_temp_dir"

    // ftp
    FTPPublicHost = "ftp_public_host"
    FTPPasvPortMap = "ftp_pasv_port_map"
    FTPProxyUserAgent = "ftp_proxy_user_agent"
    FTPMandatoryTLS = "ftp_mandatory_tls"
    FTPImplicitTLS = "ftp_implicit_tls"
    FTPTLSPrivateKeyPath = "ftp_tls_private_key_path"
    FTPTLSPublicCertPath = "ftp_tls_public_cert_path"
    FTPPublicHost            = "ftp_public_host"
    FTPPasvPortMap           = "ftp_pasv_port_map"
    FTPMandatoryTLS          = "ftp_mandatory_tls"
    FTPImplicitTLS           = "ftp_implicit_tls"
    FTPTLSPrivateKeyPath     = "ftp_tls_private_key_path"
    FTPTLSPublicCertPath     = "ftp_tls_public_cert_path"
    SFTPDisablePasswordLogin = "sftp_disable_password_login"

    // traffic
    TaskOfflineDownloadThreadsNum = "offline_download_task_threads_num"

@@ -9,8 +9,8 @@ import (
type Driver interface {
    Meta
    Reader
    //Writer
    //Other
    // Writer
    // Other
}

type Meta interface {

@@ -20,7 +20,7 @@ func (p *Progress) Write(b []byte) (n int, err error) {
    n = len(b)
    p.Done += int64(n)
    p.up(float64(p.Done) / float64(p.Total) * 100)
    return
    return n, err
}

func NewProgress(total int64, up UpdateProgress) *Progress {
@@ -61,3 +61,10 @@ type ReaderWithCtx = stream.ReaderWithCtx
type ReaderUpdatingProgress = stream.ReaderUpdatingProgress

type SimpleReaderWithSize = stream.SimpleReaderWithSize

func DiskUsageFromUsedAndTotal(used, total uint64) model.DiskUsage {
    return model.DiskUsage{
        TotalSpace: max(used, total),
        FreeSpace:  total - min(used, total),
    }
}
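
Why the new helper clamps with max/min (Go 1.21+ builtins): on uint64, naive subtraction of used from total would wrap around to an enormous "free" value whenever a backend reports used greater than total, which string-typed quota APIs occasionally do. A minimal sketch with illustrative numbers, mirroring the helper's arithmetic:

// Sketch: clamped disk-usage arithmetic versus uint64 wraparound.
package main

import "fmt"

func diskUsage(used, total uint64) (totalSpace, freeSpace uint64) {
    // Same expressions as DiskUsageFromUsedAndTotal above.
    return max(used, total), total - min(used, total)
}

func main() {
    t, f := diskUsage(100, 1000)
    fmt.Println(t, f) // 1000 900

    // Inconsistent backend report: used exceeds total.
    t, f = diskUsage(1200, 1000)
    fmt.Println(t, f) // 1200 0 — not a wrapped-around 18-quintillion "free" bytes
}
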

@@ -12,13 +12,12 @@ var (
    NotSupport   = errors.New("not support")
    RelativePath = errors.New("using relative path is not allowed")

    MoveBetweenTwoStorages = errors.New("can't move files between two storages, try to copy")
    UploadNotSupported = errors.New("upload not supported")

    MetaNotFound     = errors.New("meta not found")
    StorageNotFound  = errors.New("storage not found")
    StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue")
    StreamPeekFail   = errors.New("StreamPeekFail")
    UploadNotSupported = errors.New("upload not supported")
    MetaNotFound       = errors.New("meta not found")
    StorageNotFound    = errors.New("storage not found")
    StorageNotInit     = errors.New("storage not init")
    StreamIncomplete   = errors.New("upload/download stream incomplete, possible network issue")
    StreamPeekFail     = errors.New("StreamPeekFail")

    UnknownArchiveFormat = errors.New("unknown archive format")
    WrongArchivePassword = errors.New("wrong archive password")
@@ -42,6 +41,6 @@ func IsNotFoundError(err error) bool {
func IsNotSupportError(err error) bool {
    return errors.Is(pkgerr.Cause(err), NotSupport)
}
func IsNotImplement(err error) bool {
func IsNotImplementError(err error) bool {
    return errors.Is(pkgerr.Cause(err), NotImplement)
}

internal/errs/unwrap.go (new file)
@@ -0,0 +1,12 @@
package errs

import "errors"

func UnwrapOrSelf(err error) error {
    // errors.Unwrap has no fallback mechanism
    unwrapped := errors.Unwrap(err)
    if unwrapped == nil {
        return err
    }
    return unwrapped
}
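
The point of UnwrapOrSelf: errors.Unwrap returns nil for an error that was never wrapped, so a type assertion on its result silently misses unwrapped errors — exactly the bug the ServeHTTP hunks below fix. A self-contained sketch (StatusError is an illustrative stand-in for the server's HttpStatusCodeError):

// Sketch: why unwrap-with-fallback matters for status-code extraction.
package main

import (
    "errors"
    "fmt"
)

type StatusError int

func (s StatusError) Error() string { return fmt.Sprintf("status %d", int(s)) }

func unwrapOrSelf(err error) error {
    if unwrapped := errors.Unwrap(err); unwrapped != nil {
        return unwrapped
    }
    return err
}

func main() {
    wrapped := fmt.Errorf("range read: %w", StatusError(416))
    bare := error(StatusError(404))

    for _, err := range []error{wrapped, bare} {
        if s, ok := unwrapOrSelf(err).(StatusError); ok {
            fmt.Println("got status", int(s))
        }
    }
    // errors.Unwrap(bare) would be nil, so asserting on it loses the 404.
}
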
@@ -24,8 +24,9 @@ func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error)
    var _objs []model.Obj
    if storage != nil {
        _objs, err = op.List(ctx, storage, actualPath, model.ListArgs{
            ReqPath: path,
            Refresh: args.Refresh,
            ReqPath:            path,
            Refresh:            args.Refresh,
            WithStorageDetails: args.WithStorageDetails,
        })
        if err != nil {
            if !args.NoLog {

@@ -11,9 +11,10 @@ import (
)

type ListArgs struct {
    ReqPath string
    S3ShowPlaceholder bool
    Refresh bool
    ReqPath            string
    S3ShowPlaceholder  bool
    Refresh            bool
    WithStorageDetails bool
}

type LinkArgs struct {

@@ -32,7 +32,7 @@ type Proxy struct {
    WebdavPolicy string `json:"webdav_policy"`
    ProxyRange   bool   `json:"proxy_range"`
    DownProxyURL string `json:"down_proxy_url"`
    //Disable sign for DownProxyURL
    // Disable sign for DownProxyURL
    DisableProxySign bool `json:"disable_proxy_sign"`
}

@@ -14,6 +14,7 @@ import (
    "time"

    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
    "github.com/OpenListTeam/OpenList/v4/pkg/utils"
@@ -114,7 +115,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
    reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
    if err != nil {
        code = http.StatusRequestedRangeNotSatisfiable
        if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
        if statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError); ok {
            code = int(statusCode)
        }
        http.Error(w, err.Error(), code)
@@ -137,7 +138,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
    sendContent, err = RangeReadCloser.RangeRead(ctx, ra)
    if err != nil {
        code = http.StatusRequestedRangeNotSatisfiable
        if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
        if statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError); ok {
            code = int(statusCode)
        }
        http.Error(w, err.Error(), code)
@@ -199,7 +200,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
        log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
    }
    code = http.StatusInternalServerError
    if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok {
    if statusCode, ok := errs.UnwrapOrSelf(err).(HttpStatusCodeError); ok {
        code = int(statusCode)
    }
    w.WriteHeader(code)

internal/offline_download/123_open/client.go (new file)
@@ -0,0 +1,119 @@
package _123_open

import (
    "context"
    "fmt"
    "strconv"

    _123_open "github.com/OpenListTeam/OpenList/v4/drivers/123_open"
    "github.com/OpenListTeam/OpenList/v4/internal/conf"
    "github.com/OpenListTeam/OpenList/v4/internal/errs"
    "github.com/OpenListTeam/OpenList/v4/internal/model"
    "github.com/OpenListTeam/OpenList/v4/internal/offline_download/tool"
    "github.com/OpenListTeam/OpenList/v4/internal/op"
    "github.com/OpenListTeam/OpenList/v4/internal/setting"
)

type Open123 struct{}

func (*Open123) Name() string {
    return "123 Open"
}

func (*Open123) Items() []model.SettingItem {
    return nil
}

func (*Open123) Run(_ *tool.DownloadTask) error {
    return errs.NotSupport
}

func (*Open123) Init() (string, error) {
    return "ok", nil
}

func (*Open123) IsReady() bool {
    tempDir := setting.GetStr(conf.Pan123OpenTempDir)
    if tempDir == "" {
        return false
    }
    storage, _, err := op.GetStorageAndActualPath(tempDir)
    if err != nil {
        return false
    }
    if _, ok := storage.(*_123_open.Open123); !ok {
        return false
    }
    return true
}

func (*Open123) AddURL(args *tool.AddUrlArgs) (string, error) {
    storage, actualPath, err := op.GetStorageAndActualPath(args.TempDir)
    if err != nil {
        return "", err
    }
    driver123Open, ok := storage.(*_123_open.Open123)
    if !ok {
        return "", fmt.Errorf("unsupported storage driver for offline download, only 123 Open is supported")
    }
    ctx := context.Background()
    if err := op.MakeDir(ctx, storage, actualPath); err != nil {
        return "", err
    }
    parentDir, err := op.GetUnwrap(ctx, storage, actualPath)
    if err != nil {
        return "", err
    }
    cb := setting.GetStr(conf.Pan123OpenOfflineDownloadCallbackUrl)
    taskID, err := driver123Open.OfflineDownload(ctx, args.Url, parentDir, cb)
    if err != nil {
        return "", fmt.Errorf("failed to add offline download task: %w", err)
    }
    return strconv.Itoa(taskID), nil
}

func (*Open123) Remove(_ *tool.DownloadTask) error {
    return errs.NotSupport
}

func (*Open123) Status(task *tool.DownloadTask) (*tool.Status, error) {
    taskID, err := strconv.Atoi(task.GID)
    if err != nil {
        return nil, fmt.Errorf("failed to parse task ID: %s", task.GID)
    }
    storage, _, err := op.GetStorageAndActualPath(task.TempDir)
    if err != nil {
        return nil, err
    }
    driver123Open, ok := storage.(*_123_open.Open123)
    if !ok {
        return nil, fmt.Errorf("unsupported storage driver for offline download, only 123 Open is supported")
    }
    process, status, err := driver123Open.OfflineDownloadProcess(context.Background(), taskID)
    if err != nil {
        return nil, err
    }
    var statusStr string
    switch status {
    case 0:
        statusStr = "downloading"
    case 1:
        err = fmt.Errorf("offline download failed")
    case 2:
        statusStr = "succeed"
    case 3:
        statusStr = "retrying"
    }
    return &tool.Status{
        Progress:  process,
        Completed: status == 2,
        Status:    statusStr,
        Err:       err,
    }, nil
}

var _ tool.Tool = (*Open123)(nil)

func init() {
    tool.Tools.Add(&Open123{})
}
|
||||
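For orientation: the method set above is what makes Open123 an offline-download tool. A compilable sketch of the tool.Tool contract inferred from these methods; the authoritative definition lives in internal/offline_download/tool and may carry more, and the stub types here merely stand in for the real ones:

package tool

// Stub types standing in for the real definitions (assumption).
type SettingItem struct{}
type AddUrlArgs struct{ Url, TempDir string }
type DownloadTask struct{ GID, TempDir string }
type Status struct {
	Progress  float64
	Completed bool
	Status    string
	Err       error
}

// Tool, as inferred from the methods Open123 implements above.
type Tool interface {
	Name() string
	Items() []SettingItem
	Init() (string, error)
	IsReady() bool
	AddURL(args *AddUrlArgs) (string, error)
	Remove(task *DownloadTask) error
	Status(task *DownloadTask) (*Status, error)
	Run(task *DownloadTask) error
}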
@@ -3,6 +3,7 @@ package offline_download
 import (
 	_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download/115"
 	_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download/115_open"
+	_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download/123_open"
 	_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download/aria2"
 	_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download/http"
 	_ "github.com/OpenListTeam/OpenList/v4/internal/offline_download/pikpak"
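The blank import is what activates the new tool: importing the package only for its side effects runs its init function, which registers the tool in a shared registry (tool.Tools.Add in the new file above). A self-contained sketch of this registration-by-init pattern, with toy names:

package main

import "fmt"

type Tool interface{ Name() string }

// Toy registry mirroring tool.Tools; the real one lives in
// internal/offline_download/tool.
var registry []Tool

func Add(t Tool) { registry = append(registry, t) }

type open123 struct{}

func (open123) Name() string { return "123 Open" }

// In OpenList this init fires when the package is blank-imported.
func init() { Add(open123{}) }

func main() {
	for _, t := range registry {
		fmt.Println("registered:", t.Name())
	}
}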
@@ -10,6 +10,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/OpenListTeam/OpenList/v4/drivers/base"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/internal/offline_download/tool"
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
@@ -58,6 +59,7 @@ func (s SimpleHttp) Run(task *tool.DownloadTask) error {
 	if err != nil {
 		return err
 	}
+	req.Header.Set("User-Agent", base.UserAgent)
 	if streamPut {
 		req.Header.Set("Range", "bytes=0-")
 	}
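The added header gives outbound offline-download requests a consistent identity; base.UserAgent is the shared User-Agent string defined in drivers/base. A standalone sketch of the equivalent net/http usage, with a hypothetical agent string:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical value; the repo's base.UserAgent is a browser-like string.
	const userAgent = "Mozilla/5.0 (example)"
	req, err := http.NewRequest(http.MethodGet, "https://example.com/file.bin", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("User-Agent", userAgent)
	// An open-ended range requests the whole body while signalling
	// that partial, resumable responses are acceptable.
	req.Header.Set("Range", "bytes=0-")
	fmt.Println(req.Header)
}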
@@ -2,18 +2,16 @@ package tool
 
 import (
 	"context"
-	"github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"
-
-	_115_open "github.com/OpenListTeam/OpenList/v4/drivers/115_open"
-	"github.com/OpenListTeam/OpenList/v4/server/common"
-
 	"net/url"
 	stdpath "path"
 	"path/filepath"
 
 	_115 "github.com/OpenListTeam/OpenList/v4/drivers/115"
+	_115_open "github.com/OpenListTeam/OpenList/v4/drivers/115_open"
+	_123_open "github.com/OpenListTeam/OpenList/v4/drivers/123_open"
 	"github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
 	"github.com/OpenListTeam/OpenList/v4/drivers/thunder"
+	"github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"
 	"github.com/OpenListTeam/OpenList/v4/drivers/thunderx"
 	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -22,6 +20,7 @@ import (
 	"github.com/OpenListTeam/OpenList/v4/internal/op"
 	"github.com/OpenListTeam/OpenList/v4/internal/setting"
 	"github.com/OpenListTeam/OpenList/v4/internal/task"
+	"github.com/OpenListTeam/OpenList/v4/server/common"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
 )
@@ -104,6 +103,13 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro
 		} else {
 			tempDir = filepath.Join(setting.GetStr(conf.Pan115OpenTempDir), uid)
 		}
+	case "123 Open":
+		if _, ok := storage.(*_123_open.Open123); ok && dstDirActualPath != "/" {
+			// offline downloading directly to the root path is not allowed on the 123 Open platform
+			tempDir = args.DstDirPath
+		} else {
+			tempDir = filepath.Join(setting.GetStr(conf.Pan123OpenTempDir), uid)
+		}
 	case "PikPak":
 		if _, ok := storage.(*pikpak.PikPak); ok {
 			tempDir = args.DstDirPath
@@ -111,6 +111,9 @@ outer:
 	if t.tool.Name() == "115 Open" {
 		return nil
 	}
+	if t.tool.Name() == "123 Open" {
+		return nil
+	}
 	t.Status = "offline download completed, maybe transferring"
 	// hack for qBittorrent
 	if t.tool.Name() == "qBittorrent" {
@@ -174,7 +177,7 @@ func (t *DownloadTask) Update() (bool, error) {
 
 func (t *DownloadTask) Transfer() error {
 	toolName := t.tool.Name()
-	if toolName == "115 Cloud" || toolName == "115 Open" || toolName == "PikPak" || toolName == "Thunder" || toolName == "ThunderX" || toolName == "ThunderBrowser" {
+	if toolName == "115 Cloud" || toolName == "115 Open" || toolName == "123 Open" || toolName == "PikPak" || toolName == "Thunder" || toolName == "ThunderX" || toolName == "ThunderBrowser" {
 		// if the download did not go directly to the destination path, transfer the files there
 		if t.TempDir != t.DstDirPath {
 			return transferObj(t.Ctx(), t.TempDir, t.DstDirPath, t.DeletePolicy)
@@ -27,7 +27,7 @@ var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
 
 func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	path = utils.FixAndCleanPath(path)
 	key := Key(storage, path)
@@ -163,7 +163,7 @@ var archiveListG singleflight.Group[[]model.Obj]
 
 func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	path = utils.FixAndCleanPath(path)
 	metaKey := Key(storage, path)
@@ -309,7 +309,7 @@ func splitPath(path string) []string {
 
 func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, model.Obj, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	path = utils.FixAndCleanPath(path)
 	af, err := GetUnwrap(ctx, storage, path)
@@ -364,7 +364,7 @@ var extractG = singleflight.Group[*extractLink]{Remember: true}
 
 func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	key := stdpath.Join(Key(storage, path), args.InnerPath)
 	if link, ok := extractCache.Get(key); ok {
@@ -480,7 +480,7 @@ func InternalExtract(ctx context.Context, storage driver.Driver, path string, ar
 
 func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	srcPath = utils.FixAndCleanPath(srcPath)
 	dstDirPath = utils.FixAndCleanPath(dstDirPath)
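The pattern repeated in these hunks replaces one-off formatted errors with a wrapped sentinel, so callers can branch on errors.Is(err, errs.StorageNotInit) while the message still carries the concrete status. A minimal sketch with a hypothetical sentinel, using the same github.com/pkg/errors helper:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Hypothetical stand-in for errs.StorageNotInit.
var ErrStorageNotInit = fmt.Errorf("storage not init")

func check(status string) error {
	// WithMessagef keeps the sentinel matchable while adding context.
	return errors.WithMessagef(ErrStorageNotInit, "storage status: %s", status)
}

func main() {
	err := check("disabled")
	fmt.Println(errors.Is(err, ErrStorageNotInit)) // true
	fmt.Println(err) // storage status: disabled: storage not init
}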
@@ -116,7 +116,7 @@ func Key(storage driver.Driver, path string) string {
 
 // List files in storage; does not contain virtual files
 func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs) ([]model.Obj, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	path = utils.FixAndCleanPath(path)
 	log.Debugf("op.List %s", path)
@@ -184,7 +184,7 @@ func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, er
 	if err == nil {
 		return model.WrapObjName(obj), nil
 	}
-	if !errs.IsNotImplement(err) {
+	if !errs.IsNotImplementError(err) && !errs.IsNotSupportError(err) {
 		return nil, errors.WithMessage(err, "failed to get obj")
 	}
 }
@@ -259,7 +259,7 @@ var errLinkMFileCache = stderrors.New("ErrLinkMFileCache")
 
 // Link gets the link; if it is a URL, it should have an expiry time
 func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return nil, nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	var (
 		file model.Obj
@@ -369,7 +369,7 @@ var mkdirG singleflight.Group[interface{}]
 
 func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	path = utils.FixAndCleanPath(path)
 	key := Key(storage, path)
@@ -424,7 +424,7 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache
 
 func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	srcPath = utils.FixAndCleanPath(srcPath)
 	dstDirPath = utils.FixAndCleanPath(dstDirPath)
@@ -467,7 +467,7 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 
 func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string, lazyCache ...bool) error {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	srcPath = utils.FixAndCleanPath(srcPath)
 	srcRawObj, err := Get(ctx, storage, srcPath)
@@ -508,7 +508,7 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string,
 
 // Copy just copies file[s] within a storage
 func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	srcPath = utils.FixAndCleanPath(srcPath)
 	dstDirPath = utils.FixAndCleanPath(dstDirPath)
@@ -545,7 +545,7 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 
 func Remove(ctx context.Context, storage driver.Driver, path string) error {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	if utils.PathEqual(path, "/") {
 		return errors.New("delete root folder is not allowed, please goto the manage page to delete the storage instead")
@@ -586,7 +586,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
 		}
 	}()
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	// UrlTree PUT
 	if storage.GetStorage().Driver == "UrlTree" {
@@ -678,7 +678,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
 
 func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url string, lazyCache ...bool) error {
 	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
-		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
+		return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
 	}
 	dstDirPath = utils.FixAndCleanPath(dstDirPath)
 	_, err := GetUnwrap(ctx, storage, stdpath.Join(dstDirPath, dstName))
@@ -7,6 +7,7 @@ import (
 	"runtime"
 	"sort"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/db"
@@ -351,13 +352,11 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
 			DriverName: d.Config().Name,
 		},
 	}
-	storage, ok := d.(driver.WithDetails)
-	if !ok {
-		return ret
-	}
-	details, err := storage.GetDetails(ctx)
+	timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
+	defer cancel()
+	details, err := GetStorageDetails(timeoutCtx, d)
 	if err != nil {
-		if !errors.Is(err, errs.NotImplement) {
+		if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
 			log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
 		}
 		return ret
@@ -379,6 +378,7 @@ func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver
 
 	prefix = utils.FixAndCleanPath(prefix)
 	set := make(map[string]int)
+	var wg sync.WaitGroup
 	for _, v := range storages {
 		mountPath := utils.GetActualMountPath(v.GetStorage().MountPath)
 		// Exclude the prefix itself and non-prefixed paths
@@ -396,14 +396,25 @@ func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver
 				IsFolder: true,
 			}
 			if len(names) == 1 {
-				files = append(files, rootCallback(v, obj))
+				idx = len(files)
+				files = append(files, obj)
+				wg.Add(1)
+				go func() {
+					defer wg.Done()
+					files[idx] = rootCallback(v, files[idx])
+				}()
 			} else {
 				files = append(files, obj)
 			}
 		} else if len(names) == 1 {
-			files[idx] = rootCallback(v, files[idx])
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				files[idx] = rootCallback(v, files[idx])
+			}()
 		}
 	}
+	wg.Wait()
 	return files
 }
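The rewritten loop resolves each root object concurrently: every goroutine writes only its own slice index, and the final wg.Wait() both blocks until all callbacks finish and publishes their writes. A self-contained sketch of that fill-your-own-slot pattern:

package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for i := range items {
		idx := i // per-iteration copy (needed before Go 1.22 loop semantics)
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each goroutine touches a distinct element: no data race.
			items[idx] = strings.ToUpper(items[idx])
		}()
	}
	wg.Wait() // also the happens-before edge for reading the results
	fmt.Println(items) // [A B C]
}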
@@ -427,3 +438,14 @@ func GetBalancedStorage(path string) driver.Driver {
 		return storages[i]
 	}
 }
+
+func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) {
+	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
+		return nil, errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status)
+	}
+	wd, ok := storage.(driver.WithDetails)
+	if !ok {
+		return nil, errs.NotImplement
+	}
+	return wd.GetDetails(ctx)
+}
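GetStorageDetails centralizes the optional-capability check: a driver advertises details support by implementing driver.WithDetails, discovered with a type assertion. A toy sketch of that optional-interface pattern, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

var ErrNotImplement = errors.New("not implement")

type Driver interface{ Name() string }

// Optional capability, discovered by type assertion.
type WithDetails interface{ GetDetails() (string, error) }

type plain struct{} // no details support

func (plain) Name() string { return "plain" }

type fancy struct{} // opts in to the capability

func (fancy) Name() string                { return "fancy" }
func (fancy) GetDetails() (string, error) { return "42 GiB free", nil }

func details(d Driver) (string, error) {
	wd, ok := d.(WithDetails)
	if !ok {
		return "", ErrNotImplement
	}
	return wd.GetDetails()
}

func main() {
	fmt.Println(details(plain{})) // "" plus the not-implement error
	fmt.Println(details(fancy{})) // 42 GiB free <nil>
}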
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"os"
 
 	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -151,32 +152,58 @@ func CacheFullAndHash(stream model.FileStreamer, up *model.UpdateProgress, hashT
 	return tmpF, hex.EncodeToString(h.Sum(nil)), nil
 }
 
-type StreamSectionReader struct {
-	file    model.FileStreamer
-	off     int64
-	bufPool *pool.Pool[[]byte]
+type StreamSectionReaderIF interface {
+	// Not thread-safe
+	GetSectionReader(off, length int64) (io.ReadSeeker, error)
+	FreeSectionReader(sr io.ReadSeeker)
+	// Not thread-safe
+	DiscardSection(off int64, length int64) error
 }
 
-func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *model.UpdateProgress) (*StreamSectionReader, error) {
-	ss := &StreamSectionReader{file: file}
+func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *model.UpdateProgress) (StreamSectionReaderIF, error) {
 	if file.GetFile() != nil {
-		return ss, nil
+		return &cachedSectionReader{file.GetFile()}, nil
 	}
 
 	maxBufferSize = min(maxBufferSize, int(file.GetSize()))
 	if maxBufferSize > conf.MaxBufferLimit {
-		_, err := file.CacheFullAndWriter(up, nil)
+		f, err := os.CreateTemp(conf.Conf.TempDir, "file-*")
 		if err != nil {
 			return nil, err
 		}
 
+		if f.Truncate((file.GetSize()+int64(maxBufferSize-1))/int64(maxBufferSize)*int64(maxBufferSize)) != nil {
+			// fallback to full cache
+			_, _ = f.Close(), os.Remove(f.Name())
+			cache, err := file.CacheFullAndWriter(up, nil)
+			if err != nil {
+				return nil, err
+			}
+			return &cachedSectionReader{cache}, nil
+		}
+
+		ss := &fileSectionReader{Reader: file, temp: f}
+		ss.bufPool = &pool.Pool[*offsetWriterWithBase]{
+			New: func() *offsetWriterWithBase {
+				base := ss.fileOff
+				ss.fileOff += int64(maxBufferSize)
+				return &offsetWriterWithBase{io.NewOffsetWriter(ss.temp, base), base}
+			},
+		}
+		file.Add(utils.CloseFunc(func() error {
+			ss.bufPool.Reset()
+			return errors.Join(ss.temp.Close(), os.Remove(ss.temp.Name()))
+		}))
+		return ss, nil
 	}
 
+	ss := &directSectionReader{file: file}
 	if conf.MmapThreshold > 0 && maxBufferSize >= conf.MmapThreshold {
 		ss.bufPool = &pool.Pool[[]byte]{
 			New: func() []byte {
 				buf, err := mmap.Alloc(maxBufferSize)
 				if err == nil {
-					file.Add(utils.CloseFunc(func() error {
+					ss.file.Add(utils.CloseFunc(func() error {
 						return mmap.Free(buf)
 					}))
 				} else {
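The constructor now returns one of three strategies: a cachedSectionReader when the stream is already file-backed, a temp-file-backed fileSectionReader when buffers would exceed conf.MaxBufferLimit, and an in-memory (optionally mmap-backed) directSectionReader otherwise. All three recycle buffers through pkg/pool; a guess at the assumed pool semantics, which the repo's implementation may refine:

package main

import "fmt"

// Sketch of the assumed pool semantics: New builds items lazily,
// Put recycles them, Reset discards everything (assumption).
type Pool[T any] struct {
	New   func() T
	items []T
}

func (p *Pool[T]) Get() T {
	if n := len(p.items); n > 0 {
		it := p.items[n-1]
		p.items = p.items[:n-1]
		return it
	}
	return p.New()
}

func (p *Pool[T]) Put(it T) { p.items = append(p.items, it) }
func (p *Pool[T]) Reset()   { p.items = nil }

func main() {
	allocs := 0
	p := &Pool[[]byte]{New: func() []byte { allocs++; return make([]byte, 4) }}
	b := p.Get()
	p.Put(b)
	_ = p.Get() // recycled, New is not called again
	fmt.Println("allocations:", allocs) // allocations: 1
}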
@@ -200,38 +227,113 @@ func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *mode
 	return ss, nil
 }
 
-// Not thread-safe
-func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionReader, error) {
-	var cache io.ReaderAt = ss.file.GetFile()
-	var buf []byte
-	if cache == nil {
-		if off != ss.off {
-			return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
-		}
-		tempBuf := ss.bufPool.Get()
-		buf = tempBuf[:length]
-		n, err := io.ReadFull(ss.file, buf)
-		if int64(n) != length {
-			return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
-		}
-		ss.off += int64(n)
-		off = 0
-		cache = bytes.NewReader(buf)
-	}
-	return &SectionReader{io.NewSectionReader(cache, off, length), buf}, nil
+type cachedSectionReader struct {
+	cache io.ReaderAt
 }
 
-func (ss *StreamSectionReader) FreeSectionReader(sr *SectionReader) {
-	if sr != nil {
-		if sr.buf != nil {
-			ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
-			sr.buf = nil
-		}
-	}
+func (*cachedSectionReader) DiscardSection(off int64, length int64) error {
+	return nil
+}
+func (s *cachedSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
+	return io.NewSectionReader(s.cache, off, length), nil
+}
+func (*cachedSectionReader) FreeSectionReader(sr io.ReadSeeker) {}
+
+type fileSectionReader struct {
+	io.Reader
+	off     int64
+	temp    *os.File
+	fileOff int64
+	bufPool *pool.Pool[*offsetWriterWithBase]
+}
+
+type offsetWriterWithBase struct {
+	*io.OffsetWriter
+	base int64
+}
+
+// Not thread-safe
+func (ss *fileSectionReader) DiscardSection(off int64, length int64) error {
+	if off != ss.off {
+		return fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
+	}
+	_, err := utils.CopyWithBufferN(io.Discard, ss.Reader, length)
+	if err != nil {
+		return fmt.Errorf("failed to skip data: (expect =%d) %w", length, err)
+	}
+	ss.off += length
+	return nil
+}
+
+type fileBufferSectionReader struct {
+	io.ReadSeeker
+	fileBuf *offsetWriterWithBase
+}
+
+func (ss *fileSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
+	if off != ss.off {
+		return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
+	}
+	fileBuf := ss.bufPool.Get()
+	_, _ = fileBuf.Seek(0, io.SeekStart)
+	n, err := utils.CopyWithBufferN(fileBuf, ss.Reader, length)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
+	}
+	ss.off += length
+	return &fileBufferSectionReader{io.NewSectionReader(ss.temp, fileBuf.base, length), fileBuf}, nil
+}
+
+func (ss *fileSectionReader) FreeSectionReader(rs io.ReadSeeker) {
+	if sr, ok := rs.(*fileBufferSectionReader); ok {
+		ss.bufPool.Put(sr.fileBuf)
+		sr.fileBuf = nil
+		sr.ReadSeeker = nil
+	}
+}
 
-type SectionReader struct {
+type directSectionReader struct {
+	file    model.FileStreamer
+	off     int64
+	bufPool *pool.Pool[[]byte]
+}
+
+// Not thread-safe
+func (ss *directSectionReader) DiscardSection(off int64, length int64) error {
+	if off != ss.off {
+		return fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
+	}
+	_, err := utils.CopyWithBufferN(io.Discard, ss.file, length)
+	if err != nil {
+		return fmt.Errorf("failed to skip data: (expect =%d) %w", length, err)
+	}
+	ss.off += length
+	return nil
+}
+
+type bufferSectionReader struct {
+	io.ReadSeeker
+	buf []byte
+}
+
+// Not thread-safe
+func (ss *directSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
+	if off != ss.off {
+		return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
+	}
+	tempBuf := ss.bufPool.Get()
+	buf := tempBuf[:length]
+	n, err := io.ReadFull(ss.file, buf)
+	if int64(n) != length {
+		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
+	}
+	ss.off += int64(n)
+	return &bufferSectionReader{bytes.NewReader(buf), buf}, nil
+}
+func (ss *directSectionReader) FreeSectionReader(rs io.ReadSeeker) {
+	if sr, ok := rs.(*bufferSectionReader); ok {
+		ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
+		sr.buf = nil
+		sr.ReadSeeker = nil
+	}
+}
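All three implementations share the same sequential contract: sections must be requested at the current offset, and every reader handed out should be returned through FreeSectionReader. A runnable toy using the simplest strategy (an io.ReaderAt-backed source, as in cachedSectionReader above):

package main

import (
	"fmt"
	"io"
	"strings"
)

// Toy stand-in for cachedSectionReader: sections come straight from
// an io.ReaderAt, so Free and Discard are no-ops.
type cached struct{ r io.ReaderAt }

func (c cached) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
	return io.NewSectionReader(c.r, off, length), nil
}
func (cached) FreeSectionReader(io.ReadSeeker)        {}
func (cached) DiscardSection(off, length int64) error { return nil }

func main() {
	data := "hello, section readers"
	src := cached{strings.NewReader(data)}
	const part = 8
	for off := int64(0); off < int64(len(data)); off += part {
		length := min(part, int64(len(data))-off)
		sr, _ := src.GetSectionReader(off, length)
		b, _ := io.ReadAll(sr)
		src.FreeSectionReader(sr) // hand the buffer back in pooled variants
		fmt.Printf("part @%d: %q\n", off, b)
	}
}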
@@ -207,31 +207,34 @@ func (c *SyncClosers) AcquireReference() bool {
 		}
 		newRef := ref + 1
 		if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
-			log.Debugf("AcquireReference %p: %d", c, newRef)
+			// log.Debugf("AcquireReference %p: %d", c, newRef)
 			return true
 		}
 	}
 }
 
+const closersClosed = math.MinInt16
+
 func (c *SyncClosers) Close() error {
-	for {
-		ref := atomic.LoadInt32(&c.ref)
-		if ref < 0 {
-			return nil
-		}
-		newRef := ref - 1
-		if newRef <= 0 {
-			newRef = math.MinInt16
-		}
-		if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) {
-			log.Debugf("Close %p: %d", c, ref)
-			if newRef > 0 {
-				return nil
-			}
-			break
-		}
-	}
+	ref := atomic.AddInt32(&c.ref, -1)
+	if ref > 0 {
+		// log.Debugf("ReleaseReference %p: %d", c, ref)
+		return nil
+	}
+
+	if ref < -1 {
+		atomic.StoreInt32(&c.ref, closersClosed)
+		return nil
+	}
+
+	// Attempt to acquire FinalClose permission.
+	// At this point, ref must be 0 or -1. We try to atomically change it to the closersClosed state.
+	// Only the first successful goroutine gets the cleanup permission.
+	if !atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) {
+		return nil
+	}
+
+	// log.Debugf("FinalClose %p", c)
 	var errs []error
 	for _, closer := range c.closers {
 		if closer != nil {
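The rewrite turns Close into a single atomic decrement on the hot path; only when the count reaches zero (or a duplicate Close races it to -1) do goroutines contend on one compare-and-swap for the right to run cleanup exactly once. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

const closed = math.MinInt16 // terminal state, mirrors closersClosed

type refCloser struct {
	ref     int32
	cleanup func()
}

func (c *refCloser) Close() {
	ref := atomic.AddInt32(&c.ref, -1)
	if ref > 0 {
		return // other holders remain
	}
	if ref < -1 {
		atomic.StoreInt32(&c.ref, closed)
		return // teardown already claimed
	}
	// ref is 0 or -1: exactly one goroutine wins this CAS.
	if !atomic.CompareAndSwapInt32(&c.ref, ref, closed) {
		return
	}
	c.cleanup()
}

func main() {
	var runs int32
	c := &refCloser{ref: 3, cleanup: func() { atomic.AddInt32(&runs, 1) }}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.Close() }()
	}
	wg.Wait()
	fmt.Println("cleanups:", atomic.LoadInt32(&runs)) // cleanups: 1
}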
Some files were not shown because too many files have changed in this diff.