mirror of https://github.com/AlistGo/alist.git
synced 2025-11-25 11:29:45 +08:00
Compare commits
20 Commits
| SHA1 |
|---|
| 3cddb6b7ed |
| ce41587095 |
| 0cbc7ebc92 |
| b4d9beb49c |
| 4c8401855c |
| e2016dd031 |
| a6bd90a9b2 |
| 35d322443b |
| 81a7f28ba2 |
| fe564c42da |
| d17889bf8e |
| 4f8bc478d5 |
| e1800f18e4 |
| 16cce37947 |
| 6e7c7d1dd0 |
| 28a8428559 |
| d0026030cb |
| fcbc79cb24 |
| 930f9f6096 |
| 23107483a1 |
@@ -57,7 +57,9 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
 - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
 - [x] WebDav(Support OneDrive/SharePoint without API)
 - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
+- [x] [MediaFire](https://www.mediafire.com)
 - [x] [Mediatrack](https://www.mediatrack.cn/)
+- [x] [ProtonDrive](https://proton.me/drive)
 - [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
 - [x] [YandexDisk](https://disk.yandex.com/)
 - [x] [BaiduNetdisk](http://pan.baidu.com/)
@@ -57,7 +57,9 @@
 - [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
 - [x] WebDav(支持无API的OneDrive/SharePoint)
 - [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
+- [x] [MediaFire](https://www.mediafire.com)
 - [x] [分秒帧](https://www.mediatrack.cn/)
+- [x] [ProtonDrive](https://proton.me/drive)
 - [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组)
 - [x] [Yandex.Disk](https://disk.yandex.com/)
 - [x] [百度网盘](http://pan.baidu.com/)
@@ -57,7 +57,9 @@
 - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
 - [x] WebDav(Support OneDrive/SharePoint without API)
 - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
+- [x] [MediaFire](https://www.mediafire.com)
 - [x] [Mediatrack](https://www.mediatrack.cn/)
+- [x] [ProtonDrive](https://proton.me/drive)
 - [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
 - [x] [YandexDisk](https://disk.yandex.com/)
 - [x] [BaiduNetdisk](http://pan.baidu.com/)
@@ -6,6 +6,8 @@ import (
 	"fmt"
+	"net/http"
 	"net/url"
+	"strconv"
 	"strings"
 	"sync"
 	"time"

@@ -28,7 +30,8 @@ import (
 type Pan123 struct {
 	model.Storage
 	Addition
-	apiRateLimit sync.Map
+	apiRateLimit    sync.Map
+	safeBoxUnlocked sync.Map
 }

 func (d *Pan123) Config() driver.Config {
@@ -52,9 +55,26 @@ func (d *Pan123) Drop(ctx context.Context) error {
 }

 func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	if f, ok := dir.(File); ok && f.IsLock {
+		if err := d.unlockSafeBox(f.FileId); err != nil {
+			return nil, err
+		}
+	}
 	files, err := d.getFiles(ctx, dir.GetID(), dir.GetName())
 	if err != nil {
-		return nil, err
+		msg := strings.ToLower(err.Error())
+		if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
+			if id, e := strconv.ParseInt(dir.GetID(), 10, 64); e == nil {
+				if e = d.unlockSafeBox(id); e == nil {
+					files, err = d.getFiles(ctx, dir.GetID(), dir.GetName())
+				} else {
+					return nil, e
+				}
+			}
+		}
+		if err != nil {
+			return nil, err
+		}
 	}
 	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
 		return src, nil
@@ -6,8 +6,9 @@ import (
 )

 type Addition struct {
-	Username string `json:"username" required:"true"`
-	Password string `json:"password" required:"true"`
+	Username     string `json:"username" required:"true"`
+	Password     string `json:"password" required:"true"`
+	SafePassword string `json:"safe_password"`
 	driver.RootID
 	//OrderBy        string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
 	//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
@@ -20,6 +20,7 @@ type File struct {
 	Etag        string `json:"Etag"`
 	S3KeyFlag   string `json:"S3KeyFlag"`
 	DownloadUrl string `json:"DownloadUrl"`
+	IsLock      bool   `json:"IsLock"`
 }

 func (f File) CreateTime() time.Time {
@@ -43,6 +43,7 @@ const (
 	S3Auth           = MainApi + "/file/s3_upload_object/auth"
 	UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
 	S3Complete       = MainApi + "/file/s3_complete_multipart_upload"
+	SafeBoxUnlock    = MainApi + "/restful/goapi/v1/file/safe_box/auth/unlockbox"
 	//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
 )
@@ -238,6 +239,22 @@ do:
 	return body, nil
 }

+func (d *Pan123) unlockSafeBox(fileId int64) error {
+	if _, ok := d.safeBoxUnlocked.Load(fileId); ok {
+		return nil
+	}
+	data := base.Json{"password": d.SafePassword}
+	url := fmt.Sprintf("%s?fileId=%d", SafeBoxUnlock, fileId)
+	_, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
+		req.SetBody(data)
+	}, nil)
+	if err != nil {
+		return err
+	}
+	d.safeBoxUnlocked.Store(fileId, true)
+	return nil
+}
+
 func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]File, error) {
 	page := 1
 	total := 0
@@ -267,6 +284,15 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
 		req.SetQueryParams(query)
 	}, &resp)
 	if err != nil {
+		msg := strings.ToLower(err.Error())
+		if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
+			if fid, e := strconv.ParseInt(parentId, 10, 64); e == nil {
+				if e = d.unlockSafeBox(fid); e == nil {
+					return d.getFiles(ctx, parentId, name)
+				}
+				return nil, e
+			}
+		}
 		return nil, err
 	}
 	log.Debug(string(_res))
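Both `List` above and `getFiles` here recover the same way: when the API error mentions the safe box ("safe box" / "保险箱"), the driver calls `unlockSafeBox` with the configured `safe_password` and retries the listing once. Successful unlocks are cached per folder ID in the `safeBoxUnlocked` sync.Map, so only the first listing of a locked folder pays the extra round trip. A minimal standalone sketch of that memoize-on-success pattern, where the `unlock` callback stands in for the real HTTP call:

```go
package main

import (
	"fmt"
	"sync"
)

// unlockOnce caches a successful unlock per id so repeated listings of the
// same safe-box folder don't re-send the password.
type unlockOnce struct {
	unlocked sync.Map // fileId -> true once the unlock call succeeded
	unlock   func(id int64) error
}

func (u *unlockOnce) ensure(id int64) error {
	if _, ok := u.unlocked.Load(id); ok {
		return nil // already unlocked in this process
	}
	if err := u.unlock(id); err != nil {
		return err // failures are not cached, so the next call retries
	}
	u.unlocked.Store(id, true)
	return nil
}

func main() {
	calls := 0
	u := &unlockOnce{unlock: func(id int64) error { calls++; return nil }}
	_ = u.ensure(42)
	_ = u.ensure(42)
	fmt.Println(calls) // 1
}
```

Note that `sync.Map` does not serialize concurrent first calls, so two goroutines listing the same locked folder may both send the unlock request; that is tolerable here because a repeated unlock simply succeeds again, and failures are deliberately not cached so a corrected password takes effect on the next attempt.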
@@ -11,6 +11,7 @@ import (
 	"github.com/go-resty/resty/v2"
 	"net/http"
 	"strconv"
+	"time"
 )

 type Open123 struct {
@@ -89,8 +90,24 @@ func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 		return nil, fmt.Errorf("get link failed: %s", result.Message)
 	}

+	linkURL := result.Data.URL
+	if d.PrivateKey != "" {
+		if d.UID == 0 {
+			return nil, fmt.Errorf("uid is required when private key is set")
+		}
+		duration := time.Duration(d.ValidDuration)
+		if duration <= 0 {
+			duration = 30
+		}
+		signedURL, err := SignURL(linkURL, d.PrivateKey, d.UID, duration*time.Minute)
+		if err != nil {
+			return nil, err
+		}
+		linkURL = signedURL
+	}
+
 	return &model.Link{
-		URL: result.Data.URL,
+		URL: linkURL,
 	}, nil
 }
@@ -8,8 +8,11 @@ import (
 type Addition struct {
 	driver.RootID

-	ClientID     string `json:"client_id" required:"true" label:"Client ID"`
-	ClientSecret string `json:"client_secret" required:"true" label:"Client Secret"`
+	ClientID      string `json:"client_id" required:"true" label:"Client ID"`
+	ClientSecret  string `json:"client_secret" required:"true" label:"Client Secret"`
+	PrivateKey    string `json:"private_key"`
+	UID           uint64 `json:"uid" type:"number"`
+	ValidDuration int64  `json:"valid_duration" type:"number" default:"30" help:"minutes"`
 }

 var config = driver.Config{
27 drivers/123_open/sign.go Normal file
@@ -0,0 +1,27 @@
package _123Open

import (
	"crypto/md5"
	"fmt"
	"math/rand"
	"net/url"
	"time"
)

func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (string, error) {
	if privateKey == "" {
		return originURL, nil
	}
	parsed, err := url.Parse(originURL)
	if err != nil {
		return "", err
	}
	ts := time.Now().Add(validDuration).Unix()
	randInt := rand.Int()
	signature := fmt.Sprintf("%d-%d-%d-%x", ts, randInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
		parsed.Path, ts, randInt, uid, privateKey))))
	query := parsed.Query()
	query.Add("auth_key", signature)
	parsed.RawQuery = query.Encode()
	return parsed.String(), nil
}
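`SignURL` appends an `auth_key` query parameter of the form `<expiry-unix-ts>-<rand>-<uid>-<md5 hex>`, where the digest covers the URL path, the same three values, and the private key. A usage sketch in the same package; the host, key, and uid below are made-up illustrative values, and `Link` above converts `ValidDuration` from minutes, defaulting to 30:

```go
package _123Open

import (
	"fmt"
	"time"
)

// ExampleSignURL signs a hypothetical download link for 30 minutes.
// None of these values are real credentials.
func ExampleSignURL() {
	signed, err := SignURL(
		"https://cdn.example.com/file/abc.bin", // hypothetical unsigned link
		"my-private-key",                       // hypothetical signing key
		10086,                                  // hypothetical uid
		30*time.Minute,
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
	// e.g. https://cdn.example.com/file/abc.bin?auth_key=1735689600-12345-10086-9e107d9d...
}
```

Because the digest covers only the path, existing query parameters do not affect the signature; the random component merely diversifies the key, while the secrecy lives entirely in the private key.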
@@ -80,9 +80,10 @@ func (d *Cloud189) Link(ctx context.Context, file model.Obj, args model.LinkArgs
 }

 func (d *Cloud189) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	safeName := d.sanitizeName(dirName)
 	form := map[string]string{
 		"parentFolderId": parentDir.GetID(),
-		"folderName":     dirName,
+		"folderName":     safeName,
 	}
 	_, err := d.request("https://cloud.189.cn/api/open/file/createFolder.action", http.MethodPost, func(req *resty.Request) {
 		req.SetFormData(form)
@@ -126,9 +127,10 @@ func (d *Cloud189) Rename(ctx context.Context, srcObj model.Obj, newName string)
 		idKey = "folderId"
 		nameKey = "destFolderName"
 	}
+	safeName := d.sanitizeName(newName)
 	form := map[string]string{
 		idKey:   srcObj.GetID(),
-		nameKey: newName,
+		nameKey: safeName,
 	}
 	_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
 		req.SetFormData(form)

@@ -6,9 +6,10 @@ import (
 )

 type Addition struct {
-	Username string `json:"username" required:"true"`
-	Password string `json:"password" required:"true"`
-	Cookie   string `json:"cookie" help:"Fill in the cookie if need captcha"`
+	Username   string `json:"username" required:"true"`
+	Password   string `json:"password" required:"true"`
+	Cookie     string `json:"cookie" help:"Fill in the cookie if need captcha"`
+	StripEmoji bool   `json:"strip_emoji" help:"Remove four-byte characters (e.g., emoji) before upload"`
 	driver.RootID
 }
@@ -11,9 +11,11 @@ import (
 	"io"
 	"math"
 	"net/http"
+	"path"
 	"strconv"
 	"strings"
 	"time"
+	"unicode/utf8"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -222,13 +224,37 @@ func (d *Cloud189) getFiles(fileId string) ([]model.Obj, error) {
 	return res, nil
 }

+func (d *Cloud189) sanitizeName(name string) string {
+	if !d.StripEmoji {
+		return name
+	}
+	b := strings.Builder{}
+	for _, r := range name {
+		if utf8.RuneLen(r) == 4 {
+			continue
+		}
+		b.WriteRune(r)
+	}
+	sanitized := b.String()
+	if sanitized == "" {
+		ext := path.Ext(name)
+		if ext != "" {
+			sanitized = "file" + ext
+		} else {
+			sanitized = "file"
+		}
+	}
+	return sanitized
+}
+
 func (d *Cloud189) oldUpload(dstDir model.Obj, file model.FileStreamer) error {
+	safeName := d.sanitizeName(file.GetName())
 	res, err := d.client.R().SetMultipartFormData(map[string]string{
 		"parentId":   dstDir.GetID(),
 		"sessionKey": "??",
 		"opertype":   "1",
-		"fname":      file.GetName(),
-	}).SetMultipartField("Filedata", file.GetName(), file.GetMimetype(), file).Post("https://hb02.upload.cloud.189.cn/v1/DCIWebUploadAction")
+		"fname":      safeName,
+	}).SetMultipartField("Filedata", safeName, file.GetMimetype(), file).Post("https://hb02.upload.cloud.189.cn/v1/DCIWebUploadAction")
 	if err != nil {
 		return err
 	}
@@ -313,9 +339,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 	const DEFAULT int64 = 10485760
 	var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))

+	safeName := d.sanitizeName(file.GetName())
 	res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
 		"parentFolderId": dstDir.GetID(),
-		"fileName":       encode(file.GetName()),
+		"fileName":       encode(safeName),
 		"fileSize":       strconv.FormatInt(file.GetSize(), 10),
 		"sliceSize":      strconv.FormatInt(DEFAULT, 10),
 		"lazyCheck":      "1",
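The sanitizer's test is purely encoding-based: `utf8.RuneLen(r) == 4` matches every code point above U+FFFF, which covers emoji but also other supplementary-plane characters, so the `StripEmoji` option name slightly understates its reach. If stripping empties the name entirely, the fallback keeps the extension. A runnable sketch mirroring the `sanitizeName` logic above:

```go
package main

import (
	"fmt"
	"path"
	"strings"
	"unicode/utf8"
)

// stripFourByte mirrors sanitizeName: drop every rune whose UTF-8 encoding
// is 4 bytes (code points above U+FFFF), falling back to "file"+ext when
// nothing survives.
func stripFourByte(name string) string {
	var b strings.Builder
	for _, r := range name {
		if utf8.RuneLen(r) == 4 {
			continue // emoji and other supplementary-plane runes
		}
		b.WriteRune(r)
	}
	s := b.String()
	if s == "" {
		if ext := path.Ext(name); ext != "" {
			return "file" + ext
		}
		return "file"
	}
	return s
}

func main() {
	fmt.Println(stripFourByte("报告📄final.docx")) // 报告final.docx (3-byte CJK runes survive)
	fmt.Println(stripFourByte("🎉🎉"))             // file (everything stripped, no extension)
}
```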
@@ -205,10 +205,11 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
 	fullUrl += "/createFolder.action"

 	var newFolder Cloud189Folder
+	safeName := y.sanitizeName(dirName)
 	_, err := y.post(fullUrl, func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetQueryParams(map[string]string{
-			"folderName":   dirName,
+			"folderName":   safeName,
 			"relativePath": "",
 		})
 		if isFamily {
@@ -225,6 +226,7 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
 	if err != nil {
 		return nil, err
 	}
+	newFolder.Name = safeName
 	return &newFolder, nil
 }

@@ -258,21 +260,29 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
 	}

 	var newObj model.Obj
+	safeName := y.sanitizeName(newName)
 	switch f := srcObj.(type) {
 	case *Cloud189File:
 		fullUrl += "/renameFile.action"
 		queryParam["fileId"] = srcObj.GetID()
-		queryParam["destFileName"] = newName
+		queryParam["destFileName"] = safeName
 		newObj = &Cloud189File{Icon: f.Icon} // reuse the preview icon
 	case *Cloud189Folder:
 		fullUrl += "/renameFolder.action"
 		queryParam["folderId"] = srcObj.GetID()
-		queryParam["destFolderName"] = newName
+		queryParam["destFolderName"] = safeName
 		newObj = &Cloud189Folder{}
 	default:
 		return nil, errs.NotSupport
 	}

+	switch obj := newObj.(type) {
+	case *Cloud189File:
+		obj.Name = safeName
+	case *Cloud189Folder:
+		obj.Name = safeName
+	}
+
 	_, err := y.request(fullUrl, method, func(req *resty.Request) {
 		req.SetContext(ctx).SetQueryParams(queryParam)
 	}, nil, newObj, isFamily)

@@ -6,9 +6,10 @@ import (
 )

 type Addition struct {
-	Username string `json:"username" required:"true"`
-	Password string `json:"password" required:"true"`
-	VCode    string `json:"validate_code"`
+	Username   string `json:"username" required:"true"`
+	Password   string `json:"password" required:"true"`
+	VCode      string `json:"validate_code"`
+	StripEmoji bool   `json:"strip_emoji" help:"Remove four-byte characters (e.g., emoji) before upload"`
 	driver.RootID
 	OrderBy        string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
@@ -12,11 +12,13 @@ import (
 	"net/http/cookiejar"
 	"net/url"
 	"os"
+	"path"
 	"regexp"
 	"sort"
 	"strconv"
 	"strings"
 	"time"
+	"unicode/utf8"

 	"golang.org/x/sync/semaphore"

@@ -57,6 +59,29 @@ const (
 	CHANNEL_ID = "web_cloud.189.cn"
 )

+func (y *Cloud189PC) sanitizeName(name string) string {
+	if !y.StripEmoji {
+		return name
+	}
+	b := strings.Builder{}
+	for _, r := range name {
+		if utf8.RuneLen(r) == 4 {
+			continue
+		}
+		b.WriteRune(r)
+	}
+	sanitized := b.String()
+	if sanitized == "" {
+		ext := path.Ext(name)
+		if ext != "" {
+			sanitized = "file" + ext
+		} else {
+			sanitized = "file"
+		}
+	}
+	return sanitized
+}
+
 func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
 	dateOfGmt := getHttpDateStr()
 	sessionKey := y.getTokenInfo().SessionKey
@@ -475,10 +500,11 @@ func (y *Cloud189PC) refreshSession() (err error) {
 func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
 	size := file.GetSize()
 	sliceSize := partSize(size)
+	safeName := y.sanitizeName(file.GetName())

 	params := Params{
 		"parentFolderId": dstDir.GetID(),
-		"fileName":       url.QueryEscape(file.GetName()),
+		"fileName":       url.QueryEscape(safeName),
 		"fileSize":       fmt.Sprint(file.GetSize()),
 		"sliceSize":      fmt.Sprint(sliceSize),
 		"lazyCheck":      "1",
@@ -596,7 +622,8 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
 		return nil, errors.New("invalid hash")
 	}

-	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()), isFamily)
+	safeName := y.sanitizeName(stream.GetName())
+	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, safeName, fmt.Sprint(stream.GetSize()), isFamily)
 	if err != nil {
 		return nil, err
 	}
@@ -615,6 +642,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
 		tmpF *os.File
 		err  error
 	)
+	safeName := y.sanitizeName(file.GetName())
 	size := file.GetSize()
 	if _, ok := cache.(io.ReaderAt); !ok && size > 0 {
 		tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
@@ -697,7 +725,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
 	// step 2: pre-upload
 	params := Params{
 		"parentFolderId": dstDir.GetID(),
-		"fileName":       url.QueryEscape(file.GetName()),
+		"fileName":       url.QueryEscape(safeName),
 		"fileSize":       fmt.Sprint(file.GetSize()),
 		"fileMd5":        fileMd5Hex,
 		"sliceSize":      fmt.Sprint(sliceSize),
@@ -833,9 +861,10 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
 		return nil, err
 	}
 	rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
+	safeName := y.sanitizeName(file.GetName())

 	// create the upload session
-	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
+	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, safeName, fmt.Sprint(file.GetSize()), isFamily)
 	if err != nil {
 		return nil, err
 	}
@@ -21,6 +21,7 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_share"
+	_ "github.com/alist-org/alist/v3/drivers/bitqiu"
 	_ "github.com/alist-org/alist/v3/drivers/chaoxing"
 	_ "github.com/alist-org/alist/v3/drivers/cloudreve"
 	_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
@@ -30,8 +31,10 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/dropbox"
 	_ "github.com/alist-org/alist/v3/drivers/febbox"
 	_ "github.com/alist-org/alist/v3/drivers/ftp"
+	_ "github.com/alist-org/alist/v3/drivers/gitee"
 	_ "github.com/alist-org/alist/v3/drivers/github"
 	_ "github.com/alist-org/alist/v3/drivers/github_releases"
+	_ "github.com/alist-org/alist/v3/drivers/gofile"
 	_ "github.com/alist-org/alist/v3/drivers/google_drive"
 	_ "github.com/alist-org/alist/v3/drivers/google_photo"
 	_ "github.com/alist-org/alist/v3/drivers/halalcloud"
@@ -41,6 +44,7 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/lanzou"
 	_ "github.com/alist-org/alist/v3/drivers/lenovonas_share"
 	_ "github.com/alist-org/alist/v3/drivers/local"
+	_ "github.com/alist-org/alist/v3/drivers/mediafire"
 	_ "github.com/alist-org/alist/v3/drivers/mediatrack"
 	_ "github.com/alist-org/alist/v3/drivers/mega"
 	_ "github.com/alist-org/alist/v3/drivers/misskey"
@@ -49,8 +53,10 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/onedrive"
 	_ "github.com/alist-org/alist/v3/drivers/onedrive_app"
 	_ "github.com/alist-org/alist/v3/drivers/onedrive_sharelink"
+	_ "github.com/alist-org/alist/v3/drivers/pcloud"
 	_ "github.com/alist-org/alist/v3/drivers/pikpak"
 	_ "github.com/alist-org/alist/v3/drivers/pikpak_share"
+	_ "github.com/alist-org/alist/v3/drivers/proton_drive"
 	_ "github.com/alist-org/alist/v3/drivers/quark_uc"
 	_ "github.com/alist-org/alist/v3/drivers/quark_uc_tv"
 	_ "github.com/alist-org/alist/v3/drivers/quqi"
767 drivers/bitqiu/driver.go Normal file
@@ -0,0 +1,767 @@
package bitqiu

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http/cookiejar"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	streamPkg "github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	"github.com/google/uuid"
)

const (
	baseURL             = "https://pan.bitqiu.com"
	loginURL            = baseURL + "/loginServer/login"
	userInfoURL         = baseURL + "/user/getInfo"
	listURL             = baseURL + "/apiToken/cfi/fs/resources/pages"
	uploadInitializeURL = baseURL + "/apiToken/cfi/fs/upload/v2/initialize"
	uploadCompleteURL   = baseURL + "/apiToken/cfi/fs/upload/v2/complete"
	downloadURL         = baseURL + "/download/getUrl"
	createDirURL        = baseURL + "/resource/create"
	moveResourceURL     = baseURL + "/resource/remove"
	renameResourceURL   = baseURL + "/resource/rename"
	copyResourceURL     = baseURL + "/apiToken/cfi/fs/async/copy"
	copyManagerURL      = baseURL + "/apiToken/cfi/fs/async/manager"
	deleteResourceURL   = baseURL + "/resource/delete"

	successCode       = "10200"
	uploadSuccessCode = "30010"
	copySubmittedCode = "10300"
	orgChannel        = "default|default|default"
)

const (
	copyPollInterval    = time.Second
	copyPollMaxAttempts = 60
	chunkSize           = int64(1 << 20)
)

const defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"

type BitQiu struct {
	model.Storage
	Addition

	client *resty.Client
	userID string
}

func (d *BitQiu) Config() driver.Config {
	return config
}

func (d *BitQiu) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *BitQiu) Init(ctx context.Context) error {
	if d.Addition.UserPlatform == "" {
		d.Addition.UserPlatform = uuid.NewString()
		op.MustSaveDriverStorage(d)
	}

	if d.client == nil {
		jar, err := cookiejar.New(nil)
		if err != nil {
			return err
		}
		d.client = base.NewRestyClient()
		d.client.SetBaseURL(baseURL)
		d.client.SetCookieJar(jar)
	}
	d.client.SetHeader("user-agent", d.userAgent())

	return d.login(ctx)
}

func (d *BitQiu) Drop(ctx context.Context) error {
	d.client = nil
	d.userID = ""
	return nil
}

func (d *BitQiu) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	parentID := d.resolveParentID(dir)
	dirPath := ""
	if dir != nil {
		dirPath = dir.GetPath()
	}
	pageSize := d.pageSize()
	orderType := d.orderType()
	desc := d.orderDesc()

	var results []model.Obj
	page := 1
	for {
		form := map[string]string{
			"parentId":    parentID,
			"limit":       strconv.Itoa(pageSize),
			"orderType":   orderType,
			"desc":        desc,
			"model":       "1",
			"userId":      d.userID,
			"currentPage": strconv.Itoa(page),
			"page":        strconv.Itoa(page),
			"org_channel": orgChannel,
		}
		var resp Response[ResourcePage]
		if err := d.postForm(ctx, listURL, form, &resp); err != nil {
			return nil, err
		}
		if resp.Code != successCode {
			if resp.Code == "10401" || resp.Code == "10404" {
				if err := d.login(ctx); err != nil {
					return nil, err
				}
				continue
			}
			return nil, fmt.Errorf("list failed: %s", resp.Message)
		}

		objs, err := utils.SliceConvert(resp.Data.Data, func(item Resource) (model.Obj, error) {
			return item.toObject(parentID, dirPath)
		})
		if err != nil {
			return nil, err
		}
		results = append(results, objs...)

		if !resp.Data.HasNext || len(resp.Data.Data) == 0 {
			break
		}
		page++
	}

	return results, nil
}

func (d *BitQiu) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if file.IsDir() {
		return nil, errs.NotFile
	}
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	form := map[string]string{
		"fileIds":     file.GetID(),
		"org_channel": orgChannel,
	}
	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[DownloadData]
		if err := d.postForm(ctx, downloadURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode:
			if resp.Data.URL == "" {
				return nil, fmt.Errorf("empty download url returned")
			}
			return &model.Link{URL: resp.Data.URL}, nil
		case "10401", "10404":
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("get link failed: %s", resp.Message)
		}
	}
	return nil, fmt.Errorf("get link failed: retry limit reached")
}

func (d *BitQiu) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	parentID := d.resolveParentID(parentDir)
	parentPath := ""
	if parentDir != nil {
		parentPath = parentDir.GetPath()
	}
	form := map[string]string{
		"parentId":    parentID,
		"name":        dirName,
		"org_channel": orgChannel,
	}
	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[CreateDirData]
		if err := d.postForm(ctx, createDirURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode:
			newParentID := parentID
			if resp.Data.ParentID != "" {
				newParentID = resp.Data.ParentID
			}
			name := resp.Data.Name
			if name == "" {
				name = dirName
			}
			resource := Resource{
				ResourceID:   resp.Data.DirID,
				ResourceType: 1,
				Name:         name,
				ParentID:     newParentID,
			}
			obj, err := resource.toObject(newParentID, parentPath)
			if err != nil {
				return nil, err
			}
			if o, ok := obj.(*Object); ok {
				o.ParentID = newParentID
			}
			return obj, nil
		case "10401", "10404":
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("create folder failed: %s", resp.Message)
		}
	}
	return nil, fmt.Errorf("create folder failed: retry limit reached")
}

func (d *BitQiu) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	targetParentID := d.resolveParentID(dstDir)
	form := map[string]string{
		"dirIds":      "",
		"fileIds":     "",
		"parentId":    targetParentID,
		"org_channel": orgChannel,
	}
	if srcObj.IsDir() {
		form["dirIds"] = srcObj.GetID()
	} else {
		form["fileIds"] = srcObj.GetID()
	}

	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[any]
		if err := d.postForm(ctx, moveResourceURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode:
			dstPath := ""
			if dstDir != nil {
				dstPath = dstDir.GetPath()
			}
			if setter, ok := srcObj.(model.SetPath); ok {
				setter.SetPath(path.Join(dstPath, srcObj.GetName()))
			}
			if o, ok := srcObj.(*Object); ok {
				o.ParentID = targetParentID
			}
			return srcObj, nil
		case "10401", "10404":
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("move failed: %s", resp.Message)
		}
	}
	return nil, fmt.Errorf("move failed: retry limit reached")
}

func (d *BitQiu) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	form := map[string]string{
		"resourceId":  srcObj.GetID(),
		"name":        newName,
		"type":        "0",
		"org_channel": orgChannel,
	}
	if srcObj.IsDir() {
		form["type"] = "1"
	}

	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[any]
		if err := d.postForm(ctx, renameResourceURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode:
			return updateObjectName(srcObj, newName), nil
		case "10401", "10404":
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("rename failed: %s", resp.Message)
		}
	}
	return nil, fmt.Errorf("rename failed: retry limit reached")
}

func (d *BitQiu) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	targetParentID := d.resolveParentID(dstDir)
	form := map[string]string{
		"dirIds":      "",
		"fileIds":     "",
		"parentId":    targetParentID,
		"org_channel": orgChannel,
	}
	if srcObj.IsDir() {
		form["dirIds"] = srcObj.GetID()
	} else {
		form["fileIds"] = srcObj.GetID()
	}

	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[any]
		if err := d.postForm(ctx, copyResourceURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode, copySubmittedCode:
			return d.waitForCopiedObject(ctx, srcObj, dstDir)
		case "10401", "10404":
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("copy failed: %s", resp.Message)
		}
	}

	return nil, fmt.Errorf("copy failed: retry limit reached")
}

func (d *BitQiu) Remove(ctx context.Context, obj model.Obj) error {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return err
		}
	}

	form := map[string]string{
		"dirIds":      "",
		"fileIds":     "",
		"org_channel": orgChannel,
	}
	if obj.IsDir() {
		form["dirIds"] = obj.GetID()
	} else {
		form["fileIds"] = obj.GetID()
	}

	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[any]
		if err := d.postForm(ctx, deleteResourceURL, form, &resp); err != nil {
			return err
		}
		switch resp.Code {
		case successCode:
			return nil
		case "10401", "10404":
			if err := d.login(ctx); err != nil {
				return err
			}
		default:
			return fmt.Errorf("remove failed: %s", resp.Message)
		}
	}
	return fmt.Errorf("remove failed: retry limit reached")
}

func (d *BitQiu) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	up(0)
	tmpFile, md5sum, err := streamPkg.CacheFullInTempFileAndHash(file, utils.MD5)
	if err != nil {
		return nil, err
	}
	defer tmpFile.Close()

	parentID := d.resolveParentID(dstDir)
	parentPath := ""
	if dstDir != nil {
		parentPath = dstDir.GetPath()
	}
	form := map[string]string{
		"parentId":    parentID,
		"name":        file.GetName(),
		"size":        strconv.FormatInt(file.GetSize(), 10),
		"hash":        md5sum,
		"sampleMd5":   md5sum,
		"org_channel": orgChannel,
	}
	var resp Response[json.RawMessage]
	if err = d.postForm(ctx, uploadInitializeURL, form, &resp); err != nil {
		return nil, err
	}
	if resp.Code != uploadSuccessCode {
		switch resp.Code {
		case successCode:
			var initData UploadInitData
			if err := json.Unmarshal(resp.Data, &initData); err != nil {
				return nil, fmt.Errorf("parse upload init response failed: %w", err)
			}
			serverCode, err := d.uploadFileInChunks(ctx, tmpFile, file.GetSize(), md5sum, initData, up)
			if err != nil {
				return nil, err
			}
			obj, err := d.completeChunkUpload(ctx, initData, parentID, parentPath, file.GetName(), file.GetSize(), md5sum, serverCode)
			if err != nil {
				return nil, err
			}
			up(100)
			return obj, nil
		default:
			return nil, fmt.Errorf("upload failed: %s", resp.Message)
		}
	}

	var resource Resource
	if err := json.Unmarshal(resp.Data, &resource); err != nil {
		return nil, fmt.Errorf("parse upload response failed: %w", err)
	}
	obj, err := resource.toObject(parentID, parentPath)
	if err != nil {
		return nil, err
	}
	up(100)
	return obj, nil
}

func (d *BitQiu) uploadFileInChunks(ctx context.Context, tmpFile model.File, size int64, md5sum string, initData UploadInitData, up driver.UpdateProgress) (string, error) {
	if d.client == nil {
		return "", fmt.Errorf("client not initialized")
	}
	if size <= 0 {
		return "", fmt.Errorf("invalid file size")
	}
	buf := make([]byte, chunkSize)
	offset := int64(0)
	var finishedFlag string

	for offset < size {
		chunkLen := chunkSize
		remaining := size - offset
		if remaining < chunkLen {
			chunkLen = remaining
		}

		reader := io.NewSectionReader(tmpFile, offset, chunkLen)
		chunkBuf := buf[:chunkLen]
		if _, err := io.ReadFull(reader, chunkBuf); err != nil {
			return "", fmt.Errorf("read chunk failed: %w", err)
		}

		headers := map[string]string{
			"accept":       "*/*",
			"content-type": "application/octet-stream",
			"appid":        initData.AppID,
			"token":        initData.Token,
			"userid":       strconv.FormatInt(initData.UserID, 10),
			"serialnumber": initData.SerialNumber,
			"hash":         md5sum,
			"len":          strconv.FormatInt(chunkLen, 10),
			"offset":       strconv.FormatInt(offset, 10),
			"user-agent":   d.userAgent(),
		}

		var chunkResp ChunkUploadResponse
		req := d.client.R().
			SetContext(ctx).
			SetHeaders(headers).
			SetBody(chunkBuf).
			SetResult(&chunkResp)

		if _, err := req.Post(initData.UploadURL); err != nil {
			return "", err
		}
		if chunkResp.ErrCode != 0 {
			return "", fmt.Errorf("chunk upload failed with code %d", chunkResp.ErrCode)
		}
		finishedFlag = chunkResp.FinishedFlag
		offset += chunkLen
		up(float64(offset) * 100 / float64(size))
	}

	if finishedFlag == "" {
		return "", fmt.Errorf("upload finished without server code")
	}
	return finishedFlag, nil
}

func (d *BitQiu) completeChunkUpload(ctx context.Context, initData UploadInitData, parentID, parentPath, name string, size int64, md5sum, serverCode string) (model.Obj, error) {
	form := map[string]string{
		"currentPage": "1",
		"limit":       "1",
		"userId":      strconv.FormatInt(initData.UserID, 10),
		"status":      "0",
		"parentId":    parentID,
		"name":        name,
		"fileUid":     initData.FileUID,
		"fileSid":     initData.FileSID,
		"size":        strconv.FormatInt(size, 10),
		"serverCode":  serverCode,
		"snapTime":    "",
		"hash":        md5sum,
		"sampleMd5":   md5sum,
		"org_channel": orgChannel,
	}

	var resp Response[Resource]
	if err := d.postForm(ctx, uploadCompleteURL, form, &resp); err != nil {
		return nil, err
	}
	if resp.Code != successCode {
		return nil, fmt.Errorf("complete upload failed: %s", resp.Message)
	}

	return resp.Data.toObject(parentID, parentPath)
}

func (d *BitQiu) login(ctx context.Context) error {
	if d.client == nil {
		return fmt.Errorf("client not initialized")
	}

	form := map[string]string{
		"passport":    d.Username,
		"password":    utils.GetMD5EncodeStr(d.Password),
		"remember":    "0",
		"captcha":     "",
		"org_channel": orgChannel,
	}
	var resp Response[LoginData]
	if err := d.postForm(ctx, loginURL, form, &resp); err != nil {
		return err
	}
	if resp.Code != successCode {
		return fmt.Errorf("login failed: %s", resp.Message)
	}
	d.userID = strconv.FormatInt(resp.Data.UserID, 10)
	return d.ensureRootFolderID(ctx)
}

func (d *BitQiu) ensureRootFolderID(ctx context.Context) error {
	rootID := d.Addition.GetRootId()
	if rootID != "" && rootID != "0" {
		return nil
	}

	form := map[string]string{
		"org_channel": orgChannel,
	}
	var resp Response[UserInfoData]
	if err := d.postForm(ctx, userInfoURL, form, &resp); err != nil {
		return err
	}
	if resp.Code != successCode {
		return fmt.Errorf("get user info failed: %s", resp.Message)
	}
	if resp.Data.RootDirID == "" {
		return fmt.Errorf("get user info failed: empty root dir id")
	}
	if d.Addition.RootFolderID != resp.Data.RootDirID {
		d.Addition.RootFolderID = resp.Data.RootDirID
		op.MustSaveDriverStorage(d)
	}
	return nil
}

func (d *BitQiu) postForm(ctx context.Context, url string, form map[string]string, result interface{}) error {
	if d.client == nil {
		return fmt.Errorf("client not initialized")
	}
	req := d.client.R().
		SetContext(ctx).
		SetHeaders(d.commonHeaders()).
		SetFormData(form)
	if result != nil {
		req = req.SetResult(result)
	}
	_, err := req.Post(url)
	return err
}

func (d *BitQiu) waitForCopiedObject(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	expectedName := srcObj.GetName()
	expectedIsDir := srcObj.IsDir()
	var lastListErr error

	for attempt := 0; attempt < copyPollMaxAttempts; attempt++ {
		if attempt > 0 {
			if err := waitWithContext(ctx, copyPollInterval); err != nil {
				return nil, err
			}
		}

		if err := d.checkCopyFailure(ctx); err != nil {
			return nil, err
		}

		obj, err := d.findObjectInDir(ctx, dstDir, expectedName, expectedIsDir)
		if err != nil {
			lastListErr = err
			continue
		}
		if obj != nil {
			return obj, nil
		}
	}
	if lastListErr != nil {
		return nil, lastListErr
	}
	return nil, fmt.Errorf("copy task timed out waiting for completion")
}

func (d *BitQiu) checkCopyFailure(ctx context.Context) error {
	form := map[string]string{
		"org_channel": orgChannel,
	}
	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[AsyncManagerData]
		if err := d.postForm(ctx, copyManagerURL, form, &resp); err != nil {
			return err
		}
		switch resp.Code {
		case successCode:
			if len(resp.Data.FailTasks) > 0 {
				return fmt.Errorf("copy failed: %s", resp.Data.FailTasks[0].ErrorMessage())
			}
			return nil
		case "10401", "10404":
			if err := d.login(ctx); err != nil {
				return err
			}
		default:
			return fmt.Errorf("query copy status failed: %s", resp.Message)
		}
	}
	return fmt.Errorf("query copy status failed: retry limit reached")
}

func (d *BitQiu) findObjectInDir(ctx context.Context, dir model.Obj, name string, isDir bool) (model.Obj, error) {
	objs, err := d.List(ctx, dir, model.ListArgs{})
	if err != nil {
		return nil, err
	}
	for _, obj := range objs {
		if obj.GetName() == name && obj.IsDir() == isDir {
			return obj, nil
		}
	}
	return nil, nil
}

func waitWithContext(ctx context.Context, d time.Duration) error {
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-timer.C:
		return nil
	}
}

func (d *BitQiu) commonHeaders() map[string]string {
	headers := map[string]string{
		"accept":                 "application/json, text/plain, */*",
		"accept-language":        "en-US,en;q=0.9",
		"cache-control":          "no-cache",
		"pragma":                 "no-cache",
		"user-platform":          d.Addition.UserPlatform,
		"x-kl-saas-ajax-request": "Ajax_Request",
		"x-requested-with":       "XMLHttpRequest",
		"referer":                baseURL + "/",
		"origin":                 baseURL,
		"user-agent":             d.userAgent(),
	}
	return headers
}

func (d *BitQiu) userAgent() string {
	if ua := strings.TrimSpace(d.Addition.UserAgent); ua != "" {
		return ua
	}
	return defaultUserAgent
}

func (d *BitQiu) resolveParentID(dir model.Obj) string {
	if dir != nil && dir.GetID() != "" {
		return dir.GetID()
	}
	if root := d.Addition.GetRootId(); root != "" {
		return root
	}
	return config.DefaultRoot
}

func (d *BitQiu) pageSize() int {
	if size, err := strconv.Atoi(d.Addition.PageSize); err == nil && size > 0 {
		return size
	}
	return 24
}

func (d *BitQiu) orderType() string {
	if d.Addition.OrderType != "" {
		return d.Addition.OrderType
	}
	return "updateTime"
}

func (d *BitQiu) orderDesc() string {
	if d.Addition.OrderDesc {
		return "1"
	}
	return "0"
}

var _ driver.Driver = (*BitQiu)(nil)
var _ driver.PutResult = (*BitQiu)(nil)
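Apart from `List`, which loops until the last page, the BitQiu methods above all share the same two-attempt shape: call the endpoint, re-login on response codes `10401`/`10404` and try once more, and fail fast on anything else. A standalone sketch of that pattern, with placeholder `login` and `call` callbacks (the helper is illustrative, not part of the driver, which inlines the loop per method so each method's success handling stays next to its response decoding):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// withRelogin retries a call once after re-authenticating on the session
// codes "10401"/"10404"; any other non-success code aborts immediately.
func withRelogin(ctx context.Context,
	login func(context.Context) error,
	call func() (code, msg string, err error),
) error {
	for attempt := 0; attempt < 2; attempt++ {
		code, msg, err := call()
		if err != nil {
			return err
		}
		switch code {
		case "10200": // successCode
			return nil
		case "10401", "10404": // stale session: re-login, then retry
			if err := login(ctx); err != nil {
				return err
			}
		default:
			return fmt.Errorf("request failed: %s", msg)
		}
	}
	return errors.New("retry limit reached")
}

func main() {
	calls := 0
	err := withRelogin(context.Background(),
		func(context.Context) error { return nil }, // pretend re-login succeeds
		func() (string, string, error) {
			calls++
			if calls == 1 {
				return "10401", "session expired", nil // first attempt: stale session
			}
			return "10200", "ok", nil
		})
	fmt.Println(err, calls) // <nil> 2
}
```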
28 drivers/bitqiu/meta.go Normal file
@@ -0,0 +1,28 @@
package bitqiu

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootID
	Username     string `json:"username" required:"true"`
	Password     string `json:"password" required:"true"`
	UserPlatform string `json:"user_platform" help:"Optional device identifier; auto-generated if empty."`
	OrderType    string `json:"order_type" type:"select" options:"updateTime,createTime,name,size" default:"updateTime"`
	OrderDesc    bool   `json:"order_desc"`
	PageSize     string `json:"page_size" default:"24" help:"Number of entries to request per page."`
	UserAgent    string `json:"user_agent" default:"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"`
}

var config = driver.Config{
	Name:        "BitQiu",
	DefaultRoot: "0",
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &BitQiu{}
	})
}
107 drivers/bitqiu/types.go Normal file
@@ -0,0 +1,107 @@
package bitqiu

import "encoding/json"

type Response[T any] struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Data    T      `json:"data"`
}

type LoginData struct {
	UserID int64 `json:"userId"`
}

type ResourcePage struct {
	CurrentPage    int        `json:"currentPage"`
	PageSize       int        `json:"pageSize"`
	TotalCount     int        `json:"totalCount"`
	TotalPageCount int        `json:"totalPageCount"`
	Data           []Resource `json:"data"`
	HasNext        bool       `json:"hasNext"`
}

type Resource struct {
	ResourceID   string       `json:"resourceId"`
	ResourceUID  string       `json:"resourceUid"`
	ResourceType int          `json:"resourceType"`
	ParentID     string       `json:"parentId"`
	Name         string       `json:"name"`
	ExtName      string       `json:"extName"`
	Size         *json.Number `json:"size"`
	CreateTime   *string      `json:"createTime"`
	UpdateTime   *string      `json:"updateTime"`
	FileMD5      string       `json:"fileMd5"`
}

type DownloadData struct {
	URL  string `json:"url"`
	MD5  string `json:"md5"`
	Size int64  `json:"size"`
}

type UserInfoData struct {
	RootDirID string `json:"rootDirId"`
}

type CreateDirData struct {
	DirID    string `json:"dirId"`
	Name     string `json:"name"`
	ParentID string `json:"parentId"`
}

type AsyncManagerData struct {
	WaitTasks    []AsyncTask `json:"waitTaskList"`
	RunningTasks []AsyncTask `json:"runningTaskList"`
	SuccessTasks []AsyncTask `json:"successTaskList"`
	FailTasks    []AsyncTask `json:"failTaskList"`
	TaskList     []AsyncTask `json:"taskList"`
}

type AsyncTask struct {
	TaskID      string         `json:"taskId"`
	Status      int            `json:"status"`
	ErrorMsg    string         `json:"errorMsg"`
	Message     string         `json:"message"`
	Result      *AsyncTaskInfo `json:"result"`
	TargetName  string         `json:"targetName"`
	TargetDirID string         `json:"parentId"`
}

type AsyncTaskInfo struct {
	Resource Resource `json:"resource"`
	DirID    string   `json:"dirId"`
	FileID   string   `json:"fileId"`
	Name     string   `json:"name"`
	ParentID string   `json:"parentId"`
}

func (t AsyncTask) ErrorMessage() string {
	if t.ErrorMsg != "" {
		return t.ErrorMsg
	}
	if t.Message != "" {
		return t.Message
	}
	return "unknown error"
}

type UploadInitData struct {
	Name         string `json:"name"`
	Size         int64  `json:"size"`
	Token        string `json:"token"`
	FileUID      string `json:"fileUid"`
	FileSID      string `json:"fileSid"`
	ParentID     string `json:"parentId"`
	UserID       int64  `json:"userId"`
	SerialNumber string `json:"serialNumber"`
	UploadURL    string `json:"uploadUrl"`
	AppID        string `json:"appId"`
}

type ChunkUploadResponse struct {
	ErrCode      int    `json:"errCode"`
	Offset       int64  `json:"offset"`
	Finished     int    `json:"finished"`
	FinishedFlag string `json:"finishedFlag"`
}
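All endpoints share the envelope modeled by the generic `Response[T]` above: a string `code`, a `message`, and a payload whose shape varies per call (`Put` even decodes `data` twice, first as `json.RawMessage` and then as either `UploadInitData` or `Resource` depending on the code). A small decoding sketch with an invented payload:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Response mirrors the generic envelope in drivers/bitqiu/types.go.
type Response[T any] struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Data    T      `json:"data"`
}

type LoginData struct {
	UserID int64 `json:"userId"`
}

func main() {
	// Example payload, invented for illustration.
	raw := []byte(`{"code":"10200","message":"ok","data":{"userId":42}}`)
	var resp Response[LoginData]
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Code, resp.Data.UserID) // 10200 42
}
```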
102 drivers/bitqiu/util.go Normal file
@@ -0,0 +1,102 @@
package bitqiu

import (
	"path"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
)

type Object struct {
	model.Object
	ParentID string
}

func (r Resource) toObject(parentID, parentPath string) (model.Obj, error) {
	id := r.ResourceID
	if id == "" {
		id = r.ResourceUID
	}
	obj := &Object{
		Object: model.Object{
			ID:       id,
			Name:     r.Name,
			IsFolder: r.ResourceType == 1,
		},
		ParentID: parentID,
	}
	if r.Size != nil {
		if size, err := (*r.Size).Int64(); err == nil {
			obj.Size = size
		}
	}
	if ct := parseBitQiuTime(r.CreateTime); !ct.IsZero() {
		obj.Ctime = ct
	}
	if mt := parseBitQiuTime(r.UpdateTime); !mt.IsZero() {
		obj.Modified = mt
	}
	if r.FileMD5 != "" {
		obj.HashInfo = utils.NewHashInfo(utils.MD5, strings.ToLower(r.FileMD5))
	}
	obj.SetPath(path.Join(parentPath, obj.Name))
	return obj, nil
}

func parseBitQiuTime(value *string) time.Time {
	if value == nil {
		return time.Time{}
	}
	trimmed := strings.TrimSpace(*value)
	if trimmed == "" {
		return time.Time{}
	}
	if ts, err := time.ParseInLocation("2006-01-02 15:04:05", trimmed, time.Local); err == nil {
		return ts
	}
	return time.Time{}
}

func updateObjectName(obj model.Obj, newName string) model.Obj {
	newPath := path.Join(parentPathOf(obj.GetPath()), newName)

	switch o := obj.(type) {
	case *Object:
		o.Name = newName
		o.Object.Name = newName
		o.SetPath(newPath)
		return o
	case *model.Object:
		o.Name = newName
		o.SetPath(newPath)
		return o
	}

	if setter, ok := obj.(model.SetPath); ok {
		setter.SetPath(newPath)
	}

	return &model.Object{
		ID:       obj.GetID(),
		Path:     newPath,
		Name:     newName,
		Size:     obj.GetSize(),
		Modified: obj.ModTime(),
		Ctime:    obj.CreateTime(),
		IsFolder: obj.IsDir(),
		HashInfo: obj.GetHash(),
	}
}

func parentPathOf(p string) string {
	if p == "" {
		return ""
	}
	dir := path.Dir(p)
	if dir == "." {
		return ""
	}
	return dir
}
224
drivers/gitee/driver.go
Normal file
224
drivers/gitee/driver.go
Normal file
@@ -0,0 +1,224 @@
|
||||
package gitee
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
type Gitee struct {
|
||||
model.Storage
|
||||
Addition
|
||||
client *resty.Client
|
||||
}
|
||||
|
||||
func (d *Gitee) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *Gitee) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *Gitee) Init(ctx context.Context) error {
|
||||
d.RootFolderPath = utils.FixAndCleanPath(d.RootFolderPath)
|
||||
d.Endpoint = strings.TrimSpace(d.Endpoint)
|
||||
if d.Endpoint == "" {
|
||||
d.Endpoint = "https://gitee.com/api/v5"
|
||||
}
|
||||
d.Endpoint = strings.TrimSuffix(d.Endpoint, "/")
|
||||
d.Owner = strings.TrimSpace(d.Owner)
|
||||
d.Repo = strings.TrimSpace(d.Repo)
|
||||
d.Token = strings.TrimSpace(d.Token)
|
||||
d.DownloadProxy = strings.TrimSpace(d.DownloadProxy)
|
||||
if d.Owner == "" || d.Repo == "" {
|
||||
return errors.New("owner and repo are required")
|
||||
}
|
||||
d.client = base.NewRestyClient().
|
||||
SetBaseURL(d.Endpoint).
|
||||
SetHeader("Accept", "application/json")
|
||||
repo, err := d.getRepo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.Ref = strings.TrimSpace(d.Ref)
|
||||
if d.Ref == "" {
|
||||
d.Ref = repo.DefaultBranch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Gitee) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Gitee) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
	relPath := d.relativePath(dir.GetPath())
	contents, err := d.listContents(relPath)
	if err != nil {
		return nil, err
	}
	objs := make([]model.Obj, 0, len(contents))
	for i := range contents {
		objs = append(objs, contents[i].toModelObj())
	}
	return objs, nil
}

func (d *Gitee) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	var downloadURL string
	if obj, ok := file.(*Object); ok {
		downloadURL = obj.DownloadURL
		if downloadURL == "" {
			relPath := d.relativePath(file.GetPath())
			content, err := d.getContent(relPath)
			if err != nil {
				return nil, err
			}
			if content.DownloadURL == "" {
				return nil, errors.New("empty download url")
			}
			obj.DownloadURL = content.DownloadURL
			downloadURL = content.DownloadURL
		}
	} else {
		relPath := d.relativePath(file.GetPath())
		content, err := d.getContent(relPath)
		if err != nil {
			return nil, err
		}
		if content.DownloadURL == "" {
			return nil, errors.New("empty download url")
		}
		downloadURL = content.DownloadURL
	}
	url := d.applyProxy(downloadURL)
	return &model.Link{
		URL: url,
		Header: http.Header{
			"Cookie": {d.Cookie},
		},
	}, nil
}

func (d *Gitee) newRequest() *resty.Request {
	req := d.client.R()
	if d.Token != "" {
		req.SetQueryParam("access_token", d.Token)
	}
	if d.Ref != "" {
		req.SetQueryParam("ref", d.Ref)
	}
	return req
}

func (d *Gitee) apiPath(path string) string {
	escapedOwner := url.PathEscape(d.Owner)
	escapedRepo := url.PathEscape(d.Repo)
	if path == "" {
		return fmt.Sprintf("/repos/%s/%s/contents", escapedOwner, escapedRepo)
	}
	return fmt.Sprintf("/repos/%s/%s/contents/%s", escapedOwner, escapedRepo, encodePath(path))
}

func (d *Gitee) listContents(path string) ([]Content, error) {
	res, err := d.newRequest().Get(d.apiPath(path))
	if err != nil {
		return nil, err
	}
	if res.IsError() {
		return nil, toErr(res)
	}
	var contents []Content
	if err := utils.Json.Unmarshal(res.Body(), &contents); err != nil {
		var single Content
		if err2 := utils.Json.Unmarshal(res.Body(), &single); err2 == nil && single.Type != "" {
			if single.Type != "dir" {
				return nil, errs.NotFolder
			}
			return []Content{}, nil
		}
		return nil, err
	}
	for i := range contents {
		contents[i].Path = joinPath(path, contents[i].Name)
	}
	return contents, nil
}

func (d *Gitee) getContent(path string) (*Content, error) {
	res, err := d.newRequest().Get(d.apiPath(path))
	if err != nil {
		return nil, err
	}
	if res.IsError() {
		return nil, toErr(res)
	}
	var content Content
	if err := utils.Json.Unmarshal(res.Body(), &content); err != nil {
		return nil, err
	}
	if content.Type == "" {
		return nil, errors.New("invalid response")
	}
	if content.Path == "" {
		content.Path = path
	}
	return &content, nil
}

func (d *Gitee) relativePath(full string) string {
	full = utils.FixAndCleanPath(full)
	root := utils.FixAndCleanPath(d.RootFolderPath)
	if root == "/" {
		return strings.TrimPrefix(full, "/")
	}
	if utils.PathEqual(full, root) {
		return ""
	}
	prefix := utils.PathAddSeparatorSuffix(root)
	if strings.HasPrefix(full, prefix) {
		return strings.TrimPrefix(full, prefix)
	}
	return strings.TrimPrefix(full, "/")
}

func (d *Gitee) applyProxy(raw string) string {
	if raw == "" || d.DownloadProxy == "" {
		return raw
	}
	proxy := d.DownloadProxy
	if !strings.HasSuffix(proxy, "/") {
		proxy += "/"
	}
	return proxy + strings.TrimLeft(raw, "/")
}

func encodePath(p string) string {
	if p == "" {
		return ""
	}
	parts := strings.Split(p, "/")
	for i, part := range parts {
		parts[i] = url.PathEscape(part)
	}
	return strings.Join(parts, "/")
}

func joinPath(base, name string) string {
	if base == "" {
		return name
	}
	return strings.TrimPrefix(stdpath.Join(base, name), "./")
}
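The two path helpers at the bottom are pure functions, so their behavior is easy to verify in isolation. Below is a minimal, self-contained sketch (not part of the commit) copying them verbatim: note that encodePath escapes each segment but preserves "/" separators, and joinPath strips the "./" prefix that stdpath.Join produces for relative bases.

package main

import (
	"fmt"
	"net/url"
	stdpath "path"
	"strings"
)

// Standalone copies of the two pure helpers above, for illustration only.
func encodePath(p string) string {
	if p == "" {
		return ""
	}
	parts := strings.Split(p, "/")
	for i, part := range parts {
		parts[i] = url.PathEscape(part)
	}
	return strings.Join(parts, "/")
}

func joinPath(base, name string) string {
	if base == "" {
		return name
	}
	return strings.TrimPrefix(stdpath.Join(base, name), "./")
}

func main() {
	fmt.Println(encodePath("docs/my file#1.md")) // docs/my%20file%231.md
	fmt.Println(joinPath("", "README.md"))       // README.md
	fmt.Println(joinPath("docs", "a.png"))       // docs/a.png
}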
drivers/gitee/meta.go (new file, 29 lines)
@@ -0,0 +1,29 @@
package gitee

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootPath
	Endpoint      string `json:"endpoint" type:"string" help:"Gitee API endpoint, default https://gitee.com/api/v5"`
	Token         string `json:"token" type:"string"`
	Owner         string `json:"owner" type:"string" required:"true"`
	Repo          string `json:"repo" type:"string" required:"true"`
	Ref           string `json:"ref" type:"string" help:"Branch, tag or commit SHA, defaults to repository default branch"`
	DownloadProxy string `json:"download_proxy" type:"string" help:"Prefix added before download URLs, e.g. https://mirror.example.com/"`
	Cookie        string `json:"cookie" type:"string" help:"Cookie returned from user info request"`
}

var config = driver.Config{
	Name:        "Gitee",
	LocalSort:   true,
	DefaultRoot: "/",
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Gitee{}
	})
}
drivers/gitee/types.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package gitee

import (
	"time"

	"github.com/alist-org/alist/v3/internal/model"
)

type Links struct {
	Self string `json:"self"`
	Html string `json:"html"`
}

type Content struct {
	Type        string `json:"type"`
	Size        *int64 `json:"size"`
	Name        string `json:"name"`
	Path        string `json:"path"`
	Sha         string `json:"sha"`
	URL         string `json:"url"`
	HtmlURL     string `json:"html_url"`
	DownloadURL string `json:"download_url"`
	Links       Links  `json:"_links"`
}

func (c Content) toModelObj() model.Obj {
	size := int64(0)
	if c.Size != nil {
		size = *c.Size
	}
	return &Object{
		Object: model.Object{
			ID:       c.Path,
			Name:     c.Name,
			Size:     size,
			Modified: time.Unix(0, 0),
			IsFolder: c.Type == "dir",
		},
		DownloadURL: c.DownloadURL,
		HtmlURL:     c.HtmlURL,
	}
}

type Object struct {
	model.Object
	DownloadURL string
	HtmlURL     string
}

func (o *Object) URL() string {
	return o.DownloadURL
}

type Repo struct {
	DefaultBranch string `json:"default_branch"`
}

type ErrResp struct {
	Message string `json:"message"`
}
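Content.Size is a pointer because the Gitee API omits the "size" field for directories; toModelObj treats the nil pointer as zero. A tiny sketch of that idiom (not part of the commit):

package main

import "fmt"

func main() {
	var apiSize *int64 // nil: the JSON "size" field was absent
	size := int64(0)
	if apiSize != nil {
		size = *apiSize
	}
	fmt.Println(size) // 0
}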
drivers/gitee/util.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package gitee

import (
	"fmt"
	"net/url"

	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
)

func (d *Gitee) getRepo() (*Repo, error) {
	req := d.client.R()
	if d.Token != "" {
		req.SetQueryParam("access_token", d.Token)
	}
	if d.Cookie != "" {
		req.SetHeader("Cookie", d.Cookie)
	}
	escapedOwner := url.PathEscape(d.Owner)
	escapedRepo := url.PathEscape(d.Repo)
	res, err := req.Get(fmt.Sprintf("/repos/%s/%s", escapedOwner, escapedRepo))
	if err != nil {
		return nil, err
	}
	if res.IsError() {
		return nil, toErr(res)
	}
	var repo Repo
	if err := utils.Json.Unmarshal(res.Body(), &repo); err != nil {
		return nil, err
	}
	if repo.DefaultBranch == "" {
		return nil, fmt.Errorf("failed to fetch default branch")
	}
	return &repo, nil
}

func toErr(res *resty.Response) error {
	var errMsg ErrResp
	if err := utils.Json.Unmarshal(res.Body(), &errMsg); err == nil && errMsg.Message != "" {
		return fmt.Errorf("%s: %s", res.Status(), errMsg.Message)
	}
	return fmt.Errorf(res.Status())
}
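toErr prefers the API's own "message" field and falls back to the bare HTTP status. The same mapping, sketched over a raw status string and body so it runs without the repo's resty and utils dependencies (a standalone illustration, not the driver's code):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type errResp struct {
	Message string `json:"message"`
}

// Prefer the API's message field; fall back to the raw status line.
func toErr(status string, body []byte) error {
	var e errResp
	if err := json.Unmarshal(body, &e); err == nil && e.Message != "" {
		return fmt.Errorf("%s: %s", status, e.Message)
	}
	return errors.New(status)
}

func main() {
	fmt.Println(toErr("404 Not Found", []byte(`{"message":"File not found"}`)))
	fmt.Println(toErr("500 Internal Server Error", []byte("not json")))
}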
drivers/gofile/driver.go (new file, 271 lines)
@@ -0,0 +1,271 @@
package gofile

import (
	"context"
	"fmt"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
)

type Gofile struct {
	model.Storage
	Addition

	accountId string
}

func (d *Gofile) Config() driver.Config {
	return config
}

func (d *Gofile) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Gofile) Init(ctx context.Context) error {
	if d.APIToken == "" {
		return fmt.Errorf("API token is required")
	}

	// Get account ID
	accountId, err := d.getAccountId(ctx)
	if err != nil {
		return fmt.Errorf("failed to get account ID: %w", err)
	}
	d.accountId = accountId

	// Get account info to set root folder if not specified
	if d.RootFolderID == "" {
		accountInfo, err := d.getAccountInfo(ctx, accountId)
		if err != nil {
			return fmt.Errorf("failed to get account info: %w", err)
		}
		d.RootFolderID = accountInfo.Data.RootFolder
	}

	// Save driver storage
	op.MustSaveDriverStorage(d)
	return nil
}

func (d *Gofile) Drop(ctx context.Context) error {
	return nil
}

func (d *Gofile) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var folderId string
	if dir.GetID() == "" {
		folderId = d.GetRootId()
	} else {
		folderId = dir.GetID()
	}

	endpoint := fmt.Sprintf("/contents/%s", folderId)

	var response ContentsResponse
	err := d.getJSON(ctx, endpoint, &response)
	if err != nil {
		return nil, err
	}

	var objects []model.Obj

	// Process children or contents
	contents := response.Data.Children
	if contents == nil {
		contents = response.Data.Contents
	}

	for _, content := range contents {
		objects = append(objects, d.convertContentToObj(content))
	}

	return objects, nil
}

func (d *Gofile) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if file.IsDir() {
		return nil, errs.NotFile
	}

	// Create a direct link for the file
	directLink, err := d.createDirectLink(ctx, file.GetID())
	if err != nil {
		return nil, fmt.Errorf("failed to create direct link: %w", err)
	}

	// Configure cache expiration based on user setting
	link := &model.Link{
		URL: directLink,
	}

	// Only set expiration if LinkExpiry > 0 (0 means no caching)
	if d.LinkExpiry > 0 {
		expiration := time.Duration(d.LinkExpiry) * 24 * time.Hour
		link.Expiration = &expiration
	}

	return link, nil
}

func (d *Gofile) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	var parentId string
	if parentDir.GetID() == "" {
		parentId = d.GetRootId()
	} else {
		parentId = parentDir.GetID()
	}

	data := map[string]interface{}{
		"parentFolderId": parentId,
		"folderName":     dirName,
	}

	var response CreateFolderResponse
	err := d.postJSON(ctx, "/contents/createFolder", data, &response)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       response.Data.ID,
		Name:     response.Data.Name,
		IsFolder: true,
	}, nil
}

func (d *Gofile) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var dstId string
	if dstDir.GetID() == "" {
		dstId = d.GetRootId()
	} else {
		dstId = dstDir.GetID()
	}

	data := map[string]interface{}{
		"contentsId": srcObj.GetID(),
		"folderId":   dstId,
	}

	err := d.putJSON(ctx, "/contents/move", data, nil)
	if err != nil {
		return nil, err
	}

	// Return updated object
	return &model.Object{
		ID:       srcObj.GetID(),
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *Gofile) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	data := map[string]interface{}{
		"attribute":      "name",
		"attributeValue": newName,
	}

	var response UpdateResponse
	err := d.putJSON(ctx, fmt.Sprintf("/contents/%s/update", srcObj.GetID()), data, &response)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       srcObj.GetID(),
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *Gofile) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var dstId string
	if dstDir.GetID() == "" {
		dstId = d.GetRootId()
	} else {
		dstId = dstDir.GetID()
	}

	data := map[string]interface{}{
		"contentsId": srcObj.GetID(),
		"folderId":   dstId,
	}

	var response CopyResponse
	err := d.postJSON(ctx, "/contents/copy", data, &response)
	if err != nil {
		return nil, err
	}

	// Get the new ID from the response
	newId := srcObj.GetID()
	if response.Data.CopiedContents != nil {
		if id, ok := response.Data.CopiedContents[srcObj.GetID()]; ok {
			newId = id
		}
	}

	return &model.Object{
		ID:       newId,
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *Gofile) Remove(ctx context.Context, obj model.Obj) error {
	data := map[string]interface{}{
		"contentsId": obj.GetID(),
	}

	return d.deleteJSON(ctx, "/contents", data)
}

func (d *Gofile) Put(ctx context.Context, dstDir model.Obj, fileStreamer model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	var folderId string
	if dstDir.GetID() == "" {
		folderId = d.GetRootId()
	} else {
		folderId = dstDir.GetID()
	}

	response, err := d.uploadFile(ctx, folderId, fileStreamer, up)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       response.Data.FileId,
		Name:     response.Data.FileName,
		Size:     fileStreamer.GetSize(),
		IsFolder: false,
	}, nil
}

func (d *Gofile) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	return nil, errs.NotImplement
}

func (d *Gofile) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	return nil, errs.NotImplement
}

func (d *Gofile) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	return nil, errs.NotImplement
}

func (d *Gofile) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	return nil, errs.NotImplement
}

var _ driver.Driver = (*Gofile)(nil)
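Copy resolves the new object ID through the oldId -> newId map the API returns, keeping the source ID if no mapping comes back. The lookup in isolation (a sketch, not driver code):

package main

import "fmt"

func main() {
	// Mirrors the ID lookup in Copy above.
	copied := map[string]string{"old123": "new456"}
	srcID := "old123"
	newID := srcID
	if id, ok := copied[srcID]; ok {
		newID = id
	}
	fmt.Println(newID) // new456
}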
drivers/gofile/meta.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package gofile

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootID
	APIToken         string `json:"api_token" required:"true" help:"Get your API token from your Gofile profile page"`
	LinkExpiry       int    `json:"link_expiry" type:"number" default:"30" help:"Direct link cache duration in days. Set to 0 to disable caching"`
	DirectLinkExpiry int    `json:"direct_link_expiry" type:"number" default:"0" help:"Direct link expiration time in hours on Gofile server. Set to 0 for no expiration"`
}

var config = driver.Config{
	Name:        "Gofile",
	DefaultRoot: "",
	LocalSort:   false,
	OnlyProxy:   false,
	NoCache:     false,
	NoUpload:    false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Gofile{}
	})
}
drivers/gofile/types.go (new file, 124 lines)
@@ -0,0 +1,124 @@
package gofile

import "time"

type APIResponse struct {
	Status string      `json:"status"`
	Data   interface{} `json:"data"`
}

type AccountResponse struct {
	Status string `json:"status"`
	Data   struct {
		ID string `json:"id"`
	} `json:"data"`
}

type AccountInfoResponse struct {
	Status string `json:"status"`
	Data   struct {
		ID         string `json:"id"`
		Type       string `json:"type"`
		Email      string `json:"email"`
		RootFolder string `json:"rootFolder"`
	} `json:"data"`
}

type Content struct {
	ID           string             `json:"id"`
	Type         string             `json:"type"` // "file" or "folder"
	Name         string             `json:"name"`
	Size         int64              `json:"size,omitempty"`
	CreateTime   int64              `json:"createTime"`
	ModTime      int64              `json:"modTime,omitempty"`
	DirectLink   string             `json:"directLink,omitempty"`
	Children     map[string]Content `json:"children,omitempty"`
	ParentFolder string             `json:"parentFolder,omitempty"`
	MD5          string             `json:"md5,omitempty"`
	MimeType     string             `json:"mimeType,omitempty"`
	Link         string             `json:"link,omitempty"`
}

type ContentsResponse struct {
	Status string `json:"status"`
	Data   struct {
		IsOwner      bool               `json:"isOwner"`
		ID           string             `json:"id"`
		Type         string             `json:"type"`
		Name         string             `json:"name"`
		ParentFolder string             `json:"parentFolder"`
		CreateTime   int64              `json:"createTime"`
		ChildrenList []string           `json:"childrenList,omitempty"`
		Children     map[string]Content `json:"children,omitempty"`
		Contents     map[string]Content `json:"contents,omitempty"`
		Public       bool               `json:"public,omitempty"`
		Description  string             `json:"description,omitempty"`
		Tags         string             `json:"tags,omitempty"`
		Expiry       int64              `json:"expiry,omitempty"`
	} `json:"data"`
}

type UploadResponse struct {
	Status string `json:"status"`
	Data   struct {
		DownloadPage string `json:"downloadPage"`
		Code         string `json:"code"`
		ParentFolder string `json:"parentFolder"`
		FileId       string `json:"fileId"`
		FileName     string `json:"fileName"`
		GuestToken   string `json:"guestToken,omitempty"`
	} `json:"data"`
}

type DirectLinkResponse struct {
	Status string `json:"status"`
	Data   struct {
		DirectLink string `json:"directLink"`
		ID         string `json:"id"`
	} `json:"data"`
}

type CreateFolderResponse struct {
	Status string `json:"status"`
	Data   struct {
		ID           string `json:"id"`
		Type         string `json:"type"`
		Name         string `json:"name"`
		ParentFolder string `json:"parentFolder"`
		CreateTime   int64  `json:"createTime"`
	} `json:"data"`
}

type CopyResponse struct {
	Status string `json:"status"`
	Data   struct {
		CopiedContents map[string]string `json:"copiedContents"` // oldId -> newId mapping
	} `json:"data"`
}

type UpdateResponse struct {
	Status string `json:"status"`
	Data   struct {
		ID   string `json:"id"`
		Name string `json:"name"`
	} `json:"data"`
}

type ErrorResponse struct {
	Status string `json:"status"`
	Error  struct {
		Message string `json:"message"`
		Code    string `json:"code"`
	} `json:"error"`
}

func (c *Content) ModifiedTime() time.Time {
	if c.ModTime > 0 {
		return time.Unix(c.ModTime, 0)
	}
	return time.Unix(c.CreateTime, 0)
}

func (c *Content) IsDir() bool {
	return c.Type == "folder"
}
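ModifiedTime falls back to the creation timestamp when the API omits modTime. A self-contained sketch of the same fallback (illustration only, with a local struct instead of Content):

package main

import (
	"fmt"
	"time"
)

type content struct {
	CreateTime int64
	ModTime    int64
}

// Same fallback as Content.ModifiedTime above: prefer modTime, else createTime.
func (c content) modified() time.Time {
	if c.ModTime > 0 {
		return time.Unix(c.ModTime, 0)
	}
	return time.Unix(c.CreateTime, 0)
}

func main() {
	fmt.Println(content{CreateTime: 1700000000}.modified())
	fmt.Println(content{CreateTime: 1700000000, ModTime: 1700003600}.modified())
}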
drivers/gofile/util.go (new file, 265 lines)
@@ -0,0 +1,265 @@
package gofile

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	log "github.com/sirupsen/logrus"
)

const (
	baseAPI   = "https://api.gofile.io"
	uploadAPI = "https://upload.gofile.io"
)

func (d *Gofile) request(ctx context.Context, method, endpoint string, body io.Reader, headers map[string]string) (*http.Response, error) {
	var url string
	if strings.HasPrefix(endpoint, "http") {
		url = endpoint
	} else {
		url = baseAPI + endpoint
	}

	req, err := http.NewRequestWithContext(ctx, method, url, body)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Authorization", "Bearer "+d.APIToken)
	req.Header.Set("User-Agent", "AList/3.0")

	for k, v := range headers {
		req.Header.Set(k, v)
	}

	return base.HttpClient.Do(req)
}

func (d *Gofile) getJSON(ctx context.Context, endpoint string, result interface{}) error {
	resp, err := d.request(ctx, "GET", endpoint, nil, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return d.handleError(resp)
	}

	return json.NewDecoder(resp.Body).Decode(result)
}

func (d *Gofile) postJSON(ctx context.Context, endpoint string, data interface{}, result interface{}) error {
	jsonData, err := json.Marshal(data)
	if err != nil {
		return err
	}

	headers := map[string]string{
		"Content-Type": "application/json",
	}

	resp, err := d.request(ctx, "POST", endpoint, bytes.NewBuffer(jsonData), headers)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return d.handleError(resp)
	}

	if result != nil {
		return json.NewDecoder(resp.Body).Decode(result)
	}

	return nil
}

func (d *Gofile) putJSON(ctx context.Context, endpoint string, data interface{}, result interface{}) error {
	jsonData, err := json.Marshal(data)
	if err != nil {
		return err
	}

	headers := map[string]string{
		"Content-Type": "application/json",
	}

	resp, err := d.request(ctx, "PUT", endpoint, bytes.NewBuffer(jsonData), headers)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return d.handleError(resp)
	}

	if result != nil {
		return json.NewDecoder(resp.Body).Decode(result)
	}

	return nil
}

func (d *Gofile) deleteJSON(ctx context.Context, endpoint string, data interface{}) error {
	jsonData, err := json.Marshal(data)
	if err != nil {
		return err
	}

	headers := map[string]string{
		"Content-Type": "application/json",
	}

	resp, err := d.request(ctx, "DELETE", endpoint, bytes.NewBuffer(jsonData), headers)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return d.handleError(resp)
	}

	return nil
}

func (d *Gofile) handleError(resp *http.Response) error {
	body, _ := io.ReadAll(resp.Body)
	log.Debugf("Gofile API error (HTTP %d): %s", resp.StatusCode, string(body))

	var errorResp ErrorResponse
	if err := json.Unmarshal(body, &errorResp); err == nil && errorResp.Status == "error" {
		return fmt.Errorf("gofile API error: %s (code: %s)", errorResp.Error.Message, errorResp.Error.Code)
	}

	return fmt.Errorf("gofile API error: HTTP %d - %s", resp.StatusCode, string(body))
}

func (d *Gofile) uploadFile(ctx context.Context, folderId string, file model.FileStreamer, up driver.UpdateProgress) (*UploadResponse, error) {
	var body bytes.Buffer
	writer := multipart.NewWriter(&body)

	if folderId != "" {
		writer.WriteField("folderId", folderId)
	}

	part, err := writer.CreateFormFile("file", filepath.Base(file.GetName()))
	if err != nil {
		return nil, err
	}

	// Copy with progress tracking if available
	if up != nil {
		reader := &progressReader{
			reader: file,
			total:  file.GetSize(),
			up:     up,
		}
		_, err = io.Copy(part, reader)
	} else {
		_, err = io.Copy(part, file)
	}

	if err != nil {
		return nil, err
	}

	writer.Close()

	headers := map[string]string{
		"Content-Type": writer.FormDataContentType(),
	}

	resp, err := d.request(ctx, "POST", uploadAPI+"/uploadfile", &body, headers)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, d.handleError(resp)
	}

	var result UploadResponse
	err = json.NewDecoder(resp.Body).Decode(&result)
	return &result, err
}

func (d *Gofile) createDirectLink(ctx context.Context, contentId string) (string, error) {
	data := map[string]interface{}{}

	if d.DirectLinkExpiry > 0 {
		expireTime := time.Now().Add(time.Duration(d.DirectLinkExpiry) * time.Hour).Unix()
		data["expireTime"] = expireTime
	}

	var result DirectLinkResponse
	err := d.postJSON(ctx, fmt.Sprintf("/contents/%s/directlinks", contentId), data, &result)
	if err != nil {
		return "", err
	}

	return result.Data.DirectLink, nil
}

func (d *Gofile) convertContentToObj(content Content) model.Obj {
	return &model.ObjThumb{
		Object: model.Object{
			ID:       content.ID,
			Name:     content.Name,
			Size:     content.Size,
			Modified: content.ModifiedTime(),
			IsFolder: content.IsDir(),
		},
	}
}

func (d *Gofile) getAccountId(ctx context.Context) (string, error) {
	var result AccountResponse
	err := d.getJSON(ctx, "/accounts/getid", &result)
	if err != nil {
		return "", err
	}
	return result.Data.ID, nil
}

func (d *Gofile) getAccountInfo(ctx context.Context, accountId string) (*AccountInfoResponse, error) {
	var result AccountInfoResponse
	err := d.getJSON(ctx, fmt.Sprintf("/accounts/%s", accountId), &result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}

// progressReader wraps an io.Reader to track upload progress
type progressReader struct {
	reader io.Reader
	total  int64
	read   int64
	up     driver.UpdateProgress
}

func (pr *progressReader) Read(p []byte) (n int, err error) {
	n, err = pr.reader.Read(p)
	pr.read += int64(n)
	if pr.up != nil && pr.total > 0 {
		progress := float64(pr.read) * 100 / float64(pr.total)
		pr.up(progress)
	}
	return n, err
}
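progressReader has no external dependencies beyond the driver.UpdateProgress callback type, so its behavior is easy to check in isolation. A minimal runnable sketch (not part of the commit; UpdateProgress is a local stand-in for the repo's driver.UpdateProgress):

package main

import (
	"fmt"
	"io"
	"strings"
)

// Local stand-in for driver.UpdateProgress, for this sketch only.
type UpdateProgress func(percentage float64)

type progressReader struct {
	reader io.Reader
	total  int64
	read   int64
	up     UpdateProgress
}

// Read forwards to the wrapped reader and reports cumulative percentage.
func (pr *progressReader) Read(p []byte) (n int, err error) {
	n, err = pr.reader.Read(p)
	pr.read += int64(n)
	if pr.up != nil && pr.total > 0 {
		pr.up(float64(pr.read) * 100 / float64(pr.total))
	}
	return n, err
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 1024))
	pr := &progressReader{reader: src, total: 1024, up: func(p float64) {
		fmt.Printf("%.0f%%\n", p)
	}}
	_, _ = io.Copy(io.Discard, pr)
}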
@@ -430,17 +430,35 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
 	file.Time = timeFindReg.FindString(sharePageData)
 
 	// follow the redirect to obtain the real download link
-	res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
+	headers := map[string]string{
 		"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
-	}).Get(downloadUrl)
+	}
+	res, err := base.NoRedirectClient.R().SetHeaders(headers).Get(downloadUrl)
 	if err != nil {
 		return nil, err
 	}
 
+	rPageData := res.String()
+	if findAcwScV2Reg.MatchString(rPageData) {
+		log.Debug("lanzou: detected acw_sc__v2 challenge, recalculating cookie")
+		acwScV2, err := CalcAcwScV2(rPageData)
+		if err != nil {
+			return nil, err
+		}
+		// retry with calculated cookie to bypass anti-crawler validation
+		res, err = base.NoRedirectClient.R().
+			SetHeaders(headers).
+			SetCookie(&http.Cookie{Name: "acw_sc__v2", Value: acwScV2}).
+			Get(downloadUrl)
+		if err != nil {
+			return nil, err
+		}
+		rPageData = res.String()
+	}
+
 	file.Url = res.Header().Get("location")
 
 	// this may trigger the verification flow
-	rPageData := res.String()
 	if res.StatusCode() != 302 {
 		param, err = htmlJsonToMap(rPageData)
 		if err != nil {
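The pattern added here is: fetch once, detect the anti-crawler challenge in the body, compute the expected cookie, then retry the same request once with that cookie attached. A generic sketch of the flow with resty (looksLikeChallenge and calcCookie are hypothetical stand-ins for the driver's findAcwScV2Reg and CalcAcwScV2):

package main

import (
	"fmt"
	"net/http"

	"github.com/go-resty/resty/v2"
)

// Hypothetical challenge detector and cookie calculator, for this sketch only.
func looksLikeChallenge(body string) bool     { return len(body) > 0 && body[0] == '<' }
func calcCookie(body string) (string, error)  { return "computed-value", nil }

func fetchWithChallengeRetry(client *resty.Client, url string) (*resty.Response, error) {
	res, err := client.R().Get(url)
	if err != nil {
		return nil, err
	}
	if looksLikeChallenge(res.String()) {
		cookie, err := calcCookie(res.String())
		if err != nil {
			return nil, err
		}
		// Retry once with the computed anti-crawler cookie attached.
		return client.R().
			SetCookie(&http.Cookie{Name: "acw_sc__v2", Value: cookie}).
			Get(url)
	}
	return res, nil
}

func main() {
	res, err := fetchWithChallengeRetry(resty.New(), "https://example.com")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(res.StatusCode())
}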
@@ -146,13 +146,14 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
 			thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name()))
 		}
 	}
-	isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
+	filePath := filepath.Join(fullPath, f.Name())
+	isFolder := f.IsDir() || isLinkedDir(f, filePath)
 	var size int64
 	if !isFolder {
 		size = f.Size()
 	}
 	var ctime time.Time
-	t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
+	t, err := times.Stat(filePath)
 	if err == nil {
 		if t.HasBirthTime() {
 			ctime = t.BirthTime()
@@ -161,7 +162,7 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
 
 	file := model.ObjThumb{
 		Object: model.Object{
-			Path:     filepath.Join(fullPath, f.Name()),
+			Path:     filePath,
 			Name:     f.Name(),
 			Modified: f.ModTime(),
 			Size:     size,
@@ -197,7 +198,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
 		}
 		return nil, err
 	}
-	isFolder := f.IsDir() || isSymlinkDir(f, path)
+	isFolder := f.IsDir() || isLinkedDir(f, path)
 	size := f.Size()
 	if isFolder {
 		size = 0
@@ -7,6 +7,7 @@ import (
 	"io/fs"
 	"os"
 	"path/filepath"
+	"runtime"
 	"sort"
 	"strconv"
 	"strings"
@@ -18,14 +19,18 @@ import (
 	ffmpeg "github.com/u2takey/ffmpeg-go"
 )
 
-func isSymlinkDir(f fs.FileInfo, path string) bool {
-	if f.Mode()&os.ModeSymlink == os.ModeSymlink {
-		dst, err := os.Readlink(filepath.Join(path, f.Name()))
+func isLinkedDir(f fs.FileInfo, path string) bool {
+	if f.Mode()&os.ModeSymlink == os.ModeSymlink || (runtime.GOOS == "windows" && f.Mode()&os.ModeIrregular != 0) {
+		dst, err := os.Readlink(path)
 		if err != nil {
 			return false
 		}
 		if !filepath.IsAbs(dst) {
-			dst = filepath.Join(path, dst)
+			dst = filepath.Join(filepath.Dir(path), dst)
 		}
 		dst, err = filepath.Abs(dst)
 		if err != nil {
 			return false
 		}
 		stat, err := os.Stat(dst)
 		if err != nil {
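The key fix here is how a relative symlink target is resolved: it is now joined against the directory containing the link (filepath.Dir(path)), not against the link path itself. A runnable sketch of that resolution step (not driver code; creating symlinks requires Unix or Windows developer mode):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Resolves a symlink target the way isLinkedDir now does: relative targets
// are interpreted against the directory containing the link.
func resolveTarget(linkPath string) (string, error) {
	dst, err := os.Readlink(linkPath)
	if err != nil {
		return "", err
	}
	if !filepath.IsAbs(dst) {
		dst = filepath.Join(filepath.Dir(linkPath), dst)
	}
	return filepath.Abs(dst)
}

func main() {
	dir, _ := os.MkdirTemp("", "demo")
	defer os.RemoveAll(dir)
	_ = os.Mkdir(filepath.Join(dir, "target"), 0o755)
	_ = os.Symlink("target", filepath.Join(dir, "link"))
	fmt.Println(resolveTarget(filepath.Join(dir, "link"))) // .../demo.../target
}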
drivers/mediafire/driver.go (new file, 433 lines)
@@ -0,0 +1,433 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking
*/

import (
	"context"
	"fmt"
	"math/rand"
	"net/http"
	"os"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/cron"
	"github.com/alist-org/alist/v3/pkg/utils"
)

type Mediafire struct {
	model.Storage
	Addition
	cron *cron.Cron

	actionToken string

	appBase    string
	apiBase    string
	hostBase   string
	maxRetries int

	secChUa         string
	secChUaPlatform string
	userAgent       string
}

func (d *Mediafire) Config() driver.Config {
	return config
}

func (d *Mediafire) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Mediafire) Init(ctx context.Context) error {
	if d.SessionToken == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
	}

	if d.Cookie == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
	}

	if _, err := d.getSessionToken(ctx); err != nil {
		d.renewToken(ctx)

		num := rand.Intn(4) + 6

		d.cron = cron.NewCron(time.Minute * time.Duration(num))
		d.cron.Do(func() {
			d.renewToken(ctx)
		})
	}

	return nil
}

func (d *Mediafire) Drop(ctx context.Context) error {
	return nil
}

func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	files, err := d.getFiles(ctx, dir.GetID())
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
		return d.fileToObj(src), nil
	})
}

func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
	if err != nil {
		return nil, err
	}

	res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Get(downloadUrl)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = res.RawBody().Close()
	}()

	if res.StatusCode() == 302 {
		downloadUrl = res.Header().Get("location")
	}

	return &model.Link{
		URL: downloadUrl,
		Header: http.Header{
			"Origin":             []string{d.appBase},
			"Referer":            []string{d.appBase + "/"},
			"sec-ch-ua":          []string{d.secChUa},
			"sec-ch-ua-platform": []string{d.secChUaPlatform},
			"User-Agent":         []string{d.userAgent},
			//"User-Agent": []string{base.UserAgent},
		},
	}, nil
}

func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"parent_key":      parentDir.GetID(),
		"foldername":      dirName,
	}

	var resp MediafireFolderCreateResponse
	_, err := d.postForm("/folder/create.php", data, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)

	return &model.ObjThumb{
		Object: model.Object{
			ID:       resp.Response.FolderKey,
			Name:     resp.Response.Name,
			Size:     0,
			Modified: created,
			Ctime:    created,
			IsFolder: true,
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}

func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {
		endpoint = "/folder/move.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	} else {
		endpoint = "/file/move.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"folder_key":      dstDir.GetID(),
		}
	}

	var resp MediafireMoveResponse
	_, err := d.postForm(endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	return srcObj, nil
}

func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {
		endpoint = "/folder/update.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      srcObj.GetID(),
			"foldername":      newName,
		}
	} else {
		endpoint = "/file/update.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"filename":        newName,
		}
	}

	var resp MediafireRenameResponse
	_, err := d.postForm(endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	return &model.ObjThumb{
		Object: model.Object{
			ID:       srcObj.GetID(),
			Name:     newName,
			Size:     srcObj.GetSize(),
			Modified: srcObj.ModTime(),
			Ctime:    srcObj.CreateTime(),
			IsFolder: srcObj.IsDir(),
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}

func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var data map[string]string
	var endpoint string

	if srcObj.IsDir() {
		endpoint = "/folder/copy.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key_src":  srcObj.GetID(),
			"folder_key_dst":  dstDir.GetID(),
		}
	} else {
		endpoint = "/file/copy.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       srcObj.GetID(),
			"folder_key":      dstDir.GetID(),
		}
	}

	var resp MediafireCopyResponse
	_, err := d.postForm(endpoint, data, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	var newID string
	if srcObj.IsDir() {
		if len(resp.Response.NewFolderKeys) > 0 {
			newID = resp.Response.NewFolderKeys[0]
		}
	} else {
		if len(resp.Response.NewQuickKeys) > 0 {
			newID = resp.Response.NewQuickKeys[0]
		}
	}

	return &model.ObjThumb{
		Object: model.Object{
			ID:       newID,
			Name:     srcObj.GetName(),
			Size:     srcObj.GetSize(),
			Modified: srcObj.ModTime(),
			Ctime:    srcObj.CreateTime(),
			IsFolder: srcObj.IsDir(),
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}

func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
	var data map[string]string
	var endpoint string

	if obj.IsDir() {
		endpoint = "/folder/delete.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"folder_key":      obj.GetID(),
		}
	} else {
		endpoint = "/file/delete.php"
		data = map[string]string{
			"session_token":   d.SessionToken,
			"response_format": "json",
			"quick_key":       obj.GetID(),
		}
	}

	var resp MediafireRemoveResponse
	_, err := d.postForm(endpoint, data, &resp)
	if err != nil {
		return err
	}

	if resp.Response.Result != "Success" {
		return fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	return nil
}

func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	_, err := d.PutResult(ctx, dstDir, file, up)
	return err
}

func (d *Mediafire) PutResult(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	tempFile, err := file.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}
	defer tempFile.Close()

	osFile, ok := tempFile.(*os.File)
	if !ok {
		return nil, fmt.Errorf("expected *os.File, got %T", tempFile)
	}

	fileHash, err := d.calculateSHA256(osFile)
	if err != nil {
		return nil, err
	}

	checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
	if err != nil {
		return nil, err
	}

	if checkResp.Response.ResumableUpload.AllUnitsReady == "yes" {
		up(100.0)
	}

	if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
		up(100.0)
		existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
		if err == nil {
			return existingFile, nil
		}
	}

	var pollKey string

	if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {
		var err error

		pollKey, err = d.uploadUnits(ctx, osFile, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
		if err != nil {
			return nil, err
		}
	} else {
		pollKey = checkResp.Response.ResumableUpload.UploadKey
	}

	//fmt.Printf("pollKey: %+v\n", pollKey)

	pollResp, err := d.pollUpload(ctx, pollKey)
	if err != nil {
		return nil, err
	}

	quickKey := pollResp.Response.Doupload.QuickKey

	return &model.ObjThumb{
		Object: model.Object{
			ID:   quickKey,
			Name: file.GetName(),
			Size: file.GetSize(),
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}

func (d *Mediafire) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Mediafire) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Mediafire) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Mediafire) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}

//func (d *Mediafire) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Mediafire)(nil)
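Init schedules token renewal on a randomized 6-to-9-minute interval, so several instances sharing the same account don't all hit the renewal endpoint in lockstep. The jitter calculation in isolation (a sketch, not driver code):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// rand.Intn(4) yields 0..3, so the interval is always 6..9 minutes.
	num := rand.Intn(4) + 6
	fmt.Println(time.Minute * time.Duration(num)) // e.g. 7m0s
}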
drivers/mediafire/meta.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking
*/

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootPath
	//driver.RootID

	SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
	Cookie       string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`

	OrderBy        string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	ChunkSize      int64  `json:"chunk_size" type:"number" default:"100"`
}

var config = driver.Config{
	Name:              "MediaFire",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Mediafire{
			appBase:         "https://app.mediafire.com",
			apiBase:         "https://www.mediafire.com/api/1.5",
			hostBase:        "https://www.mediafire.com",
			maxRetries:      3,
			secChUa:         "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
			secChUaPlatform: "Windows",
			userAgent:       "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
		}
	})
}
drivers/mediafire/types.go (new file, 232 lines)
@@ -0,0 +1,232 @@
package mediafire

/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11

D@' 3z K!7 - The King Of Cracking
*/

type MediafireRenewTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		SessionToken      string `json:"session_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireResponse struct {
	Response struct {
		Action        string `json:"action"`
		FolderContent struct {
			ChunkSize   string            `json:"chunk_size"`
			ContentType string            `json:"content_type"`
			ChunkNumber string            `json:"chunk_number"`
			FolderKey   string            `json:"folderkey"`
			Folders     []MediafireFolder `json:"folders,omitempty"`
			Files       []MediafireFile   `json:"files,omitempty"`
			MoreChunks  string            `json:"more_chunks"`
		} `json:"folder_content"`
		Result string `json:"result"`
	} `json:"response"`
}

type MediafireFolder struct {
	FolderKey  string `json:"folderkey"`
	Name       string `json:"name"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
}

type MediafireFile struct {
	QuickKey   string `json:"quickkey"`
	Filename   string `json:"filename"`
	Size       string `json:"size"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
	MimeType   string `json:"mimetype"`
}

type File struct {
	ID         string
	Name       string
	Size       int64
	CreatedUTC string
	IsFolder   bool
}

type FolderContentResponse struct {
	Folders    []MediafireFolder
	Files      []MediafireFile
	MoreChunks bool
}

type MediafireLinksResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			View           string `json:"view"`
			NormalDownload string `json:"normal_download"`
			OneTime        struct {
				Download string `json:"download"`
				View     string `json:"view"`
			} `json:"one_time"`
		} `json:"links"`
		OneTimeKeyRequestCount    string `json:"one_time_key_request_count"`
		OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
		Result                    string `json:"result"`
		CurrentAPIVersion         string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireDirectDownloadResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			DirectDownload string `json:"direct_download"`
		} `json:"links"`
		DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
		Result                      string `json:"result"`
		CurrentAPIVersion           string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireFolderCreateResponse struct {
	Response struct {
		Action            string `json:"action"`
		FolderKey         string `json:"folder_key"`
		UploadKey         string `json:"upload_key"`
		ParentFolderKey   string `json:"parent_folderkey"`
		Name              string `json:"name"`
		Description       string `json:"description"`
		Created           string `json:"created"`
		CreatedUTC        string `json:"created_utc"`
		Privacy           string `json:"privacy"`
		FileCount         string `json:"file_count"`
		FolderCount       string `json:"folder_count"`
		Revision          string `json:"revision"`
		DropboxEnabled    string `json:"dropbox_enabled"`
		Flag              string `json:"flag"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireMoveResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewNames          []string `json:"new_names"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireRenameResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireCopyResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewQuickKeys      []string `json:"new_quickkeys,omitempty"`
		NewFolderKeys     []string `json:"new_folderkeys,omitempty"`
		SkippedCount      string   `json:"skipped_count,omitempty"`
		OtherCount        string   `json:"other_count,omitempty"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireRemoveResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

type MediafireCheckResponse struct {
	Response struct {
		Action          string `json:"action"`
		HashExists      string `json:"hash_exists"`
		InAccount       string `json:"in_account"`
		InFolder        string `json:"in_folder"`
		FileExists      string `json:"file_exists"`
		ResumableUpload struct {
			AllUnitsReady string `json:"all_units_ready"`
			NumberOfUnits string `json:"number_of_units"`
			UnitSize      string `json:"unit_size"`
			Bitmap        struct {
				Count string   `json:"count"`
				Words []string `json:"words"`
			} `json:"bitmap"`
			UploadKey string `json:"upload_key"`
		} `json:"resumable_upload"`
		AvailableSpace       string `json:"available_space"`
		UsedStorageSize      string `json:"used_storage_size"`
		StorageLimit         string `json:"storage_limit"`
		StorageLimitExceeded string `json:"storage_limit_exceeded"`
		UploadURL            struct {
			Simple            string `json:"simple"`
			SimpleFallback    string `json:"simple_fallback"`
			Resumable         string `json:"resumable"`
			ResumableFallback string `json:"resumable_fallback"`
		} `json:"upload_url"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireActionTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		ActionToken       string `json:"action_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafirePollResponse struct {
	Response struct {
		Action   string `json:"action"`
		Doupload struct {
			Result      string `json:"result"`
			Status      string `json:"status"`
			Description string `json:"description"`
			QuickKey    string `json:"quickkey"`
			Hash        string `json:"hash"`
			Filename    string `json:"filename"`
			Size        string `json:"size"`
			Created     string `json:"created"`
			CreatedUTC  string `json:"created_utc"`
			Revision    string `json:"revision"`
		} `json:"doupload"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

type MediafireFileSearchResponse struct {
	Response struct {
		Action            string `json:"action"`
		FileInfo          []File `json:"file_info"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
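Note that MediaFire returns numeric fields (sizes, counts, revisions) as JSON strings, which is why these structs declare them as string and the driver parses them on use. A self-contained sketch of decoding such a payload (illustration only, with a local struct instead of MediafireFile):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

type mediafireFile struct {
	QuickKey string `json:"quickkey"`
	Filename string `json:"filename"`
	Size     string `json:"size"` // numeric value shipped as a string
}

func main() {
	raw := []byte(`{"quickkey":"abc123","filename":"a.bin","size":"1048576"}`)
	var f mediafireFile
	if err := json.Unmarshal(raw, &f); err != nil {
		panic(err)
	}
	size, _ := strconv.ParseInt(f.Size, 10, 64) // mirrors getFiles in util.go
	fmt.Println(f.Filename, size)               // a.bin 1048576
}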
drivers/mediafire/util.go (new file, 626 lines)
@@ -0,0 +1,626 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
)
|
||||
|
||||
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
|
||||
tokenURL := d.hostBase + "/application/get_session_token.php"
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
req.Header.Set("Accept", "*/*")
|
||||
req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
|
||||
req.Header.Set("Accept-Language", "en-US,en;q=0.9")
|
||||
req.Header.Set("Content-Length", "0")
|
||||
req.Header.Set("Cookie", d.Cookie)
|
||||
req.Header.Set("DNT", "1")
|
||||
req.Header.Set("Origin", d.hostBase)
|
||||
req.Header.Set("Priority", "u=1, i")
|
||||
req.Header.Set("Referer", (d.hostBase + "/"))
|
||||
req.Header.Set("Sec-Ch-Ua", d.secChUa)
|
||||
req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
|
||||
req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
|
||||
req.Header.Set("Sec-Fetch-Dest", "empty")
|
||||
req.Header.Set("Sec-Fetch-Mode", "cors")
|
||||
req.Header.Set("Sec-Fetch-Site", "same-site")
|
||||
req.Header.Set("User-Agent", d.userAgent)
|
||||
//req.Header.Set("Connection", "keep-alive")
|
||||
|
||||
resp, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
//fmt.Printf("getSessionToken :: Raw response: %s\n", string(body))
|
||||
//fmt.Printf("getSessionToken :: Parsed response: %+v\n", resp)
|
||||
|
||||
var tokenResp struct {
|
||||
Response struct {
|
||||
SessionToken string `json:"session_token"`
|
||||
} `json:"response"`
|
||||
}
|
||||
|
||||
if resp.StatusCode == 200 {
|
||||
if err := json.Unmarshal(body, &tokenResp); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if tokenResp.Response.SessionToken == "" {
|
||||
return "", fmt.Errorf("empty session token received")
|
||||
}
|
||||
|
||||
cookieMap := make(map[string]string)
|
||||
for _, cookie := range resp.Cookies() {
|
||||
cookieMap[cookie.Name] = cookie.Value
|
||||
}
|
||||
|
||||
if len(cookieMap) > 0 {
|
||||
|
||||
var cookies []string
|
||||
for name, value := range cookieMap {
|
||||
cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
|
||||
}
|
||||
d.Cookie = strings.Join(cookies, "; ")
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
//fmt.Printf("getSessionToken :: Captured cookies: %s\n", d.Cookie)
|
||||
}
|
||||
|
||||
} else {
|
||||
return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
d.SessionToken = tokenResp.Response.SessionToken
|
||||
|
||||
//fmt.Printf("Init :: Obtain Session Token %v", d.SessionToken)
|
||||
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
return d.SessionToken, nil
|
||||
}

func (d *Mediafire) renewToken(_ context.Context) error {
	query := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
	}

	var resp MediafireRenewTokenResponse
	_, err := d.postForm("/user/renew_session_token.php", query, &resp)
	if err != nil {
		return fmt.Errorf("failed to renew token: %w", err)
	}

	//fmt.Printf("renewToken :: Raw response: %s\n", string(body))
	//fmt.Printf("renewToken :: Parsed response: %+v\n", resp)

	if resp.Response.Result != "Success" {
		return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
	}

	d.SessionToken = resp.Response.SessionToken

	//fmt.Printf("Init :: Renew Session Token: %s", resp.Response.Result)

	op.MustSaveDriverStorage(d)

	return nil
}

func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
	files := make([]File, 0)
	hasMore := true
	chunkNumber := 1

	for hasMore {
		resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
		if err != nil {
			return nil, err
		}

		for _, folder := range resp.Folders {
			files = append(files, File{
				ID:         folder.FolderKey,
				Name:       folder.Name,
				Size:       0,
				CreatedUTC: folder.CreatedUTC,
				IsFolder:   true,
			})
		}

		for _, file := range resp.Files {
			size, _ := strconv.ParseInt(file.Size, 10, 64)
			files = append(files, File{
				ID:         file.QuickKey,
				Name:       file.Filename,
				Size:       size,
				CreatedUTC: file.CreatedUTC,
				IsFolder:   false,
			})
		}

		hasMore = resp.MoreChunks
		chunkNumber++
	}

	return files, nil
}

func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
	foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
	if err != nil {
		return nil, err
	}

	filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
	if err != nil {
		return nil, err
	}

	return &FolderContentResponse{
		Folders:    foldersResp.Response.FolderContent.Folders,
		Files:      filesResp.Response.FolderContent.Files,
		MoreChunks: foldersResp.Response.FolderContent.MoreChunks == "yes" || filesResp.Response.FolderContent.MoreChunks == "yes",
	}, nil
}

func (d *Mediafire) getFolderContentByType(_ context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"folder_key":      folderKey,
		"content_type":    contentType,
		"chunk":           strconv.Itoa(chunkNumber),
		"chunk_size":      strconv.FormatInt(d.ChunkSize, 10),
		"details":         "yes",
		"order_direction": d.OrderDirection,
		"order_by":        d.OrderBy,
		"filter":          "",
	}

	var resp MediafireResponse
	_, err := d.postForm("/folder/get_content.php", data, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	return &resp, nil
}

func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
	created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)

	var thumbnailURL string
	if !f.IsFolder && f.ID != "" {
		thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
	}

	return &model.ObjThumb{
		Object: model.Object{
			ID: f.ID,
			//Path: "",
			Name:     f.Name,
			Size:     f.Size,
			Modified: created,
			Ctime:    created,
			IsFolder: f.IsFolder,
		},
		Thumbnail: model.Thumbnail{
			Thumbnail: thumbnailURL,
		},
	}
}

func (d *Mediafire) getForm(endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()

	req.SetQueryParams(query)

	req.SetHeaders(map[string]string{
		"Cookie": d.Cookie,
		//"User-Agent": base.UserAgent,
		"User-Agent": d.userAgent,
		"Origin":     d.appBase,
		"Referer":    d.appBase + "/",
	})

	// If response OK
	if resp != nil {
		req.SetResult(resp)
	}

	// Targets MediaFire API
	res, err := req.Get(d.apiBase + endpoint)
	if err != nil {
		return nil, err
	}

	return res.Body(), nil
}

func (d *Mediafire) postForm(endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()

	req.SetFormData(data)

	req.SetHeaders(map[string]string{
		"Cookie":       d.Cookie,
		"Content-Type": "application/x-www-form-urlencoded",
		//"User-Agent": base.UserAgent,
		"User-Agent": d.userAgent,
		"Origin":     d.appBase,
		"Referer":    d.appBase + "/",
	})

	// If response OK
	if resp != nil {
		req.SetResult(resp)
	}

	// Targets MediaFire API
	res, err := req.Post(d.apiBase + endpoint)
	if err != nil {
		return nil, err
	}

	return res.Body(), nil
}

func (d *Mediafire) getDirectDownloadLink(_ context.Context, fileID string) (string, error) {
	data := map[string]string{
		"session_token":   d.SessionToken,
		"quick_key":       fileID,
		"link_type":       "direct_download",
		"response_format": "json",
	}

	var resp MediafireDirectDownloadResponse
	_, err := d.getForm("/file/get_links.php", data, &resp)
	if err != nil {
		return "", err
	}

	if resp.Response.Result != "Success" {
		return "", fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
	}

	if len(resp.Response.Links) == 0 {
		return "", fmt.Errorf("no download links found")
	}

	return resp.Response.Links[0].DirectDownload, nil
}

func (d *Mediafire) calculateSHA256(file *os.File) (string, error) {
	hasher := sha256.New()
	if _, err := file.Seek(0, 0); err != nil {
		return "", err
	}
	if _, err := io.Copy(hasher, file); err != nil {
		return "", err
	}
	return hex.EncodeToString(hasher.Sum(nil)), nil
}

func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	query := map[string]string{
		"session_token":   actionToken, /* d.SessionToken */
		"filename":        filename,
		"size":            strconv.FormatInt(filesize, 10),
		"hash":            filehash,
		"folder_key":      folderKey,
		"resumable":       "yes",
		"response_format": "json",
	}

	var resp MediafireCheckResponse
	_, err = d.postForm("/upload/check.php", query, &resp)
	if err != nil {
		return nil, err
	}

	//fmt.Printf("uploadCheck :: Raw response: %s\n", string(body))
	//fmt.Printf("uploadCheck :: Parsed response: %+v\n", resp)

	//fmt.Printf("uploadCheck :: ResumableUpload section: %+v\n", resp.Response.ResumableUpload)
	//fmt.Printf("uploadCheck :: Upload key specifically: '%s'\n", resp.Response.ResumableUpload.UploadKey)

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire upload check failed: %s", resp.Response.Result)
	}

	return &resp, nil
}

func (d *Mediafire) resumableUpload(ctx context.Context, folderKey, uploadKey string, unitData []byte, unitID int, fileHash, filename string, totalFileSize int64) (string, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return "", err
	}

	url := d.apiBase + "/upload/resumable.php"
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(unitData))
	if err != nil {
		return "", err
	}

	q := req.URL.Query()
	q.Add("folder_key", folderKey)
	q.Add("response_format", "json")
	q.Add("session_token", actionToken)
	q.Add("key", uploadKey)
	req.URL.RawQuery = q.Encode()

	req.Header.Set("x-filehash", fileHash)
	req.Header.Set("x-filesize", strconv.FormatInt(totalFileSize, 10))
	req.Header.Set("x-unit-id", strconv.Itoa(unitID))
	req.Header.Set("x-unit-size", strconv.FormatInt(int64(len(unitData)), 10))
	req.Header.Set("x-unit-hash", d.sha256Hex(bytes.NewReader(unitData)))
	req.Header.Set("x-filename", filename)
	req.Header.Set("Content-Type", "application/octet-stream")
	req.ContentLength = int64(len(unitData))

	/* fmt.Printf("Debug resumable upload request:\n")
	fmt.Printf("  URL: %s\n", req.URL.String())
	fmt.Printf("  Headers: %+v\n", req.Header)
	fmt.Printf("  Unit ID: %d\n", unitID)
	fmt.Printf("  Unit Size: %d\n", len(unitData))
	fmt.Printf("  Upload Key: %s\n", uploadKey)
	fmt.Printf("  Action Token: %s\n", actionToken) */

	res, err := base.HttpClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response body: %v", err)
	}

	//fmt.Printf("MediaFire resumable upload response (status %d): %s\n", res.StatusCode, string(body))

	var uploadResp struct {
		Response struct {
			Doupload struct {
				Key string `json:"key"`
			} `json:"doupload"`
			Result string `json:"result"`
		} `json:"response"`
	}

	if err := json.Unmarshal(body, &uploadResp); err != nil {
		return "", fmt.Errorf("failed to parse response: %v", err)
	}

	if res.StatusCode != 200 {
		return "", fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
	}

	return uploadResp.Response.Doupload.Key, nil
}

func (d *Mediafire) uploadUnits(ctx context.Context, file *os.File, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
	unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
	numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
	uploadKey := checkResp.Response.ResumableUpload.UploadKey

	stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
	intWords := make([]int, len(stringWords))
	for i, word := range stringWords {
		intWords[i], _ = strconv.Atoi(word)
	}

	var finalUploadKey string

	for unitID := 0; unitID < numUnits; unitID++ {
		if utils.IsCanceled(ctx) {
			return "", ctx.Err()
		}

		if d.isUnitUploaded(intWords, unitID) {
			up(float64(unitID+1) * 100 / float64(numUnits))
			continue
		}

		// Note: := shadows the outer uploadKey for this iteration; the
		// per-unit key returned by the server is kept via finalUploadKey.
		uploadKey, err := d.uploadSingleUnit(ctx, file, unitID, unitSize, fileHash, filename, uploadKey, folderKey)
		if err != nil {
			return "", err
		}

		finalUploadKey = uploadKey

		up(float64(unitID+1) * 100 / float64(numUnits))
	}

	return finalUploadKey, nil
}

func (d *Mediafire) uploadSingleUnit(ctx context.Context, file *os.File, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string) (string, error) {
	start := int64(unitID) * unitSize
	size := unitSize

	stat, err := file.Stat()
	if err != nil {
		return "", err
	}
	fileSize := stat.Size()

	if start+size > fileSize {
		size = fileSize - start
	}

	unitData := make([]byte, size)
	if _, err := file.ReadAt(unitData, start); err != nil {
		return "", err
	}

	return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
}
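
// Illustrative arithmetic, not part of the driver: with a hypothetical
// unitSize of 4 MiB and a 10 MiB file, unit 2 starts at offset 8 MiB and is
// truncated to the 2 MiB remainder:
//
//	start := int64(2) * (4 << 20)                    // 8 MiB
//	size := int64(4 << 20)                           // 4 MiB
//	if start+size > 10<<20 { size = 10<<20 - start } // size becomes 2 MiB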

func (d *Mediafire) getActionToken(_ context.Context) (string, error) {
	if d.actionToken != "" {
		return d.actionToken, nil
	}

	data := map[string]string{
		"type":            "upload",
		"lifespan":        "1440",
		"response_format": "json",
		"session_token":   d.SessionToken,
	}

	var resp MediafireActionTokenResponse
	_, err := d.postForm("/user/get_action_token.php", data, &resp)
	if err != nil {
		return "", err
	}

	if resp.Response.Result != "Success" {
		return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
	}

	// Cache the token so the early return above takes effect on later calls.
	d.actionToken = resp.Response.ActionToken

	return resp.Response.ActionToken, nil
}

func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
	actionToken, err := d.getActionToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get action token: %w", err)
	}

	//fmt.Printf("Debug Key: %+v\n", key)

	query := map[string]string{
		"key":             key,
		"response_format": "json",
		"session_token":   actionToken, /* d.SessionToken */
	}

	var resp MediafirePollResponse
	_, err = d.postForm("/upload/poll_upload.php", query, &resp)
	if err != nil {
		return nil, err
	}

	//fmt.Printf("pollUpload :: Raw response: %s\n", string(body))
	//fmt.Printf("pollUpload :: Parsed response: %+v\n", resp)

	//fmt.Printf("pollUpload :: Debug Result: %+v\n", resp.Response.Result)

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire poll upload failed: %s", resp.Response.Result)
	}

	return &resp, nil
}
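
// exampleResumableFlow is an illustrative sketch, not part of the driver: it
// only shows how the helpers in this file are meant to combine (the actual
// Put entry point presumably lives in driver.go and may differ).
func (d *Mediafire) exampleResumableFlow(ctx context.Context, f *os.File, name, folderKey string, up driver.UpdateProgress) error {
	hash, err := d.calculateSHA256(f) // hash first so MediaFire can dedupe/resume
	if err != nil {
		return err
	}
	stat, err := f.Stat()
	if err != nil {
		return err
	}
	check, err := d.uploadCheck(ctx, name, stat.Size(), hash, folderKey) // ask which units are missing
	if err != nil {
		return err
	}
	key, err := d.uploadUnits(ctx, f, check, name, hash, folderKey, up) // push only the missing units
	if err != nil {
		return err
	}
	_, err = d.pollUpload(ctx, key) // wait for server-side assembly
	return err
}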

func (d *Mediafire) sha256Hex(r io.Reader) string {
	h := sha256.New()
	// The only call site passes an in-memory bytes.Reader, so Copy cannot fail here.
	_, _ = io.Copy(h, r)
	return hex.EncodeToString(h.Sum(nil))
}

func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
	wordIndex := unitID / 16
	bitIndex := unitID % 16
	if wordIndex >= len(words) {
		return false
	}
	return (words[wordIndex]>>bitIndex)&1 == 1
}
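
// exampleBitmapCheck is an illustrative sketch, not part of the driver: it
// shows how the resumable-upload bitmap is interpreted. Each word carries 16
// unit flags, so unit 17 lives in words[1], bit 1. The values are made up.
func exampleBitmapCheck() {
	words := []int{0b101, 0b10} // hypothetical: units 0, 2, and 17 already uploaded
	for _, id := range []int{0, 1, 2, 17, 40} {
		wordIndex, bitIndex := id/16, id%16
		uploaded := wordIndex < len(words) && (words[wordIndex]>>bitIndex)&1 == 1
		fmt.Printf("unit %d uploaded: %v\n", id, uploaded) // unit 40 is past the bitmap, so false
	}
}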

func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
	if fileInfo, err := d.getFileByHash(ctx, fileHash); err == nil && fileInfo != nil {
		return fileInfo, nil
	}

	files, err := d.getFiles(ctx, folderKey)
	if err != nil {
		return nil, err
	}

	for _, file := range files {
		if file.Name == filename && !file.IsFolder {
			return d.fileToObj(file), nil
		}
	}

	return nil, fmt.Errorf("existing file not found")
}

func (d *Mediafire) getFileByHash(_ context.Context, hash string) (*model.ObjThumb, error) {
	query := map[string]string{
		"session_token":   d.SessionToken,
		"response_format": "json",
		"hash":            hash,
	}

	var resp MediafireFileSearchResponse
	_, err := d.postForm("/file/get_info.php", query, &resp)
	if err != nil {
		return nil, err
	}

	if resp.Response.Result != "Success" {
		return nil, fmt.Errorf("MediaFire file search failed: %s", resp.Response.Result)
	}

	if len(resp.Response.FileInfo) == 0 {
		return nil, fmt.Errorf("file not found by hash")
	}

	file := resp.Response.FileInfo[0]
	return d.fileToObj(file), nil
}

@@ -9,8 +9,9 @@ type Addition struct {
	AccessToken string `json:"access_token" required:"true"`
	ProjectID   string `json:"project_id"`
	driver.RootID
	OrderBy           string `json:"order_by" type:"select" options:"updated_at,title,size" default:"title"`
	OrderDesc         bool   `json:"order_desc"`
	DeviceFingerprint string `json:"device_fingerprint" required:"true"`
}

var config = driver.Config{

@@ -17,6 +17,9 @@ import (
func (d *MediaTrack) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()
	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
	if d.DeviceFingerprint != "" {
		req.SetHeader("X-Device-Fingerprint", d.DeviceFingerprint)
	}
	if callback != nil {
		callback(req)
	}

drivers/pcloud/driver.go (new file, 189 lines)
@@ -0,0 +1,189 @@

package pcloud

import (
	"context"
	"fmt"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
)

type PCloud struct {
	model.Storage
	Addition
	AccessToken string // Actual access token obtained from refresh token
}

func (d *PCloud) Config() driver.Config {
	return config
}

func (d *PCloud) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *PCloud) Init(ctx context.Context) error {
	// Map hostname selection to actual API endpoints
	if d.Hostname == "us" {
		d.Hostname = "api.pcloud.com"
	} else if d.Hostname == "eu" {
		d.Hostname = "eapi.pcloud.com"
	}

	// Set default root folder ID if not provided
	if d.RootFolderID == "" {
		d.RootFolderID = "d0"
	}

	// Use the access token directly (like rclone)
	d.AccessToken = d.RefreshToken // RefreshToken field actually contains the access_token
	return nil
}

func (d *PCloud) Drop(ctx context.Context) error {
	return nil
}

func (d *PCloud) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	folderID := d.RootFolderID
	if dir.GetID() != "" {
		folderID = dir.GetID()
	}

	files, err := d.getFiles(folderID)
	if err != nil {
		return nil, err
	}

	return utils.SliceConvert(files, func(src FileObject) (model.Obj, error) {
		return fileToObj(src), nil
	})
}

func (d *PCloud) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	downloadURL, err := d.getDownloadLink(file.GetID())
	if err != nil {
		return nil, err
	}

	return &model.Link{
		URL: downloadURL,
	}, nil
}

// MakeDir implements driver.Mkdir
func (d *PCloud) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	parentID := d.RootFolderID
	if parentDir.GetID() != "" {
		parentID = parentDir.GetID()
	}

	return d.createFolder(parentID, dirName)
}

// Move implements driver.Move
func (d *PCloud) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	// pCloud uses renamefile/renamefolder for both rename and move
	endpoint := "/renamefile"
	paramName := "fileid"

	if srcObj.IsDir() {
		endpoint = "/renamefolder"
		paramName = "folderid"
	}

	var resp ItemResult
	_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
		req.SetFormData(map[string]string{
			paramName:    extractID(srcObj.GetID()),
			"tofolderid": extractID(dstDir.GetID()),
			"toname":     srcObj.GetName(),
		})
	}, &resp)

	if err != nil {
		return err
	}

	if resp.Result != 0 {
		return fmt.Errorf("pCloud error: result code %d", resp.Result)
	}

	return nil
}

// Rename implements driver.Rename
func (d *PCloud) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	endpoint := "/renamefile"
	paramName := "fileid"

	if srcObj.IsDir() {
		endpoint = "/renamefolder"
		paramName = "folderid"
	}

	var resp ItemResult
	_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
		req.SetFormData(map[string]string{
			paramName: extractID(srcObj.GetID()),
			"toname":  newName,
		})
	}, &resp)

	if err != nil {
		return err
	}

	if resp.Result != 0 {
		return fmt.Errorf("pCloud error: result code %d", resp.Result)
	}

	return nil
}

// Copy implements driver.Copy
func (d *PCloud) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	endpoint := "/copyfile"
	paramName := "fileid"

	if srcObj.IsDir() {
		endpoint = "/copyfolder"
		paramName = "folderid"
	}

	var resp ItemResult
	_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
		req.SetFormData(map[string]string{
			paramName:    extractID(srcObj.GetID()),
			"tofolderid": extractID(dstDir.GetID()),
			"toname":     srcObj.GetName(),
		})
	}, &resp)

	if err != nil {
		return err
	}

	if resp.Result != 0 {
		return fmt.Errorf("pCloud error: result code %d", resp.Result)
	}

	return nil
}

// Remove implements driver.Remove
func (d *PCloud) Remove(ctx context.Context, obj model.Obj) error {
	return d.delete(obj.GetID(), obj.IsDir())
}

// Put implements driver.Put
func (d *PCloud) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	parentID := d.RootFolderID
	if dstDir.GetID() != "" {
		parentID = dstDir.GetID()
	}

	return d.uploadFile(ctx, stream, parentID, stream.GetName(), stream.GetSize())
}

drivers/pcloud/meta.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package pcloud

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Using json tag "access_token" for UI display, but internally it's a refresh token
	RefreshToken string `json:"access_token" required:"true" help:"OAuth token from pCloud authorization"`
	Hostname     string `json:"hostname" type:"select" options:"us,eu" default:"us" help:"Select pCloud server region"`
	RootFolderID string `json:"root_folder_id" help:"Get folder ID from URL like https://my.pcloud.com/#/filemanager?folder=12345678901 (leave empty for root folder)"`
	ClientID     string `json:"client_id" help:"Custom OAuth client ID (optional)"`
	ClientSecret string `json:"client_secret" help:"Custom OAuth client secret (optional)"`
}

// Implement IRootId interface
func (a Addition) GetRootId() string {
	return a.RootFolderID
}

var config = driver.Config{
	Name: "pCloud",
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &PCloud{}
	})
}

drivers/pcloud/types.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package pcloud

import (
	"strconv"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
)

// ErrorResult represents a pCloud API error response
type ErrorResult struct {
	Result int    `json:"result"`
	Error  string `json:"error"`
}

// TokenResponse represents an OAuth token response
type TokenResponse struct {
	AccessToken string `json:"access_token"`
	TokenType   string `json:"token_type"`
}

// ItemResult represents a common pCloud API response
type ItemResult struct {
	Result   int         `json:"result"`
	Metadata *FolderMeta `json:"metadata,omitempty"`
}

// FolderMeta contains folder metadata including contents
type FolderMeta struct {
	Contents []FileObject `json:"contents,omitempty"`
}

// DownloadLinkResult represents a download link response
type DownloadLinkResult struct {
	Result int      `json:"result"`
	Hosts  []string `json:"hosts"`
	Path   string   `json:"path"`
}

// FileObject represents a file or folder object in pCloud
type FileObject struct {
	Name     string `json:"name"`
	Created  string `json:"created"`  // pCloud returns an RFC1123-format string
	Modified string `json:"modified"` // pCloud returns an RFC1123-format string
	IsFolder bool   `json:"isfolder"`
	FolderID uint64 `json:"folderid,omitempty"`
	FileID   uint64 `json:"fileid,omitempty"`
	Size     uint64 `json:"size"`
	ParentID uint64 `json:"parentfolderid"`
	Icon     string `json:"icon,omitempty"`
	Hash     uint64 `json:"hash,omitempty"`
	Category int    `json:"category,omitempty"`
	ID       string `json:"id,omitempty"`
}

// fileToObj converts a FileObject to a model.Obj
func fileToObj(f FileObject) model.Obj {
	// Parse the RFC1123-format time returned by pCloud
	modTime, _ := time.Parse(time.RFC1123, f.Modified)

	obj := model.Object{
		Name:     f.Name,
		Size:     int64(f.Size),
		Modified: modTime,
		IsFolder: f.IsFolder,
	}

	if f.IsFolder {
		obj.ID = "d" + strconv.FormatUint(f.FolderID, 10)
	} else {
		obj.ID = "f" + strconv.FormatUint(f.FileID, 10)
	}

	return &obj
}

// extractID strips the 'd' or 'f' prefix from a string ID
func extractID(id string) string {
	if len(id) > 1 && (id[0] == 'd' || id[0] == 'f') {
		return id[1:]
	}
	return id
}
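
// Illustrative round-trip, not part of the driver: fileToObj prefixes pCloud
// numeric IDs with "d" (folder) or "f" (file), and extractID strips the
// prefix again for the API's folderid/fileid form parameters. The values
// below are made up:
//
//	obj.ID = "d" + strconv.FormatUint(123, 10) // "d123"
//	extractID("d123")                          // "123"
//	extractID("456")                           // unprefixed input passes through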

// getFolderID maps a path to a folder ID, returning "0" for the root
func getFolderID(path string) string {
	if path == "/" || path == "" {
		return "0"
	}
	return extractID(path)
}

drivers/pcloud/util.go (new file, 297 lines)
@@ -0,0 +1,297 @@
package pcloud

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
)

const (
	defaultClientID     = "DnONSzyJXpm"
	defaultClientSecret = "VKEnd3ze4jsKFGg8TJiznwFG8"
)

// getAPIURL returns the API base URL
func (d *PCloud) getAPIURL() string {
	return "https://" + d.Hostname
}

// getClientCredentials returns the OAuth client credentials
func (d *PCloud) getClientCredentials() (string, string) {
	clientID := d.ClientID
	clientSecret := d.ClientSecret

	if clientID == "" {
		clientID = defaultClientID
	}
	if clientSecret == "" {
		clientSecret = defaultClientSecret
	}

	return clientID, clientSecret
}

// refreshToken refreshes the OAuth access token
func (d *PCloud) refreshToken() error {
	clientID, clientSecret := d.getClientCredentials()

	var resp TokenResponse
	_, err := base.RestyClient.R().
		SetFormData(map[string]string{
			"client_id":     clientID,
			"client_secret": clientSecret,
			"grant_type":    "refresh_token",
			"refresh_token": d.RefreshToken,
		}).
		SetResult(&resp).
		Post(d.getAPIURL() + "/oauth2_token")

	if err != nil {
		return err
	}

	d.AccessToken = resp.AccessToken
	return nil
}

// shouldRetry determines if an error should be retried based on pCloud-specific logic
func (d *PCloud) shouldRetry(statusCode int, apiError *ErrorResult) bool {
	// HTTP-level retry conditions
	if statusCode == 429 || statusCode >= 500 {
		return true
	}

	// pCloud API-specific retry conditions (like rclone)
	if apiError != nil && apiError.Result != 0 {
		// 4xxx: rate limiting
		if apiError.Result/1000 == 4 {
			return true
		}
		// 5xxx: internal errors
		if apiError.Result/1000 == 5 {
			return true
		}
	}

	return false
}
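
// Illustrative classification, not part of the driver: how shouldRetry sorts
// a few representative inputs. The result codes are hypothetical examples of
// the 4xxx/5xxx bands that the integer division checks for:
//
//	d.shouldRetry(429, nil)                        // true: HTTP rate limit
//	d.shouldRetry(503, nil)                        // true: HTTP 5xx
//	d.shouldRetry(200, &ErrorResult{Result: 4000}) // true: 4000/1000 == 4
//	d.shouldRetry(200, &ErrorResult{Result: 5002}) // true: 5002/1000 == 5
//	d.shouldRetry(200, &ErrorResult{Result: 2009}) // false: caller's error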

// requestWithRetry makes an authenticated API request with retry logic
func (d *PCloud) requestWithRetry(endpoint string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	maxRetries := 3
	baseDelay := 500 * time.Millisecond

	for attempt := 0; attempt <= maxRetries; attempt++ {
		body, err := d.request(endpoint, method, callback, resp)
		if err == nil {
			return body, nil
		}

		// If this is the last attempt, return the error
		if attempt == maxRetries {
			return nil, err
		}

		// Check if we should retry based on error type
		if !d.shouldRetryError(err) {
			return nil, err
		}

		// Exponential backoff
		delay := baseDelay * time.Duration(1<<attempt)
		time.Sleep(delay)
	}

	return nil, fmt.Errorf("max retries exceeded")
}
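
// Illustrative timing, not part of the driver: with baseDelay = 500ms and
// maxRetries = 3, the sleep before each retry doubles per attempt, so a
// request that keeps failing waits 500ms, 1s, then 2s before giving up:
//
//	attempt 0 fails -> sleep 500ms * 1 = 500ms
//	attempt 1 fails -> sleep 500ms * 2 = 1s
//	attempt 2 fails -> sleep 500ms * 4 = 2s
//	attempt 3 fails -> error returned (last attempt, no sleep)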

// shouldRetryError checks if an error should trigger a retry
func (d *PCloud) shouldRetryError(err error) bool {
	// For now, we'll retry on any error
	// In production, you'd want more specific error handling
	return true
}

// request makes an authenticated API request
func (d *PCloud) request(endpoint string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()

	// Add the access token as a query parameter (pCloud doesn't use Bearer auth)
	req.SetQueryParam("access_token", d.AccessToken)

	if callback != nil {
		callback(req)
	}

	if resp != nil {
		req.SetResult(resp)
	}

	var res *resty.Response
	var err error

	switch method {
	case http.MethodGet:
		res, err = req.Get(d.getAPIURL() + endpoint)
	case http.MethodPost:
		res, err = req.Post(d.getAPIURL() + endpoint)
	default:
		return nil, fmt.Errorf("unsupported method: %s", method)
	}

	if err != nil {
		return nil, err
	}

	// Check for API errors with pCloud-specific logic
	if res.StatusCode() != 200 {
		var errResp ErrorResult
		if err := utils.Json.Unmarshal(res.Body(), &errResp); err == nil {
			// Check if this error should trigger a retry
			if d.shouldRetry(res.StatusCode(), &errResp) {
				return nil, fmt.Errorf("pCloud API error (retryable): %s (result: %d)", errResp.Error, errResp.Result)
			}
			return nil, fmt.Errorf("pCloud API error: %s (result: %d)", errResp.Error, errResp.Result)
		}
		return nil, fmt.Errorf("HTTP error: %d", res.StatusCode())
	}

	return res.Body(), nil
}

// getFiles lists the files in a folder
func (d *PCloud) getFiles(folderID string) ([]FileObject, error) {
	var resp ItemResult
	_, err := d.requestWithRetry("/listfolder", http.MethodGet, func(req *resty.Request) {
		req.SetQueryParam("folderid", extractID(folderID))
	}, &resp)

	if err != nil {
		return nil, err
	}

	if resp.Result != 0 {
		return nil, fmt.Errorf("pCloud error: result code %d", resp.Result)
	}

	if resp.Metadata == nil {
		return []FileObject{}, nil
	}

	return resp.Metadata.Contents, nil
}

// getDownloadLink fetches a download link for a file
func (d *PCloud) getDownloadLink(fileID string) (string, error) {
	var resp DownloadLinkResult
	_, err := d.requestWithRetry("/getfilelink", http.MethodGet, func(req *resty.Request) {
		req.SetQueryParam("fileid", extractID(fileID))
	}, &resp)

	if err != nil {
		return "", err
	}

	if resp.Result != 0 {
		return "", fmt.Errorf("pCloud error: result code %d", resp.Result)
	}

	if len(resp.Hosts) == 0 {
		return "", fmt.Errorf("no download hosts available")
	}

	return "https://" + resp.Hosts[0] + resp.Path, nil
}

// createFolder creates a folder
func (d *PCloud) createFolder(parentID, name string) error {
	var resp ItemResult
	_, err := d.requestWithRetry("/createfolder", http.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"folderid": extractID(parentID),
			"name":     name,
		})
	}, &resp)

	if err != nil {
		return err
	}

	if resp.Result != 0 {
		return fmt.Errorf("pCloud error: result code %d", resp.Result)
	}

	return nil
}

// delete removes a file or folder
func (d *PCloud) delete(objID string, isFolder bool) error {
	endpoint := "/deletefile"
	paramName := "fileid"

	if isFolder {
		endpoint = "/deletefolderrecursive"
		paramName = "folderid"
	}

	var resp ItemResult
	_, err := d.requestWithRetry(endpoint, http.MethodPost, func(req *resty.Request) {
		req.SetFormData(map[string]string{
			paramName: extractID(objID),
		})
	}, &resp)

	if err != nil {
		return err
	}

	if resp.Result != 0 {
		return fmt.Errorf("pCloud error: result code %d", resp.Result)
	}

	return nil
}

// uploadFile uploads a file using the direct /uploadfile endpoint, like rclone
func (d *PCloud) uploadFile(ctx context.Context, file io.Reader, parentID, name string, size int64) error {
	// pCloud requires Content-Length, so we need to know the size
	if size <= 0 {
		return fmt.Errorf("file size must be provided for pCloud upload")
	}

	// Upload directly to the /uploadfile endpoint, like rclone
	var resp ItemResult
	req := base.RestyClient.R().
		SetQueryParam("access_token", d.AccessToken).
		SetHeader("Content-Length", strconv.FormatInt(size, 10)).
		SetFileReader("content", name, file).
		SetFormData(map[string]string{
			"filename":  name,
			"folderid":  extractID(parentID),
			"nopartial": "1",
		})

	// Use the PUT method, like rclone
	res, err := req.Put(d.getAPIURL() + "/uploadfile")
	if err != nil {
		return err
	}

	// Parse the response
	if err := utils.Json.Unmarshal(res.Body(), &resp); err != nil {
		return err
	}

	if resp.Result != 0 {
		return fmt.Errorf("pCloud upload error: result code %d", resp.Result)
	}

	return nil
}

drivers/proton_drive/driver.go (new file, 418 lines)
@@ -0,0 +1,418 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

import (
	"context"
	"encoding/base64"
	"fmt"
	"net/http"
	"sync"
	"time"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	proton_api_bridge "github.com/henrybear327/Proton-API-Bridge"
	"github.com/henrybear327/Proton-API-Bridge/common"
	"github.com/henrybear327/go-proton-api"
)

type ProtonDrive struct {
	model.Storage
	Addition

	protonDrive *proton_api_bridge.ProtonDrive
	credentials *common.ProtonDriveCredential

	apiBase    string
	appVersion string
	protonJson string
	userAgent  string
	sdkVersion string
	webDriveAV string

	tempServer     *http.Server
	tempServerPort int
	downloadTokens map[string]*downloadInfo
	tokenMutex     sync.RWMutex

	c *proton.Client
	//m *proton.Manager

	credentialCacheFile string

	//userKR *crypto.KeyRing
	addrKRs  map[string]*crypto.KeyRing
	addrData map[string]proton.Address

	MainShare *proton.Share
	RootLink  *proton.Link

	DefaultAddrKR *crypto.KeyRing
	MainShareKR   *crypto.KeyRing
}

func (d *ProtonDrive) Config() driver.Config {
	return config
}

func (d *ProtonDrive) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *ProtonDrive) Init(ctx context.Context) error {
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("ProtonDrive initialization panic: %v", r)
		}
	}()

	if d.Username == "" {
		return fmt.Errorf("username is required")
	}
	if d.Password == "" {
		return fmt.Errorf("password is required")
	}

	//fmt.Printf("ProtonDrive Init: Username=%s, TwoFACode=%s", d.Username, d.TwoFACode)

	if ctx == nil {
		return fmt.Errorf("context cannot be nil")
	}

	cachedCredentials, err := d.loadCachedCredentials()
	useReusableLogin := false
	var reusableCredential *common.ReusableCredentialData

	if err == nil && cachedCredentials != nil &&
		cachedCredentials.UID != "" && cachedCredentials.AccessToken != "" &&
		cachedCredentials.RefreshToken != "" && cachedCredentials.SaltedKeyPass != "" {
		useReusableLogin = true
		reusableCredential = cachedCredentials
	} else {
		useReusableLogin = false
		reusableCredential = &common.ReusableCredentialData{}
	}

	config := &common.Config{
		AppVersion: d.appVersion,
		UserAgent:  d.userAgent,
		FirstLoginCredential: &common.FirstLoginCredentialData{
			Username: d.Username,
			Password: d.Password,
			TwoFA:    d.TwoFACode,
		},
		EnableCaching:              true,
		ConcurrentBlockUploadCount: 5,
		ConcurrentFileCryptoCount:  2,
		UseReusableLogin:           false,
		ReplaceExistingDraft:       true,
		ReusableCredential:         reusableCredential,
		CredentialCacheFile:        d.credentialCacheFile,
	}

	if config.FirstLoginCredential == nil {
		return fmt.Errorf("failed to create login credentials, FirstLoginCredential cannot be nil")
	}

	//fmt.Printf("Calling NewProtonDrive...")

	protonDrive, credentials, err := proton_api_bridge.NewProtonDrive(
		ctx,
		config,
		func(auth proton.Auth) {},
		func() {},
	)

	if credentials == nil && !useReusableLogin {
		return fmt.Errorf("failed to get credentials from NewProtonDrive")
	}

	if err != nil {
		return fmt.Errorf("failed to initialize ProtonDrive: %w", err)
	}

	d.protonDrive = protonDrive

	var finalCredentials *common.ProtonDriveCredential

	if useReusableLogin {
		// For reusable login, create credentials from cached data
		finalCredentials = &common.ProtonDriveCredential{
			UID:           reusableCredential.UID,
			AccessToken:   reusableCredential.AccessToken,
			RefreshToken:  reusableCredential.RefreshToken,
			SaltedKeyPass: reusableCredential.SaltedKeyPass,
		}

		d.credentials = finalCredentials
	} else {
		d.credentials = credentials
	}

	clientOptions := []proton.Option{
		proton.WithAppVersion(d.appVersion),
		proton.WithUserAgent(d.userAgent),
	}
	manager := proton.New(clientOptions...)
	d.c = manager.NewClient(d.credentials.UID, d.credentials.AccessToken, d.credentials.RefreshToken)

	saltedKeyPassBytes, err := base64.StdEncoding.DecodeString(d.credentials.SaltedKeyPass)
	if err != nil {
		return fmt.Errorf("failed to decode salted key pass: %w", err)
	}

	_, addrKRs, addrs, _, err := getAccountKRs(ctx, d.c, nil, saltedKeyPassBytes)
	if err != nil {
		return fmt.Errorf("failed to get account keyrings: %w", err)
	}

	d.MainShare = protonDrive.MainShare
	d.RootLink = protonDrive.RootLink
	d.MainShareKR = protonDrive.MainShareKR
	d.DefaultAddrKR = protonDrive.DefaultAddrKR
	d.addrKRs = addrKRs
	d.addrData = addrs

	return nil
}

func (d *ProtonDrive) Drop(ctx context.Context) error {
	if d.tempServer != nil {
		d.tempServer.Shutdown(ctx)
	}
	return nil
}

func (d *ProtonDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var linkID string

	if dir.GetPath() == "/" {
		linkID = d.protonDrive.RootLink.LinkID
	} else {
		link, err := d.searchByPath(ctx, dir.GetPath(), true)
		if err != nil {
			return nil, err
		}
		linkID = link.LinkID
	}

	entries, err := d.protonDrive.ListDirectory(ctx, linkID)
	if err != nil {
		return nil, fmt.Errorf("failed to list directory: %w", err)
	}

	//fmt.Printf("Found %d entries for path %s\n", len(entries), dir.GetPath())
	//fmt.Printf("Found %d entries\n", len(entries))

	if len(entries) == 0 {
		emptySlice := []model.Obj{}

		//fmt.Printf("Returning empty slice (entries): %+v\n", emptySlice)

		return emptySlice, nil
	}

	var objects []model.Obj
	for _, entry := range entries {
		obj := &model.Object{
			Name:     entry.Name,
			Size:     entry.Link.Size,
			Modified: time.Unix(entry.Link.ModifyTime, 0),
			IsFolder: entry.IsFolder,
		}
		objects = append(objects, obj)
	}

	return objects, nil
}

func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	link, err := d.searchByPath(ctx, file.GetPath(), false)
	if err != nil {
		return nil, err
	}

	if err := d.ensureTempServer(); err != nil {
		return nil, fmt.Errorf("failed to start temp server: %w", err)
	}

	token := d.generateDownloadToken(link.LinkID, file.GetName())

	/* return &model.Link{
		URL: fmt.Sprintf("protondrive://download/%s", link.LinkID),
	}, nil */

	return &model.Link{
		URL: fmt.Sprintf("http://localhost:%d/temp/%s", d.tempServerPort, token),
	}, nil
}

func (d *ProtonDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	var parentLinkID string

	if parentDir.GetPath() == "/" {
		parentLinkID = d.protonDrive.RootLink.LinkID
	} else {
		link, err := d.searchByPath(ctx, parentDir.GetPath(), true)
		if err != nil {
			return nil, err
		}
		parentLinkID = link.LinkID
	}

	_, err := d.protonDrive.CreateNewFolderByID(ctx, parentLinkID, dirName)
	if err != nil {
		return nil, fmt.Errorf("failed to create directory: %w", err)
	}

	newDir := &model.Object{
		Name:     dirName,
		IsFolder: true,
		Modified: time.Now(),
	}
	return newDir, nil
}

func (d *ProtonDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return d.DirectMove(ctx, srcObj, dstDir)
}

func (d *ProtonDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	if d.protonDrive == nil {
		return nil, fmt.Errorf("protonDrive bridge is nil")
	}

	return d.DirectRename(ctx, srcObj, newName)
}

func (d *ProtonDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if srcObj.IsDir() {
		return nil, fmt.Errorf("directory copy not supported")
	}

	srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), false)
	if err != nil {
		return nil, err
	}

	reader, linkSize, fileSystemAttrs, err := d.protonDrive.DownloadFile(ctx, srcLink, 0)
	if err != nil {
		return nil, fmt.Errorf("failed to download source file: %w", err)
	}
	defer reader.Close()

	actualSize := linkSize
	if fileSystemAttrs != nil && fileSystemAttrs.Size > 0 {
		actualSize = fileSystemAttrs.Size
	}

	tempFile, err := utils.CreateTempFile(reader, actualSize)
	if err != nil {
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}
	defer tempFile.Close()

	updatedObj := &model.Object{
		Name: srcObj.GetName(),
		// Use the accurate, real size
		Size:     actualSize,
		Modified: srcObj.ModTime(),
		IsFolder: false,
	}

	return d.Put(ctx, dstDir, &fileStreamer{
		ReadCloser: tempFile,
		obj:        updatedObj,
	}, nil)
}

func (d *ProtonDrive) Remove(ctx context.Context, obj model.Obj) error {
	link, err := d.searchByPath(ctx, obj.GetPath(), obj.IsDir())
	if err != nil {
		return err
	}

	if obj.IsDir() {
		return d.protonDrive.MoveFolderToTrashByID(ctx, link.LinkID, false)
	}
	return d.protonDrive.MoveFileToTrashByID(ctx, link.LinkID)
}

func (d *ProtonDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	var parentLinkID string

	if dstDir.GetPath() == "/" {
		parentLinkID = d.protonDrive.RootLink.LinkID
	} else {
		link, err := d.searchByPath(ctx, dstDir.GetPath(), true)
		if err != nil {
			return nil, err
		}
		parentLinkID = link.LinkID
	}

	tempFile, err := utils.CreateTempFile(file, file.GetSize())
	if err != nil {
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}
	defer tempFile.Close()

	err = d.uploadFile(ctx, parentLinkID, file.GetName(), tempFile, file.GetSize(), up)
	if err != nil {
		return nil, err
	}

	uploadedObj := &model.Object{
		Name:     file.GetName(),
		Size:     file.GetSize(),
		Modified: file.ModTime(),
		IsFolder: false,
	}
	return uploadedObj, nil
}

func (d *ProtonDrive) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *ProtonDrive) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *ProtonDrive) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *ProtonDrive) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}

var _ driver.Driver = (*ProtonDrive)(nil)

drivers/proton_drive/meta.go (new file, 69 lines)
@@ -0,0 +1,69 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	driver.RootPath
	//driver.RootID

	Username  string `json:"username" required:"true" type:"string"`
	Password  string `json:"password" required:"true" type:"string"`
	TwoFACode string `json:"two_fa_code,omitempty" type:"string"`
}

type Config struct {
	Name        string `json:"name"`
	LocalSort   bool   `json:"local_sort"`
	OnlyLocal   bool   `json:"only_local"`
	OnlyProxy   bool   `json:"only_proxy"`
	NoCache     bool   `json:"no_cache"`
	NoUpload    bool   `json:"no_upload"`
	NeedMs      bool   `json:"need_ms"`
	DefaultRoot string `json:"default_root"`
}

var config = driver.Config{
	Name:              "ProtonDrive",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &ProtonDrive{
			apiBase:             "https://drive.proton.me/api",
			appVersion:          "windows-drive@1.11.3+rclone+proton",
			credentialCacheFile: ".prtcrd",
			protonJson:          "application/vnd.protonmail.v1+json",
			sdkVersion:          "js@0.3.0",
			userAgent:           "ProtonDrive/v1.70.0 (Windows NT 10.0.22000; Win64; x64)",
			webDriveAV:          "web-drive@5.2.0+0f69f7a8",
		}
	})
}

drivers/proton_drive/types.go (new file, 124 lines)
@@ -0,0 +1,124 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

import (
	"errors"
	"io"
	"os"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/henrybear327/go-proton-api"
)

type ProtonFile struct {
	*proton.Link
	Name     string
	IsFolder bool
}

func (p *ProtonFile) GetName() string {
	return p.Name
}

func (p *ProtonFile) GetSize() int64 {
	return p.Link.Size
}

func (p *ProtonFile) GetPath() string {
	return p.Name
}

func (p *ProtonFile) IsDir() bool {
	return p.IsFolder
}

func (p *ProtonFile) ModTime() time.Time {
	return time.Unix(p.Link.ModifyTime, 0)
}

func (p *ProtonFile) CreateTime() time.Time {
	return time.Unix(p.Link.CreateTime, 0)
}

type downloadInfo struct {
	LinkID   string
	FileName string
}

type fileStreamer struct {
	io.ReadCloser
	obj model.Obj
}

func (fs *fileStreamer) GetMimetype() string       { return "" }
func (fs *fileStreamer) NeedStore() bool           { return false }
func (fs *fileStreamer) IsForceStreamUpload() bool { return false }
func (fs *fileStreamer) GetExist() model.Obj       { return nil }
func (fs *fileStreamer) SetExist(model.Obj)        {}
func (fs *fileStreamer) RangeRead(http_range.Range) (io.Reader, error) {
	return nil, errors.New("not supported")
}
func (fs *fileStreamer) CacheFullInTempFile() (model.File, error) {
	return nil, errors.New("not supported")
}
func (fs *fileStreamer) SetTmpFile(r *os.File)   {}
func (fs *fileStreamer) GetFile() model.File     { return nil }
func (fs *fileStreamer) GetName() string         { return fs.obj.GetName() }
func (fs *fileStreamer) GetSize() int64          { return fs.obj.GetSize() }
func (fs *fileStreamer) GetPath() string         { return fs.obj.GetPath() }
func (fs *fileStreamer) IsDir() bool             { return fs.obj.IsDir() }
func (fs *fileStreamer) ModTime() time.Time      { return fs.obj.ModTime() }
func (fs *fileStreamer) CreateTime() time.Time   { return fs.obj.ModTime() }
func (fs *fileStreamer) GetHash() utils.HashInfo { return fs.obj.GetHash() }
func (fs *fileStreamer) GetID() string           { return fs.obj.GetID() }

type httpRange struct {
	start, end int64
}

type MoveRequest struct {
	ParentLinkID            string  `json:"ParentLinkID"`
	NodePassphrase          string  `json:"NodePassphrase"`
	NodePassphraseSignature *string `json:"NodePassphraseSignature"`
	Name                    string  `json:"Name"`
	NameSignatureEmail      string  `json:"NameSignatureEmail"`
	Hash                    string  `json:"Hash"`
	OriginalHash            string  `json:"OriginalHash"`
	ContentHash             *string `json:"ContentHash"` // may be null
}

type progressReader struct {
	reader   io.Reader
	total    int64
	current  int64
	callback driver.UpdateProgress
}

type RenameRequest struct {
	Name               string `json:"Name"`               // PGP-encrypted name
	NameSignatureEmail string `json:"NameSignatureEmail"` // user's signature email
	Hash               string `json:"Hash"`               // new name hash
	OriginalHash       string `json:"OriginalHash"`       // current name hash
}

type RenameResponse struct {
	Code int `json:"Code"`
}

drivers/proton_drive/util.go (new file, 918 lines)
@@ -0,0 +1,918 @@
package protondrive

/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18

Thanks to @henrybear327 for the modded go-proton-api & Proton-API-Bridge

The power of open-source, the force of teamwork and the magic of reverse engineering!


D@' 3z K!7 - The King Of Cracking

Long live the Motherland))
*/

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/henrybear327/Proton-API-Bridge/common"
	"github.com/henrybear327/go-proton-api"
)

func (d *ProtonDrive) loadCachedCredentials() (*common.ReusableCredentialData, error) {
	if d.credentialCacheFile == "" {
		return nil, nil
	}

	if _, err := os.Stat(d.credentialCacheFile); os.IsNotExist(err) {
		return nil, nil
	}

	data, err := os.ReadFile(d.credentialCacheFile)
	if err != nil {
		return nil, fmt.Errorf("failed to read credential cache file: %w", err)
	}

	var credentials common.ReusableCredentialData
	if err := json.Unmarshal(data, &credentials); err != nil {
		return nil, fmt.Errorf("failed to parse cached credentials: %w", err)
	}

	if credentials.UID == "" || credentials.AccessToken == "" ||
		credentials.RefreshToken == "" || credentials.SaltedKeyPass == "" {
		return nil, fmt.Errorf("cached credentials are incomplete")
	}

	return &credentials, nil
}
|
||||
|
||||
func (d *ProtonDrive) searchByPath(ctx context.Context, fullPath string, isFolder bool) (*proton.Link, error) {
|
||||
if fullPath == "/" {
|
||||
return d.protonDrive.RootLink, nil
|
||||
}
|
||||
|
||||
cleanPath := strings.Trim(fullPath, "/")
|
||||
pathParts := strings.Split(cleanPath, "/")
|
||||
|
||||
currentLink := d.protonDrive.RootLink
|
||||
|
||||
for i, part := range pathParts {
|
||||
isLastPart := i == len(pathParts)-1
|
||||
searchForFolder := !isLastPart || isFolder
|
||||
|
||||
entries, err := d.protonDrive.ListDirectory(ctx, currentLink.LinkID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list directory: %w", err)
|
||||
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, entry := range entries {
|
||||
// entry.Name is already decrypted!
|
||||
if entry.Name == part && entry.IsFolder == searchForFolder {
|
||||
currentLink = entry.Link
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return nil, fmt.Errorf("path not found: %s (looking for part: %s)", fullPath, part)
|
||||
}
|
||||
}
|
||||
|
||||
return currentLink, nil
|
||||
}
|
||||
|
||||
func (pr *progressReader) Read(p []byte) (int, error) {
	n, err := pr.reader.Read(p)
	pr.current += int64(n)

	// Guard total > 0 so an unknown size does not produce a NaN/Inf percentage
	if pr.callback != nil && pr.total > 0 {
		percentage := float64(pr.current) / float64(pr.total) * 100
		pr.callback(percentage)
	}

	return n, err
}

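A minimal sketch of how progressReader is meant to be wrapped around any io.Reader (the callback below is a stand-in for a real driver.UpdateProgress; the data source is illustrative):

	// Example: report percentage progress while draining a reader.
	data := strings.NewReader("hello world")
	pr := &progressReader{
		reader: data,
		total:  int64(data.Len()),
		callback: func(percentage float64) {
			fmt.Printf("progress: %.0f%%\n", percentage)
		},
	}
	if _, err := io.Copy(io.Discard, pr); err != nil {
		// handle the copy error
	}
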
func (d *ProtonDrive) uploadFile(ctx context.Context, parentLinkID, fileName string, file *os.File, size int64, up driver.UpdateProgress) error {
	fileInfo, err := file.Stat()
	if err != nil {
		return fmt.Errorf("failed to get file info: %w", err)
	}

	_, err = d.protonDrive.GetLink(ctx, parentLinkID)
	if err != nil {
		return fmt.Errorf("failed to get parent link: %w", err)
	}

	reader := &progressReader{
		reader:   bufio.NewReader(file),
		total:    size,
		current:  0,
		callback: up,
	}

	_, _, err = d.protonDrive.UploadFileByReader(ctx, parentLinkID, fileName, fileInfo.ModTime(), reader, 0)
	if err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}

	return nil
}

func (d *ProtonDrive) ensureTempServer() error {
	if d.tempServer != nil {
		// Already running
		return nil
	}

	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		return err
	}
	d.tempServerPort = listener.Addr().(*net.TCPAddr).Port

	mux := http.NewServeMux()
	mux.HandleFunc("/temp/", d.handleTempDownload)

	d.tempServer = &http.Server{
		Handler: mux,
	}

	go func() {
		d.tempServer.Serve(listener)
	}()

	return nil
}

func (d *ProtonDrive) handleTempDownload(w http.ResponseWriter, r *http.Request) {
	token := strings.TrimPrefix(r.URL.Path, "/temp/")

	d.tokenMutex.RLock()
	info, exists := d.downloadTokens[token]
	d.tokenMutex.RUnlock()

	if !exists {
		http.Error(w, "Invalid or expired token", http.StatusNotFound)
		return
	}

	link, err := d.protonDrive.GetLink(r.Context(), info.LinkID)
	if err != nil {
		http.Error(w, "Failed to get file link", http.StatusInternalServerError)
		return
	}

	// Get file size for range calculations
	_, _, attrs, err := d.protonDrive.DownloadFile(r.Context(), link, 0)
	if err != nil {
		http.Error(w, "Failed to get file info", http.StatusInternalServerError)
		return
	}

	fileSize := attrs.Size

	rangeHeader := r.Header.Get("Range")
	if rangeHeader != "" {
		// Parse range header like "bytes=0-1023" or "bytes=1024-"
		ranges, err := parseRange(rangeHeader, fileSize)
		if err != nil {
			http.Error(w, "Invalid range", http.StatusRequestedRangeNotSatisfiable)
			return
		}

		if len(ranges) == 1 {
			// Single range request
			start, end := ranges[0].start, ranges[0].end
			contentLength := end - start + 1

			// Start download from offset
			reader, _, _, err := d.protonDrive.DownloadFile(r.Context(), link, start)
			if err != nil {
				http.Error(w, "Failed to start download", http.StatusInternalServerError)
				return
			}
			defer reader.Close()

			w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fileSize))
			w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength))
			w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(link.Name)))

			// Partial content...
			// Setting fileName here is mostly cosmetic
			//w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", link.Name))
			w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", info.FileName))
			w.Header().Set("Accept-Ranges", "bytes")

			w.WriteHeader(http.StatusPartialContent)

			io.CopyN(w, reader, contentLength)
			return
		}
	}

	// Full file download (non-range request)
	reader, _, _, err := d.protonDrive.DownloadFile(r.Context(), link, 0)
	if err != nil {
		http.Error(w, "Failed to start download", http.StatusInternalServerError)
		return
	}
	defer reader.Close()

	// Set headers for full content
	w.Header().Set("Content-Length", fmt.Sprintf("%d", fileSize))
	w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(link.Name)))

	// Setting fileName is needed since the ProtonDrive fileName is more like a random string
	//w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", link.Name))
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", info.FileName))

	w.Header().Set("Accept-Ranges", "bytes")

	// Stream the full file
	io.Copy(w, reader)
}

func (d *ProtonDrive) generateDownloadToken(linkID, fileName string) string {
	token := fmt.Sprintf("%d_%s", time.Now().UnixNano(), linkID[:8])

	d.tokenMutex.Lock()
	if d.downloadTokens == nil {
		d.downloadTokens = make(map[string]*downloadInfo)
	}

	d.downloadTokens[token] = &downloadInfo{
		LinkID:   linkID,
		FileName: fileName,
	}
	d.tokenMutex.Unlock()

	go func() {
		// Token expires in 1 hour
		time.Sleep(1 * time.Hour)
		d.tokenMutex.Lock()
		delete(d.downloadTokens, token)
		d.tokenMutex.Unlock()
	}()

	return token
}

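Together with ensureTempServer, a token then resolves to a loopback URL of roughly this shape (sketch; link is assumed to be in scope, and the token format assumes len(linkID) >= 8):

	token := d.generateDownloadToken(link.LinkID, "movie.mkv")
	u := fmt.Sprintf("http://127.0.0.1:%d/temp/%s", d.tempServerPort, token)
	// handleTempDownload serves u with Range support until the token
	// expires after one hour.
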
func parseRange(rangeHeader string, size int64) ([]httpRange, error) {
	if !strings.HasPrefix(rangeHeader, "bytes=") {
		return nil, fmt.Errorf("invalid range header")
	}

	rangeSpec := strings.TrimPrefix(rangeHeader, "bytes=")
	ranges := strings.Split(rangeSpec, ",")

	var result []httpRange
	for _, r := range ranges {
		r = strings.TrimSpace(r)
		if strings.Contains(r, "-") {
			parts := strings.Split(r, "-")
			if len(parts) != 2 {
				return nil, fmt.Errorf("invalid range format")
			}

			var start, end int64
			var err error

			if parts[0] == "" {
				// Suffix range (e.g., "-500")
				if parts[1] == "" {
					return nil, fmt.Errorf("invalid range format")
				}
				end = size - 1
				start, err = strconv.ParseInt(parts[1], 10, 64)
				if err != nil {
					return nil, err
				}
				start = size - start
				if start < 0 {
					start = 0
				}
			} else if parts[1] == "" {
				// Prefix range (e.g., "500-")
				start, err = strconv.ParseInt(parts[0], 10, 64)
				if err != nil {
					return nil, err
				}
				end = size - 1
			} else {
				// Full range (e.g., "0-1023")
				start, err = strconv.ParseInt(parts[0], 10, 64)
				if err != nil {
					return nil, err
				}
				end, err = strconv.ParseInt(parts[1], 10, 64)
				if err != nil {
					return nil, err
				}
			}

			if start >= size || end >= size || start > end {
				return nil, fmt.Errorf("range out of bounds")
			}

			result = append(result, httpRange{start: start, end: end})
		}
	}

	return result, nil
}

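For a 2 KiB object, the three supported Range forms parse as follows (illustrative sketch):

	size := int64(2048)
	for _, h := range []string{"bytes=0-1023", "bytes=1024-", "bytes=-500"} {
		ranges, err := parseRange(h, size)
		if err != nil {
			continue // handle the malformed header instead in real code
		}
		fmt.Println(h, "->", ranges[0].start, ranges[0].end)
	}
	// bytes=0-1023 -> 0 1023
	// bytes=1024-  -> 1024 2047
	// bytes=-500   -> 1548 2047
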
func (d *ProtonDrive) encryptFileName(ctx context.Context, name string, parentLinkID string) (string, error) {
	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	// Temporary file (request)
	tempReq := proton.CreateFileReq{
		SignatureAddress: d.MainShare.Creator,
	}

	// Encrypt the filename
	err = tempReq.SetName(name, d.DefaultAddrKR, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to encrypt filename: %w", err)
	}

	return tempReq.Name, nil
}

func (d *ProtonDrive) generateFileNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
	}

	parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
	if err != nil {
		return "", fmt.Errorf("failed to get parent hash key: %w", err)
	}

	nameHash, err := proton.GetNameHash(name, parentHashKey)
	if err != nil {
		return "", fmt.Errorf("failed to generate name hash: %w", err)
	}

	return nameHash, nil
}

func (d *ProtonDrive) getOriginalNameHash(link *proton.Link) (string, error) {
	if link == nil {
		return "", fmt.Errorf("link cannot be nil")
	}

	if link.Hash == "" {
		return "", fmt.Errorf("link hash is empty")
	}

	return link.Hash, nil
}

func (d *ProtonDrive) getLink(ctx context.Context, linkID string) (*proton.Link, error) {
	if linkID == "" {
		return nil, fmt.Errorf("linkID cannot be empty")
	}

	link, err := d.c.GetLink(ctx, d.MainShare.ShareID, linkID)
	if err != nil {
		return nil, err
	}

	return &link, nil
}

func (d *ProtonDrive) getLinkKR(ctx context.Context, link *proton.Link) (*crypto.KeyRing, error) {
	if link == nil {
		return nil, fmt.Errorf("link cannot be nil")
	}

	// Root Link or Root Dir
	if link.ParentLinkID == "" {
		signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
		if err != nil {
			return nil, err
		}
		return link.GetKeyRing(d.MainShareKR, signatureVerificationKR)
	}

	// Get parent keyring recursively
	parentLink, err := d.getLink(ctx, link.ParentLinkID)
	if err != nil {
		return nil, err
	}

	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return nil, err
	}

	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
	if err != nil {
		return nil, err
	}

	return link.GetKeyRing(parentNodeKR, signatureVerificationKR)
}

var (
	ErrKeyPassOrSaltedKeyPassMustBeNotNil = errors.New("either keyPass or saltedKeyPass must be not nil")
	ErrFailedToUnlockUserKeys             = errors.New("failed to unlock user keys")
)

func getAccountKRs(ctx context.Context, c *proton.Client, keyPass, saltedKeyPass []byte) (*crypto.KeyRing, map[string]*crypto.KeyRing, map[string]proton.Address, []byte, error) {
	user, err := c.GetUser(ctx)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// fmt.Printf("user %#v", user)

	addrsArr, err := c.GetAddresses(ctx)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// fmt.Printf("addr %#v", addr)

	if saltedKeyPass == nil {
		if keyPass == nil {
			return nil, nil, nil, nil, ErrKeyPassOrSaltedKeyPassMustBeNotNil
		}

		// Due to limitations, salts are stored using cacheCredentialToFile
		salts, err := c.GetSalts(ctx)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		// fmt.Printf("salts %#v", salts)

		saltedKeyPass, err = salts.SaltForKey(keyPass, user.Keys.Primary().ID)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		// fmt.Printf("saltedKeyPass ok")
	}

	userKR, addrKRs, err := proton.Unlock(user, addrsArr, saltedKeyPass, nil)
	if err != nil {
		return nil, nil, nil, nil, err
	} else if userKR.CountDecryptionEntities() == 0 {
		return nil, nil, nil, nil, ErrFailedToUnlockUserKeys
	}

	addrs := make(map[string]proton.Address)
	for _, addr := range addrsArr {
		addrs[addr.Email] = addr
	}

	return userKR, addrKRs, addrs, saltedKeyPass, nil
}

func (d *ProtonDrive) getSignatureVerificationKeyring(emailAddresses []string, verificationAddrKRs ...*crypto.KeyRing) (*crypto.KeyRing, error) {
	ret, err := crypto.NewKeyRing(nil)
	if err != nil {
		return nil, err
	}

	for _, emailAddress := range emailAddresses {
		if addr, ok := d.addrData[emailAddress]; ok {
			if addrKR, exists := d.addrKRs[addr.ID]; exists {
				err = d.addKeysFromKR(ret, addrKR)
				if err != nil {
					return nil, err
				}
			}
		}
	}

	for _, kr := range verificationAddrKRs {
		err = d.addKeysFromKR(ret, kr)
		if err != nil {
			return nil, err
		}
	}

	if ret.CountEntities() == 0 {
		return nil, fmt.Errorf("no keyring for signature verification")
	}

	return ret, nil
}

func (d *ProtonDrive) addKeysFromKR(kr *crypto.KeyRing, newKRs ...*crypto.KeyRing) error {
	for i := range newKRs {
		for _, key := range newKRs[i].GetKeys() {
			err := kr.AddKey(key)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func (d *ProtonDrive) DirectRename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	//fmt.Printf("DEBUG DirectRename: path=%s, newName=%s", srcObj.GetPath(), newName)

	if d.MainShare == nil || d.DefaultAddrKR == nil {
		return nil, fmt.Errorf("missing required fields: MainShare=%v, DefaultAddrKR=%v",
			d.MainShare != nil, d.DefaultAddrKR != nil)
	}

	if d.protonDrive == nil {
		return nil, fmt.Errorf("protonDrive bridge is nil")
	}

	srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), srcObj.IsDir())
	if err != nil {
		return nil, fmt.Errorf("failed to find source: %w", err)
	}

	parentLinkID := srcLink.ParentLinkID
	if parentLinkID == "" {
		return nil, fmt.Errorf("cannot rename root folder")
	}

	encryptedName, err := d.encryptFileName(ctx, newName, parentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt filename: %w", err)
	}

	newHash, err := d.generateFileNameHash(ctx, newName, parentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to generate new hash: %w", err)
	}

	originalHash, err := d.getOriginalNameHash(srcLink)
	if err != nil {
		return nil, fmt.Errorf("failed to get original hash: %w", err)
	}

	renameReq := RenameRequest{
		Name:               encryptedName,
		NameSignatureEmail: d.MainShare.Creator,
		Hash:               newHash,
		OriginalHash:       originalHash,
	}

	err = d.executeRenameAPI(ctx, srcLink.LinkID, renameReq)
	if err != nil {
		return nil, fmt.Errorf("rename API call failed: %w", err)
	}

	return &model.Object{
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *ProtonDrive) executeRenameAPI(ctx context.Context, linkID string, req RenameRequest) error {
	renameURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/rename",
		d.MainShare.VolumeID, linkID)

	reqBody, err := json.Marshal(req)
	if err != nil {
		return fmt.Errorf("failed to marshal rename request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "PUT", renameURL, bytes.NewReader(reqBody))
	if err != nil {
		return fmt.Errorf("failed to create HTTP request: %w", err)
	}

	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Accept", d.protonJson)
	httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
	httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
	httpReq.Header.Set("X-Pm-Uid", d.credentials.UID)
	httpReq.Header.Set("Authorization", "Bearer "+d.credentials.AccessToken)

	client := &http.Client{}
	resp, err := client.Do(httpReq)
	if err != nil {
		return fmt.Errorf("failed to execute rename request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("rename failed with status %d", resp.StatusCode)
	}

	var renameResp RenameResponse
	if err := json.NewDecoder(resp.Body).Decode(&renameResp); err != nil {
		return fmt.Errorf("failed to decode rename response: %w", err)
	}

	if renameResp.Code != 1000 {
		return fmt.Errorf("rename failed with code %d", renameResp.Code)
	}

	return nil
}

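For orientation, the body PUT to /drive/v2/volumes/{volumeID}/links/{linkID}/rename marshals to JSON like this (all values below are illustrative placeholders, not real ciphertext or hashes):

	req := RenameRequest{
		Name:               "wV4DJ0...", // PGP-encrypted new name
		NameSignatureEmail: "user@example.com",
		Hash:               "3f2a...", // name hash under the parent hash key
		OriginalHash:       "9b1c...",
	}
	body, _ := json.Marshal(req)
	// {"Name":"wV4DJ0...","NameSignatureEmail":"user@example.com","Hash":"3f2a...","OriginalHash":"9b1c..."}
	_ = body
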
func (d *ProtonDrive) executeMoveAPI(ctx context.Context, linkID string, req MoveRequest) error {
	//fmt.Printf("DEBUG Move Request - Name: %s\n", req.Name)
	//fmt.Printf("DEBUG Move Request - Hash: %s\n", req.Hash)
	//fmt.Printf("DEBUG Move Request - OriginalHash: %s\n", req.OriginalHash)
	//fmt.Printf("DEBUG Move Request - ParentLinkID: %s\n", req.ParentLinkID)

	//fmt.Printf("DEBUG Move Request - Name length: %d\n", len(req.Name))
	//fmt.Printf("DEBUG Move Request - NameSignatureEmail: %s\n", req.NameSignatureEmail)
	//fmt.Printf("DEBUG Move Request - ContentHash: %v\n", req.ContentHash)
	//fmt.Printf("DEBUG Move Request - NodePassphrase length: %d\n", len(req.NodePassphrase))
	//fmt.Printf("DEBUG Move Request - NodePassphraseSignature length: %d\n", len(req.NodePassphraseSignature))

	//fmt.Printf("DEBUG Move Request - SrcLinkID: %s\n", linkID)
	//fmt.Printf("DEBUG Move Request - DstParentLinkID: %s\n", req.ParentLinkID)
	//fmt.Printf("DEBUG Move Request - ShareID: %s\n", d.MainShare.ShareID)

	srcLink, _ := d.getLink(ctx, linkID)
	if srcLink != nil && srcLink.ParentLinkID == req.ParentLinkID {
		return fmt.Errorf("cannot move to same parent directory")
	}

	moveURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/move",
		d.MainShare.VolumeID, linkID)

	reqBody, err := json.Marshal(req)
	if err != nil {
		return fmt.Errorf("failed to marshal move request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "PUT", moveURL, bytes.NewReader(reqBody))
	if err != nil {
		return fmt.Errorf("failed to create HTTP request: %w", err)
	}

	httpReq.Header.Set("Authorization", "Bearer "+d.credentials.AccessToken)
	httpReq.Header.Set("Accept", d.protonJson)
	httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
	httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
	httpReq.Header.Set("X-Pm-Uid", d.credentials.UID)
	httpReq.Header.Set("Content-Type", "application/json")

	client := &http.Client{}
	resp, err := client.Do(httpReq)
	if err != nil {
		return fmt.Errorf("failed to execute move request: %w", err)
	}
	defer resp.Body.Close()

	var moveResp RenameResponse
	if err := json.NewDecoder(resp.Body).Decode(&moveResp); err != nil {
		return fmt.Errorf("failed to decode move response: %w", err)
	}

	if moveResp.Code != 1000 {
		return fmt.Errorf("move operation failed with code: %d", moveResp.Code)
	}

	return nil
}

func (d *ProtonDrive) DirectMove(ctx context.Context, srcObj model.Obj, dstDir model.Obj) (model.Obj, error) {
	//fmt.Printf("DEBUG DirectMove: srcPath=%s, dstPath=%s", srcObj.GetPath(), dstDir.GetPath())

	srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), srcObj.IsDir())
	if err != nil {
		return nil, fmt.Errorf("failed to find source: %w", err)
	}

	var dstParentLinkID string
	if dstDir.GetPath() == "/" {
		dstParentLinkID = d.RootLink.LinkID
	} else {
		dstLink, err := d.searchByPath(ctx, dstDir.GetPath(), true)
		if err != nil {
			return nil, fmt.Errorf("failed to find destination: %w", err)
		}
		dstParentLinkID = dstLink.LinkID
	}

	if srcObj.IsDir() {
		// Check if destination is a descendant of source
		if err := d.checkCircularMove(ctx, srcLink.LinkID, dstParentLinkID); err != nil {
			return nil, err
		}
	}

	// Encrypt the filename for the new location
	encryptedName, err := d.encryptFileName(ctx, srcObj.GetName(), dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt filename: %w", err)
	}

	newHash, err := d.generateNameHash(ctx, srcObj.GetName(), dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to generate new hash: %w", err)
	}

	originalHash, err := d.getOriginalNameHash(srcLink)
	if err != nil {
		return nil, fmt.Errorf("failed to get original hash: %w", err)
	}

	// Re-encrypt node passphrase for new parent context
	reencryptedPassphrase, err := d.reencryptNodePassphrase(ctx, srcLink, dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to re-encrypt node passphrase: %w", err)
	}

	moveReq := MoveRequest{
		ParentLinkID:       dstParentLinkID,
		NodePassphrase:     reencryptedPassphrase,
		Name:               encryptedName,
		NameSignatureEmail: d.MainShare.Creator,
		Hash:               newHash,
		OriginalHash:       originalHash,
		ContentHash:        nil,

		// *** Causes rejection ***
		/* NodePassphraseSignature: srcLink.NodePassphraseSignature, */
	}

	//fmt.Printf("DEBUG MoveRequest validation:\n")
	//fmt.Printf("  Name length: %d\n", len(moveReq.Name))
	//fmt.Printf("  Hash: %s\n", moveReq.Hash)
	//fmt.Printf("  OriginalHash: %s\n", moveReq.OriginalHash)
	//fmt.Printf("  NodePassphrase length: %d\n", len(moveReq.NodePassphrase))
	/* fmt.Printf("  NodePassphraseSignature length: %d\n", len(moveReq.NodePassphraseSignature)) */
	//fmt.Printf("  NameSignatureEmail: %s\n", moveReq.NameSignatureEmail)

	err = d.executeMoveAPI(ctx, srcLink.LinkID, moveReq)
	if err != nil {
		return nil, fmt.Errorf("move API call failed: %w", err)
	}

	return &model.Object{
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}

func (d *ProtonDrive) reencryptNodePassphrase(ctx context.Context, srcLink *proton.Link, dstParentLinkID string) (string, error) {
	// Get source parent link with metadata
	srcParentLink, err := d.getLink(ctx, srcLink.ParentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get source parent link: %w", err)
	}

	// Get source parent keyring using link object
	srcParentKR, err := d.getLinkKR(ctx, srcParentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get source parent keyring: %w", err)
	}

	// Get destination parent link with metadata
	dstParentLink, err := d.getLink(ctx, dstParentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get destination parent link: %w", err)
	}

	// Get destination parent keyring using link object
	dstParentKR, err := d.getLinkKR(ctx, dstParentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get destination parent keyring: %w", err)
	}

	// Re-encrypt the node passphrase from source parent context to destination parent context
	reencryptedPassphrase, err := reencryptKeyPacket(srcParentKR, dstParentKR, d.DefaultAddrKR, srcLink.NodePassphrase)
	if err != nil {
		return "", fmt.Errorf("failed to re-encrypt key packet: %w", err)
	}

	return reencryptedPassphrase, nil
}

func (d *ProtonDrive) generateNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	// Get signature verification keyring
	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
	}

	parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
	if err != nil {
		return "", fmt.Errorf("failed to get parent hash key: %w", err)
	}

	nameHash, err := proton.GetNameHash(name, parentHashKey)
	if err != nil {
		return "", fmt.Errorf("failed to generate name hash: %w", err)
	}

	return nameHash, nil
}

func reencryptKeyPacket(srcKR, dstKR, _ *crypto.KeyRing, passphrase string) (string, error) { // addrKR (3)
	oldSplitMessage, err := crypto.NewPGPSplitMessageFromArmored(passphrase)
	if err != nil {
		return "", err
	}

	sessionKey, err := srcKR.DecryptSessionKey(oldSplitMessage.KeyPacket)
	if err != nil {
		return "", err
	}

	newKeyPacket, err := dstKR.EncryptSessionKey(sessionKey)
	if err != nil {
		return "", err
	}

	newSplitMessage := crypto.NewPGPSplitMessage(newKeyPacket, oldSplitMessage.DataPacket)

	return newSplitMessage.GetArmored()
}

func (d *ProtonDrive) checkCircularMove(ctx context.Context, srcLinkID, dstParentLinkID string) error {
	currentLinkID := dstParentLinkID

	for currentLinkID != "" && currentLinkID != d.RootLink.LinkID {
		if currentLinkID == srcLinkID {
			return fmt.Errorf("cannot move folder into itself or its subfolder")
		}

		currentLink, err := d.getLink(ctx, currentLinkID)
		if err != nil {
			return err
		}
		currentLinkID = currentLink.ParentLinkID
	}

	return nil
}

@@ -15,6 +15,7 @@ import (
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/pkg/cron"
	"github.com/alist-org/alist/v3/server/common"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
@@ -32,6 +33,33 @@ type S3 struct {
	cron *cron.Cron
}

var storageClassLookup = map[string]string{
	"standard":            s3.ObjectStorageClassStandard,
	"reduced_redundancy":  s3.ObjectStorageClassReducedRedundancy,
	"glacier":             s3.ObjectStorageClassGlacier,
	"standard_ia":         s3.ObjectStorageClassStandardIa,
	"onezone_ia":          s3.ObjectStorageClassOnezoneIa,
	"intelligent_tiering": s3.ObjectStorageClassIntelligentTiering,
	"deep_archive":        s3.ObjectStorageClassDeepArchive,
	"outposts":            s3.ObjectStorageClassOutposts,
	"glacier_ir":          s3.ObjectStorageClassGlacierIr,
	"snow":                s3.ObjectStorageClassSnow,
	"express_onezone":     s3.ObjectStorageClassExpressOnezone,
}

func (d *S3) resolveStorageClass() *string {
	value := strings.TrimSpace(d.StorageClass)
	if value == "" {
		return nil
	}
	normalized := strings.ToLower(strings.ReplaceAll(value, "-", "_"))
	if v, ok := storageClassLookup[normalized]; ok {
		return aws.String(v)
	}
	log.Warnf("s3: unknown storage class %q, using raw value", d.StorageClass)
	return aws.String(value)
}

func (d *S3) Config() driver.Config {
	return d.config
}
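A quick illustration of the normalization above (sketch): hyphens fold to underscores, case is ignored, and values missing from the table pass through raw with a logged warning.

	d := &S3{Addition: Addition{StorageClass: "Glacier-IR"}}
	fmt.Println(aws.StringValue(d.resolveStorageClass())) // GLACIER_IR

	d.StorageClass = "MAZ_STANDARD" // e.g. a vendor-specific class not in the table
	fmt.Println(aws.StringValue(d.resolveStorageClass())) // MAZ_STANDARD (raw value)
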
@@ -179,8 +207,14 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
		}),
		ContentType: &contentType,
	}
	if storageClass := d.resolveStorageClass(); storageClass != nil {
		input.StorageClass = storageClass
	}
	_, err := uploader.UploadWithContext(ctx, input)
	return err
}

var _ driver.Driver = (*S3)(nil)
var (
	_ driver.Driver = (*S3)(nil)
	_ driver.Other  = (*S3)(nil)
)

@@ -21,6 +21,7 @@ type Addition struct {
	ListObjectVersion        string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
	RemoveBucket             bool   `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
	AddFilenameToDisposition bool   `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
	StorageClass             string `json:"storage_class" type:"select" options:",standard,standard_ia,onezone_ia,intelligent_tiering,glacier,glacier_ir,deep_archive,archive" help:"Storage class for new objects. AWS and Tencent COS support different subsets (COS uses ARCHIVE/DEEP_ARCHIVE)."`
}

func init() {

286 drivers/s3/other.go Normal file
@@ -0,0 +1,286 @@
package s3

import (
	"context"
	"encoding/json"
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

const (
	OtherMethodArchive       = "archive"
	OtherMethodArchiveStatus = "archive_status"
	OtherMethodThaw          = "thaw"
	OtherMethodThawStatus    = "thaw_status"
)

type ArchiveRequest struct {
	StorageClass string `json:"storage_class"`
}

type ThawRequest struct {
	Days int64  `json:"days"`
	Tier string `json:"tier"`
}

type ObjectDescriptor struct {
	Path   string `json:"path"`
	Bucket string `json:"bucket"`
	Key    string `json:"key"`
}

type ArchiveResponse struct {
	Action       string           `json:"action"`
	Object       ObjectDescriptor `json:"object"`
	StorageClass string           `json:"storage_class"`
	RequestID    string           `json:"request_id,omitempty"`
	VersionID    string           `json:"version_id,omitempty"`
	ETag         string           `json:"etag,omitempty"`
	LastModified string           `json:"last_modified,omitempty"`
}

type ThawResponse struct {
	Action    string           `json:"action"`
	Object    ObjectDescriptor `json:"object"`
	RequestID string           `json:"request_id,omitempty"`
	Status    *RestoreStatus   `json:"status,omitempty"`
}

type RestoreStatus struct {
	Ongoing bool   `json:"ongoing"`
	Expiry  string `json:"expiry,omitempty"`
	Raw     string `json:"raw"`
}

func (d *S3) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
	if args.Obj == nil {
		return nil, fmt.Errorf("missing object reference")
	}
	if args.Obj.IsDir() {
		return nil, errs.NotSupport
	}

	switch strings.ToLower(strings.TrimSpace(args.Method)) {
	case "archive":
		return d.archive(ctx, args)
	case "archive_status":
		return d.archiveStatus(ctx, args)
	case "thaw":
		return d.thaw(ctx, args)
	case "thaw_status":
		return d.thawStatus(ctx, args)
	default:
		return nil, errs.NotSupport
	}
}

func (d *S3) archive(ctx context.Context, args model.OtherArgs) (interface{}, error) {
	key := getKey(args.Obj.GetPath(), false)
	payload := ArchiveRequest{}
	if err := DecodeOtherArgs(args.Data, &payload); err != nil {
		return nil, fmt.Errorf("parse archive request: %w", err)
	}
	if payload.StorageClass == "" {
		return nil, fmt.Errorf("storage_class is required")
	}
	storageClass := NormalizeStorageClass(payload.StorageClass)
	input := &s3.CopyObjectInput{
		Bucket:            &d.Bucket,
		Key:               &key,
		CopySource:        aws.String(url.PathEscape(d.Bucket + "/" + key)),
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
		StorageClass:      aws.String(storageClass),
	}
	copyReq, output := d.client.CopyObjectRequest(input)
	copyReq.SetContext(ctx)
	if err := copyReq.Send(); err != nil {
		return nil, err
	}

	resp := ArchiveResponse{
		Action:       "archive",
		Object:       d.describeObject(args.Obj, key),
		StorageClass: storageClass,
		RequestID:    copyReq.RequestID,
	}
	if output.VersionId != nil {
		resp.VersionID = aws.StringValue(output.VersionId)
	}
	if result := output.CopyObjectResult; result != nil {
		resp.ETag = aws.StringValue(result.ETag)
		if result.LastModified != nil {
			resp.LastModified = result.LastModified.UTC().Format(time.RFC3339)
		}
	}
	if status, err := d.describeObjectStatus(ctx, key); err == nil {
		if status.StorageClass != "" {
			resp.StorageClass = status.StorageClass
		}
	}
	return resp, nil
}

func (d *S3) archiveStatus(ctx context.Context, args model.OtherArgs) (interface{}, error) {
	key := getKey(args.Obj.GetPath(), false)
	status, err := d.describeObjectStatus(ctx, key)
	if err != nil {
		return nil, err
	}
	return ArchiveResponse{
		Action:       "archive_status",
		Object:       d.describeObject(args.Obj, key),
		StorageClass: status.StorageClass,
	}, nil
}

func (d *S3) thaw(ctx context.Context, args model.OtherArgs) (interface{}, error) {
	key := getKey(args.Obj.GetPath(), false)
	payload := ThawRequest{Days: 1}
	if err := DecodeOtherArgs(args.Data, &payload); err != nil {
		return nil, fmt.Errorf("parse thaw request: %w", err)
	}
	if payload.Days <= 0 {
		payload.Days = 1
	}
	restoreRequest := &s3.RestoreRequest{
		Days: aws.Int64(payload.Days),
	}
	if tier := NormalizeRestoreTier(payload.Tier); tier != "" {
		restoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{Tier: aws.String(tier)}
	}
	input := &s3.RestoreObjectInput{
		Bucket:         &d.Bucket,
		Key:            &key,
		RestoreRequest: restoreRequest,
	}
	restoreReq, _ := d.client.RestoreObjectRequest(input)
	restoreReq.SetContext(ctx)
	if err := restoreReq.Send(); err != nil {
		return nil, err
	}
	status, _ := d.describeObjectStatus(ctx, key)
	resp := ThawResponse{
		Action:    "thaw",
		Object:    d.describeObject(args.Obj, key),
		RequestID: restoreReq.RequestID,
	}
	if status != nil {
		resp.Status = status.Restore
	}
	return resp, nil
}

func (d *S3) thawStatus(ctx context.Context, args model.OtherArgs) (interface{}, error) {
	key := getKey(args.Obj.GetPath(), false)
	status, err := d.describeObjectStatus(ctx, key)
	if err != nil {
		return nil, err
	}
	return ThawResponse{
		Action: "thaw_status",
		Object: d.describeObject(args.Obj, key),
		Status: status.Restore,
	}, nil
}

func (d *S3) describeObject(obj model.Obj, key string) ObjectDescriptor {
	return ObjectDescriptor{
		Path:   obj.GetPath(),
		Bucket: d.Bucket,
		Key:    key,
	}
}

type objectStatus struct {
	StorageClass string
	Restore      *RestoreStatus
}

func (d *S3) describeObjectStatus(ctx context.Context, key string) (*objectStatus, error) {
	head, err := d.client.HeadObjectWithContext(ctx, &s3.HeadObjectInput{Bucket: &d.Bucket, Key: &key})
	if err != nil {
		return nil, err
	}
	status := &objectStatus{
		StorageClass: aws.StringValue(head.StorageClass),
		Restore:      parseRestoreHeader(head.Restore),
	}
	return status, nil
}

func parseRestoreHeader(header *string) *RestoreStatus {
	if header == nil {
		return nil
	}
	value := strings.TrimSpace(*header)
	if value == "" {
		return nil
	}
	status := &RestoreStatus{Raw: value}
	// Note: the value cannot simply be split on commas, because the quoted
	// RFC1123 expiry date itself contains one, e.g.
	// `ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`.
	// Scan for the quoted fields instead.
	status.Ongoing = strings.Contains(value, `ongoing-request="true"`)
	if i := strings.Index(value, `expiry-date="`); i >= 0 {
		rest := value[i+len(`expiry-date="`):]
		if j := strings.Index(rest, `"`); j >= 0 {
			expiry := rest[:j]
			if t, err := time.Parse(time.RFC1123, expiry); err == nil {
				status.Expiry = t.UTC().Format(time.RFC3339)
			} else if expiry != "" {
				status.Expiry = expiry
			}
		}
	}
	return status
}

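For example, the x-amz-restore values S3 returns while a restore is in flight and after it completes parse as (sketch):

	inFlight := `ongoing-request="true"`
	done := `ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`
	fmt.Println(parseRestoreHeader(&inFlight).Ongoing) // true
	fmt.Println(parseRestoreHeader(&done).Expiry)      // 2012-12-21T00:00:00Z
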
func DecodeOtherArgs(data interface{}, target interface{}) error {
	if data == nil {
		return nil
	}
	raw, err := json.Marshal(data)
	if err != nil {
		return err
	}
	return json.Unmarshal(raw, target)
}

func NormalizeStorageClass(value string) string {
	normalized := strings.ToLower(strings.TrimSpace(strings.ReplaceAll(value, "-", "_")))
	if normalized == "" {
		return value
	}
	if v, ok := storageClassLookup[normalized]; ok {
		return v
	}
	return value
}

func NormalizeRestoreTier(value string) string {
	normalized := strings.ToLower(strings.TrimSpace(value))
	switch normalized {
	case "", "default":
		return ""
	case "bulk":
		return s3.TierBulk
	case "standard":
		return s3.TierStandard
	case "expedited":
		return s3.TierExpedited
	default:
		return value
	}
}
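Putting the pieces together, a thaw arrives through the driver's generic Other entry point with a JSON payload mirroring ThawRequest (sketch; obj, ctx and the exact shape of model.OtherArgs are assumptions here):

	resp, err := d.Other(ctx, model.OtherArgs{
		Obj:    obj, // a file previously listed from this storage
		Method: OtherMethodThaw,
		Data:   map[string]interface{}{"days": 3, "tier": "expedited"},
	})
	// NormalizeRestoreTier maps "expedited" to s3.TierExpedited; resp is a ThawResponse.
	_, _ = resp, err
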
@@ -109,13 +109,13 @@ func (d *S3) listV1(prefix string, args model.ListArgs) ([]model.Obj, error) {
			if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
				continue
			}
			file := model.Object{
			file := &model.Object{
				//Id: *object.Key,
				Name:     name,
				Size:     *object.Size,
				Modified: *object.LastModified,
			}
			files = append(files, &file)
			files = append(files, model.WrapObjStorageClass(file, aws.StringValue(object.StorageClass)))
		}
		if listObjectsResult.IsTruncated == nil {
			return nil, errors.New("IsTruncated nil")
@@ -164,13 +164,13 @@ func (d *S3) listV2(prefix string, args model.ListArgs) ([]model.Obj, error) {
			if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
				continue
			}
			file := model.Object{
			file := &model.Object{
				//Id: *object.Key,
				Name:     name,
				Size:     *object.Size,
				Modified: *object.LastModified,
			}
			files = append(files, &file)
			files = append(files, model.WrapObjStorageClass(file, aws.StringValue(object.StorageClass)))
		}
		if !aws.BoolValue(listObjectsResult.IsTruncated) {
			break
@@ -202,6 +202,9 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error {
		CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)),
		Key:        &dstKey,
	}
	if storageClass := d.resolveStorageClass(); storageClass != nil {
		input.StorageClass = storageClass
	}
	_, err := d.client.CopyObject(input)
	return err
}

23 go.mod
@@ -8,6 +8,7 @@ require (
	github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
	github.com/KirCute/sftpd-alist v0.0.12
	github.com/ProtonMail/go-crypto v1.0.0
	github.com/ProtonMail/gopenpgp/v2 v2.7.4
	github.com/SheltonZhu/115driver v1.1.2
	github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21
	github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
@@ -38,6 +39,8 @@ require (
	github.com/google/uuid v1.6.0
	github.com/gorilla/websocket v1.5.3
	github.com/hekmon/transmissionrpc/v3 v3.0.0
	github.com/henrybear327/Proton-API-Bridge v1.0.0
	github.com/henrybear327/go-proton-api v1.0.0
	github.com/hirochachacha/go-smb2 v1.1.0
	github.com/ipfs/go-ipfs-api v0.7.0
	github.com/jlaffaye/ftp v0.2.0
@@ -81,7 +84,21 @@ require (
	gorm.io/gorm v1.25.11
)

require github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
require (
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
	github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
	github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
	github.com/ProtonMail/go-srp v0.0.7 // indirect
	github.com/PuerkitoBio/goquery v1.8.1 // indirect
	github.com/andybalholm/cascadia v1.3.2 // indirect
	github.com/bradenaw/juniper v0.15.2 // indirect
	github.com/cronokirby/saferith v0.33.0 // indirect
	github.com/emersion/go-message v0.18.0 // indirect
	github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
	github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
	github.com/relvacode/iso8601 v1.3.0 // indirect
)

require (
	github.com/STARRY-S/zip v0.2.1 // indirect
@@ -265,4 +282,8 @@ require (
	lukechampine.com/blake3 v1.1.7 // indirect
)

replace github.com/ProtonMail/go-proton-api => github.com/henrybear327/go-proton-api v1.0.0

replace github.com/cronokirby/saferith => github.com/Da3zKi7/saferith v0.33.0-fixed

replace github.com/SheltonZhu/115driver => github.com/okatu-loli/115driver v1.1.2

45 go.sum
@@ -34,14 +34,33 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po=
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E=
github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnOuISdg=
github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
github.com/ProtonMail/gopenpgp/v2 v2.7.4 h1:Vz/8+HViFFnf2A6XX8JOvZMrA6F5puwNvvF21O1mRlo=
github.com/ProtonMail/gopenpgp/v2 v2.7.4/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM=
github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ=
github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
@@ -67,6 +86,9 @@ github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAP
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -132,6 +154,9 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradenaw/juniper v0.15.2 h1:0JdjBGEF2jP1pOxmlNIrPhAoQN7Ng5IMAY5D0PHMW4U=
github.com/bradenaw/juniper v0.15.2/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
@@ -162,6 +187,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
@@ -197,6 +223,12 @@ github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj6
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
github.com/emersion/go-message v0.18.0 h1:7LxAXHRpSeoO/Wom3ZApVZYG7c3d17yCScYce8WiXA8=
github.com/emersion/go-message v0.18.0/go.mod h1:Zi69ACvzaoV/MBnrxfVBPV3xWEuCmC2nEN39oJF4B8A=
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 h1:IbFBtwoTQyw0fIM5xv1HF+Y+3ZijDR839WMulgxCcUY=
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594/go.mod h1:aqO8z8wPrjkscevZJFVE1wXJrLpC5LtJG7fqLOsPb2U=
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 h1:ATgqloALX6cHCranzkLb8/zjivwQ9DWWDCQRnxTPfaA=
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
@@ -337,6 +369,10 @@ github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -529,6 +565,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rclone/rclone v1.67.0 h1:yLRNgHEG2vQ60HCuzFqd0hYwKCRuWuvPUhvhMJ2jI5E=
github.com/rclone/rclone v1.67.0/go.mod h1:Cb3Ar47M/SvwfhAjZTbVXdtrP/JLtPFCq2tkdtBVC6w=
github.com/relvacode/iso8601 v1.3.0 h1:HguUjsGpIMh/zsTczGN3DVJFxTU/GX+MMmzcKoMO7ko=
github.com/relvacode/iso8601 v1.3.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -658,6 +696,8 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
@@ -735,6 +775,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -743,6 +784,7 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
@@ -791,6 +833,7 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -803,6 +846,7 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -819,6 +863,7 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=

@@ -37,6 +37,18 @@ func InitTaskManager() {
    if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted
        CleanTempDir()
    }
    workers := conf.Conf.Tasks.S3Transition.Workers
    if workers < 0 {
        workers = 0
    }
    fs.S3TransitionTaskManager = tache.NewManager[*fs.S3TransitionTask](
        tache.WithWorks(workers),
        tache.WithPersistFunction(
            db.GetTaskDataFunc("s3_transition", conf.Conf.Tasks.S3Transition.TaskPersistant),
            db.UpdateTaskDataFunc("s3_transition", conf.Conf.Tasks.S3Transition.TaskPersistant),
        ),
        tache.WithMaxRetry(conf.Conf.Tasks.S3Transition.MaxRetry),
    )
    fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry))
    op.RegisterSettingChangingCallback(func() {
        fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)))

@@ -60,6 +60,7 @@ type TasksConfig struct {
    Copy               TaskConfig `json:"copy" envPrefix:"COPY_"`
    Decompress         TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
    DecompressUpload   TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"`
    S3Transition       TaskConfig `json:"s3_transition" envPrefix:"S3_TRANSITION_"`
    AllowRetryCanceled bool       `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"`
}

@@ -184,6 +185,11 @@ func DefaultConfig() *Config {
            Workers:  5,
            MaxRetry: 2,
        },
        S3Transition: TaskConfig{
            Workers:  5,
            MaxRetry: 2,
            // TaskPersistant: true,
        },
        AllowRetryCanceled: false,
    },
    Cors: Cors{

@@ -34,6 +34,14 @@ func GetRoles(pageIndex, pageSize int) (roles []model.Role, count int64, err err
    return roles, count, nil
}

func GetAllRoles() ([]model.Role, error) {
    var roles []model.Role
    if err := db.Find(&roles).Error; err != nil {
        return nil, errors.WithStack(err)
    }
    return roles, nil
}

func CreateRole(r *model.Role) error {
    if err := db.Create(r).Error; err != nil {
        return errors.WithStack(err)

@@ -83,6 +83,14 @@ func GetUsers(pageIndex, pageSize int) (users []model.User, count int64, err err
    return users, count, nil
}

func GetAllUsers() ([]model.User, error) {
    var users []model.User
    if err := db.Find(&users).Error; err != nil {
        return nil, errors.WithStack(err)
    }
    return users, nil
}

func DeleteUserById(id uint) error {
    return errors.WithStack(db.Delete(&model.User{}, id).Error)
}

@@ -2,10 +2,15 @@ package fs

import (
    "context"
    "encoding/json"
    stdpath "path"
    "strings"

    "github.com/alist-org/alist/v3/drivers/s3"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/internal/task"
    "github.com/pkg/errors"
)

@@ -53,6 +58,38 @@ func other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
    if err != nil {
        return nil, errors.WithMessage(err, "failed get storage")
    }
    originalPath := args.Path

    if _, ok := storage.(*s3.S3); ok {
        method := strings.ToLower(strings.TrimSpace(args.Method))
        if method == s3.OtherMethodArchive || method == s3.OtherMethodThaw {
            if S3TransitionTaskManager == nil {
                return nil, errors.New("s3 transition task manager is not initialized")
            }
            var payload json.RawMessage
            if args.Data != nil {
                raw, err := json.Marshal(args.Data)
                if err != nil {
                    return nil, errors.WithMessage(err, "failed to encode request payload")
                }
                payload = raw
            }
            taskCreator, _ := ctx.Value("user").(*model.User)
            tsk := &S3TransitionTask{
                TaskExtension:    task.TaskExtension{Creator: taskCreator},
                status:           "queued",
                StorageMountPath: storage.GetStorage().MountPath,
                ObjectPath:       actualPath,
                DisplayPath:      originalPath,
                ObjectName:       stdpath.Base(actualPath),
                Transition:       method,
                Payload:          payload,
            }
            S3TransitionTaskManager.Add(tsk)
            return map[string]string{"task_id": tsk.GetID()}, nil
        }
    }

    args.Path = actualPath
    return op.Other(ctx, storage, args)
}

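Aside: only `archive` and `thaw` against an S3 storage take the asynchronous task path above; every other method still calls `op.Other` synchronously. A standalone sketch of just the normalization-and-routing decision; the literal values of `s3.OtherMethodArchive`/`s3.OtherMethodThaw` are not shown in this diff, so the constants below are assumed placeholders:

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed placeholders for the s3 driver's method names (not confirmed by this diff).
const (
	otherMethodArchive = "archive"
	otherMethodThaw    = "thaw"
)

// isAsyncTransition mirrors the check in other(): the method string is
// trimmed and lower-cased before comparison, so " Archive " still queues.
func isAsyncTransition(method string) bool {
	m := strings.ToLower(strings.TrimSpace(method))
	return m == otherMethodArchive || m == otherMethodThaw
}

func main() {
	fmt.Println(isAsyncTransition(" Archive ")) // true
	fmt.Println(isAsyncTransition("thaw"))      // true
	fmt.Println(isAsyncTransition("meta"))      // false
}
```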
internal/fs/s3_transition.go (new file, 310 lines)
@@ -0,0 +1,310 @@
package fs

import (
    "encoding/json"
    "fmt"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/s3"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/internal/task"
    "github.com/pkg/errors"
    "github.com/xhofe/tache"
)

const s3TransitionPollInterval = 15 * time.Second

// S3TransitionTask represents an asynchronous S3 archive/thaw request that is
// tracked via the task manager so that clients can monitor the progress of the
// operation.
type S3TransitionTask struct {
    task.TaskExtension
    status string

    StorageMountPath string          `json:"storage_mount_path"`
    ObjectPath       string          `json:"object_path"`
    DisplayPath      string          `json:"display_path"`
    ObjectName       string          `json:"object_name"`
    Transition       string          `json:"transition"`
    Payload          json.RawMessage `json:"payload,omitempty"`

    TargetStorageClass string `json:"target_storage_class,omitempty"`
    RequestID          string `json:"request_id,omitempty"`
    VersionID          string `json:"version_id,omitempty"`

    storage driver.Driver `json:"-"`
}

// S3TransitionTaskManager holds asynchronous S3 archive/thaw tasks.
var S3TransitionTaskManager *tache.Manager[*S3TransitionTask]

var _ task.TaskExtensionInfo = (*S3TransitionTask)(nil)

func (t *S3TransitionTask) GetName() string {
    action := strings.ToLower(t.Transition)
    if action == "" {
        action = "transition"
    }
    display := t.DisplayPath
    if display == "" {
        display = t.ObjectPath
    }
    if display == "" {
        display = t.ObjectName
    }
    return fmt.Sprintf("s3 %s %s", action, display)
}

func (t *S3TransitionTask) GetStatus() string {
    return t.status
}

func (t *S3TransitionTask) Run() error {
    t.ReinitCtx()
    t.ClearEndTime()
    start := time.Now()
    t.SetStartTime(start)
    defer func() { t.SetEndTime(time.Now()) }()

    if err := t.ensureStorage(); err != nil {
        t.status = fmt.Sprintf("locate storage failed: %v", err)
        return err
    }

    payload, err := t.decodePayload()
    if err != nil {
        t.status = fmt.Sprintf("decode payload failed: %v", err)
        return err
    }

    method := strings.ToLower(strings.TrimSpace(t.Transition))
    switch method {
    case s3.OtherMethodArchive:
        t.status = "submitting archive request"
        t.SetProgress(0)
        resp, err := op.Other(t.Ctx(), t.storage, model.FsOtherArgs{
            Path:   t.ObjectPath,
            Method: s3.OtherMethodArchive,
            Data:   payload,
        })
        if err != nil {
            t.status = fmt.Sprintf("archive request failed: %v", err)
            return err
        }
        archiveResp, ok := toArchiveResponse(resp)
        if ok {
            if t.TargetStorageClass == "" {
                t.TargetStorageClass = archiveResp.StorageClass
            }
            t.RequestID = archiveResp.RequestID
            t.VersionID = archiveResp.VersionID
            if archiveResp.StorageClass != "" {
                t.status = fmt.Sprintf("archive requested, waiting for %s", archiveResp.StorageClass)
            } else {
                t.status = "archive requested"
            }
        } else if sc := t.extractTargetStorageClass(); sc != "" {
            t.TargetStorageClass = sc
            t.status = fmt.Sprintf("archive requested, waiting for %s", sc)
        } else {
            t.status = "archive requested"
        }
        if t.TargetStorageClass != "" {
            t.TargetStorageClass = s3.NormalizeStorageClass(t.TargetStorageClass)
        }
        t.SetProgress(25)
        return t.waitForArchive()
    case s3.OtherMethodThaw:
        t.status = "submitting thaw request"
        t.SetProgress(0)
        resp, err := op.Other(t.Ctx(), t.storage, model.FsOtherArgs{
            Path:   t.ObjectPath,
            Method: s3.OtherMethodThaw,
            Data:   payload,
        })
        if err != nil {
            t.status = fmt.Sprintf("thaw request failed: %v", err)
            return err
        }
        thawResp, ok := toThawResponse(resp)
        if ok {
            t.RequestID = thawResp.RequestID
            if thawResp.Status != nil && !thawResp.Status.Ongoing {
                t.SetProgress(100)
                t.status = thawCompletionMessage(thawResp.Status)
                return nil
            }
        }
        t.status = "thaw requested"
        t.SetProgress(25)
        return t.waitForThaw()
    default:
        return errors.Errorf("unsupported transition method: %s", t.Transition)
    }
}

func (t *S3TransitionTask) ensureStorage() error {
    if t.storage != nil {
        return nil
    }
    storage, err := op.GetStorageByMountPath(t.StorageMountPath)
    if err != nil {
        return err
    }
    t.storage = storage
    return nil
}

func (t *S3TransitionTask) decodePayload() (interface{}, error) {
    if len(t.Payload) == 0 {
        return nil, nil
    }
    var payload interface{}
    if err := json.Unmarshal(t.Payload, &payload); err != nil {
        return nil, err
    }
    return payload, nil
}

func (t *S3TransitionTask) extractTargetStorageClass() string {
    if len(t.Payload) == 0 {
        return ""
    }
    var req s3.ArchiveRequest
    if err := json.Unmarshal(t.Payload, &req); err != nil {
        return ""
    }
    return s3.NormalizeStorageClass(req.StorageClass)
}

func (t *S3TransitionTask) waitForArchive() error {
    ticker := time.NewTicker(s3TransitionPollInterval)
    defer ticker.Stop()

    ctx := t.Ctx()
    for {
        select {
        case <-ctx.Done():
            t.status = "archive canceled"
            return ctx.Err()
        case <-ticker.C:
            resp, err := op.Other(ctx, t.storage, model.FsOtherArgs{
                Path:   t.ObjectPath,
                Method: s3.OtherMethodArchiveStatus,
            })
            if err != nil {
                t.status = fmt.Sprintf("archive status error: %v", err)
                return err
            }
            archiveResp, ok := toArchiveResponse(resp)
            if !ok {
                t.status = fmt.Sprintf("unexpected archive status response: %T", resp)
                return errors.Errorf("unexpected archive status response: %T", resp)
            }
            currentClass := strings.TrimSpace(archiveResp.StorageClass)
            target := strings.TrimSpace(t.TargetStorageClass)
            if target == "" {
                target = currentClass
                t.TargetStorageClass = currentClass
            }
            if currentClass == "" {
                t.status = "waiting for storage class update"
                t.SetProgress(50)
                continue
            }
            if strings.EqualFold(currentClass, target) {
                t.SetProgress(100)
                t.status = fmt.Sprintf("archive complete (%s)", currentClass)
                return nil
            }
            t.status = fmt.Sprintf("storage class %s (target %s)", currentClass, target)
            t.SetProgress(75)
        }
    }
}

func (t *S3TransitionTask) waitForThaw() error {
    ticker := time.NewTicker(s3TransitionPollInterval)
    defer ticker.Stop()

    ctx := t.Ctx()
    for {
        select {
        case <-ctx.Done():
            t.status = "thaw canceled"
            return ctx.Err()
        case <-ticker.C:
            resp, err := op.Other(ctx, t.storage, model.FsOtherArgs{
                Path:   t.ObjectPath,
                Method: s3.OtherMethodThawStatus,
            })
            if err != nil {
                t.status = fmt.Sprintf("thaw status error: %v", err)
                return err
            }
            thawResp, ok := toThawResponse(resp)
            if !ok {
                t.status = fmt.Sprintf("unexpected thaw status response: %T", resp)
                return errors.Errorf("unexpected thaw status response: %T", resp)
            }
            status := thawResp.Status
            if status == nil {
                t.status = "waiting for thaw status"
                t.SetProgress(50)
                continue
            }
            if status.Ongoing {
                t.status = fmt.Sprintf("thaw in progress (%s)", status.Raw)
                t.SetProgress(75)
                continue
            }
            t.SetProgress(100)
            t.status = thawCompletionMessage(status)
            return nil
        }
    }
}

func thawCompletionMessage(status *s3.RestoreStatus) string {
    if status == nil {
        return "thaw complete"
    }
    if status.Expiry != "" {
        return fmt.Sprintf("thaw complete, expires %s", status.Expiry)
    }
    return "thaw complete"
}

func toArchiveResponse(v interface{}) (s3.ArchiveResponse, bool) {
    switch resp := v.(type) {
    case s3.ArchiveResponse:
        return resp, true
    case *s3.ArchiveResponse:
        if resp != nil {
            return *resp, true
        }
    }
    return s3.ArchiveResponse{}, false
}

func toThawResponse(v interface{}) (s3.ThawResponse, bool) {
    switch resp := v.(type) {
    case s3.ThawResponse:
        return resp, true
    case *s3.ThawResponse:
        if resp != nil {
            return *resp, true
        }
    }
    return s3.ThawResponse{}, false
}

// Ensure compatibility with persistence when tasks are restored.
func (t *S3TransitionTask) OnRestore() {
    // The storage handle is not persisted intentionally; it will be lazily
    // re-fetched on the next Run invocation.
    t.storage = nil
}
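Aside: the payload is captured as `json.RawMessage` when the task is enqueued so it survives task persistence, then decoded back into a generic value before being handed to the driver. A standalone sketch of that round-trip (the helper below is a copy for illustration, not an import from the package):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// decodePayload mirrors S3TransitionTask.decodePayload: empty payloads stay
// nil, anything else is decoded into a generic interface{} value.
func decodePayload(raw json.RawMessage) (interface{}, error) {
	if len(raw) == 0 {
		return nil, nil
	}
	var payload interface{}
	if err := json.Unmarshal(raw, &payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	raw := json.RawMessage(`{"storage_class":"GLACIER"}`)
	payload, err := decodePayload(raw)
	fmt.Println(payload, err) // map[storage_class:GLACIER] <nil>

	empty, err := decodePayload(nil)
	fmt.Println(empty, err) // <nil> <nil>
}
```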
@@ -20,6 +20,10 @@ type ObjUnwrap interface {
    Unwrap() Obj
}

type StorageClassProvider interface {
    StorageClass() string
}

type Obj interface {
    GetSize() int64
    GetName() string
@@ -141,6 +145,13 @@ func WrapObjsName(objs []Obj) {
    }
}

func WrapObjStorageClass(obj Obj, storageClass string) Obj {
    if storageClass == "" {
        return obj
    }
    return &ObjWrapStorageClass{Obj: obj, storageClass: storageClass}
}

func UnwrapObj(obj Obj) Obj {
    if unwrap, ok := obj.(ObjUnwrap); ok {
        obj = unwrap.Unwrap()
@@ -168,6 +179,20 @@ func GetUrl(obj Obj) (url string, ok bool) {
    return url, false
}

func GetStorageClass(obj Obj) (string, bool) {
    if provider, ok := obj.(StorageClassProvider); ok {
        value := provider.StorageClass()
        if value == "" {
            return "", false
        }
        return value, true
    }
    if unwrap, ok := obj.(ObjUnwrap); ok {
        return GetStorageClass(unwrap.Unwrap())
    }
    return "", false
}

func GetRawObject(obj Obj) *Object {
    switch v := obj.(type) {
    case *ObjThumbURL:

@@ -11,6 +11,11 @@ type ObjWrapName struct {
    Obj
}

type ObjWrapStorageClass struct {
    storageClass string
    Obj
}

func (o *ObjWrapName) Unwrap() Obj {
    return o.Obj
}
@@ -19,6 +24,20 @@ func (o *ObjWrapName) GetName() string {
    return o.Name
}

func (o *ObjWrapStorageClass) Unwrap() Obj {
    return o.Obj
}

func (o *ObjWrapStorageClass) StorageClass() string {
    return o.storageClass
}

func (o *ObjWrapStorageClass) SetPath(path string) {
    if setter, ok := o.Obj.(SetPath); ok {
        setter.SetPath(path)
    }
}

type Object struct {
    ID   string
    Path string

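Aside: a minimal sketch of how the wrapper cooperates with `GetStorageClass`; the types below are simplified stand-ins for the `model` package, not the real definitions:

```go
package main

import "fmt"

// Simplified stand-ins for the model package interfaces.
type Obj interface{ GetName() string }

type StorageClassProvider interface{ StorageClass() string }

type ObjUnwrap interface{ Unwrap() Obj }

type object struct{ name string }

func (o object) GetName() string { return o.name }

// objWrapStorageClass mirrors model.ObjWrapStorageClass: it embeds the
// wrapped Obj and carries the storage class alongside it.
type objWrapStorageClass struct {
	Obj
	storageClass string
}

func (o objWrapStorageClass) StorageClass() string { return o.storageClass }
func (o objWrapStorageClass) Unwrap() Obj          { return o.Obj }

// getStorageClass mirrors model.GetStorageClass, walking wrapper chains.
func getStorageClass(obj Obj) (string, bool) {
	if provider, ok := obj.(StorageClassProvider); ok {
		if value := provider.StorageClass(); value != "" {
			return value, true
		}
		return "", false
	}
	if unwrap, ok := obj.(ObjUnwrap); ok {
		return getStorageClass(unwrap.Unwrap())
	}
	return "", false
}

func main() {
	wrapped := objWrapStorageClass{Obj: object{name: "a.bin"}, storageClass: "GLACIER"}
	fmt.Println(getStorageClass(wrapped))             // GLACIER true
	fmt.Println(getStorageClass(object{name: "b"})) // "" false (no provider in the chain)
}
```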
@@ -41,15 +41,27 @@ func GetStorageByMountPath(mountPath string) (driver.Driver, error) {
    return storageDriver, nil
}

func firstPathSegment(p string) string {
    p = utils.FixAndCleanPath(p)
    p = strings.TrimPrefix(p, "/")
    if p == "" {
        return ""
    }
    if i := strings.Index(p, "/"); i >= 0 {
        return p[:i]
    }
    return p
}

// CreateStorage Save the storage to database so storage can get an id
// then instantiate corresponding driver and save it in memory
func CreateStorage(ctx context.Context, storage model.Storage) (uint, error) {
    storage.Modified = time.Now()
    storage.MountPath = utils.FixAndCleanPath(storage.MountPath)

    if storage.MountPath == "/" {
        return 0, errors.New("Mount path cannot be '/'")
    }
    //if storage.MountPath == "/" {
    //	return 0, errors.New("Mount path cannot be '/'")
    //}

    var err error
    // check driver first
@@ -210,9 +222,9 @@ func UpdateStorage(ctx context.Context, storage model.Storage) error {
    }
    storage.Modified = time.Now()
    storage.MountPath = utils.FixAndCleanPath(storage.MountPath)
    if storage.MountPath == "/" {
        return errors.New("Mount path cannot be '/'")
    }
    //if storage.MountPath == "/" {
    //	return errors.New("Mount path cannot be '/'")
    //}
    err = db.UpdateStorage(&storage)
    if err != nil {
        return errors.WithMessage(err, "failed update storage in database")
@@ -267,6 +279,34 @@ func DeleteStorageById(ctx context.Context, id uint) error {
    if err != nil {
        return errors.WithMessage(err, "failed get storage")
    }
    firstMount := firstPathSegment(storage.MountPath)
    if firstMount != "" {
        roles, err := db.GetAllRoles()
        if err != nil {
            return errors.WithMessage(err, "failed to load roles")
        }
        users, err := db.GetAllUsers()
        if err != nil {
            return errors.WithMessage(err, "failed to load users")
        }
        var usedBy []string
        for _, r := range roles {
            for _, entry := range r.PermissionScopes {
                if firstPathSegment(entry.Path) == firstMount {
                    usedBy = append(usedBy, "role:"+r.Name)
                    break
                }
            }
        }
        for _, u := range users {
            if firstPathSegment(u.BasePath) == firstMount {
                usedBy = append(usedBy, "user:"+u.Username)
            }
        }
        if len(usedBy) > 0 {
            return errors.Errorf("storage is used by %s, please cancel usage first", strings.Join(usedBy, ", "))
        }
    }
    if !storage.Disabled {
        storageDriver, err := GetStorageByMountPath(storage.MountPath)
        if err != nil {

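Aside: the deletion guard above keys role scopes and user base paths to a storage by the first path segment of the mount path. A standalone sketch of that helper's behavior, using `path.Clean` as a simplified stand-in for `utils.FixAndCleanPath`:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// firstSegment mirrors op.firstPathSegment, with path.Clean standing in
// for utils.FixAndCleanPath.
func firstSegment(p string) string {
	p = path.Clean("/" + strings.TrimPrefix(p, "/"))
	p = strings.TrimPrefix(p, "/")
	if p == "" {
		return ""
	}
	if i := strings.Index(p, "/"); i >= 0 {
		return p[:i]
	}
	return p
}

func main() {
	fmt.Println(firstSegment("/onedrive/docs/a.txt")) // onedrive
	fmt.Println(firstSegment("onedrive"))             // onedrive
	fmt.Println(firstSegment("/"))                    // (empty string)
}
```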
@@ -44,17 +44,19 @@ type ArchiveContentResp struct {
}

func toObjsRespWithoutSignAndThumb(obj model.Obj) ObjResp {
    storageClass, _ := model.GetStorageClass(obj)
    return ObjResp{
        Name:        obj.GetName(),
        Size:        obj.GetSize(),
        IsDir:       obj.IsDir(),
        Modified:    obj.ModTime(),
        Created:     obj.CreateTime(),
        HashInfoStr: obj.GetHash().String(),
        HashInfo:    obj.GetHash().Export(),
        Sign:        "",
        Thumb:       "",
        Type:        utils.GetObjType(obj.GetName(), obj.IsDir()),
        Name:         obj.GetName(),
        Size:         obj.GetSize(),
        IsDir:        obj.IsDir(),
        Modified:     obj.ModTime(),
        Created:      obj.CreateTime(),
        HashInfoStr:  obj.GetHash().String(),
        HashInfo:     obj.GetHash().Export(),
        Sign:         "",
        Thumb:        "",
        Type:         utils.GetObjType(obj.GetName(), obj.IsDir()),
        StorageClass: storageClass,
    }
}

@@ -165,25 +165,25 @@ func CurrentUser(c *gin.Context) {

    var roleNames []string
    permMap := map[string]int32{}
    addedPaths := map[string]bool{}
    paths := make([]string, 0)

    for _, role := range user.RolesDetail {
        roleNames = append(roleNames, role.Name)
        for _, entry := range role.PermissionScopes {
            cleanPath := path.Clean("/" + strings.TrimPrefix(entry.Path, "/"))
            if _, ok := permMap[cleanPath]; !ok {
                paths = append(paths, cleanPath)
            }
            permMap[cleanPath] |= entry.Permission
        }
    }
    userResp.RoleNames = roleNames

    for fullPath, perm := range permMap {
        if !addedPaths[fullPath] {
            userResp.Permissions = append(userResp.Permissions, model.PermissionEntry{
                Path:       fullPath,
                Permission: perm,
            })
            addedPaths[fullPath] = true
        }
    for _, fullPath := range paths {
        userResp.Permissions = append(userResp.Permissions, model.PermissionEntry{
            Path:       fullPath,
            Permission: permMap[fullPath],
        })
    }

    common.SuccessResp(c, userResp)

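Aside: the rewritten aggregation drops the `addedPaths` map and the random-order map iteration in favor of an insertion-ordered `paths` slice, so the permission list in the response is deterministic across requests. A minimal sketch of the merge under that reading:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

type entry struct {
	Path       string
	Permission int32
}

// mergeScopes mirrors the CurrentUser handler: permissions from all roles
// are OR-ed per cleaned path, while the paths slice preserves first-seen
// order so the output is deterministic.
func mergeScopes(roles [][]entry) []entry {
	permMap := map[string]int32{}
	paths := make([]string, 0)
	for _, scopes := range roles {
		for _, e := range scopes {
			cleanPath := path.Clean("/" + strings.TrimPrefix(e.Path, "/"))
			if _, ok := permMap[cleanPath]; !ok {
				paths = append(paths, cleanPath)
			}
			permMap[cleanPath] |= e.Permission
		}
	}
	out := make([]entry, 0, len(paths))
	for _, p := range paths {
		out = append(out, entry{Path: p, Permission: permMap[p]})
	}
	return out
}

func main() {
	roles := [][]entry{
		{{Path: "/docs", Permission: 1}},
		{{Path: "docs", Permission: 2}, {Path: "/music", Permission: 4}},
	}
	fmt.Println(mergeScopes(roles)) // [{/docs 3} {/music 4}]
}
```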
@@ -33,18 +33,19 @@ type DirReq struct {
}

type ObjResp struct {
    Id          string                     `json:"id"`
    Path        string                     `json:"path"`
    Name        string                     `json:"name"`
    Size        int64                      `json:"size"`
    IsDir       bool                       `json:"is_dir"`
    Modified    time.Time                  `json:"modified"`
    Created     time.Time                  `json:"created"`
    Sign        string                     `json:"sign"`
    Thumb       string                     `json:"thumb"`
    Type        int                        `json:"type"`
    HashInfoStr string                     `json:"hashinfo"`
    HashInfo    map[*utils.HashType]string `json:"hash_info"`
    Id           string                     `json:"id"`
    Path         string                     `json:"path"`
    Name         string                     `json:"name"`
    Size         int64                      `json:"size"`
    IsDir        bool                       `json:"is_dir"`
    Modified     time.Time                  `json:"modified"`
    Created      time.Time                  `json:"created"`
    Sign         string                     `json:"sign"`
    Thumb        string                     `json:"thumb"`
    Type         int                        `json:"type"`
    HashInfoStr  string                     `json:"hashinfo"`
    HashInfo     map[*utils.HashType]string `json:"hash_info"`
    StorageClass string                     `json:"storage_class,omitempty"`
}

type FsListResp struct {
@@ -57,19 +58,20 @@ type FsListResp struct {
}

type ObjLabelResp struct {
    Id          string                     `json:"id"`
    Path        string                     `json:"path"`
    Name        string                     `json:"name"`
    Size        int64                      `json:"size"`
    IsDir       bool                       `json:"is_dir"`
    Modified    time.Time                  `json:"modified"`
    Created     time.Time                  `json:"created"`
    Sign        string                     `json:"sign"`
    Thumb       string                     `json:"thumb"`
    Type        int                        `json:"type"`
    HashInfoStr string                     `json:"hashinfo"`
    HashInfo    map[*utils.HashType]string `json:"hash_info"`
    LabelList   []model.Label              `json:"label_list"`
    Id           string                     `json:"id"`
    Path         string                     `json:"path"`
    Name         string                     `json:"name"`
    Size         int64                      `json:"size"`
    IsDir        bool                       `json:"is_dir"`
    Modified     time.Time                  `json:"modified"`
    Created      time.Time                  `json:"created"`
    Sign         string                     `json:"sign"`
    Thumb        string                     `json:"thumb"`
    Type         int                        `json:"type"`
    HashInfoStr  string                     `json:"hashinfo"`
    HashInfo     map[*utils.HashType]string `json:"hash_info"`
    LabelList    []model.Label              `json:"label_list"`
    StorageClass string                     `json:"storage_class,omitempty"`
}

func FsList(c *gin.Context) {

@@ -256,20 +258,22 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjLabelResp {
            labels = labelsByName[obj.GetName()]
        }
        thumb, _ := model.GetThumb(obj)
        storageClass, _ := model.GetStorageClass(obj)
        resp = append(resp, ObjLabelResp{
            Id:          obj.GetID(),
            Path:        obj.GetPath(),
            Name:        obj.GetName(),
            Size:        obj.GetSize(),
            IsDir:       obj.IsDir(),
            Modified:    obj.ModTime(),
            Created:     obj.CreateTime(),
            HashInfoStr: obj.GetHash().String(),
            HashInfo:    obj.GetHash().Export(),
            Sign:        common.Sign(obj, parent, encrypt),
            Thumb:       thumb,
            Type:        utils.GetObjType(obj.GetName(), obj.IsDir()),
            LabelList:   labels,
            Id:           obj.GetID(),
            Path:         obj.GetPath(),
            Name:         obj.GetName(),
            Size:         obj.GetSize(),
            IsDir:        obj.IsDir(),
            Modified:     obj.ModTime(),
            Created:      obj.CreateTime(),
            HashInfoStr:  obj.GetHash().String(),
            HashInfo:     obj.GetHash().Export(),
            Sign:         common.Sign(obj, parent, encrypt),
            Thumb:        thumb,
            Type:         utils.GetObjType(obj.GetName(), obj.IsDir()),
            LabelList:    labels,
            StorageClass: storageClass,
        })
    }
    return resp
@@ -374,20 +378,22 @@ func FsGet(c *gin.Context) {
    }
    parentMeta, _ := op.GetNearestMeta(parentPath)
    thumb, _ := model.GetThumb(obj)
    storageClass, _ := model.GetStorageClass(obj)
    common.SuccessResp(c, FsGetResp{
        ObjResp: ObjResp{
            Id:          obj.GetID(),
            Path:        obj.GetPath(),
            Name:        obj.GetName(),
            Size:        obj.GetSize(),
            IsDir:       obj.IsDir(),
            Modified:    obj.ModTime(),
            Created:     obj.CreateTime(),
            HashInfoStr: obj.GetHash().String(),
            HashInfo:    obj.GetHash().Export(),
            Sign:        common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
            Type:        utils.GetFileType(obj.GetName()),
            Thumb:       thumb,
            Id:           obj.GetID(),
            Path:         obj.GetPath(),
            Name:         obj.GetName(),
            Size:         obj.GetSize(),
            IsDir:        obj.IsDir(),
            Modified:     obj.ModTime(),
            Created:      obj.CreateTime(),
            HashInfoStr:  obj.GetHash().String(),
            HashInfo:     obj.GetHash().Export(),
            Sign:         common.Sign(obj, parentPath, isEncrypt(meta, reqPath)),
            Type:         utils.GetFileType(obj.GetName()),
            Thumb:        thumb,
            StorageClass: storageClass,
        },
        RawURL: rawURL,
        Readme: getReadme(meta, reqPath),

@@ -4,6 +4,7 @@ import (
    "encoding/base64"
    "errors"
    "fmt"
    "github.com/alist-org/alist/v3/internal/op"
    "net/http"
    "net/url"
    "path"
@@ -154,7 +155,7 @@ func autoRegister(username, userID string, err error) (*model.User, error) {
        Password:   random.String(16),
        Permission: int32(setting.GetInt(conf.SSODefaultPermission, 0)),
        BasePath:   setting.GetStr(conf.SSODefaultDir),
        Role:       nil,
        Role:       model.Roles{op.GetDefaultRoleID()},
        Disabled:   false,
        SsoID:      userID,
    }
@@ -256,6 +257,7 @@ func OIDCLoginCallback(c *gin.Context) {
        user, err = autoRegister(userID, userID, err)
        if err != nil {
            common.ErrorResp(c, err, 400)
            return
        }
    }
    token, err := common.GenerateToken(user)

@@ -220,6 +220,7 @@ func SetupTaskRoute(g *gin.RouterGroup) {
    taskRoute(g.Group("/copy"), fs.CopyTaskManager)
    taskRoute(g.Group("/offline_download"), tool.DownloadTaskManager)
    taskRoute(g.Group("/offline_download_transfer"), tool.TransferTaskManager)
    taskRoute(g.Group("/s3_transition"), fs.S3TransitionTaskManager)
    taskRoute(g.Group("/decompress"), fs.ArchiveDownloadTaskManager)
    taskRoute(g.Group("/decompress_upload"), fs.ArchiveContentUploadTaskManager)
}

@@ -113,7 +113,7 @@ func WebDAVAuth(c *gin.Context) {
        reqPath = "/"
    }
    reqPath, _ = url.PathUnescape(reqPath)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = webdav.ResolvePath(user, reqPath)
    if err != nil {
        c.Status(http.StatusForbidden)
        c.Abort()

server/webdav/path.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package webdav

import (
    "path"
    "strings"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
)

// ResolvePath normalizes the provided raw path and resolves it against the user's base path
// before delegating to the user-aware JoinPath permission checks.
func ResolvePath(user *model.User, raw string) (string, error) {
    cleaned := utils.FixAndCleanPath(raw)
    basePath := utils.FixAndCleanPath(user.BasePath)

    if cleaned != "/" && basePath != "/" && !utils.IsSubPath(basePath, cleaned) {
        cleaned = path.Join(basePath, strings.TrimPrefix(cleaned, "/"))
    }

    return user.JoinPath(cleaned)
}
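Aside: a minimal sketch of the resolution rule in isolation; `fixAndClean` and `isSubPath` below are simplified stand-ins for the `utils` helpers, and the final permission check done by `user.JoinPath` is omitted:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// fixAndClean is a simplified stand-in for utils.FixAndCleanPath.
func fixAndClean(p string) string {
	return path.Clean("/" + strings.TrimPrefix(p, "/"))
}

// isSubPath is a simplified stand-in for utils.IsSubPath.
func isSubPath(base, p string) bool {
	return p == base || strings.HasPrefix(p, base+"/")
}

// resolve mirrors webdav.ResolvePath up to (but not including) the
// user.JoinPath permission check: paths outside the user's base path are
// re-rooted under it instead of being rejected outright.
func resolve(basePath, raw string) string {
	cleaned := fixAndClean(raw)
	base := fixAndClean(basePath)
	if cleaned != "/" && base != "/" && !isSubPath(base, cleaned) {
		cleaned = path.Join(base, strings.TrimPrefix(cleaned, "/"))
	}
	return cleaned
}

func main() {
	fmt.Println(resolve("/alice", "/docs/readme.md")) // /alice/docs/readme.md
	fmt.Println(resolve("/alice", "/alice/docs"))     // /alice/docs (already under base)
	fmt.Println(resolve("/", "/docs"))                // /docs (root base is a no-op)
}
```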
@@ -194,7 +194,7 @@ func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status
    }
    ctx := r.Context()
    user := ctx.Value("user").(*model.User)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return 403, err
    }
@@ -222,7 +222,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
    // TODO: check locks for read-only access??
    ctx := r.Context()
    user := ctx.Value("user").(*model.User)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return http.StatusForbidden, err
    }
@@ -282,7 +282,7 @@ func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status i

    ctx := r.Context()
    user := ctx.Value("user").(*model.User)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return 403, err
    }
@@ -321,7 +321,7 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
    // comments in http.checkEtag.
    ctx := r.Context()
    user := ctx.Value("user").(*model.User)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return http.StatusForbidden, err
    }
@@ -375,7 +375,7 @@ func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status in

    ctx := r.Context()
    user := ctx.Value("user").(*model.User)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return 403, err
    }
@@ -439,11 +439,11 @@ func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status

    ctx := r.Context()
    user := ctx.Value("user").(*model.User)
    src, err = user.JoinPath(src)
    src, err = ResolvePath(user, src)
    if err != nil {
        return 403, err
    }
    dst, err = user.JoinPath(dst)
    dst, err = ResolvePath(user, dst)
    if err != nil {
        return 403, err
    }
@@ -540,7 +540,7 @@ func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus
    if err != nil {
        return status, err
    }
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return 403, err
    }
@@ -623,7 +623,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status
    userAgent := r.Header.Get("User-Agent")
    ctx = context.WithValue(ctx, "userAgent", userAgent)
    user := ctx.Value("user").(*model.User)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return 403, err
    }
@@ -801,7 +801,7 @@ func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (statu

    ctx := r.Context()
    user := ctx.Value("user").(*model.User)
    reqPath, err = user.JoinPath(reqPath)
    reqPath, err = ResolvePath(user, reqPath)
    if err != nil {
        return 403, err
    }