Compare commits


1 Commit

Author      SHA1        Message                             Date
gunzhibin   0e1604300e  feat commit some permission code    2025-05-23 16:22:49 +08:00
169 changed files with 1168 additions and 12221 deletions

.github/FUNDING.yml vendored
View File

@@ -10,4 +10,4 @@ liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: ['https://alistgo.com/guide/sponsor.html']
custom: ['https://alist.nn.ci/guide/sponsor.html']

View File

@@ -16,14 +16,14 @@ body:
您必须勾选以下所有内容否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
options:
- label: |
I have read the [documentation](https://alistgo.com).
我已经阅读了[文档](https://alistgo.com)。
I have read the [documentation](https://alist.nn.ci).
我已经阅读了[文档](https://alist.nn.ci)。
- label: |
I'm sure there are no duplicate issues or discussions.
我确定没有重复的issue或讨论。
- label: |
I'm sure it's due to `AList` and not something else(such as [Network](https://alistgo.com/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alistgo.com/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
- label: |
I'm sure this issue is not fixed in the latest version.
我确定这个问题在最新版本中没有被修复。

View File

@@ -7,7 +7,7 @@ body:
label: Please make sure of the following things
description: You may select more than one, even select all.
options:
- label: I have read the [documentation](https://alistgo.com).
- label: I have read the [documentation](https://alist.nn.ci).
- label: I'm sure there are no duplicate issues or discussions.
- label: I'm sure this feature is not implemented.
- label: I'm sure it's a reasonable and popular requirement.

View File

@@ -119,7 +119,7 @@ jobs:
- name: Checkout repo
uses: actions/checkout@v4
with:
repository: AlistGo/desktop-release
repository: alist-org/desktop-release
ref: main
persist-credentials: false
fetch-depth: 0
@@ -135,4 +135,4 @@ jobs:
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: AlistGo/desktop-release
repository: alist-org/desktop-release

View File

@@ -25,8 +25,6 @@ jobs:
- android-arm64
name: Build
runs-on: ${{ matrix.platform }}
env:
GOPROXY: https://proxy.golang.org,direct
steps:
- name: Checkout

View File

@@ -72,7 +72,7 @@ jobs:
- name: Checkout repo
uses: actions/checkout@v4
with:
repository: AlistGo/desktop-release
repository: alist-org/desktop-release
ref: main
persist-credentials: false
fetch-depth: 0
@@ -89,4 +89,4 @@ jobs:
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: AlistGo/desktop-release
repository: alist-org/desktop-release

View File

@@ -18,7 +18,6 @@ env:
REGISTRY: 'xhofe/alist'
REGISTRY_USERNAME: 'xhofe'
REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
GITHUB_CR_REPO: ghcr.io/${{ github.repository }}
ARTIFACT_NAME: 'binaries_docker_release'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
IMAGE_PUSH: ${{ github.event_name == 'push' }}
@@ -115,21 +114,11 @@ jobs:
username: ${{ env.REGISTRY_USERNAME }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
logout: true
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.REGISTRY }}
${{ env.GITHUB_CR_REPO }}
images: ${{ env.REGISTRY }}
tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }}
flavor: |
${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }}

View File

@@ -1,4 +1,4 @@
FROM alpine:3.20.7
FROM alpine:edge
ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false

View File

@@ -1,5 +1,5 @@
<div align="center">
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a>
<a href="https://alistgo.com/guide/sponsor.html">
<a href="https://alist.nn.ci/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a>
</div>
@@ -57,9 +57,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] WebDav(Support OneDrive/SharePoint without API)
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn/)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com/)
- [x] [BaiduNetdisk](http://pan.baidu.com/)
@@ -90,7 +88,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] Dark mode
- [x] I18n
- [x] Protected routes (password protection and authentication)
- [x] WebDav (see https://alistgo.com/guide/webdav.html for details)
- [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
- [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare Workers proxy
- [x] File/Folder package download
@@ -103,10 +101,6 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
<https://alistgo.com/>
## API Documentation (via Apifox):
<https://alist-public.apifox.cn/>
## Demo
<https://al.nn.ci>
@@ -118,11 +112,13 @@ Please go to our [discussion forum](https://github.com/alist-org/alist/discussio
## Sponsor
AList is an open-source software, if you happen to like this project and want me to keep going, please consider sponsoring me or providing a single donation! Thanks for all the love and support:
https://alistgo.com/guide/sponsor.html
https://alist.nn.ci/guide/sponsor.html
### Special sponsors
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
## Contributors

View File

@@ -1,5 +1,5 @@
<div align="center">
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a>
<a href="https://alistgo.com/zh/guide/sponsor.html">
<a href="https://alist.nn.ci/zh/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a>
</div>
@@ -57,9 +57,7 @@
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
- [x] WebDav(支持无API的OneDrive/SharePoint)
- [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [分秒帧](https://www.mediatrack.cn/)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组)
- [x] [Yandex.Disk](https://disk.yandex.com/)
- [x] [百度网盘](http://pan.baidu.com/)
@@ -88,7 +86,7 @@
- [x] 黑暗模式
- [x] 国际化
- [x] 受保护的路由(密码保护和身份验证)
- [x] WebDav (具体见 https://alistgo.com/zh/guide/webdav.html)
- [x] WebDav (具体见 https://alist.nn.ci/zh/guide/webdav.html)
- [x] [Docker 部署](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare workers 中转
- [x] 文件/文件夹打包下载
@@ -99,11 +97,7 @@
## 文档
<https://alistgo.com/zh/>
## API 文档(通过 Apifox 提供)
<https://alist-public.apifox.cn/>
<https://alist.nn.ci/zh/>
## Demo
@@ -115,11 +109,13 @@
## 赞助
AList 是一个开源软件如果你碰巧喜欢这个项目并希望我继续下去请考虑赞助我或提供一个单一的捐款感谢所有的爱和支持https://alistgo.com/zh/guide/sponsor.html
AList 是一个开源软件如果你碰巧喜欢这个项目并希望我继续下去请考虑赞助我或提供一个单一的捐款感谢所有的爱和支持https://alist.nn.ci/zh/guide/sponsor.html
### 特别赞助
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - 苹果生态下优雅的网盘视频播放器iPhoneiPadMacApple TV全平台支持。
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (国内API服务器赞助)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
## 贡献者

View File

@@ -1,5 +1,5 @@
<div align="center">
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a>
<a href="https://alistgo.com/guide/sponsor.html">
<a href="https://alist.nn.ci/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a>
</div>
@@ -57,9 +57,7 @@
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] WebDav(Support OneDrive/SharePoint without API)
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn/)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com/)
- [x] [BaiduNetdisk](http://pan.baidu.com/)
@@ -89,7 +87,7 @@
- [x] ダークモード
- [x] 国際化
- [x] 保護されたルート (パスワード保護と認証)
- [x] WebDav (詳細は https://alistgo.com/guide/webdav.html を参照)
- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare ワーカープロキシ
- [x] ファイル/フォルダパッケージのダウンロード
@@ -100,11 +98,7 @@
## ドキュメント
<https://alistgo.com/>
## APIドキュメントApifox 提供)
<https://alist-public.apifox.cn/>
<https://alist.nn.ci/>
## デモ
@@ -117,11 +111,13 @@
## スポンサー
AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討くださいすべての愛とサポートに感謝します:
https://alistgo.com/guide/sponsor.html
https://alist.nn.ci/guide/sponsor.html
### スペシャルスポンサー
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
## コントリビューター

View File

@@ -93,7 +93,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://github.com/go-cross/musl-toolchain-archive/releases/latest/download/"
BASE="https://musl.cc/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
@@ -245,7 +245,7 @@ BuildReleaseFreeBSD() {
cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
echo building for freebsd-${os_arch}
sudo mkdir -p "/opt/freebsd/${os_arch}"
wget -q https://download.freebsd.org/releases/${os_arch}/14.3-RELEASE/base.txz
wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
rm base.txz
export GOOS=freebsd

View File

@@ -1,7 +1,6 @@
package cmd
import (
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_46_0"
"os"
"path/filepath"
"strconv"
@@ -17,12 +16,6 @@ func Init() {
bootstrap.InitConfig()
bootstrap.Log()
bootstrap.InitDB()
if v3_46_0.IsLegacyRoleDetected() {
utils.Log.Warnf("Detected legacy role format, executing ConvertLegacyRoles patch early...")
v3_46_0.ConvertLegacyRoles()
}
data.InitData()
bootstrap.InitStreamLimit()
bootstrap.InitIndex()

View File

@@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{
Short: "A file list program that supports multiple storage.",
Long: `A file list program that supports multiple storage,
built with love by Xhofe and friends in Go/Solid.js.
Complete documentation is available at https://alistgo.com/`,
Complete documentation is available at https://alist.nn.ci/`,
}
func Execute() {
@@ -27,7 +27,7 @@ func Execute() {
}
func init() {
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "/data", "data folder")
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
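Note on the `--data` default flipping between a relative `data` and an absolute `/data`: a relative value resolves against the process working directory, so the effective folder depends on where the binary is launched from. A minimal sketch of that resolution, separate from AList's own bootstrap code:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical illustration: relative flag values resolve against the
	// current working directory, absolute ones are used as-is.
	for _, dir := range []string{"data", "/data"} {
		abs, err := filepath.Abs(dir)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-5s -> %s\n", dir, abs)
	}
}
```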

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
@@ -31,7 +29,6 @@ type Pan123 struct {
model.Storage
Addition
apiRateLimit sync.Map
safeBoxUnlocked sync.Map
}
func (d *Pan123) Config() driver.Config {
@@ -55,27 +52,10 @@ func (d *Pan123) Drop(ctx context.Context) error {
}
func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if f, ok := dir.(File); ok && f.IsLock {
if err := d.unlockSafeBox(f.FileId); err != nil {
return nil, err
}
}
files, err := d.getFiles(ctx, dir.GetID(), dir.GetName())
if err != nil {
msg := strings.ToLower(err.Error())
if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
if id, e := strconv.ParseInt(dir.GetID(), 10, 64); e == nil {
if e = d.unlockSafeBox(id); e == nil {
files, err = d.getFiles(ctx, dir.GetID(), dir.GetName())
} else {
return nil, e
}
}
}
if err != nil {
return nil, err
}
}
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
return src, nil
})

View File

@@ -8,7 +8,6 @@ import (
type Addition struct {
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
SafePassword string `json:"safe_password"`
driver.RootID
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@@ -20,7 +20,6 @@ type File struct {
Etag string `json:"Etag"`
S3KeyFlag string `json:"S3KeyFlag"`
DownloadUrl string `json:"DownloadUrl"`
IsLock bool `json:"IsLock"`
}
func (f File) CreateTime() time.Time {

View File

@@ -43,7 +43,6 @@ const (
S3Auth = MainApi + "/file/s3_upload_object/auth"
UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
S3Complete = MainApi + "/file/s3_complete_multipart_upload"
SafeBoxUnlock = MainApi + "/restful/goapi/v1/file/safe_box/auth/unlockbox"
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)
@@ -164,10 +163,10 @@ func (d *Pan123) login() error {
SetHeaders(map[string]string{
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
//"user-agent": "Dart/2.19(dart:io)-alist",
"user-agent": "Dart/2.19(dart:io)-alist",
"platform": "web",
"app-version": "3",
"user-agent": base.UserAgent,
//"user-agent": base.UserAgent,
}).
SetBody(body).Post(SignIn)
if err != nil {
@@ -203,7 +202,7 @@ do:
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"authorization": "Bearer " + d.AccessToken,
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
"platform": "web",
"app-version": "3",
//"user-agent": base.UserAgent,
@@ -239,22 +238,6 @@ do:
return body, nil
}
func (d *Pan123) unlockSafeBox(fileId int64) error {
if _, ok := d.safeBoxUnlocked.Load(fileId); ok {
return nil
}
data := base.Json{"password": d.SafePassword}
url := fmt.Sprintf("%s?fileId=%d", SafeBoxUnlock, fileId)
_, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
}, nil)
if err != nil {
return err
}
d.safeBoxUnlocked.Store(fileId, true)
return nil
}
func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]File, error) {
page := 1
total := 0
@@ -284,15 +267,6 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
req.SetQueryParams(query)
}, &resp)
if err != nil {
msg := strings.ToLower(err.Error())
if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
if fid, e := strconv.ParseInt(parentId, 10, 64); e == nil {
if e = d.unlockSafeBox(fid); e == nil {
return d.getFiles(ctx, parentId, name)
}
return nil, e
}
}
return nil, err
}
log.Debug(string(_res))
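The removed safe-box support unlocks a box once and remembers the ID in a `sync.Map`, so later listings skip the password round-trip. A self-contained sketch of that unlock-once pattern, with the HTTP unlock replaced by a stand-in:

```go
package main

import (
	"fmt"
	"sync"
)

// unlockOnce mirrors the removed cache: remember every box ID that was
// already unlocked so the password round-trip happens at most once per ID.
type unlockOnce struct {
	unlocked sync.Map // fileId -> true
}

func (u *unlockOnce) ensure(fileId int64, unlock func(int64) error) error {
	if _, ok := u.unlocked.Load(fileId); ok {
		return nil // already unlocked earlier
	}
	if err := unlock(fileId); err != nil {
		return err
	}
	u.unlocked.Store(fileId, true)
	return nil
}

func main() {
	var u unlockOnce
	calls := 0
	unlock := func(id int64) error { calls++; return nil } // stand-in for the POST
	_ = u.ensure(42, unlock)
	_ = u.ensure(42, unlock)
	fmt.Println(calls) // 1: the second call hits the cache
}
```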

View File

@@ -1,191 +0,0 @@
package _123Open
import (
"fmt"
"github.com/go-resty/resty/v2"
"net/http"
)
const (
// baseurl
ApiBaseURL = "https://open-api.123pan.com"
// auth
ApiToken = "/api/v1/access_token"
// file list
ApiFileList = "/api/v2/file/list"
// direct link
ApiGetDirectLink = "/api/v1/direct-link/url"
// mkdir
ApiMakeDir = "/upload/v1/file/mkdir"
// remove
ApiRemove = "/api/v1/file/trash"
// upload
ApiUploadDomainURL = "/upload/v2/file/domain"
ApiSingleUploadURL = "/upload/v2/file/single/create"
ApiCreateUploadURL = "/upload/v2/file/create"
ApiUploadSliceURL = "/upload/v2/file/slice"
ApiUploadCompleteURL = "/upload/v2/file/upload_complete"
// move
ApiMove = "/api/v1/file/move"
// rename
ApiRename = "/api/v1/file/name"
)
type Response[T any] struct {
Code int `json:"code"`
Message string `json:"message"`
Data T `json:"data"`
}
type TokenResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data TokenData `json:"data"`
}
type TokenData struct {
AccessToken string `json:"accessToken"`
ExpiredAt string `json:"expiredAt"`
}
type FileListResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data FileListData `json:"data"`
}
type FileListData struct {
LastFileId int64 `json:"lastFileId"`
FileList []File `json:"fileList"`
}
type DirectLinkResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data DirectLinkData `json:"data"`
}
type DirectLinkData struct {
URL string `json:"url"`
}
type MakeDirRequest struct {
Name string `json:"name"`
ParentID int64 `json:"parentID"`
}
type MakeDirResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data MakeDirData `json:"data"`
}
type MakeDirData struct {
DirID int64 `json:"dirID"`
}
type RemoveRequest struct {
FileIDs []int64 `json:"fileIDs"`
}
type UploadCreateResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadCreateData `json:"data"`
}
type UploadCreateData struct {
FileID int64 `json:"fileId"`
Reuse bool `json:"reuse"`
PreuploadID string `json:"preuploadId"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
}
type UploadUrlResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadUrlData `json:"data"`
}
type UploadUrlData struct {
PresignedURL string `json:"presignedUrl"`
}
type UploadCompleteResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadCompleteData `json:"data"`
}
type UploadCompleteData struct {
FileID int `json:"fileID"`
Completed bool `json:"completed"`
}
func (d *Open123) Request(endpoint string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
client := resty.New()
token, err := d.tm.getToken()
if err != nil {
return nil, err
}
req := client.R().
SetHeader("Authorization", "Bearer "+token).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", "application/json").
SetResult(result)
if setup != nil {
setup(req)
}
switch method {
case http.MethodGet:
return req.Get(ApiBaseURL + endpoint)
case http.MethodPost:
return req.Post(ApiBaseURL + endpoint)
case http.MethodPut:
return req.Put(ApiBaseURL + endpoint)
default:
return nil, fmt.Errorf("unsupported method: %s", method)
}
}
func (d *Open123) RequestTo(fullURL string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
client := resty.New()
token, err := d.tm.getToken()
if err != nil {
return nil, err
}
req := client.R().
SetHeader("Authorization", "Bearer "+token).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", "application/json").
SetResult(result)
if setup != nil {
setup(req)
}
switch method {
case http.MethodGet:
return req.Get(fullURL)
case http.MethodPost:
return req.Post(fullURL)
case http.MethodPut:
return req.Put(fullURL)
default:
return nil, fmt.Errorf("unsupported method: %s", method)
}
}

View File

@@ -1,294 +0,0 @@
package _123Open
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"net/http"
"strconv"
"time"
)
type Open123 struct {
model.Storage
Addition
UploadThread int
tm *tokenManager
}
func (d *Open123) Config() driver.Config {
return config
}
func (d *Open123) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Open123) Init(ctx context.Context) error {
d.tm = newTokenManager(d.ClientID, d.ClientSecret)
if _, err := d.tm.getToken(); err != nil {
return fmt.Errorf("token 初始化失败: %w", err)
}
return nil
}
func (d *Open123) Drop(ctx context.Context) error {
return nil
}
func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
parentFileId, err := strconv.ParseInt(dir.GetID(), 10, 64)
if err != nil {
return nil, err
}
fileLastId := int64(0)
var results []File
for fileLastId != -1 {
files, err := d.getFiles(parentFileId, 100, fileLastId)
if err != nil {
return nil, err
}
for _, f := range files.Data.FileList {
if f.Trashed == 0 {
results = append(results, f)
}
}
fileLastId = files.Data.LastFileId
}
objs := make([]model.Obj, 0, len(results))
for _, f := range results {
objs = append(objs, f)
}
return objs, nil
}
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if file.IsDir() {
return nil, errs.LinkIsDir
}
fileID := file.GetID()
var result DirectLinkResp
url := fmt.Sprintf("%s?fileID=%s", ApiGetDirectLink, fileID)
_, err := d.Request(url, http.MethodGet, nil, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("get link failed: %s", result.Message)
}
linkURL := result.Data.URL
if d.PrivateKey != "" {
if d.UID == 0 {
return nil, fmt.Errorf("uid is required when private key is set")
}
duration := time.Duration(d.ValidDuration)
if duration <= 0 {
duration = 30
}
signedURL, err := SignURL(linkURL, d.PrivateKey, d.UID, duration*time.Minute)
if err != nil {
return nil, err
}
linkURL = signedURL
}
return &model.Link{
URL: linkURL,
}, nil
}
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
parentID, err := strconv.ParseInt(parentDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid parent ID: %w", err)
}
var result MakeDirResp
reqBody := MakeDirRequest{
Name: dirName,
ParentID: parentID,
}
_, err = d.Request(ApiMakeDir, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("mkdir failed: %s", result.Message)
}
newDir := File{
FileId: result.Data.DirID,
FileName: dirName,
Type: 1,
ParentFileId: int(parentID),
Size: 0,
Trashed: 0,
}
return newDir, nil
}
func (d *Open123) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid src file ID: %w", err)
}
dstID, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid dest dir ID: %w", err)
}
var result Response[any]
reqBody := map[string]interface{}{
"fileIDs": []int64{srcID},
"toParentFileID": dstID,
}
_, err = d.Request(ApiMove, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("move failed: %s", result.Message)
}
files, err := d.getFiles(dstID, 100, 0)
if err != nil {
return nil, fmt.Errorf("move succeed but failed to get target dir: %w", err)
}
for _, f := range files.Data.FileList {
if f.FileId == srcID {
return f, nil
}
}
return nil, fmt.Errorf("move succeed but file not found in target dir")
}
func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid file ID: %w", err)
}
var result Response[any]
reqBody := map[string]interface{}{
"fileId": srcID,
"fileName": newName,
}
_, err = d.Request(ApiRename, http.MethodPut, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("rename failed: %s", result.Message)
}
parentID := 0
if file, ok := srcObj.(File); ok {
parentID = file.ParentFileId
}
files, err := d.getFiles(int64(parentID), 100, 0)
if err != nil {
return nil, fmt.Errorf("rename succeed but failed to get parent dir: %w", err)
}
for _, f := range files.Data.FileList {
if f.FileId == srcID {
return f, nil
}
}
return nil, fmt.Errorf("rename succeed but file not found in parent dir")
}
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return nil, errs.NotSupport
}
func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
idStr := obj.GetID()
id, err := strconv.ParseInt(idStr, 10, 64)
if err != nil {
return fmt.Errorf("invalid file ID: %w", err)
}
var result Response[any]
reqBody := RemoveRequest{
FileIDs: []int64{id},
}
_, err = d.Request(ApiRemove, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return err
}
if result.Code != 0 {
return fmt.Errorf("remove failed: %s", result.Message)
}
return nil
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return err
}
if createResp.Data.Reuse {
return nil
}
return d.Upload(ctx, file, parentFileId, createResp, up)
}
func (d *Open123) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
return nil, errs.NotSupport
}
func (d *Open123) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
func (d *Open123) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
return nil, errs.NotSupport
}
func (d *Open123) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
//func (d *Open123) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Open123)(nil)
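The removed `List` pages through results by feeding each response's `lastFileId` back into the next request, starting from 0, and stops when the server apparently signals the end with -1. A minimal sketch of that sentinel-driven loop against a fake server:

```go
package main

import "fmt"

type page struct {
	lastID int64
	items  []string
}

// fakeServer stands in for the 123 Open list API: it serves two pages,
// then marks the end of the listing with lastFileId = -1.
func fakeServer(lastID int64) page {
	switch lastID {
	case 0:
		return page{lastID: 100, items: []string{"a", "b"}}
	case 100:
		return page{lastID: -1, items: []string{"c"}}
	}
	return page{lastID: -1}
}

func main() {
	var all []string
	for last := int64(0); last != -1; {
		p := fakeServer(last)
		all = append(all, p.items...)
		last = p.lastID // cursor for the next request
	}
	fmt.Println(all) // [a b c]
}
```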

View File

@@ -1,36 +0,0 @@
package _123Open
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootID
ClientID string `json:"client_id" required:"true" label:"Client ID"`
ClientSecret string `json:"client_secret" required:"true" label:"Client Secret"`
PrivateKey string `json:"private_key"`
UID uint64 `json:"uid" type:"number"`
ValidDuration int64 `json:"valid_duration" type:"number" default:"30" help:"minutes"`
}
var config = driver.Config{
Name: "123 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Open123{}
})
}

View File

@@ -1,27 +0,0 @@
package _123Open
import (
"crypto/md5"
"fmt"
"math/rand"
"net/url"
"time"
)
func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (string, error) {
if privateKey == "" {
return originURL, nil
}
parsed, err := url.Parse(originURL)
if err != nil {
return "", err
}
ts := time.Now().Add(validDuration).Unix()
randInt := rand.Int()
signature := fmt.Sprintf("%d-%d-%d-%x", ts, randInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
parsed.Path, ts, randInt, uid, privateKey))))
query := parsed.Query()
query.Add("auth_key", signature)
parsed.RawQuery = query.Encode()
return parsed.String(), nil
}
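For reference, the removed `SignURL` builds the `auth_key` as `ts-rand-uid-md5(path-ts-rand-uid-privateKey)`. A self-contained sketch of the same layout; the key, UID, and URL here are made-up values, not real credentials:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"math/rand"
	"net/url"
	"time"
)

func main() {
	// Hypothetical inputs for illustration only.
	raw := "https://example.com/path/file.bin"
	privateKey := "demo-key"
	uid := uint64(1001)

	parsed, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	ts := time.Now().Add(30 * time.Minute).Unix() // expiry timestamp
	randInt := rand.Int()

	// Same layout as the removed SignURL: ts-rand-uid-md5(path-ts-rand-uid-key)
	sum := md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
		parsed.Path, ts, randInt, uid, privateKey)))
	signature := fmt.Sprintf("%d-%d-%d-%x", ts, randInt, uid, sum)

	q := parsed.Query()
	q.Add("auth_key", signature)
	parsed.RawQuery = q.Encode()
	fmt.Println(parsed.String())
}
```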

View File

@@ -1,85 +0,0 @@
package _123Open
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
)
const tokenURL = ApiBaseURL + ApiToken
type tokenManager struct {
clientID string
clientSecret string
mu sync.Mutex
accessToken string
expireTime time.Time
}
func newTokenManager(clientID, clientSecret string) *tokenManager {
return &tokenManager{
clientID: clientID,
clientSecret: clientSecret,
}
}
func (tm *tokenManager) getToken() (string, error) {
tm.mu.Lock()
defer tm.mu.Unlock()
if tm.accessToken != "" && time.Now().Before(tm.expireTime.Add(-5*time.Minute)) {
return tm.accessToken, nil
}
reqBody := map[string]string{
"clientID": tm.clientID,
"clientSecret": tm.clientSecret,
}
body, _ := json.Marshal(reqBody)
req, err := http.NewRequest("POST", tokenURL, bytes.NewBuffer(body))
if err != nil {
return "", err
}
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
var result TokenResp
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", err
}
if result.Code != 0 {
return "", fmt.Errorf("get token failed: %s", result.Message)
}
tm.accessToken = result.Data.AccessToken
expireAt, err := time.Parse(time.RFC3339, result.Data.ExpiredAt)
if err != nil {
return "", fmt.Errorf("parse expire time failed: %w", err)
}
tm.expireTime = expireAt
return tm.accessToken, nil
}
func (tm *tokenManager) buildHeaders() (http.Header, error) {
token, err := tm.getToken()
if err != nil {
return nil, err
}
header := http.Header{}
header.Set("Authorization", "Bearer "+token)
header.Set("Platform", "open_platform")
header.Set("Content-Type", "application/json")
return header, nil
}
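The removed `tokenManager` reuses a cached token only while the current time is more than five minutes before `expiredAt`, so refreshes happen slightly early instead of at the boundary. The predicate in isolation, with worked times:

```go
package main

import (
	"fmt"
	"time"
)

// stillFresh mirrors the cache check above: reuse the token only while
// now < expireTime - 5min, leaving a safety margin before expiry.
func stillFresh(now, expireTime time.Time) bool {
	return now.Before(expireTime.Add(-5 * time.Minute))
}

func main() {
	expire := time.Date(2025, 5, 23, 12, 0, 0, 0, time.UTC)
	fmt.Println(stillFresh(expire.Add(-10*time.Minute), expire)) // true: outside the margin
	fmt.Println(stillFresh(expire.Add(-3*time.Minute), expire))  // false: refresh now
}
```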

View File

@@ -1,70 +0,0 @@
package _123Open
import (
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
type File struct {
FileName string `json:"filename"`
Size int64 `json:"size"`
CreateAt string `json:"createAt"`
UpdateAt string `json:"updateAt"`
FileId int64 `json:"fileId"`
Type int `json:"type"`
Etag string `json:"etag"`
S3KeyFlag string `json:"s3KeyFlag"`
ParentFileId int `json:"parentFileId"`
Category int `json:"category"`
Status int `json:"status"`
Trashed int `json:"trashed"`
}
func (f File) GetID() string {
return fmt.Sprint(f.FileId)
}
func (f File) GetName() string {
return f.FileName
}
func (f File) GetSize() int64 {
return f.Size
}
func (f File) IsDir() bool {
return f.Type == 1
}
func (f File) GetModified() string {
return f.UpdateAt
}
func (f File) GetThumb() string {
return ""
}
func (f File) ModTime() time.Time {
t, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
if err != nil {
return time.Time{}
}
return t
}
func (f File) CreateTime() time.Time {
t, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
if err != nil {
return time.Time{}
}
return t
}
func (f File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, f.Etag)
}
func (f File) GetPath() string {
return ""
}

View File

@@ -1,282 +0,0 @@
package _123Open
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"golang.org/x/sync/errgroup"
"io"
"mime/multipart"
"net/http"
"runtime"
"strconv"
"time"
)
func (d *Open123) create(parentFileID int64, filename, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(ApiCreateUploadURL, http.MethodPost, func(req *resty.Request) {
body := base.Json{
"parentFileID": parentFileID,
"filename": filename,
"etag": etag,
"size": size,
}
if duplicate > 0 {
body["duplicate"] = duplicate
}
if containDir {
body["containDir"] = true
}
req.SetBody(body)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) GetUploadDomains() ([]string, error) {
var resp struct {
Code int `json:"code"`
Message string `json:"message"`
Data []string `json:"data"`
}
_, err := d.Request(ApiUploadDomainURL, http.MethodGet, nil, &resp)
if err != nil {
return nil, err
}
if resp.Code != 0 {
return nil, fmt.Errorf("get upload domain failed: %s", resp.Message)
}
return resp.Data, nil
}
func (d *Open123) UploadSingle(ctx context.Context, createResp *UploadCreateResp, file model.FileStreamer, parentID int64) error {
domain := createResp.Data.Servers[0]
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
_, _, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
}
}
reader, err := file.RangeRead(http_range.Range{Start: 0, Length: file.GetSize()})
if err != nil {
return err
}
reader = driver.NewLimitedUploadStream(ctx, reader)
var b bytes.Buffer
mw := multipart.NewWriter(&b)
mw.WriteField("parentFileID", fmt.Sprint(parentID))
mw.WriteField("filename", file.GetName())
mw.WriteField("etag", etag)
mw.WriteField("size", fmt.Sprint(file.GetSize()))
fw, _ := mw.CreateFormFile("file", file.GetName())
_, err = io.Copy(fw, reader)
mw.Close()
req, err := http.NewRequestWithContext(ctx, "POST", domain+ApiSingleUploadURL, &b)
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", mw.FormDataContentType())
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
var result struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
FileID int64 `json:"fileID"`
Completed bool `json:"completed"`
} `json:"data"`
}
body, _ := io.ReadAll(resp.Body)
if err := json.Unmarshal(body, &result); err != nil {
return fmt.Errorf("unmarshal response error: %v, body: %s", err, string(body))
}
if result.Code != 0 {
return fmt.Errorf("upload failed: %s", result.Message)
}
if !result.Data.Completed || result.Data.FileID == 0 {
return fmt.Errorf("upload incomplete or missing fileID")
}
return nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, parentID int64, createResp *UploadCreateResp, up driver.UpdateProgress) error {
if cacher, ok := file.(interface{ CacheFullInTempFile() (model.File, error) }); ok {
if _, err := cacher.CacheFullInTempFile(); err != nil {
return err
}
}
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
uploadDomain := createResp.Data.Servers[0]
if d.UploadThread <= 0 {
cpuCores := runtime.NumCPU()
threads := cpuCores * 2
if threads < 4 {
threads = 4
}
if threads > 16 {
threads = 16
}
d.UploadThread = threads
fmt.Printf("[Upload] Auto set upload concurrency: %d (CPU cores=%d)\n", d.UploadThread, cpuCores)
}
fmt.Printf("[Upload] File size: %d bytes, chunk size: %d bytes, total slices: %d, concurrency: %d\n",
size, chunkSize, uploadNums, d.UploadThread)
if size <= 1<<30 {
return d.UploadSingle(ctx, createResp, file, parentID)
}
if createResp.Data.Reuse {
up(100)
return nil
}
client := resty.New()
semaphore := make(chan struct{}, d.UploadThread)
threadG, _ := errgroup.WithContext(ctx)
var progressArr = make([]int64, uploadNums)
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
partIndex := partIndex
semaphore <- struct{}{}
threadG.Go(func() error {
defer func() { <-semaphore }()
offset := partIndex * chunkSize
length := min(chunkSize, size-offset)
partNumber := partIndex + 1
fmt.Printf("[Slice %d] Starting read from offset %d, length %d\n", partNumber, offset, length)
reader, err := file.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return fmt.Errorf("[Slice %d] RangeRead error: %v", partNumber, err)
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err != nil && err != io.EOF {
return fmt.Errorf("[Slice %d] Read error: %v", partNumber, err)
}
buf = buf[:n]
hash := md5.Sum(buf)
sliceMD5Str := hex.EncodeToString(hash[:])
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
writer.WriteField("preuploadID", createResp.Data.PreuploadID)
writer.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
writer.WriteField("sliceMD5", sliceMD5Str)
partName := fmt.Sprintf("%s.part%d", file.GetName(), partNumber)
fw, _ := writer.CreateFormFile("slice", partName)
fw.Write(buf)
writer.Close()
resp, err := client.R().
SetHeader("Authorization", "Bearer "+d.tm.accessToken).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", writer.FormDataContentType()).
SetBody(body.Bytes()).
Post(uploadDomain + ApiUploadSliceURL)
if err != nil {
return fmt.Errorf("[Slice %d] Upload HTTP error: %v", partNumber, err)
}
if resp.StatusCode() != 200 {
return fmt.Errorf("[Slice %d] Upload failed with status: %s, resp: %s", partNumber, resp.Status(), resp.String())
}
progressArr[partIndex] = length
var totalUploaded int64 = 0
for _, v := range progressArr {
totalUploaded += v
}
if up != nil {
percent := float64(totalUploaded) / float64(size) * 100
up(percent)
}
fmt.Printf("[Slice %d] MD5: %s\n", partNumber, sliceMD5Str)
fmt.Printf("[Slice %d] Upload finished\n", partNumber)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
var completeResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
for {
reqBody := fmt.Sprintf(`{"preuploadID":"%s"}`, createResp.Data.PreuploadID)
req, err := http.NewRequestWithContext(ctx, "POST", uploadDomain+ApiUploadCompleteURL, bytes.NewBufferString(reqBody))
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
if err := json.Unmarshal(body, &completeResp); err != nil {
return fmt.Errorf("completion response unmarshal error: %v, body: %s", err, string(body))
}
if completeResp.Code != 0 {
return fmt.Errorf("completion API returned error code %d: %s", completeResp.Code, completeResp.Message)
}
if completeResp.Data.Completed && completeResp.Data.FileID != 0 {
fmt.Printf("[Upload] Upload completed successfully. FileID: %d\n", completeResp.Data.FileID)
break
}
time.Sleep(time.Second)
}
up(100)
return nil
}
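The slice math above is plain ceiling division, `(size + chunkSize - 1) / chunkSize`, which avoids floating point. A tiny check of the arithmetic:

```go
package main

import "fmt"

// sliceCount mirrors uploadNums in Upload: ceiling division of the file
// size by the server-provided slice size.
func sliceCount(size, chunkSize int64) int64 {
	return (size + chunkSize - 1) / chunkSize
}

func main() {
	fmt.Println(sliceCount(100<<20, 16<<20)) // 7: a 100 MiB file at 16 MiB slices
	fmt.Println(sliceCount(32<<20, 16<<20))  // 2: exact multiples add no extra slice
}
```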

View File

@@ -1,20 +0,0 @@
package _123Open
import (
"fmt"
"net/http"
)
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
var result FileListResp
url := fmt.Sprintf("%s?parentFileId=%d&limit=%d&lastFileId=%d", ApiFileList, parentFileId, limit, lastFileId)
_, err := d.Request(url, http.MethodGet, nil, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("list error: %s", result.Message)
}
return &result, nil
}

View File

@@ -80,10 +80,9 @@ func (d *Cloud189) Link(ctx context.Context, file model.Obj, args model.LinkArgs
}
func (d *Cloud189) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
safeName := d.sanitizeName(dirName)
form := map[string]string{
"parentFolderId": parentDir.GetID(),
"folderName": safeName,
"folderName": dirName,
}
_, err := d.request("https://cloud.189.cn/api/open/file/createFolder.action", http.MethodPost, func(req *resty.Request) {
req.SetFormData(form)
@@ -127,10 +126,9 @@ func (d *Cloud189) Rename(ctx context.Context, srcObj model.Obj, newName string)
idKey = "folderId"
nameKey = "destFolderName"
}
safeName := d.sanitizeName(newName)
form := map[string]string{
idKey: srcObj.GetID(),
nameKey: safeName,
nameKey: newName,
}
_, err := d.request(url, http.MethodPost, func(req *resty.Request) {
req.SetFormData(form)

View File

@@ -9,7 +9,6 @@ type Addition struct {
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
Cookie string `json:"cookie" help:"Fill in the cookie if need captcha"`
StripEmoji bool `json:"strip_emoji" help:"Remove four-byte characters (e.g., emoji) before upload"`
driver.RootID
}

View File

@@ -11,11 +11,9 @@ import (
"io"
"math"
"net/http"
"path"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@@ -224,37 +222,13 @@ func (d *Cloud189) getFiles(fileId string) ([]model.Obj, error) {
return res, nil
}
func (d *Cloud189) sanitizeName(name string) string {
if !d.StripEmoji {
return name
}
b := strings.Builder{}
for _, r := range name {
if utf8.RuneLen(r) == 4 {
continue
}
b.WriteRune(r)
}
sanitized := b.String()
if sanitized == "" {
ext := path.Ext(name)
if ext != "" {
sanitized = "file" + ext
} else {
sanitized = "file"
}
}
return sanitized
}
func (d *Cloud189) oldUpload(dstDir model.Obj, file model.FileStreamer) error {
safeName := d.sanitizeName(file.GetName())
res, err := d.client.R().SetMultipartFormData(map[string]string{
"parentId": dstDir.GetID(),
"sessionKey": "??",
"opertype": "1",
"fname": safeName,
}).SetMultipartField("Filedata", safeName, file.GetMimetype(), file).Post("https://hb02.upload.cloud.189.cn/v1/DCIWebUploadAction")
"fname": file.GetName(),
}).SetMultipartField("Filedata", file.GetName(), file.GetMimetype(), file).Post("https://hb02.upload.cloud.189.cn/v1/DCIWebUploadAction")
if err != nil {
return err
}
@@ -339,10 +313,9 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
const DEFAULT int64 = 10485760
var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
safeName := d.sanitizeName(file.GetName())
res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
"parentFolderId": dstDir.GetID(),
"fileName": encode(safeName),
"fileName": encode(file.GetName()),
"fileSize": strconv.FormatInt(file.GetSize(), 10),
"sliceSize": strconv.FormatInt(DEFAULT, 10),
"lazyCheck": "1",

View File

@@ -205,11 +205,10 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
fullUrl += "/createFolder.action"
var newFolder Cloud189Folder
safeName := y.sanitizeName(dirName)
_, err := y.post(fullUrl, func(req *resty.Request) {
req.SetContext(ctx)
req.SetQueryParams(map[string]string{
"folderName": safeName,
"folderName": dirName,
"relativePath": "",
})
if isFamily {
@@ -226,7 +225,6 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
if err != nil {
return nil, err
}
newFolder.Name = safeName
return &newFolder, nil
}
@@ -260,29 +258,21 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
}
var newObj model.Obj
safeName := y.sanitizeName(newName)
switch f := srcObj.(type) {
case *Cloud189File:
fullUrl += "/renameFile.action"
queryParam["fileId"] = srcObj.GetID()
queryParam["destFileName"] = safeName
queryParam["destFileName"] = newName
newObj = &Cloud189File{Icon: f.Icon} // 复用预览
case *Cloud189Folder:
fullUrl += "/renameFolder.action"
queryParam["folderId"] = srcObj.GetID()
queryParam["destFolderName"] = safeName
queryParam["destFolderName"] = newName
newObj = &Cloud189Folder{}
default:
return nil, errs.NotSupport
}
switch obj := newObj.(type) {
case *Cloud189File:
obj.Name = safeName
case *Cloud189Folder:
obj.Name = safeName
}
_, err := y.request(fullUrl, method, func(req *resty.Request) {
req.SetContext(ctx).SetQueryParams(queryParam)
}, nil, newObj, isFamily)

View File

@@ -9,7 +9,6 @@ type Addition struct {
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
VCode string `json:"validate_code"`
StripEmoji bool `json:"strip_emoji" help:"Remove four-byte characters (e.g., emoji) before upload"`
driver.RootID
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@@ -12,13 +12,11 @@ import (
"net/http/cookiejar"
"net/url"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"golang.org/x/sync/semaphore"
@@ -59,29 +57,6 @@ const (
CHANNEL_ID = "web_cloud.189.cn"
)
func (y *Cloud189PC) sanitizeName(name string) string {
if !y.StripEmoji {
return name
}
b := strings.Builder{}
for _, r := range name {
if utf8.RuneLen(r) == 4 {
continue
}
b.WriteRune(r)
}
sanitized := b.String()
if sanitized == "" {
ext := path.Ext(name)
if ext != "" {
sanitized = "file" + ext
} else {
sanitized = "file"
}
}
return sanitized
}
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
dateOfGmt := getHttpDateStr()
sessionKey := y.getTokenInfo().SessionKey
@@ -349,7 +324,7 @@ func (y *Cloud189PC) login() (err error) {
_, err = y.client.R().
SetResult(&tokenInfo).SetError(&erron).
SetQueryParams(clientSuffix()).
SetQueryParam("redirectURL", loginresp.ToUrl).
SetQueryParam("redirectURL", url.QueryEscape(loginresp.ToUrl)).
Post(API_URL + "/getSessionForPC.action")
if err != nil {
return
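One detail worth flagging in this hunk: resty's `SetQueryParam` already percent-encodes values through `url.Values`, so wrapping `loginresp.ToUrl` in `url.QueryEscape` first yields a double-encoded parameter. Whether the 189 endpoint expects that is for the API to confirm; the encoding difference itself is easy to see with the standard library alone:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	to := "https://cloud.189.cn/cb?a=1&b=2" // made-up redirect target

	// What a url.Values-based client sends for the raw value:
	q := url.Values{}
	q.Set("redirectURL", to)
	fmt.Println(q.Encode()) // single-encoded: ...%3A%2F%2F...

	// And for a pre-escaped value: the '%' signs get escaped again.
	q = url.Values{}
	q.Set("redirectURL", url.QueryEscape(to))
	fmt.Println(q.Encode()) // double-encoded: ...%253A%252F%252F...
}
```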
@@ -500,11 +475,10 @@ func (y *Cloud189PC) refreshSession() (err error) {
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize()
sliceSize := partSize(size)
safeName := y.sanitizeName(file.GetName())
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(safeName),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"sliceSize": fmt.Sprint(sliceSize),
"lazyCheck": "1",
@@ -622,8 +596,7 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
return nil, errors.New("invalid hash")
}
safeName := y.sanitizeName(stream.GetName())
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, safeName, fmt.Sprint(stream.GetSize()), isFamily)
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()), isFamily)
if err != nil {
return nil, err
}
@@ -642,7 +615,6 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
tmpF *os.File
err error
)
safeName := y.sanitizeName(file.GetName())
size := file.GetSize()
if _, ok := cache.(io.ReaderAt); !ok && size > 0 {
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
@@ -725,7 +697,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
//step.2 预上传
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(safeName),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"fileMd5": fileMd5Hex,
"sliceSize": fmt.Sprint(sliceSize),
@@ -861,10 +833,9 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
return nil, err
}
rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
safeName := y.sanitizeName(file.GetName())
// 创建上传会话
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, safeName, fmt.Sprint(file.GetSize()), isFamily)
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
if err != nil {
return nil, err
}

View File

@@ -56,7 +56,7 @@ func (d *AListV3) Init(ctx context.Context) error {
if err != nil {
return err
}
if utils.SliceContains(resp.Data.Role, model.GUEST) {
if resp.Data.Role == model.GUEST {
u := d.Address + "/api/public/settings"
res, err := base.RestyClient.R().Get(u)
if err != nil {

View File

@@ -1,7 +1,6 @@
package alist_v3
import (
"encoding/json"
"time"
"github.com/alist-org/alist/v3/internal/model"
@@ -77,7 +76,7 @@ type MeResp struct {
Username string `json:"username"`
Password string `json:"password"`
BasePath string `json:"base_path"`
Role IntSlice `json:"role"`
Role int `json:"role"`
Disabled bool `json:"disabled"`
Permission int `json:"permission"`
SsoId string `json:"sso_id"`
@@ -169,17 +168,3 @@ type DecompressReq struct {
PutIntoNewDir bool `json:"put_into_new_dir"`
SrcDir string `json:"src_dir"`
}
type IntSlice []int
func (s *IntSlice) UnmarshalJSON(data []byte) error {
if len(data) > 0 && data[0] == '[' {
return json.Unmarshal(data, (*[]int)(s))
}
var single int
if err := json.Unmarshal(data, &single); err != nil {
return err
}
*s = []int{single}
return nil
}
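Reverting `Role` from `IntSlice` back to a plain `int` drops the shim that tolerated both JSON shapes, so an AList server that reports roles as an array will fail to decode. The removed type round-trips both forms; this sketch just replays it against sample payloads:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// IntSlice is the removed compatibility type: it accepts either a bare
// integer or an array of integers for the "role" field.
type IntSlice []int

func (s *IntSlice) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && data[0] == '[' {
		return json.Unmarshal(data, (*[]int)(s))
	}
	var single int
	if err := json.Unmarshal(data, &single); err != nil {
		return err
	}
	*s = []int{single}
	return nil
}

func main() {
	var a, b struct {
		Role IntSlice `json:"role"`
	}
	_ = json.Unmarshal([]byte(`{"role": 2}`), &a)     // legacy single-role servers
	_ = json.Unmarshal([]byte(`{"role": [1, 2]}`), &b) // multi-role servers
	fmt.Println(a.Role, b.Role) // [2] [1 2]
}
```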

View File

@@ -55,7 +55,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
if err != nil {
return err
}
d.DriveId = d.Addition.DeviceID
d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
d.UserID = utils.Json.Get(res, "user_id").ToString()
d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() {

View File

@@ -8,7 +8,7 @@ import (
type Addition struct {
driver.RootID
RefreshToken string `json:"refresh_token" required:"true"`
DeviceID string `json:"device_id" required:"true"`
//DeviceID string `json:"device_id" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
RapidUpload bool `json:"rapid_upload"`

View File

@@ -11,7 +11,7 @@ type Addition struct {
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
OauthTokenURL string `json:"oauth_token_url" default:"https://api.alistgo.com/alist/ali_open/token"`
OauthTokenURL string `json:"oauth_token_url" default:"https://api.nn.ci/alist/ali_open/token"`
ClientID string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
RemoveWay string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`

View File

@@ -6,7 +6,6 @@ import (
_ "github.com/alist-org/alist/v3/drivers/115_share"
_ "github.com/alist-org/alist/v3/drivers/123"
_ "github.com/alist-org/alist/v3/drivers/123_link"
_ "github.com/alist-org/alist/v3/drivers/123_open"
_ "github.com/alist-org/alist/v3/drivers/123_share"
_ "github.com/alist-org/alist/v3/drivers/139"
_ "github.com/alist-org/alist/v3/drivers/189"
@@ -21,20 +20,16 @@ import (
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
_ "github.com/alist-org/alist/v3/drivers/baidu_share"
_ "github.com/alist-org/alist/v3/drivers/bitqiu"
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
_ "github.com/alist-org/alist/v3/drivers/crypt"
_ "github.com/alist-org/alist/v3/drivers/doubao"
_ "github.com/alist-org/alist/v3/drivers/doubao_share"
_ "github.com/alist-org/alist/v3/drivers/dropbox"
_ "github.com/alist-org/alist/v3/drivers/febbox"
_ "github.com/alist-org/alist/v3/drivers/ftp"
_ "github.com/alist-org/alist/v3/drivers/gitee"
_ "github.com/alist-org/alist/v3/drivers/github"
_ "github.com/alist-org/alist/v3/drivers/github_releases"
_ "github.com/alist-org/alist/v3/drivers/gofile"
_ "github.com/alist-org/alist/v3/drivers/google_drive"
_ "github.com/alist-org/alist/v3/drivers/google_photo"
_ "github.com/alist-org/alist/v3/drivers/halalcloud"
@@ -44,7 +39,6 @@ import (
_ "github.com/alist-org/alist/v3/drivers/lanzou"
_ "github.com/alist-org/alist/v3/drivers/lenovonas_share"
_ "github.com/alist-org/alist/v3/drivers/local"
_ "github.com/alist-org/alist/v3/drivers/mediafire"
_ "github.com/alist-org/alist/v3/drivers/mediatrack"
_ "github.com/alist-org/alist/v3/drivers/mega"
_ "github.com/alist-org/alist/v3/drivers/misskey"
@@ -53,10 +47,8 @@ import (
_ "github.com/alist-org/alist/v3/drivers/onedrive"
_ "github.com/alist-org/alist/v3/drivers/onedrive_app"
_ "github.com/alist-org/alist/v3/drivers/onedrive_sharelink"
_ "github.com/alist-org/alist/v3/drivers/pcloud"
_ "github.com/alist-org/alist/v3/drivers/pikpak"
_ "github.com/alist-org/alist/v3/drivers/pikpak_share"
_ "github.com/alist-org/alist/v3/drivers/proton_drive"
_ "github.com/alist-org/alist/v3/drivers/quark_uc"
_ "github.com/alist-org/alist/v3/drivers/quark_uc_tv"
_ "github.com/alist-org/alist/v3/drivers/quqi"

View File

@@ -11,8 +11,8 @@ type Addition struct {
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
ClientID string `json:"client_id" required:"true" default:"hq9yQ9w9kR4YHj1kyYafLygVocobh7Sf"`
ClientSecret string `json:"client_secret" required:"true" default:"YH2VpZcFJHYNnV6vLfHQXDBhcE7ZChyE"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`

View File

@@ -1,767 +0,0 @@
package bitqiu
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http/cookiejar"
"path"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)
const (
baseURL = "https://pan.bitqiu.com"
loginURL = baseURL + "/loginServer/login"
userInfoURL = baseURL + "/user/getInfo"
listURL = baseURL + "/apiToken/cfi/fs/resources/pages"
uploadInitializeURL = baseURL + "/apiToken/cfi/fs/upload/v2/initialize"
uploadCompleteURL = baseURL + "/apiToken/cfi/fs/upload/v2/complete"
downloadURL = baseURL + "/download/getUrl"
createDirURL = baseURL + "/resource/create"
moveResourceURL = baseURL + "/resource/remove"
renameResourceURL = baseURL + "/resource/rename"
copyResourceURL = baseURL + "/apiToken/cfi/fs/async/copy"
copyManagerURL = baseURL + "/apiToken/cfi/fs/async/manager"
deleteResourceURL = baseURL + "/resource/delete"
successCode = "10200"
uploadSuccessCode = "30010"
copySubmittedCode = "10300"
orgChannel = "default|default|default"
)
const (
copyPollInterval = time.Second
copyPollMaxAttempts = 60
chunkSize = int64(1 << 20)
)
const defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
type BitQiu struct {
model.Storage
Addition
client *resty.Client
userID string
}
func (d *BitQiu) Config() driver.Config {
return config
}
func (d *BitQiu) GetAddition() driver.Additional {
return &d.Addition
}
func (d *BitQiu) Init(ctx context.Context) error {
if d.Addition.UserPlatform == "" {
d.Addition.UserPlatform = uuid.NewString()
op.MustSaveDriverStorage(d)
}
if d.client == nil {
jar, err := cookiejar.New(nil)
if err != nil {
return err
}
d.client = base.NewRestyClient()
d.client.SetBaseURL(baseURL)
d.client.SetCookieJar(jar)
}
d.client.SetHeader("user-agent", d.userAgent())
return d.login(ctx)
}
func (d *BitQiu) Drop(ctx context.Context) error {
d.client = nil
d.userID = ""
return nil
}
func (d *BitQiu) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
parentID := d.resolveParentID(dir)
dirPath := ""
if dir != nil {
dirPath = dir.GetPath()
}
pageSize := d.pageSize()
orderType := d.orderType()
desc := d.orderDesc()
var results []model.Obj
page := 1
for {
form := map[string]string{
"parentId": parentID,
"limit": strconv.Itoa(pageSize),
"orderType": orderType,
"desc": desc,
"model": "1",
"userId": d.userID,
"currentPage": strconv.Itoa(page),
"page": strconv.Itoa(page),
"org_channel": orgChannel,
}
var resp Response[ResourcePage]
if err := d.postForm(ctx, listURL, form, &resp); err != nil {
return nil, err
}
if resp.Code != successCode {
if resp.Code == "10401" || resp.Code == "10404" {
if err := d.login(ctx); err != nil {
return nil, err
}
continue
}
return nil, fmt.Errorf("list failed: %s", resp.Message)
}
objs, err := utils.SliceConvert(resp.Data.Data, func(item Resource) (model.Obj, error) {
return item.toObject(parentID, dirPath)
})
if err != nil {
return nil, err
}
results = append(results, objs...)
if !resp.Data.HasNext || len(resp.Data.Data) == 0 {
break
}
page++
}
return results, nil
}
func (d *BitQiu) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if file.IsDir() {
return nil, errs.NotFile
}
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
form := map[string]string{
"fileIds": file.GetID(),
"org_channel": orgChannel,
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[DownloadData]
if err := d.postForm(ctx, downloadURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
if resp.Data.URL == "" {
return nil, fmt.Errorf("empty download url returned")
}
return &model.Link{URL: resp.Data.URL}, nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("get link failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("get link failed: retry limit reached")
}
func (d *BitQiu) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
parentID := d.resolveParentID(parentDir)
parentPath := ""
if parentDir != nil {
parentPath = parentDir.GetPath()
}
form := map[string]string{
"parentId": parentID,
"name": dirName,
"org_channel": orgChannel,
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[CreateDirData]
if err := d.postForm(ctx, createDirURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
newParentID := parentID
if resp.Data.ParentID != "" {
newParentID = resp.Data.ParentID
}
name := resp.Data.Name
if name == "" {
name = dirName
}
resource := Resource{
ResourceID: resp.Data.DirID,
ResourceType: 1,
Name: name,
ParentID: newParentID,
}
obj, err := resource.toObject(newParentID, parentPath)
if err != nil {
return nil, err
}
if o, ok := obj.(*Object); ok {
o.ParentID = newParentID
}
return obj, nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("create folder failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("create folder failed: retry limit reached")
}
func (d *BitQiu) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
targetParentID := d.resolveParentID(dstDir)
form := map[string]string{
"dirIds": "",
"fileIds": "",
"parentId": targetParentID,
"org_channel": orgChannel,
}
if srcObj.IsDir() {
form["dirIds"] = srcObj.GetID()
} else {
form["fileIds"] = srcObj.GetID()
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, moveResourceURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
dstPath := ""
if dstDir != nil {
dstPath = dstDir.GetPath()
}
if setter, ok := srcObj.(model.SetPath); ok {
setter.SetPath(path.Join(dstPath, srcObj.GetName()))
}
if o, ok := srcObj.(*Object); ok {
o.ParentID = targetParentID
}
return srcObj, nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("move failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("move failed: retry limit reached")
}
func (d *BitQiu) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
form := map[string]string{
"resourceId": srcObj.GetID(),
"name": newName,
"type": "0",
"org_channel": orgChannel,
}
if srcObj.IsDir() {
form["type"] = "1"
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, renameResourceURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode:
return updateObjectName(srcObj, newName), nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("rename failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("rename failed: retry limit reached")
}
func (d *BitQiu) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
targetParentID := d.resolveParentID(dstDir)
form := map[string]string{
"dirIds": "",
"fileIds": "",
"parentId": targetParentID,
"org_channel": orgChannel,
}
if srcObj.IsDir() {
form["dirIds"] = srcObj.GetID()
} else {
form["fileIds"] = srcObj.GetID()
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, copyResourceURL, form, &resp); err != nil {
return nil, err
}
switch resp.Code {
case successCode, copySubmittedCode:
return d.waitForCopiedObject(ctx, srcObj, dstDir)
case "10401", "10404":
if err := d.login(ctx); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("copy failed: %s", resp.Message)
}
}
return nil, fmt.Errorf("copy failed: retry limit reached")
}
func (d *BitQiu) Remove(ctx context.Context, obj model.Obj) error {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return err
}
}
form := map[string]string{
"dirIds": "",
"fileIds": "",
"org_channel": orgChannel,
}
if obj.IsDir() {
form["dirIds"] = obj.GetID()
} else {
form["fileIds"] = obj.GetID()
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[any]
if err := d.postForm(ctx, deleteResourceURL, form, &resp); err != nil {
return err
}
switch resp.Code {
case successCode:
return nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return err
}
default:
return fmt.Errorf("remove failed: %s", resp.Message)
}
}
return fmt.Errorf("remove failed: retry limit reached")
}
func (d *BitQiu) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if d.userID == "" {
if err := d.login(ctx); err != nil {
return nil, err
}
}
up(0)
tmpFile, md5sum, err := streamPkg.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return nil, err
}
defer tmpFile.Close()
parentID := d.resolveParentID(dstDir)
parentPath := ""
if dstDir != nil {
parentPath = dstDir.GetPath()
}
form := map[string]string{
"parentId": parentID,
"name": file.GetName(),
"size": strconv.FormatInt(file.GetSize(), 10),
"hash": md5sum,
"sampleMd5": md5sum,
"org_channel": orgChannel,
}
var resp Response[json.RawMessage]
if err = d.postForm(ctx, uploadInitializeURL, form, &resp); err != nil {
return nil, err
}
if resp.Code != uploadSuccessCode {
switch resp.Code {
case successCode:
var initData UploadInitData
if err := json.Unmarshal(resp.Data, &initData); err != nil {
return nil, fmt.Errorf("parse upload init response failed: %w", err)
}
serverCode, err := d.uploadFileInChunks(ctx, tmpFile, file.GetSize(), md5sum, initData, up)
if err != nil {
return nil, err
}
obj, err := d.completeChunkUpload(ctx, initData, parentID, parentPath, file.GetName(), file.GetSize(), md5sum, serverCode)
if err != nil {
return nil, err
}
up(100)
return obj, nil
default:
return nil, fmt.Errorf("upload failed: %s", resp.Message)
}
}
var resource Resource
if err := json.Unmarshal(resp.Data, &resource); err != nil {
return nil, fmt.Errorf("parse upload response failed: %w", err)
}
obj, err := resource.toObject(parentID, parentPath)
if err != nil {
return nil, err
}
up(100)
return obj, nil
}
func (d *BitQiu) uploadFileInChunks(ctx context.Context, tmpFile model.File, size int64, md5sum string, initData UploadInitData, up driver.UpdateProgress) (string, error) {
if d.client == nil {
return "", fmt.Errorf("client not initialized")
}
if size <= 0 {
return "", fmt.Errorf("invalid file size")
}
buf := make([]byte, chunkSize)
offset := int64(0)
var finishedFlag string
for offset < size {
chunkLen := chunkSize
remaining := size - offset
if remaining < chunkLen {
chunkLen = remaining
}
reader := io.NewSectionReader(tmpFile, offset, chunkLen)
chunkBuf := buf[:chunkLen]
if _, err := io.ReadFull(reader, chunkBuf); err != nil {
return "", fmt.Errorf("read chunk failed: %w", err)
}
headers := map[string]string{
"accept": "*/*",
"content-type": "application/octet-stream",
"appid": initData.AppID,
"token": initData.Token,
"userid": strconv.FormatInt(initData.UserID, 10),
"serialnumber": initData.SerialNumber,
"hash": md5sum,
"len": strconv.FormatInt(chunkLen, 10),
"offset": strconv.FormatInt(offset, 10),
"user-agent": d.userAgent(),
}
var chunkResp ChunkUploadResponse
req := d.client.R().
SetContext(ctx).
SetHeaders(headers).
SetBody(chunkBuf).
SetResult(&chunkResp)
if _, err := req.Post(initData.UploadURL); err != nil {
return "", err
}
if chunkResp.ErrCode != 0 {
return "", fmt.Errorf("chunk upload failed with code %d", chunkResp.ErrCode)
}
finishedFlag = chunkResp.FinishedFlag
offset += chunkLen
up(float64(offset) * 100 / float64(size))
}
if finishedFlag == "" {
return "", fmt.Errorf("upload finished without server code")
}
return finishedFlag, nil
}
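
For a concrete sense of the per-chunk headers above, here is how a hypothetical 2.5 MiB file splits under the 1 MiB chunkSize (offsets and lengths in bytes):

// chunk 0: offset=0,       len=1048576
// chunk 1: offset=1048576, len=1048576
// chunk 2: offset=2097152, len=524288  (the last chunk's response carries finishedFlag)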
func (d *BitQiu) completeChunkUpload(ctx context.Context, initData UploadInitData, parentID, parentPath, name string, size int64, md5sum, serverCode string) (model.Obj, error) {
form := map[string]string{
"currentPage": "1",
"limit": "1",
"userId": strconv.FormatInt(initData.UserID, 10),
"status": "0",
"parentId": parentID,
"name": name,
"fileUid": initData.FileUID,
"fileSid": initData.FileSID,
"size": strconv.FormatInt(size, 10),
"serverCode": serverCode,
"snapTime": "",
"hash": md5sum,
"sampleMd5": md5sum,
"org_channel": orgChannel,
}
var resp Response[Resource]
if err := d.postForm(ctx, uploadCompleteURL, form, &resp); err != nil {
return nil, err
}
if resp.Code != successCode {
return nil, fmt.Errorf("complete upload failed: %s", resp.Message)
}
return resp.Data.toObject(parentID, parentPath)
}
func (d *BitQiu) login(ctx context.Context) error {
if d.client == nil {
return fmt.Errorf("client not initialized")
}
form := map[string]string{
"passport": d.Username,
"password": utils.GetMD5EncodeStr(d.Password),
"remember": "0",
"captcha": "",
"org_channel": orgChannel,
}
var resp Response[LoginData]
if err := d.postForm(ctx, loginURL, form, &resp); err != nil {
return err
}
if resp.Code != successCode {
return fmt.Errorf("login failed: %s", resp.Message)
}
d.userID = strconv.FormatInt(resp.Data.UserID, 10)
return d.ensureRootFolderID(ctx)
}
func (d *BitQiu) ensureRootFolderID(ctx context.Context) error {
rootID := d.Addition.GetRootId()
if rootID != "" && rootID != "0" {
return nil
}
form := map[string]string{
"org_channel": orgChannel,
}
var resp Response[UserInfoData]
if err := d.postForm(ctx, userInfoURL, form, &resp); err != nil {
return err
}
if resp.Code != successCode {
return fmt.Errorf("get user info failed: %s", resp.Message)
}
if resp.Data.RootDirID == "" {
return fmt.Errorf("get user info failed: empty root dir id")
}
if d.Addition.RootFolderID != resp.Data.RootDirID {
d.Addition.RootFolderID = resp.Data.RootDirID
op.MustSaveDriverStorage(d)
}
return nil
}
func (d *BitQiu) postForm(ctx context.Context, url string, form map[string]string, result interface{}) error {
if d.client == nil {
return fmt.Errorf("client not initialized")
}
req := d.client.R().
SetContext(ctx).
SetHeaders(d.commonHeaders()).
SetFormData(form)
if result != nil {
req = req.SetResult(result)
}
_, err := req.Post(url)
return err
}
func (d *BitQiu) waitForCopiedObject(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
expectedName := srcObj.GetName()
expectedIsDir := srcObj.IsDir()
var lastListErr error
for attempt := 0; attempt < copyPollMaxAttempts; attempt++ {
if attempt > 0 {
if err := waitWithContext(ctx, copyPollInterval); err != nil {
return nil, err
}
}
if err := d.checkCopyFailure(ctx); err != nil {
return nil, err
}
obj, err := d.findObjectInDir(ctx, dstDir, expectedName, expectedIsDir)
if err != nil {
lastListErr = err
continue
}
if obj != nil {
return obj, nil
}
}
if lastListErr != nil {
return nil, lastListErr
}
return nil, fmt.Errorf("copy task timed out waiting for completion")
}
func (d *BitQiu) checkCopyFailure(ctx context.Context) error {
form := map[string]string{
"org_channel": orgChannel,
}
for attempt := 0; attempt < 2; attempt++ {
var resp Response[AsyncManagerData]
if err := d.postForm(ctx, copyManagerURL, form, &resp); err != nil {
return err
}
switch resp.Code {
case successCode:
if len(resp.Data.FailTasks) > 0 {
return fmt.Errorf("copy failed: %s", resp.Data.FailTasks[0].ErrorMessage())
}
return nil
case "10401", "10404":
if err := d.login(ctx); err != nil {
return err
}
default:
return fmt.Errorf("query copy status failed: %s", resp.Message)
}
}
return fmt.Errorf("query copy status failed: retry limit reached")
}
func (d *BitQiu) findObjectInDir(ctx context.Context, dir model.Obj, name string, isDir bool) (model.Obj, error) {
objs, err := d.List(ctx, dir, model.ListArgs{})
if err != nil {
return nil, err
}
for _, obj := range objs {
if obj.GetName() == name && obj.IsDir() == isDir {
return obj, nil
}
}
return nil, nil
}
func waitWithContext(ctx context.Context, d time.Duration) error {
timer := time.NewTimer(d)
defer timer.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
return nil
}
}
func (d *BitQiu) commonHeaders() map[string]string {
headers := map[string]string{
"accept": "application/json, text/plain, */*",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"pragma": "no-cache",
"user-platform": d.Addition.UserPlatform,
"x-kl-saas-ajax-request": "Ajax_Request",
"x-requested-with": "XMLHttpRequest",
"referer": baseURL + "/",
"origin": baseURL,
"user-agent": d.userAgent(),
}
return headers
}
func (d *BitQiu) userAgent() string {
if ua := strings.TrimSpace(d.Addition.UserAgent); ua != "" {
return ua
}
return defaultUserAgent
}
func (d *BitQiu) resolveParentID(dir model.Obj) string {
if dir != nil && dir.GetID() != "" {
return dir.GetID()
}
if root := d.Addition.GetRootId(); root != "" {
return root
}
return config.DefaultRoot
}
func (d *BitQiu) pageSize() int {
if size, err := strconv.Atoi(d.Addition.PageSize); err == nil && size > 0 {
return size
}
return 24
}
func (d *BitQiu) orderType() string {
if d.Addition.OrderType != "" {
return d.Addition.OrderType
}
return "updateTime"
}
func (d *BitQiu) orderDesc() string {
if d.Addition.OrderDesc {
return "1"
}
return "0"
}
var _ driver.Driver = (*BitQiu)(nil)
var _ driver.PutResult = (*BitQiu)(nil)
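
Every operation above repeats the same recover-and-retry shape around the session-expiry codes "10401"/"10404". A minimal generalized sketch of that pattern (withAuthRetry is a hypothetical helper, not part of this driver):

func (d *BitQiu) withAuthRetry(ctx context.Context, call func() (string, error)) error {
	for attempt := 0; attempt < 2; attempt++ {
		code, err := call() // issue the API request and report its response code
		if err != nil {
			return err
		}
		switch code {
		case successCode:
			return nil
		case "10401", "10404": // session expired: re-login, then retry once
			if err := d.login(ctx); err != nil {
				return err
			}
		default:
			return fmt.Errorf("request failed with code %s", code)
		}
	}
	return fmt.Errorf("retry limit reached")
}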

View File

@@ -1,28 +0,0 @@
package bitqiu
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootID
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
UserPlatform string `json:"user_platform" help:"Optional device identifier; auto-generated if empty."`
OrderType string `json:"order_type" type:"select" options:"updateTime,createTime,name,size" default:"updateTime"`
OrderDesc bool `json:"order_desc"`
PageSize string `json:"page_size" default:"24" help:"Number of entries to request per page."`
UserAgent string `json:"user_agent" default:"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"`
}
var config = driver.Config{
Name: "BitQiu",
DefaultRoot: "0",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &BitQiu{}
})
}

View File

@@ -1,107 +0,0 @@
package bitqiu
import "encoding/json"
type Response[T any] struct {
Code string `json:"code"`
Message string `json:"message"`
Data T `json:"data"`
}
type LoginData struct {
UserID int64 `json:"userId"`
}
type ResourcePage struct {
CurrentPage int `json:"currentPage"`
PageSize int `json:"pageSize"`
TotalCount int `json:"totalCount"`
TotalPageCount int `json:"totalPageCount"`
Data []Resource `json:"data"`
HasNext bool `json:"hasNext"`
}
type Resource struct {
ResourceID string `json:"resourceId"`
ResourceUID string `json:"resourceUid"`
ResourceType int `json:"resourceType"`
ParentID string `json:"parentId"`
Name string `json:"name"`
ExtName string `json:"extName"`
Size *json.Number `json:"size"`
CreateTime *string `json:"createTime"`
UpdateTime *string `json:"updateTime"`
FileMD5 string `json:"fileMd5"`
}
type DownloadData struct {
URL string `json:"url"`
MD5 string `json:"md5"`
Size int64 `json:"size"`
}
type UserInfoData struct {
RootDirID string `json:"rootDirId"`
}
type CreateDirData struct {
DirID string `json:"dirId"`
Name string `json:"name"`
ParentID string `json:"parentId"`
}
type AsyncManagerData struct {
WaitTasks []AsyncTask `json:"waitTaskList"`
RunningTasks []AsyncTask `json:"runningTaskList"`
SuccessTasks []AsyncTask `json:"successTaskList"`
FailTasks []AsyncTask `json:"failTaskList"`
TaskList []AsyncTask `json:"taskList"`
}
type AsyncTask struct {
TaskID string `json:"taskId"`
Status int `json:"status"`
ErrorMsg string `json:"errorMsg"`
Message string `json:"message"`
Result *AsyncTaskInfo `json:"result"`
TargetName string `json:"targetName"`
TargetDirID string `json:"parentId"`
}
type AsyncTaskInfo struct {
Resource Resource `json:"resource"`
DirID string `json:"dirId"`
FileID string `json:"fileId"`
Name string `json:"name"`
ParentID string `json:"parentId"`
}
func (t AsyncTask) ErrorMessage() string {
if t.ErrorMsg != "" {
return t.ErrorMsg
}
if t.Message != "" {
return t.Message
}
return "unknown error"
}
type UploadInitData struct {
Name string `json:"name"`
Size int64 `json:"size"`
Token string `json:"token"`
FileUID string `json:"fileUid"`
FileSID string `json:"fileSid"`
ParentID string `json:"parentId"`
UserID int64 `json:"userId"`
SerialNumber string `json:"serialNumber"`
UploadURL string `json:"uploadUrl"`
AppID string `json:"appId"`
}
type ChunkUploadResponse struct {
ErrCode int `json:"errCode"`
Offset int64 `json:"offset"`
Finished int `json:"finished"`
FinishedFlag string `json:"finishedFlag"`
}
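
A minimal sketch of how this generic envelope decodes (payload invented for illustration):

var resp Response[LoginData]
raw := []byte(`{"code":"10200","message":"ok","data":{"userId":42}}`)
if err := json.Unmarshal(raw, &resp); err == nil {
	// resp.Code == "10200", resp.Data.UserID == 42
}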

View File

@@ -1,102 +0,0 @@
package bitqiu
import (
"path"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
)
type Object struct {
model.Object
ParentID string
}
func (r Resource) toObject(parentID, parentPath string) (model.Obj, error) {
id := r.ResourceID
if id == "" {
id = r.ResourceUID
}
obj := &Object{
Object: model.Object{
ID: id,
Name: r.Name,
IsFolder: r.ResourceType == 1,
},
ParentID: parentID,
}
if r.Size != nil {
if size, err := (*r.Size).Int64(); err == nil {
obj.Size = size
}
}
if ct := parseBitQiuTime(r.CreateTime); !ct.IsZero() {
obj.Ctime = ct
}
if mt := parseBitQiuTime(r.UpdateTime); !mt.IsZero() {
obj.Modified = mt
}
if r.FileMD5 != "" {
obj.HashInfo = utils.NewHashInfo(utils.MD5, strings.ToLower(r.FileMD5))
}
obj.SetPath(path.Join(parentPath, obj.Name))
return obj, nil
}
func parseBitQiuTime(value *string) time.Time {
if value == nil {
return time.Time{}
}
trimmed := strings.TrimSpace(*value)
if trimmed == "" {
return time.Time{}
}
if ts, err := time.ParseInLocation("2006-01-02 15:04:05", trimmed, time.Local); err == nil {
return ts
}
return time.Time{}
}
func updateObjectName(obj model.Obj, newName string) model.Obj {
newPath := path.Join(parentPathOf(obj.GetPath()), newName)
switch o := obj.(type) {
case *Object:
o.Name = newName
o.Object.Name = newName
o.SetPath(newPath)
return o
case *model.Object:
o.Name = newName
o.SetPath(newPath)
return o
}
if setter, ok := obj.(model.SetPath); ok {
setter.SetPath(newPath)
}
return &model.Object{
ID: obj.GetID(),
Path: newPath,
Name: newName,
Size: obj.GetSize(),
Modified: obj.ModTime(),
Ctime: obj.CreateTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}
}
func parentPathOf(p string) string {
if p == "" {
return ""
}
dir := path.Dir(p)
if dir == "." {
return ""
}
return dir
}
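
For illustration (hypothetical inputs), the helpers above behave as follows:

// parseBitQiuTime accepts only "2006-01-02 15:04:05" in local time; anything else yields time.Time{}.
// parentPathOf("/docs/a.txt") == "/docs"; parentPathOf("a.txt") == "" (path.Dir returns "." for bare names)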

View File

@@ -18,7 +18,6 @@ import (
type Cloudreve struct {
model.Storage
Addition
ref *Cloudreve
}
func (d *Cloudreve) Config() driver.Config {
@@ -38,18 +37,8 @@ func (d *Cloudreve) Init(ctx context.Context) error {
return d.login()
}
func (d *Cloudreve) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*Cloudreve)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (d *Cloudreve) Drop(ctx context.Context) error {
d.Cookie = ""
d.ref = nil
return nil
}

View File

@@ -4,14 +4,12 @@ import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
@@ -21,6 +19,7 @@ import (
"github.com/alist-org/alist/v3/pkg/cookie"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
json "github.com/json-iterator/go"
jsoniter "github.com/json-iterator/go"
)
@@ -36,9 +35,6 @@ func (d *Cloudreve) getUA() string {
}
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
u := d.Address + "/api/v3" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
@@ -83,11 +79,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac
}
if out != nil && r.Data != nil {
var marshal []byte
marshal, err = jsoniter.Marshal(r.Data)
marshal, err = json.Marshal(r.Data)
if err != nil {
return err
}
err = jsoniter.Unmarshal(marshal, out)
err = json.Unmarshal(marshal, out)
if err != nil {
return err
}
@@ -191,9 +187,12 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
@@ -206,26 +205,9 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetHeader("User-Agent", d.getUA())
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req.AddRetryCondition(func(r *resty.Response, err error) bool {
if err != nil {
return true
}
if r.IsError() {
return true
}
var retryResp Resp
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
if jErr != nil {
return true
}
if retryResp.Code != 0 {
return true
}
return false
})
}, nil)
if err != nil {
return err
break
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
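Either way this hunk computes the same clamp; assuming Go 1.21+ for the built-in min:

byteSize := min(left, DEFAULT) // equivalent to: byteSize = DEFAULT; if left < DEFAULT { byteSize = left }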
@@ -240,15 +222,16 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
@@ -265,43 +248,14 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
}
return nil
}
@@ -310,15 +264,16 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
@@ -340,31 +295,22 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
data, _ := io.ReadAll(res.Body)
res.Body.Close()
_ = res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
}
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
}
// send the callback request after a successful upload
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
if err != nil {
return err
}
return nil
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
@@ -372,15 +318,16 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
@@ -399,27 +346,11 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("faild to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
_ = res.Body.Close()
etags = append(etags, res.Header.Get("ETag"))
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252

View File

@@ -1,305 +0,0 @@
package cloudreve_v4
import (
"context"
"errors"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
type CloudreveV4 struct {
model.Storage
Addition
ref *CloudreveV4
}
func (d *CloudreveV4) Config() driver.Config {
if d.ref != nil {
return d.ref.Config()
}
if d.EnableVersionUpload {
config.NoOverwriteUpload = false
}
return config
}
func (d *CloudreveV4) GetAddition() driver.Additional {
return &d.Addition
}
func (d *CloudreveV4) Init(ctx context.Context) error {
// removing trailing slash
d.Address = strings.TrimSuffix(d.Address, "/")
op.MustSaveDriverStorage(d)
if d.ref != nil {
return nil
}
if d.AccessToken == "" && d.RefreshToken != "" {
return d.refreshToken()
}
if d.Username != "" {
return d.login()
}
return nil
}
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*CloudreveV4)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (d *CloudreveV4) Drop(ctx context.Context) error {
d.ref = nil
return nil
}
func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
const pageSize int = 100
var f []File
var r FileResp
params := map[string]string{
"page_size": strconv.Itoa(pageSize),
"uri": dir.GetPath(),
"order_by": d.OrderBy,
"order_direction": d.OrderDirection,
"page": "0",
}
for {
err := d.request(http.MethodGet, "/file", func(req *resty.Request) {
req.SetQueryParams(params)
}, &r)
if err != nil {
return nil, err
}
f = append(f, r.Files...)
if r.Pagination.NextToken == "" || len(r.Files) < pageSize {
break
}
params["next_page_token"] = r.Pagination.NextToken
}
return utils.SliceConvert(f, func(src File) (model.Obj, error) {
if d.EnableFolderSize && src.Type == 1 {
var ds FolderSummaryResp
err := d.request(http.MethodGet, "/file/info", func(req *resty.Request) {
req.SetQueryParam("uri", src.Path)
req.SetQueryParam("folder_summary", "true")
}, &ds)
if err == nil && ds.FolderSummary.Size > 0 {
src.Size = ds.FolderSummary.Size
}
}
var thumb model.Thumbnail
if d.EnableThumb && src.Type == 0 {
var t FileThumbResp
err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) {
req.SetQueryParam("uri", src.Path)
}, &t)
if err == nil && t.URL != "" {
thumb = model.Thumbnail{
Thumbnail: t.URL,
}
}
}
return &model.ObjThumb{
Object: model.Object{
ID: src.ID,
Path: src.Path,
Name: src.Name,
Size: src.Size,
Modified: src.UpdatedAt,
Ctime: src.CreatedAt,
IsFolder: src.Type == 1,
},
Thumbnail: thumb,
}, nil
})
}
func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var url FileUrlResp
err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{file.GetPath()},
"download": true,
})
}, &url)
if err != nil {
return nil, err
}
if len(url.Urls) == 0 {
return nil, errors.New("server returns no url")
}
exp := time.Until(url.Expires)
return &model.Link{
URL: url.Urls[0].URL,
Expiration: &exp,
}, nil
}
func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"type": "folder",
"uri": parentDir.GetPath() + "/" + dirName,
"error_on_conflict": true,
})
}, nil)
}
func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{srcObj.GetPath()},
"dst": dstDir.GetPath(),
"copy": false,
})
}, nil)
}
func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"new_name": newName,
"uri": srcObj.GetPath(),
})
}, nil)
}
func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{srcObj.GetPath()},
"dst": dstDir.GetPath(),
"copy": true,
})
}, nil)
}
func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error {
return d.request(http.MethodDelete, "/file", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{obj.GetPath()},
"unlink": false,
"skip_soft_delete": true,
})
}, nil)
}
func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
if file.GetSize() == 0 {
// empty file: create it via the file-create API to avoid a stuck upload session
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"type": "file",
"uri": dstDir.GetPath() + "/" + file.GetName(),
"error_on_conflict": true,
})
}, nil)
}
var p StoragePolicy
var r FileResp
var u FileUploadResp
var err error
params := map[string]string{
"page_size": "10",
"uri": dstDir.GetPath(),
"order_by": "created_at",
"order_direction": "asc",
"page": "0",
}
err = d.request(http.MethodGet, "/file", func(req *resty.Request) {
req.SetQueryParams(params)
}, &r)
if err != nil {
return err
}
p = r.StoragePolicy
body := base.Json{
"uri": dstDir.GetPath() + "/" + file.GetName(),
"size": file.GetSize(),
"policy_id": p.ID,
"last_modified": file.ModTime().UnixMilli(),
"mime_type": "",
}
if d.EnableVersionUpload {
body["entity_type"] = "version"
}
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
req.SetBody(body)
}, &u)
if err != nil {
return err
}
if u.StoragePolicy.Relay {
err = d.upLocal(ctx, file, u, up)
} else {
switch u.StoragePolicy.Type {
case "local":
err = d.upLocal(ctx, file, u, up)
case "remote":
err = d.upRemote(ctx, file, u, up)
case "onedrive":
err = d.upOneDrive(ctx, file, u, up)
case "s3":
err = d.upS3(ctx, file, u, up)
default:
return errs.NotImplement
}
}
if err != nil {
// clean up the failed upload session
_ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) {
req.SetBody(base.Json{
"id": u.SessionID,
"uri": u.URI,
})
}, nil)
return err
}
return nil
}
func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*CloudreveV4)(nil)
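
Put above follows a create-session/upload/cleanup flow; sketched end to end (endpoints as used in this file):

// 1. GET  /file?uri=<dstDir>  -> FileResp.StoragePolicy for the target directory
// 2. PUT  /file/upload        -> FileUploadResp: SessionID, ChunkSize, StoragePolicy
// 3. chunked upload via upLocal / upRemote / upOneDrive / upS3, chosen by StoragePolicy.Type
// 4. on any upload error: DELETE /file/upload with {id: SessionID, uri: URI} to discard the session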

View File

@@ -1,44 +0,0 @@
package cloudreve_v4
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootPath
// driver.RootID
// define other
Address string `json:"address" required:"true"`
Username string `json:"username"`
Password string `json:"password"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
CustomUA string `json:"custom_ua"`
EnableFolderSize bool `json:"enable_folder_size"`
EnableThumb bool `json:"enable_thumb"`
EnableVersionUpload bool `json:"enable_version_upload"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"`
}
var config = driver.Config{
Name: "Cloudreve V4",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "cloudreve://my",
CheckStatus: true,
Alert: "",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &CloudreveV4{}
})
}

View File

@@ -1,164 +0,0 @@
package cloudreve_v4
import (
"time"
"github.com/alist-org/alist/v3/internal/model"
)
type Object struct {
model.Object
StoragePolicy StoragePolicy
}
type Resp struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data any `json:"data"`
}
type BasicConfigResp struct {
InstanceID string `json:"instance_id"`
// Title string `json:"title"`
// Themes string `json:"themes"`
// DefaultTheme string `json:"default_theme"`
User struct {
ID string `json:"id"`
// Nickname string `json:"nickname"`
// CreatedAt time.Time `json:"created_at"`
// Anonymous bool `json:"anonymous"`
Group struct {
ID string `json:"id"`
Name string `json:"name"`
Permission string `json:"permission"`
} `json:"group"`
} `json:"user"`
// Logo string `json:"logo"`
// LogoLight string `json:"logo_light"`
// CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"`
CaptchaType string `json:"captcha_type"` // support 'normal' only
// AppPromotion bool `json:"app_promotion"`
}
type SiteLoginConfigResp struct {
LoginCaptcha bool `json:"login_captcha"`
Authn bool `json:"authn"`
}
type PrepareLoginResp struct {
WebauthnEnabled bool `json:"webauthn_enabled"`
PasswordEnabled bool `json:"password_enabled"`
}
type CaptchaResp struct {
Image string `json:"image"`
Ticket string `json:"ticket"`
}
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
AccessExpires time.Time `json:"access_expires"`
RefreshExpires time.Time `json:"refresh_expires"`
}
type TokenResponse struct {
User struct {
ID string `json:"id"`
// Email string `json:"email"`
// Nickname string `json:"nickname"`
Status string `json:"status"`
// CreatedAt time.Time `json:"created_at"`
Group struct {
ID string `json:"id"`
Name string `json:"name"`
Permission string `json:"permission"`
// DirectLinkBatchSize int `json:"direct_link_batch_size"`
// TrashRetention int `json:"trash_retention"`
} `json:"group"`
// Language string `json:"language"`
} `json:"user"`
Token Token `json:"token"`
}
type File struct {
Type int `json:"type"` // 0: file, 1: folder
ID string `json:"id"`
Name string `json:"name"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
Size int64 `json:"size"`
Metadata interface{} `json:"metadata"`
Path string `json:"path"`
Capability string `json:"capability"`
Owned bool `json:"owned"`
PrimaryEntity string `json:"primary_entity"`
}
type StoragePolicy struct {
ID string `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
MaxSize int64 `json:"max_size"`
Relay bool `json:"relay,omitempty"`
}
type Pagination struct {
Page int `json:"page"`
PageSize int `json:"page_size"`
IsCursor bool `json:"is_cursor"`
NextToken string `json:"next_token,omitempty"`
}
type Props struct {
Capability string `json:"capability"`
MaxPageSize int `json:"max_page_size"`
OrderByOptions []string `json:"order_by_options"`
OrderDirectionOptions []string `json:"order_direction_options"`
}
type FileResp struct {
Files []File `json:"files"`
Parent File `json:"parent"`
Pagination Pagination `json:"pagination"`
Props Props `json:"props"`
ContextHint string `json:"context_hint"`
MixedType bool `json:"mixed_type"`
StoragePolicy StoragePolicy `json:"storage_policy"`
}
type FileUrlResp struct {
Urls []struct {
URL string `json:"url"`
} `json:"urls"`
Expires time.Time `json:"expires"`
}
type FileUploadResp struct {
// UploadID string `json:"upload_id"`
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"`
Expires int64 `json:"expires"`
StoragePolicy StoragePolicy `json:"storage_policy"`
URI string `json:"uri"`
CompleteURL string `json:"completeURL,omitempty"` // for S3-like
CallbackSecret string `json:"callback_secret,omitempty"` // for S3-like, OneDrive
UploadUrls []string `json:"upload_urls,omitempty"` // for not-local
Credential string `json:"credential,omitempty"` // for local
}
type FileThumbResp struct {
URL string `json:"url"`
Expires time.Time `json:"expires"`
}
type FolderSummaryResp struct {
File
FolderSummary struct {
Size int64 `json:"size"`
Files int64 `json:"files"`
Folders int64 `json:"folders"`
Completed bool `json:"completed"`
CalculatedAt time.Time `json:"calculated_at"`
} `json:"folder_summary"`
}

View File

@@ -1,476 +0,0 @@
package cloudreve_v4
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
// do others that not defined in Driver interface
func (d *CloudreveV4) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
}
return base.UserAgent
}
func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
u := d.Address + "/api/v4" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Accept": "application/json, text/plain, */*",
"User-Agent": d.getUA(),
})
if d.AccessToken != "" {
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
}
var r Resp
req.SetResult(&r)
if callback != nil {
callback(req)
}
resp, err := req.Execute(method, u)
if err != nil {
return err
}
if !resp.IsSuccess() {
return errors.New(resp.String())
}
if r.Code != 0 {
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
// try to refresh token
err = d.refreshToken()
if err != nil {
return err
}
return d.request(method, path, callback, out)
}
return errors.New(r.Msg)
}
if out != nil && r.Data != nil {
var marshal []byte
marshal, err = json.Marshal(r.Data)
if err != nil {
return err
}
err = json.Unmarshal(marshal, out)
if err != nil {
return err
}
}
return nil
}
func (d *CloudreveV4) login() error {
var siteConfig SiteLoginConfigResp
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
if err != nil {
return err
}
if !siteConfig.Authn {
return errors.New("authn not support")
}
var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
return err
}
if !prepareLogin.PasswordEnabled {
return errors.New("password not enabled")
}
if prepareLogin.WebauthnEnabled {
return errors.New("webauthn not support")
}
for range 5 {
err = d.doLogin(siteConfig.LoginCaptcha)
if err == nil {
break
}
if err.Error() != "CAPTCHA not match." {
break
}
}
return err
}
func (d *CloudreveV4) doLogin(needCaptcha bool) error {
var err error
loginBody := base.Json{
"email": d.Username,
"password": d.Password,
}
if needCaptcha {
var config BasicConfigResp
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
if err != nil {
return err
}
if config.CaptchaType != "normal" {
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
}
var captcha CaptchaResp
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
if err != nil {
return err
}
if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") {
return errors.New("can not get captcha")
}
loginBody["ticket"] = captcha.Ticket
i := strings.Index(captcha.Image, ",")
dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:]))
vRes, err := base.RestyClient.R().SetMultipartField(
"image", "validateCode.png", "image/png", dec).
Post(setting.GetStr(conf.OcrApi))
if err != nil {
return err
}
if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 {
return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString())
}
captchaCode := jsoniter.Get(vRes.Body(), "result").ToString()
if captchaCode == "" {
return errors.New("ocr error: empty result")
}
loginBody["captcha"] = captchaCode
}
var token TokenResponse
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
req.SetBody(loginBody)
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) refreshToken() error {
var token Token
if token.RefreshToken == "" {
if d.Username != "" {
err := d.login()
if err != nil {
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
}
}
return nil
}
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
req.SetBody(base.Json{
"refresh_token": d.RefreshToken,
})
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
if DEFAULT == 0 {
// support relay
DEFAULT = file.GetSize()
}
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetContentLength(true)
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req.AddRetryCondition(func(r *resty.Response, err error) bool {
if err != nil {
return true
}
if r.IsError() {
return true
}
var retryResp Resp
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
if jErr != nil {
return true
}
if retryResp.Code != 0 {
return true
}
return false
})
}, nil)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
return nil
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
}
return nil
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
}
}
// send the callback request after a successful upload
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}
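
For a concrete sense of the Content-Range header above (hypothetical 5 MiB file, 1 MiB chunks):

// chunk 0: Content-Range: bytes 0-1048575/5242880
// chunk 1: Content-Range: bytes 1048576-2097151/5242880
// ...
// chunk 4: Content-Range: bytes 4194304-5242879/5242880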
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("faild to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
bodyBuilder := &strings.Builder{}
bodyBuilder.WriteString("<CompleteMultipartUpload>")
for i, etag := range etags {
bodyBuilder.WriteString(fmt.Sprintf(
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
i+1, // PartNumber is 1-based
etag,
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/xml")
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
}
// send the callback request after a successful upload
return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}
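
With two hypothetical parts, the completion body built above serializes to standard S3 CompleteMultipartUpload XML:

<CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part><Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part></CompleteMultipartUpload>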

View File

@@ -1,224 +0,0 @@
package gitee
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
stdpath "path"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
type Gitee struct {
model.Storage
Addition
client *resty.Client
}
func (d *Gitee) Config() driver.Config {
return config
}
func (d *Gitee) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Gitee) Init(ctx context.Context) error {
d.RootFolderPath = utils.FixAndCleanPath(d.RootFolderPath)
d.Endpoint = strings.TrimSpace(d.Endpoint)
if d.Endpoint == "" {
d.Endpoint = "https://gitee.com/api/v5"
}
d.Endpoint = strings.TrimSuffix(d.Endpoint, "/")
d.Owner = strings.TrimSpace(d.Owner)
d.Repo = strings.TrimSpace(d.Repo)
d.Token = strings.TrimSpace(d.Token)
d.DownloadProxy = strings.TrimSpace(d.DownloadProxy)
if d.Owner == "" || d.Repo == "" {
return errors.New("owner and repo are required")
}
d.client = base.NewRestyClient().
SetBaseURL(d.Endpoint).
SetHeader("Accept", "application/json")
repo, err := d.getRepo()
if err != nil {
return err
}
d.Ref = strings.TrimSpace(d.Ref)
if d.Ref == "" {
d.Ref = repo.DefaultBranch
}
return nil
}
func (d *Gitee) Drop(ctx context.Context) error {
return nil
}
func (d *Gitee) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
relPath := d.relativePath(dir.GetPath())
contents, err := d.listContents(relPath)
if err != nil {
return nil, err
}
objs := make([]model.Obj, 0, len(contents))
for i := range contents {
objs = append(objs, contents[i].toModelObj())
}
return objs, nil
}
func (d *Gitee) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var downloadURL string
if obj, ok := file.(*Object); ok {
downloadURL = obj.DownloadURL
if downloadURL == "" {
relPath := d.relativePath(file.GetPath())
content, err := d.getContent(relPath)
if err != nil {
return nil, err
}
if content.DownloadURL == "" {
return nil, errors.New("empty download url")
}
obj.DownloadURL = content.DownloadURL
downloadURL = content.DownloadURL
}
} else {
relPath := d.relativePath(file.GetPath())
content, err := d.getContent(relPath)
if err != nil {
return nil, err
}
if content.DownloadURL == "" {
return nil, errors.New("empty download url")
}
downloadURL = content.DownloadURL
}
url := d.applyProxy(downloadURL)
return &model.Link{
URL: url,
Header: http.Header{
"Cookie": {d.Cookie},
},
}, nil
}
func (d *Gitee) newRequest() *resty.Request {
req := d.client.R()
if d.Token != "" {
req.SetQueryParam("access_token", d.Token)
}
if d.Ref != "" {
req.SetQueryParam("ref", d.Ref)
}
return req
}
func (d *Gitee) apiPath(path string) string {
escapedOwner := url.PathEscape(d.Owner)
escapedRepo := url.PathEscape(d.Repo)
if path == "" {
return fmt.Sprintf("/repos/%s/%s/contents", escapedOwner, escapedRepo)
}
return fmt.Sprintf("/repos/%s/%s/contents/%s", escapedOwner, escapedRepo, encodePath(path))
}
func (d *Gitee) listContents(path string) ([]Content, error) {
res, err := d.newRequest().Get(d.apiPath(path))
if err != nil {
return nil, err
}
if res.IsError() {
return nil, toErr(res)
}
var contents []Content
if err := utils.Json.Unmarshal(res.Body(), &contents); err != nil {
var single Content
if err2 := utils.Json.Unmarshal(res.Body(), &single); err2 == nil && single.Type != "" {
if single.Type != "dir" {
return nil, errs.NotFolder
}
return []Content{}, nil
}
return nil, err
}
for i := range contents {
contents[i].Path = joinPath(path, contents[i].Name)
}
return contents, nil
}
func (d *Gitee) getContent(path string) (*Content, error) {
res, err := d.newRequest().Get(d.apiPath(path))
if err != nil {
return nil, err
}
if res.IsError() {
return nil, toErr(res)
}
var content Content
if err := utils.Json.Unmarshal(res.Body(), &content); err != nil {
return nil, err
}
if content.Type == "" {
return nil, errors.New("invalid response")
}
if content.Path == "" {
content.Path = path
}
return &content, nil
}
func (d *Gitee) relativePath(full string) string {
full = utils.FixAndCleanPath(full)
root := utils.FixAndCleanPath(d.RootFolderPath)
if root == "/" {
return strings.TrimPrefix(full, "/")
}
if utils.PathEqual(full, root) {
return ""
}
prefix := utils.PathAddSeparatorSuffix(root)
if strings.HasPrefix(full, prefix) {
return strings.TrimPrefix(full, prefix)
}
return strings.TrimPrefix(full, "/")
}
func (d *Gitee) applyProxy(raw string) string {
if raw == "" || d.DownloadProxy == "" {
return raw
}
proxy := d.DownloadProxy
if !strings.HasSuffix(proxy, "/") {
proxy += "/"
}
return proxy + strings.TrimLeft(raw, "/")
}
func encodePath(p string) string {
if p == "" {
return ""
}
parts := strings.Split(p, "/")
for i, part := range parts {
parts[i] = url.PathEscape(part)
}
return strings.Join(parts, "/")
}
func joinPath(base, name string) string {
if base == "" {
return name
}
return strings.TrimPrefix(stdpath.Join(base, name), "./")
}
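A quick self-contained illustration of the per-segment escaping that encodePath above performs, so that "/" separators are preserved while each segment is escaped (the sample path is made up):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// encodePath escapes each path segment individually so "/" separators survive.
func encodePath(p string) string {
	if p == "" {
		return ""
	}
	parts := strings.Split(p, "/")
	for i, part := range parts {
		parts[i] = url.PathEscape(part)
	}
	return strings.Join(parts, "/")
}

func main() {
	// Prints: docs/release%20notes/v1.txt
	fmt.Println(encodePath("docs/release notes/v1.txt"))
}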

View File

@@ -1,29 +0,0 @@
package gitee
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootPath
Endpoint string `json:"endpoint" type:"string" help:"Gitee API endpoint, default https://gitee.com/api/v5"`
Token string `json:"token" type:"string"`
Owner string `json:"owner" type:"string" required:"true"`
Repo string `json:"repo" type:"string" required:"true"`
Ref string `json:"ref" type:"string" help:"Branch, tag or commit SHA, defaults to repository default branch"`
DownloadProxy string `json:"download_proxy" type:"string" help:"Prefix added before download URLs, e.g. https://mirror.example.com/"`
Cookie string `json:"cookie" type:"string" help:"Cookie returned from user info request"`
}
var config = driver.Config{
Name: "Gitee",
LocalSort: true,
DefaultRoot: "/",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Gitee{}
})
}

View File

@@ -1,60 +0,0 @@
package gitee
import (
"time"
"github.com/alist-org/alist/v3/internal/model"
)
type Links struct {
Self string `json:"self"`
Html string `json:"html"`
}
type Content struct {
Type string `json:"type"`
Size *int64 `json:"size"`
Name string `json:"name"`
Path string `json:"path"`
Sha string `json:"sha"`
URL string `json:"url"`
HtmlURL string `json:"html_url"`
DownloadURL string `json:"download_url"`
Links Links `json:"_links"`
}
func (c Content) toModelObj() model.Obj {
size := int64(0)
if c.Size != nil {
size = *c.Size
}
return &Object{
Object: model.Object{
ID: c.Path,
Name: c.Name,
Size: size,
Modified: time.Unix(0, 0),
IsFolder: c.Type == "dir",
},
DownloadURL: c.DownloadURL,
HtmlURL: c.HtmlURL,
}
}
type Object struct {
model.Object
DownloadURL string
HtmlURL string
}
func (o *Object) URL() string {
return o.DownloadURL
}
type Repo struct {
DefaultBranch string `json:"default_branch"`
}
type ErrResp struct {
Message string `json:"message"`
}

View File

@@ -1,44 +0,0 @@
package gitee
import (
"fmt"
"net/url"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
func (d *Gitee) getRepo() (*Repo, error) {
req := d.client.R()
if d.Token != "" {
req.SetQueryParam("access_token", d.Token)
}
if d.Cookie != "" {
req.SetHeader("Cookie", d.Cookie)
}
escapedOwner := url.PathEscape(d.Owner)
escapedRepo := url.PathEscape(d.Repo)
res, err := req.Get(fmt.Sprintf("/repos/%s/%s", escapedOwner, escapedRepo))
if err != nil {
return nil, err
}
if res.IsError() {
return nil, toErr(res)
}
var repo Repo
if err := utils.Json.Unmarshal(res.Body(), &repo); err != nil {
return nil, err
}
if repo.DefaultBranch == "" {
return nil, fmt.Errorf("failed to fetch default branch")
}
return &repo, nil
}
func toErr(res *resty.Response) error {
var errMsg ErrResp
if err := utils.Json.Unmarshal(res.Body(), &errMsg); err == nil && errMsg.Message != "" {
return fmt.Errorf("%s: %s", res.Status(), errMsg.Message)
}
return fmt.Errorf("%s", res.Status())
}
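The toErr helper above prefers the API's message field when the body parses as JSON; a minimal sketch of the same pattern using only the standard library (the sample status and body are invented):

package main

import (
	"encoding/json"
	"fmt"
)

type errResp struct {
	Message string `json:"message"`
}

// toErr prefers the API error message and falls back to the bare HTTP status.
func toErr(status string, body []byte) error {
	var e errResp
	if err := json.Unmarshal(body, &e); err == nil && e.Message != "" {
		return fmt.Errorf("%s: %s", status, e.Message)
	}
	return fmt.Errorf("%s", status)
}

func main() {
	fmt.Println(toErr("404 Not Found", []byte(`{"message":"file not found"}`)))
}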

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"net/http"
"strings"
"sync"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@@ -37,35 +36,37 @@ func (d *GithubReleases) Drop(ctx context.Context) error {
return nil
}
// processPoint builds the file list for a single mount point
func (d *GithubReleases) processPoint(point *MountPoint, path string, args model.ListArgs) []File {
var pointFiles []File
func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files := make([]File, 0)
path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))
for i := range d.points {
point := &d.points[i]
if !d.Addition.ShowAllVersion { // latest
point.RequestLatestRelease(d.GetRequest, args.Refresh)
pointFiles = d.processLatestVersion(point, path)
} else { // all versions
point.RequestReleases(d.GetRequest, args.Refresh)
pointFiles = d.processAllVersions(point, path)
}
return pointFiles
}
// processLatestVersion handles the latest-release-only mode
func (d *GithubReleases) processLatestVersion(point *MountPoint, path string) []File {
var pointFiles []File
point.RequestRelease(d.GetRequest, args.Refresh)
if point.Point == path { // same as the repository path
pointFiles = append(pointFiles, point.GetLatestRelease()...)
files = append(files, point.GetLatestRelease()...)
if d.Addition.ShowReadme {
files := point.GetOtherFile(d.GetRequest, false)
pointFiles = append(pointFiles, files...)
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
} else if strings.HasPrefix(point.Point, path) { // parent directory of the repository path
nextDir := GetNextDir(point.Point, path)
if nextDir != "" {
dirFile := File{
if nextDir == "" {
continue
}
hasSameDir := false
for index := range files {
if files[index].GetName() == nextDir {
hasSameDir = true
files[index].Size += point.GetLatestSize()
break
}
}
if !hasSameDir {
files = append(files, File{
Path: path + "/" + nextDir,
FileName: nextDir,
Size: point.GetLatestSize(),
@@ -73,28 +74,33 @@ func (d *GithubReleases) processLatestVersion(point *MountPoint, path string) []
CreateAt: point.Release.CreatedAt,
Type: "dir",
Url: "",
}
pointFiles = append(pointFiles, dirFile)
})
}
}
return pointFiles
}
// processAllVersions handles the all-versions mode
func (d *GithubReleases) processAllVersions(point *MountPoint, path string) []File {
var pointFiles []File
} else { // all versions
point.RequestReleases(d.GetRequest, args.Refresh)
if point.Point == path { // same as the repository path
pointFiles = append(pointFiles, point.GetAllVersion()...)
files = append(files, point.GetAllVersion()...)
if d.Addition.ShowReadme {
files := point.GetOtherFile(d.GetRequest, false)
pointFiles = append(pointFiles, files...)
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
} else if strings.HasPrefix(point.Point, path) { // parent directory of the repository path
nextDir := GetNextDir(point.Point, path)
if nextDir != "" {
dirFile := File{
if nextDir == "" {
continue
}
hasSameDir := false
for index := range files {
if files[index].GetName() == nextDir {
hasSameDir = true
files[index].Size += point.GetAllVersionSize()
break
}
}
if !hasSameDir {
files = append(files, File{
FileName: nextDir,
Path: path + "/" + nextDir,
Size: point.GetAllVersionSize(),
@@ -102,65 +108,16 @@ func (d *GithubReleases) processAllVersions(point *MountPoint, path string) []Fi
CreateAt: (*point.Releases)[0].CreatedAt,
Type: "dir",
Url: "",
}
pointFiles = append(pointFiles, dirFile)
})
}
} else if strings.HasPrefix(path, point.Point) { // subdirectory of the repository path
tagName := GetNextDir(path, point.Point)
if tagName != "" {
pointFiles = append(pointFiles, point.GetReleaseByTagName(tagName)...)
}
if tagName == "" {
continue
}
return pointFiles
files = append(files, point.GetReleaseByTagName(tagName)...)
}
// mergeFiles merges file lists and deduplicates directories
func (d *GithubReleases) mergeFiles(files *[]File, newFiles []File) {
for _, newFile := range newFiles {
if newFile.Type == "dir" {
hasSameDir := false
for index := range *files {
if (*files)[index].GetName() == newFile.GetName() && (*files)[index].Type == "dir" {
hasSameDir = true
(*files)[index].Size += newFile.Size
break
}
}
if !hasSameDir {
*files = append(*files, newFile)
}
} else {
*files = append(*files, newFile)
}
}
}
func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files := make([]File, 0)
path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))
if d.Addition.ConcurrentRequests && d.Addition.Token != "" { // process mount points concurrently
var mu sync.Mutex
var wg sync.WaitGroup
for i := range d.points {
wg.Add(1)
go func(point *MountPoint) {
defer wg.Done()
pointFiles := d.processPoint(point, path, args)
mu.Lock()
d.mergeFiles(&files, pointFiles)
mu.Unlock()
}(&d.points[i])
}
wg.Wait()
} else { // process mount points sequentially
for i := range d.points {
point := &d.points[i]
pointFiles := d.processPoint(point, path, args)
d.mergeFiles(&files, pointFiles)
}
}

View File

@@ -11,7 +11,6 @@ type Addition struct {
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README and LICENSE files"`
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
ConcurrentRequests bool `json:"concurrent_requests" type:"bool" default:"false" help:"To concurrently request the GitHub API, you must enter a GitHub token"`
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
}

View File

@@ -18,7 +18,7 @@ type MountPoint struct {
}
// Request the latest release
func (m *MountPoint) RequestLatestRelease(get func(url string) (*resty.Response, error), refresh bool) {
func (m *MountPoint) RequestRelease(get func(url string) (*resty.Response, error), refresh bool) {
if m.Repo == "" {
return
}

View File

@@ -6,8 +6,8 @@ import (
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
// Send a GET request
@@ -23,7 +23,7 @@ func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) {
return nil, err
}
if res.StatusCode() != 200 {
utils.Log.Warnf("failed to get request: %s %d %s", url, res.StatusCode(), res.String())
log.Warn("failed to get request: ", res.StatusCode(), res.String())
}
return res, nil
}
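As a rough standalone sketch of the request/status-check pattern above with resty (the endpoint is illustrative; a real call would also attach the configured GitHub token in an Authorization header):

package main

import (
	"fmt"

	"github.com/go-resty/resty/v2"
)

func main() {
	client := resty.New()
	res, err := client.R().Get("https://api.github.com/rate_limit")
	if err != nil {
		fmt.Println("request error:", err)
		return
	}
	if res.StatusCode() != 200 {
		// Non-200 responses are logged but still returned to the caller.
		fmt.Printf("failed to get request: %d %s\n", res.StatusCode(), res.String())
		return
	}
	fmt.Printf("ok, %d bytes\n", len(res.Body()))
}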

View File

@@ -1,271 +0,0 @@
package gofile
import (
"context"
"fmt"
"time"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
)
type Gofile struct {
model.Storage
Addition
accountId string
}
func (d *Gofile) Config() driver.Config {
return config
}
func (d *Gofile) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Gofile) Init(ctx context.Context) error {
if d.APIToken == "" {
return fmt.Errorf("API token is required")
}
// Get account ID
accountId, err := d.getAccountId(ctx)
if err != nil {
return fmt.Errorf("failed to get account ID: %w", err)
}
d.accountId = accountId
// Get account info to set root folder if not specified
if d.RootFolderID == "" {
accountInfo, err := d.getAccountInfo(ctx, accountId)
if err != nil {
return fmt.Errorf("failed to get account info: %w", err)
}
d.RootFolderID = accountInfo.Data.RootFolder
}
// Save driver storage
op.MustSaveDriverStorage(d)
return nil
}
func (d *Gofile) Drop(ctx context.Context) error {
return nil
}
func (d *Gofile) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var folderId string
if dir.GetID() == "" {
folderId = d.GetRootId()
} else {
folderId = dir.GetID()
}
endpoint := fmt.Sprintf("/contents/%s", folderId)
var response ContentsResponse
err := d.getJSON(ctx, endpoint, &response)
if err != nil {
return nil, err
}
var objects []model.Obj
// Process children or contents
contents := response.Data.Children
if contents == nil {
contents = response.Data.Contents
}
for _, content := range contents {
objects = append(objects, d.convertContentToObj(content))
}
return objects, nil
}
func (d *Gofile) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if file.IsDir() {
return nil, errs.NotFile
}
// Create a direct link for the file
directLink, err := d.createDirectLink(ctx, file.GetID())
if err != nil {
return nil, fmt.Errorf("failed to create direct link: %w", err)
}
// Configure cache expiration based on user setting
link := &model.Link{
URL: directLink,
}
// Only set expiration if LinkExpiry > 0 (0 means no caching)
if d.LinkExpiry > 0 {
expiration := time.Duration(d.LinkExpiry) * 24 * time.Hour
link.Expiration = &expiration
}
return link, nil
}
func (d *Gofile) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
var parentId string
if parentDir.GetID() == "" {
parentId = d.GetRootId()
} else {
parentId = parentDir.GetID()
}
data := map[string]interface{}{
"parentFolderId": parentId,
"folderName": dirName,
}
var response CreateFolderResponse
err := d.postJSON(ctx, "/contents/createFolder", data, &response)
if err != nil {
return nil, err
}
return &model.Object{
ID: response.Data.ID,
Name: response.Data.Name,
IsFolder: true,
}, nil
}
func (d *Gofile) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var dstId string
if dstDir.GetID() == "" {
dstId = d.GetRootId()
} else {
dstId = dstDir.GetID()
}
data := map[string]interface{}{
"contentsId": srcObj.GetID(),
"folderId": dstId,
}
err := d.putJSON(ctx, "/contents/move", data, nil)
if err != nil {
return nil, err
}
// Return updated object
return &model.Object{
ID: srcObj.GetID(),
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
func (d *Gofile) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
data := map[string]interface{}{
"attribute": "name",
"attributeValue": newName,
}
var response UpdateResponse
err := d.putJSON(ctx, fmt.Sprintf("/contents/%s/update", srcObj.GetID()), data, &response)
if err != nil {
return nil, err
}
return &model.Object{
ID: srcObj.GetID(),
Name: newName,
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
func (d *Gofile) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var dstId string
if dstDir.GetID() == "" {
dstId = d.GetRootId()
} else {
dstId = dstDir.GetID()
}
data := map[string]interface{}{
"contentsId": srcObj.GetID(),
"folderId": dstId,
}
var response CopyResponse
err := d.postJSON(ctx, "/contents/copy", data, &response)
if err != nil {
return nil, err
}
// Get the new ID from the response
newId := srcObj.GetID()
if response.Data.CopiedContents != nil {
if id, ok := response.Data.CopiedContents[srcObj.GetID()]; ok {
newId = id
}
}
return &model.Object{
ID: newId,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
func (d *Gofile) Remove(ctx context.Context, obj model.Obj) error {
data := map[string]interface{}{
"contentsId": obj.GetID(),
}
return d.deleteJSON(ctx, "/contents", data)
}
func (d *Gofile) Put(ctx context.Context, dstDir model.Obj, fileStreamer model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
var folderId string
if dstDir.GetID() == "" {
folderId = d.GetRootId()
} else {
folderId = dstDir.GetID()
}
response, err := d.uploadFile(ctx, folderId, fileStreamer, up)
if err != nil {
return nil, err
}
return &model.Object{
ID: response.Data.FileId,
Name: response.Data.FileName,
Size: fileStreamer.GetSize(),
IsFolder: false,
}, nil
}
func (d *Gofile) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
return nil, errs.NotImplement
}
func (d *Gofile) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
return nil, errs.NotImplement
}
func (d *Gofile) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
return nil, errs.NotImplement
}
func (d *Gofile) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
return nil, errs.NotImplement
}
var _ driver.Driver = (*Gofile)(nil)

View File

@@ -1,28 +0,0 @@
package gofile
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootID
APIToken string `json:"api_token" required:"true" help:"Get your API token from your Gofile profile page"`
LinkExpiry int `json:"link_expiry" type:"number" default:"30" help:"Direct link cache duration in days. Set to 0 to disable caching"`
DirectLinkExpiry int `json:"direct_link_expiry" type:"number" default:"0" help:"Direct link expiration time in hours on Gofile server. Set to 0 for no expiration"`
}
var config = driver.Config{
Name: "Gofile",
DefaultRoot: "",
LocalSort: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Gofile{}
})
}

View File

@@ -1,124 +0,0 @@
package gofile
import "time"
type APIResponse struct {
Status string `json:"status"`
Data interface{} `json:"data"`
}
type AccountResponse struct {
Status string `json:"status"`
Data struct {
ID string `json:"id"`
} `json:"data"`
}
type AccountInfoResponse struct {
Status string `json:"status"`
Data struct {
ID string `json:"id"`
Type string `json:"type"`
Email string `json:"email"`
RootFolder string `json:"rootFolder"`
} `json:"data"`
}
type Content struct {
ID string `json:"id"`
Type string `json:"type"` // "file" or "folder"
Name string `json:"name"`
Size int64 `json:"size,omitempty"`
CreateTime int64 `json:"createTime"`
ModTime int64 `json:"modTime,omitempty"`
DirectLink string `json:"directLink,omitempty"`
Children map[string]Content `json:"children,omitempty"`
ParentFolder string `json:"parentFolder,omitempty"`
MD5 string `json:"md5,omitempty"`
MimeType string `json:"mimeType,omitempty"`
Link string `json:"link,omitempty"`
}
type ContentsResponse struct {
Status string `json:"status"`
Data struct {
IsOwner bool `json:"isOwner"`
ID string `json:"id"`
Type string `json:"type"`
Name string `json:"name"`
ParentFolder string `json:"parentFolder"`
CreateTime int64 `json:"createTime"`
ChildrenList []string `json:"childrenList,omitempty"`
Children map[string]Content `json:"children,omitempty"`
Contents map[string]Content `json:"contents,omitempty"`
Public bool `json:"public,omitempty"`
Description string `json:"description,omitempty"`
Tags string `json:"tags,omitempty"`
Expiry int64 `json:"expiry,omitempty"`
} `json:"data"`
}
type UploadResponse struct {
Status string `json:"status"`
Data struct {
DownloadPage string `json:"downloadPage"`
Code string `json:"code"`
ParentFolder string `json:"parentFolder"`
FileId string `json:"fileId"`
FileName string `json:"fileName"`
GuestToken string `json:"guestToken,omitempty"`
} `json:"data"`
}
type DirectLinkResponse struct {
Status string `json:"status"`
Data struct {
DirectLink string `json:"directLink"`
ID string `json:"id"`
} `json:"data"`
}
type CreateFolderResponse struct {
Status string `json:"status"`
Data struct {
ID string `json:"id"`
Type string `json:"type"`
Name string `json:"name"`
ParentFolder string `json:"parentFolder"`
CreateTime int64 `json:"createTime"`
} `json:"data"`
}
type CopyResponse struct {
Status string `json:"status"`
Data struct {
CopiedContents map[string]string `json:"copiedContents"` // oldId -> newId mapping
} `json:"data"`
}
type UpdateResponse struct {
Status string `json:"status"`
Data struct {
ID string `json:"id"`
Name string `json:"name"`
} `json:"data"`
}
type ErrorResponse struct {
Status string `json:"status"`
Error struct {
Message string `json:"message"`
Code string `json:"code"`
} `json:"error"`
}
func (c *Content) ModifiedTime() time.Time {
if c.ModTime > 0 {
return time.Unix(c.ModTime, 0)
}
return time.Unix(c.CreateTime, 0)
}
func (c *Content) IsDir() bool {
return c.Type == "folder"
}
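ModifiedTime above falls back to the creation time when modTime is absent; a tiny standalone check of that behavior (the timestamps are arbitrary):

package main

import (
	"fmt"
	"time"
)

// modified mirrors Content.ModifiedTime: use modTime when present,
// otherwise fall back to the creation time.
func modified(modTime, createTime int64) time.Time {
	if modTime > 0 {
		return time.Unix(modTime, 0)
	}
	return time.Unix(createTime, 0)
}

func main() {
	fmt.Println(modified(0, 1700000000))          // falls back to createTime
	fmt.Println(modified(1700003600, 1700000000)) // uses modTime
}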

View File

@@ -1,265 +0,0 @@
package gofile
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"path/filepath"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
log "github.com/sirupsen/logrus"
)
const (
baseAPI = "https://api.gofile.io"
uploadAPI = "https://upload.gofile.io"
)
func (d *Gofile) request(ctx context.Context, method, endpoint string, body io.Reader, headers map[string]string) (*http.Response, error) {
var url string
if strings.HasPrefix(endpoint, "http") {
url = endpoint
} else {
url = baseAPI + endpoint
}
req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil {
return nil, err
}
req.Header.Set("Authorization", "Bearer "+d.APIToken)
req.Header.Set("User-Agent", "AList/3.0")
for k, v := range headers {
req.Header.Set(k, v)
}
return base.HttpClient.Do(req)
}
func (d *Gofile) getJSON(ctx context.Context, endpoint string, result interface{}) error {
resp, err := d.request(ctx, "GET", endpoint, nil, nil)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return d.handleError(resp)
}
return json.NewDecoder(resp.Body).Decode(result)
}
func (d *Gofile) postJSON(ctx context.Context, endpoint string, data interface{}, result interface{}) error {
jsonData, err := json.Marshal(data)
if err != nil {
return err
}
headers := map[string]string{
"Content-Type": "application/json",
}
resp, err := d.request(ctx, "POST", endpoint, bytes.NewBuffer(jsonData), headers)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return d.handleError(resp)
}
if result != nil {
return json.NewDecoder(resp.Body).Decode(result)
}
return nil
}
func (d *Gofile) putJSON(ctx context.Context, endpoint string, data interface{}, result interface{}) error {
jsonData, err := json.Marshal(data)
if err != nil {
return err
}
headers := map[string]string{
"Content-Type": "application/json",
}
resp, err := d.request(ctx, "PUT", endpoint, bytes.NewBuffer(jsonData), headers)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return d.handleError(resp)
}
if result != nil {
return json.NewDecoder(resp.Body).Decode(result)
}
return nil
}
func (d *Gofile) deleteJSON(ctx context.Context, endpoint string, data interface{}) error {
jsonData, err := json.Marshal(data)
if err != nil {
return err
}
headers := map[string]string{
"Content-Type": "application/json",
}
resp, err := d.request(ctx, "DELETE", endpoint, bytes.NewBuffer(jsonData), headers)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return d.handleError(resp)
}
return nil
}
func (d *Gofile) handleError(resp *http.Response) error {
body, _ := io.ReadAll(resp.Body)
log.Debugf("Gofile API error (HTTP %d): %s", resp.StatusCode, string(body))
var errorResp ErrorResponse
if err := json.Unmarshal(body, &errorResp); err == nil && errorResp.Status == "error" {
return fmt.Errorf("gofile API error: %s (code: %s)", errorResp.Error.Message, errorResp.Error.Code)
}
return fmt.Errorf("gofile API error: HTTP %d - %s", resp.StatusCode, string(body))
}
func (d *Gofile) uploadFile(ctx context.Context, folderId string, file model.FileStreamer, up driver.UpdateProgress) (*UploadResponse, error) {
var body bytes.Buffer
writer := multipart.NewWriter(&body)
if folderId != "" {
writer.WriteField("folderId", folderId)
}
part, err := writer.CreateFormFile("file", filepath.Base(file.GetName()))
if err != nil {
return nil, err
}
// Copy with progress tracking if available
if up != nil {
reader := &progressReader{
reader: file,
total: file.GetSize(),
up: up,
}
_, err = io.Copy(part, reader)
} else {
_, err = io.Copy(part, file)
}
if err != nil {
return nil, err
}
writer.Close()
headers := map[string]string{
"Content-Type": writer.FormDataContentType(),
}
resp, err := d.request(ctx, "POST", uploadAPI+"/uploadfile", &body, headers)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, d.handleError(resp)
}
var result UploadResponse
err = json.NewDecoder(resp.Body).Decode(&result)
return &result, err
}
func (d *Gofile) createDirectLink(ctx context.Context, contentId string) (string, error) {
data := map[string]interface{}{}
if d.DirectLinkExpiry > 0 {
expireTime := time.Now().Add(time.Duration(d.DirectLinkExpiry) * time.Hour).Unix()
data["expireTime"] = expireTime
}
var result DirectLinkResponse
err := d.postJSON(ctx, fmt.Sprintf("/contents/%s/directlinks", contentId), data, &result)
if err != nil {
return "", err
}
return result.Data.DirectLink, nil
}
func (d *Gofile) convertContentToObj(content Content) model.Obj {
return &model.ObjThumb{
Object: model.Object{
ID: content.ID,
Name: content.Name,
Size: content.Size,
Modified: content.ModifiedTime(),
IsFolder: content.IsDir(),
},
}
}
func (d *Gofile) getAccountId(ctx context.Context) (string, error) {
var result AccountResponse
err := d.getJSON(ctx, "/accounts/getid", &result)
if err != nil {
return "", err
}
return result.Data.ID, nil
}
func (d *Gofile) getAccountInfo(ctx context.Context, accountId string) (*AccountInfoResponse, error) {
var result AccountInfoResponse
err := d.getJSON(ctx, fmt.Sprintf("/accounts/%s", accountId), &result)
if err != nil {
return nil, err
}
return &result, nil
}
// progressReader wraps an io.Reader to track upload progress
type progressReader struct {
reader io.Reader
total int64
read int64
up driver.UpdateProgress
}
func (pr *progressReader) Read(p []byte) (n int, err error) {
n, err = pr.reader.Read(p)
pr.read += int64(n)
if pr.up != nil && pr.total > 0 {
progress := float64(pr.read) * 100 / float64(pr.total)
pr.up(progress)
}
return n, err
}
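A short usage sketch of the progressReader pattern above, driving the callback from io.Copy (the payload and callback are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// progressReader wraps an io.Reader and reports percent-complete to a callback.
type progressReader struct {
	reader io.Reader
	total  int64
	read   int64
	up     func(percent float64)
}

func (pr *progressReader) Read(p []byte) (n int, err error) {
	n, err = pr.reader.Read(p)
	pr.read += int64(n)
	if pr.up != nil && pr.total > 0 {
		pr.up(float64(pr.read) * 100 / float64(pr.total))
	}
	return n, err
}

func main() {
	data := bytes.Repeat([]byte("x"), 1<<20) // 1 MiB dummy payload
	pr := &progressReader{
		reader: bytes.NewReader(data),
		total:  int64(len(data)),
		up:     func(p float64) { fmt.Printf("\rprogress: %.1f%%", p) },
	}
	if _, err := io.Copy(io.Discard, pr); err != nil {
		fmt.Println("copy error:", err)
	}
	fmt.Println()
}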

View File

@@ -94,7 +94,6 @@ func RemoveJSComment(data string) string {
}
if inComment && v == '*' && i+1 < len(data) && data[i+1] == '/' {
inComment = false
i++
continue
}
if v == '/' && i+1 < len(data) {
@@ -109,9 +108,6 @@ func RemoveJSComment(data string) string {
continue
}
}
if inComment || inSingleLineComment {
continue
}
result.WriteByte(v)
}

View File

@@ -430,35 +430,17 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
file.Time = timeFindReg.FindString(sharePageData)
// Follow the redirect to obtain the real download link
headers := map[string]string{
res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
}
res, err := base.NoRedirectClient.R().SetHeaders(headers).Get(downloadUrl)
}).Get(downloadUrl)
if err != nil {
return nil, err
}
rPageData := res.String()
if findAcwScV2Reg.MatchString(rPageData) {
log.Debug("lanzou: detected acw_sc__v2 challenge, recalculating cookie")
acwScV2, err := CalcAcwScV2(rPageData)
if err != nil {
return nil, err
}
// retry with calculated cookie to bypass anti-crawler validation
res, err = base.NoRedirectClient.R().
SetHeaders(headers).
SetCookie(&http.Cookie{Name: "acw_sc__v2", Value: acwScV2}).
Get(downloadUrl)
if err != nil {
return nil, err
}
rPageData = res.String()
}
file.Url = res.Header().Get("location")
// Trigger verification
rPageData := res.String()
if res.StatusCode() != 302 {
param, err = htmlJsonToMap(rPageData)
if err != nil {

View File

@@ -146,14 +146,13 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name()))
}
}
filePath := filepath.Join(fullPath, f.Name())
isFolder := f.IsDir() || isLinkedDir(f, filePath)
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
var size int64
if !isFolder {
size = f.Size()
}
var ctime time.Time
t, err := times.Stat(filePath)
t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
if err == nil {
if t.HasBirthTime() {
ctime = t.BirthTime()
@@ -162,7 +161,7 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
file := model.ObjThumb{
Object: model.Object{
Path: filePath,
Path: filepath.Join(fullPath, f.Name()),
Name: f.Name(),
Modified: f.ModTime(),
Size: size,
@@ -198,7 +197,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
}
return nil, err
}
isFolder := f.IsDir() || isLinkedDir(f, path)
isFolder := f.IsDir() || isSymlinkDir(f, path)
size := f.Size()
if isFolder {
size = 0

View File

@@ -7,7 +7,6 @@ import (
"io/fs"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
@@ -19,18 +18,14 @@ import (
ffmpeg "github.com/u2takey/ffmpeg-go"
)
func isLinkedDir(f fs.FileInfo, path string) bool {
if f.Mode()&os.ModeSymlink == os.ModeSymlink || (runtime.GOOS == "windows" && f.Mode()&os.ModeIrregular != 0) {
dst, err := os.Readlink(path)
func isSymlinkDir(f fs.FileInfo, path string) bool {
if f.Mode()&os.ModeSymlink == os.ModeSymlink {
dst, err := os.Readlink(filepath.Join(path, f.Name()))
if err != nil {
return false
}
if !filepath.IsAbs(dst) {
dst = filepath.Join(filepath.Dir(path), dst)
}
dst, err = filepath.Abs(dst)
if err != nil {
return false
dst = filepath.Join(path, dst)
}
stat, err := os.Stat(dst)
if err != nil {

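A standalone sketch of the relative-target resolution that isLinkedDir performs, with a hypothetical link path: relative symlink targets are resolved against the link's parent directory and normalized to an absolute path before the stat check.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveLinkTarget resolves a symlink target the way isLinkedDir does:
// relative targets are interpreted against the link's parent directory,
// then normalized to an absolute path.
func resolveLinkTarget(linkPath string) (string, error) {
	dst, err := os.Readlink(linkPath)
	if err != nil {
		return "", err
	}
	if !filepath.IsAbs(dst) {
		dst = filepath.Join(filepath.Dir(linkPath), dst)
	}
	return filepath.Abs(dst)
}

func main() {
	// Hypothetical link path; an error is expected if it does not exist.
	target, err := resolveLinkTarget("/tmp/example-link")
	fmt.Println(target, err)
}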
View File

@@ -1,433 +0,0 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
*/
import (
"context"
"fmt"
"math/rand"
"net/http"
"os"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils"
)
type Mediafire struct {
model.Storage
Addition
cron *cron.Cron
actionToken string
appBase string
apiBase string
hostBase string
maxRetries int
secChUa string
secChUaPlatform string
userAgent string
}
func (d *Mediafire) Config() driver.Config {
return config
}
func (d *Mediafire) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Mediafire) Init(ctx context.Context) error {
if d.SessionToken == "" {
return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
}
if d.Cookie == "" {
return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
}
if _, err := d.getSessionToken(ctx); err != nil {
d.renewToken(ctx)
num := rand.Intn(4) + 6
d.cron = cron.NewCron(time.Minute * time.Duration(num))
d.cron.Do(func() {
d.renewToken(ctx)
})
}
return nil
}
func (d *Mediafire) Drop(ctx context.Context) error {
return nil
}
func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
}
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
return d.fileToObj(src), nil
})
}
func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
if err != nil {
return nil, err
}
res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Get(downloadUrl)
if err != nil {
return nil, err
}
defer func() {
_ = res.RawBody().Close()
}()
if res.StatusCode() == 302 {
downloadUrl = res.Header().Get("location")
}
return &model.Link{
URL: downloadUrl,
Header: http.Header{
"Origin": []string{d.appBase},
"Referer": []string{d.appBase + "/"},
"sec-ch-ua": []string{d.secChUa},
"sec-ch-ua-platform": []string{d.secChUaPlatform},
"User-Agent": []string{d.userAgent},
//"User-Agent": []string{base.UserAgent},
},
}, nil
}
func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
data := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"parent_key": parentDir.GetID(),
"foldername": dirName,
}
var resp MediafireFolderCreateResponse
_, err := d.postForm("/folder/create.php", data, &resp)
if err != nil {
return nil, err
}
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
}
created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)
return &model.ObjThumb{
Object: model.Object{
ID: resp.Response.FolderKey,
Name: resp.Response.Name,
Size: 0,
Modified: created,
Ctime: created,
IsFolder: true,
},
Thumbnail: model.Thumbnail{},
}, nil
}
func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var data map[string]string
var endpoint string
if srcObj.IsDir() {
endpoint = "/folder/move.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key_src": srcObj.GetID(),
"folder_key_dst": dstDir.GetID(),
}
} else {
endpoint = "/file/move.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": srcObj.GetID(),
"folder_key": dstDir.GetID(),
}
}
var resp MediafireMoveResponse
_, err := d.postForm(endpoint, data, &resp)
if err != nil {
return nil, err
}
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
}
return srcObj, nil
}
func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var data map[string]string
var endpoint string
if srcObj.IsDir() {
endpoint = "/folder/update.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key": srcObj.GetID(),
"foldername": newName,
}
} else {
endpoint = "/file/update.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": srcObj.GetID(),
"filename": newName,
}
}
var resp MediafireRenameResponse
_, err := d.postForm(endpoint, data, &resp)
if err != nil {
return nil, err
}
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
}
return &model.ObjThumb{
Object: model.Object{
ID: srcObj.GetID(),
Name: newName,
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
Ctime: srcObj.CreateTime(),
IsFolder: srcObj.IsDir(),
},
Thumbnail: model.Thumbnail{},
}, nil
}
func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var data map[string]string
var endpoint string
if srcObj.IsDir() {
endpoint = "/folder/copy.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key_src": srcObj.GetID(),
"folder_key_dst": dstDir.GetID(),
}
} else {
endpoint = "/file/copy.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": srcObj.GetID(),
"folder_key": dstDir.GetID(),
}
}
var resp MediafireCopyResponse
_, err := d.postForm(endpoint, data, &resp)
if err != nil {
return nil, err
}
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
}
var newID string
if srcObj.IsDir() {
if len(resp.Response.NewFolderKeys) > 0 {
newID = resp.Response.NewFolderKeys[0]
}
} else {
if len(resp.Response.NewQuickKeys) > 0 {
newID = resp.Response.NewQuickKeys[0]
}
}
return &model.ObjThumb{
Object: model.Object{
ID: newID,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
Ctime: srcObj.CreateTime(),
IsFolder: srcObj.IsDir(),
},
Thumbnail: model.Thumbnail{},
}, nil
}
func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
var data map[string]string
var endpoint string
if obj.IsDir() {
endpoint = "/folder/delete.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key": obj.GetID(),
}
} else {
endpoint = "/file/delete.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": obj.GetID(),
}
}
var resp MediafireRemoveResponse
_, err := d.postForm(endpoint, data, &resp)
if err != nil {
return err
}
if resp.Response.Result != "Success" {
return fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
}
return nil
}
func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
_, err := d.PutResult(ctx, dstDir, file, up)
return err
}
func (d *Mediafire) PutResult(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer tempFile.Close()
osFile, ok := tempFile.(*os.File)
if !ok {
return nil, fmt.Errorf("expected *os.File, got %T", tempFile)
}
fileHash, err := d.calculateSHA256(osFile)
if err != nil {
return nil, err
}
checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
if err != nil {
return nil, err
}
if checkResp.Response.ResumableUpload.AllUnitsReady == "yes" {
up(100.0)
}
if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
up(100.0)
existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
if err == nil {
return existingFile, nil
}
}
var pollKey string
if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {
var err error
pollKey, err = d.uploadUnits(ctx, osFile, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
if err != nil {
return nil, err
}
} else {
pollKey = checkResp.Response.ResumableUpload.UploadKey
}
//fmt.Printf("pollKey: %+v\n", pollKey)
pollResp, err := d.pollUpload(ctx, pollKey)
if err != nil {
return nil, err
}
quickKey := pollResp.Response.Doupload.QuickKey
return &model.ObjThumb{
Object: model.Object{
ID: quickKey,
Name: file.GetName(),
Size: file.GetSize(),
},
Thumbnail: model.Thumbnail{},
}, nil
}
func (d *Mediafire) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Mediafire) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Mediafire) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Mediafire) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *Mediafire) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Mediafire)(nil)

View File

@@ -1,54 +0,0 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
*/
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootPath
//driver.RootID
SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
Cookie string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"100"`
}
var config = driver.Config{
Name: "MediaFire",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Mediafire{
appBase: "https://app.mediafire.com",
apiBase: "https://www.mediafire.com/api/1.5",
hostBase: "https://www.mediafire.com",
maxRetries: 3,
secChUa: "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
secChUaPlatform: "Windows",
userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
}
})
}

View File

@@ -1,232 +0,0 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
*/
type MediafireRenewTokenResponse struct {
Response struct {
Action string `json:"action"`
SessionToken string `json:"session_token"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireResponse struct {
Response struct {
Action string `json:"action"`
FolderContent struct {
ChunkSize string `json:"chunk_size"`
ContentType string `json:"content_type"`
ChunkNumber string `json:"chunk_number"`
FolderKey string `json:"folderkey"`
Folders []MediafireFolder `json:"folders,omitempty"`
Files []MediafireFile `json:"files,omitempty"`
MoreChunks string `json:"more_chunks"`
} `json:"folder_content"`
Result string `json:"result"`
} `json:"response"`
}
type MediafireFolder struct {
FolderKey string `json:"folderkey"`
Name string `json:"name"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
}
type MediafireFile struct {
QuickKey string `json:"quickkey"`
Filename string `json:"filename"`
Size string `json:"size"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
MimeType string `json:"mimetype"`
}
type File struct {
ID string
Name string
Size int64
CreatedUTC string
IsFolder bool
}
type FolderContentResponse struct {
Folders []MediafireFolder
Files []MediafireFile
MoreChunks bool
}
type MediafireLinksResponse struct {
Response struct {
Action string `json:"action"`
Links []struct {
QuickKey string `json:"quickkey"`
View string `json:"view"`
NormalDownload string `json:"normal_download"`
OneTime struct {
Download string `json:"download"`
View string `json:"view"`
} `json:"one_time"`
} `json:"links"`
OneTimeKeyRequestCount string `json:"one_time_key_request_count"`
OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireDirectDownloadResponse struct {
Response struct {
Action string `json:"action"`
Links []struct {
QuickKey string `json:"quickkey"`
DirectDownload string `json:"direct_download"`
} `json:"links"`
DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireFolderCreateResponse struct {
Response struct {
Action string `json:"action"`
FolderKey string `json:"folder_key"`
UploadKey string `json:"upload_key"`
ParentFolderKey string `json:"parent_folderkey"`
Name string `json:"name"`
Description string `json:"description"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
Privacy string `json:"privacy"`
FileCount string `json:"file_count"`
FolderCount string `json:"folder_count"`
Revision string `json:"revision"`
DropboxEnabled string `json:"dropbox_enabled"`
Flag string `json:"flag"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireMoveResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
NewNames []string `json:"new_names"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireRenameResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireCopyResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
NewQuickKeys []string `json:"new_quickkeys,omitempty"`
NewFolderKeys []string `json:"new_folderkeys,omitempty"`
SkippedCount string `json:"skipped_count,omitempty"`
OtherCount string `json:"other_count,omitempty"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireRemoveResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireCheckResponse struct {
Response struct {
Action string `json:"action"`
HashExists string `json:"hash_exists"`
InAccount string `json:"in_account"`
InFolder string `json:"in_folder"`
FileExists string `json:"file_exists"`
ResumableUpload struct {
AllUnitsReady string `json:"all_units_ready"`
NumberOfUnits string `json:"number_of_units"`
UnitSize string `json:"unit_size"`
Bitmap struct {
Count string `json:"count"`
Words []string `json:"words"`
} `json:"bitmap"`
UploadKey string `json:"upload_key"`
} `json:"resumable_upload"`
AvailableSpace string `json:"available_space"`
UsedStorageSize string `json:"used_storage_size"`
StorageLimit string `json:"storage_limit"`
StorageLimitExceeded string `json:"storage_limit_exceeded"`
UploadURL struct {
Simple string `json:"simple"`
SimpleFallback string `json:"simple_fallback"`
Resumable string `json:"resumable"`
ResumableFallback string `json:"resumable_fallback"`
} `json:"upload_url"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireActionTokenResponse struct {
Response struct {
Action string `json:"action"`
ActionToken string `json:"action_token"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafirePollResponse struct {
Response struct {
Action string `json:"action"`
Doupload struct {
Result string `json:"result"`
Status string `json:"status"`
Description string `json:"description"`
QuickKey string `json:"quickkey"`
Hash string `json:"hash"`
Filename string `json:"filename"`
Size string `json:"size"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
Revision string `json:"revision"`
} `json:"doupload"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireFileSearchResponse struct {
Response struct {
Action string `json:"action"`
FileInfo []File `json:"file_info"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}

View File

@@ -1,626 +0,0 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
*/
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
)
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
tokenURL := d.hostBase + "/application/get_session_token.php"
req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
if err != nil {
return "", err
}
req.Header.Set("Accept", "*/*")
req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
req.Header.Set("Accept-Language", "en-US,en;q=0.9")
req.Header.Set("Content-Length", "0")
req.Header.Set("Cookie", d.Cookie)
req.Header.Set("DNT", "1")
req.Header.Set("Origin", d.hostBase)
req.Header.Set("Priority", "u=1, i")
req.Header.Set("Referer", (d.hostBase + "/"))
req.Header.Set("Sec-Ch-Ua", d.secChUa)
req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
req.Header.Set("Sec-Fetch-Dest", "empty")
req.Header.Set("Sec-Fetch-Mode", "cors")
req.Header.Set("Sec-Fetch-Site", "same-site")
req.Header.Set("User-Agent", d.userAgent)
//req.Header.Set("Connection", "keep-alive")
resp, err := base.HttpClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
//fmt.Printf("getSessionToken :: Raw response: %s\n", string(body))
//fmt.Printf("getSessionToken :: Parsed response: %+v\n", resp)
var tokenResp struct {
Response struct {
SessionToken string `json:"session_token"`
} `json:"response"`
}
if resp.StatusCode == 200 {
if err := json.Unmarshal(body, &tokenResp); err != nil {
return "", err
}
if tokenResp.Response.SessionToken == "" {
return "", fmt.Errorf("empty session token received")
}
cookieMap := make(map[string]string)
for _, cookie := range resp.Cookies() {
cookieMap[cookie.Name] = cookie.Value
}
if len(cookieMap) > 0 {
var cookies []string
for name, value := range cookieMap {
cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
}
d.Cookie = strings.Join(cookies, "; ")
op.MustSaveDriverStorage(d)
//fmt.Printf("getSessionToken :: Captured cookies: %s\n", d.Cookie)
}
} else {
return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
}
d.SessionToken = tokenResp.Response.SessionToken
//fmt.Printf("Init :: Obtain Session Token %v", d.SessionToken)
op.MustSaveDriverStorage(d)
return d.SessionToken, nil
}
func (d *Mediafire) renewToken(_ context.Context) error {
query := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
}
var resp MediafireRenewTokenResponse
_, err := d.postForm("/user/renew_session_token.php", query, &resp)
if err != nil {
return fmt.Errorf("failed to renew token: %w", err)
}
//fmt.Printf("getInfo :: Raw response: %s\n", string(body))
//fmt.Printf("getInfo :: Parsed response: %+v\n", resp)
if resp.Response.Result != "Success" {
return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
}
d.SessionToken = resp.Response.SessionToken
//fmt.Printf("Init :: Renew Session Token: %s", resp.Response.Result)
op.MustSaveDriverStorage(d)
return nil
}
func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
files := make([]File, 0)
hasMore := true
chunkNumber := 1
for hasMore {
resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
if err != nil {
return nil, err
}
for _, folder := range resp.Folders {
files = append(files, File{
ID: folder.FolderKey,
Name: folder.Name,
Size: 0,
CreatedUTC: folder.CreatedUTC,
IsFolder: true,
})
}
for _, file := range resp.Files {
size, _ := strconv.ParseInt(file.Size, 10, 64)
files = append(files, File{
ID: file.QuickKey,
Name: file.Filename,
Size: size,
CreatedUTC: file.CreatedUTC,
IsFolder: false,
})
}
hasMore = resp.MoreChunks
chunkNumber++
}
return files, nil
}
func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
if err != nil {
return nil, err
}
filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
if err != nil {
return nil, err
}
return &FolderContentResponse{
Folders: foldersResp.Response.FolderContent.Folders,
Files: filesResp.Response.FolderContent.Files,
MoreChunks: foldersResp.Response.FolderContent.MoreChunks == "yes" || filesResp.Response.FolderContent.MoreChunks == "yes",
}, nil
}
func (d *Mediafire) getFolderContentByType(_ context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
data := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key": folderKey,
"content_type": contentType,
"chunk": strconv.Itoa(chunkNumber),
"chunk_size": strconv.FormatInt(d.ChunkSize, 10),
"details": "yes",
"order_direction": d.OrderDirection,
"order_by": d.OrderBy,
"filter": "",
}
var resp MediafireResponse
_, err := d.postForm("/folder/get_content.php", data, &resp)
if err != nil {
return nil, err
}
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
}
return &resp, nil
}
func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)
var thumbnailURL string
if !f.IsFolder && f.ID != "" {
thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
}
return &model.ObjThumb{
Object: model.Object{
ID: f.ID,
//Path: "",
Name: f.Name,
Size: f.Size,
Modified: created,
Ctime: created,
IsFolder: f.IsFolder,
},
Thumbnail: model.Thumbnail{
Thumbnail: thumbnailURL,
},
}
}
func (d *Mediafire) getForm(endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetQueryParams(query)
req.SetHeaders(map[string]string{
"Cookie": d.Cookie,
//"User-Agent": base.UserAgent,
"User-Agent": d.userAgent,
"Origin": d.appBase,
"Referer": d.appBase + "/",
})
// If response OK
if resp != nil {
req.SetResult(resp)
}
// Targets MediaFire API
res, err := req.Get(d.apiBase + endpoint)
if err != nil {
return nil, err
}
return res.Body(), nil
}
func (d *Mediafire) postForm(endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetFormData(data)
req.SetHeaders(map[string]string{
"Cookie": d.Cookie,
"Content-Type": "application/x-www-form-urlencoded",
//"User-Agent": base.UserAgent,
"User-Agent": d.userAgent,
"Origin": d.appBase,
"Referer": d.appBase + "/",
})
// If response OK
if resp != nil {
req.SetResult(resp)
}
// Targets MediaFire API
res, err := req.Post(d.apiBase + endpoint)
if err != nil {
return nil, err
}
return res.Body(), nil
}
func (d *Mediafire) getDirectDownloadLink(_ context.Context, fileID string) (string, error) {
data := map[string]string{
"session_token": d.SessionToken,
"quick_key": fileID,
"link_type": "direct_download",
"response_format": "json",
}
var resp MediafireDirectDownloadResponse
_, err := d.getForm("/file/get_links.php", data, &resp)
if err != nil {
return "", err
}
if resp.Response.Result != "Success" {
return "", fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
}
if len(resp.Response.Links) == 0 {
return "", fmt.Errorf("no download links found")
}
return resp.Response.Links[0].DirectDownload, nil
}
func (d *Mediafire) calculateSHA256(file *os.File) (string, error) {
hasher := sha256.New()
if _, err := file.Seek(0, 0); err != nil {
return "", err
}
if _, err := io.Copy(hasher, file); err != nil {
return "", err
}
return hex.EncodeToString(hasher.Sum(nil)), nil
}
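// Illustrative call site (assumed): the hex digest feeds upload/check.php so MediaFire
// can dedupe identical content and resume partial uploads. A minimal sketch using only
// helpers defined in this file:
//
//	hash, err := d.calculateSHA256(tmpFile)
//	if err != nil { return err }
//	check, err := d.uploadCheck(ctx, name, size, hash, folderKey)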
func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
actionToken, err := d.getActionToken(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get action token: %w", err)
}
query := map[string]string{
"session_token": actionToken, /* d.SessionToken */
"filename": filename,
"size": strconv.FormatInt(filesize, 10),
"hash": filehash,
"folder_key": folderKey,
"resumable": "yes",
"response_format": "json",
}
var resp MediafireCheckResponse
_, err = d.postForm("/upload/check.php", query, &resp)
if err != nil {
return nil, err
}
//fmt.Printf("uploadCheck :: Raw response: %s\n", string(body))
//fmt.Printf("uploadCheck :: Parsed response: %+v\n", resp)
//fmt.Printf("uploadCheck :: ResumableUpload section: %+v\n", resp.Response.ResumableUpload)
//fmt.Printf("uploadCheck :: Upload key specifically: '%s'\n", resp.Response.ResumableUpload.UploadKey)
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire upload check failed: %s", resp.Response.Result)
}
return &resp, nil
}
func (d *Mediafire) resumableUpload(ctx context.Context, folderKey, uploadKey string, unitData []byte, unitID int, fileHash, filename string, totalFileSize int64) (string, error) {
actionToken, err := d.getActionToken(ctx)
if err != nil {
return "", err
}
url := d.apiBase + "/upload/resumable.php"
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(unitData))
if err != nil {
return "", err
}
q := req.URL.Query()
q.Add("folder_key", folderKey)
q.Add("response_format", "json")
q.Add("session_token", actionToken)
q.Add("key", uploadKey)
req.URL.RawQuery = q.Encode()
req.Header.Set("x-filehash", fileHash)
req.Header.Set("x-filesize", strconv.FormatInt(totalFileSize, 10))
req.Header.Set("x-unit-id", strconv.Itoa(unitID))
req.Header.Set("x-unit-size", strconv.FormatInt(int64(len(unitData)), 10))
req.Header.Set("x-unit-hash", d.sha256Hex(bytes.NewReader(unitData)))
req.Header.Set("x-filename", filename)
req.Header.Set("Content-Type", "application/octet-stream")
req.ContentLength = int64(len(unitData))
/* fmt.Printf("Debug resumable upload request:\n")
fmt.Printf(" URL: %s\n", req.URL.String())
fmt.Printf(" Headers: %+v\n", req.Header)
fmt.Printf(" Unit ID: %d\n", unitID)
fmt.Printf(" Unit Size: %d\n", len(unitData))
fmt.Printf(" Upload Key: %s\n", uploadKey)
fmt.Printf(" Action Token: %s\n", actionToken) */
res, err := base.HttpClient.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("failed to read response body: %v", err)
}
//fmt.Printf("MediaFire resumable upload response (status %d): %s\n", res.StatusCode, string(body))
var uploadResp struct {
Response struct {
Doupload struct {
Key string `json:"key"`
} `json:"doupload"`
Result string `json:"result"`
} `json:"response"`
}
if err := json.Unmarshal(body, &uploadResp); err != nil {
return "", fmt.Errorf("failed to parse response: %v", err)
}
if res.StatusCode != 200 {
return "", fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
}
return uploadResp.Response.Doupload.Key, nil
}
func (d *Mediafire) uploadUnits(ctx context.Context, file *os.File, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
uploadKey := checkResp.Response.ResumableUpload.UploadKey
stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
intWords := make([]int, len(stringWords))
for i, word := range stringWords {
intWords[i], _ = strconv.Atoi(word)
}
var finalUploadKey string
for unitID := 0; unitID < numUnits; unitID++ {
if utils.IsCanceled(ctx) {
return "", ctx.Err()
}
if d.isUnitUploaded(intWords, unitID) {
up(float64(unitID+1) * 100 / float64(numUnits))
continue
}
uploadKey, err := d.uploadSingleUnit(ctx, file, unitID, unitSize, fileHash, filename, uploadKey, folderKey)
if err != nil {
return "", err
}
finalUploadKey = uploadKey
up(float64(unitID+1) * 100 / float64(numUnits))
}
return finalUploadKey, nil
}
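// Orchestration sketch (illustrative, not the driver's actual Put): how the helpers above
// compose into a resumable upload. putSketch is a hypothetical name; extraction of the
// final quickkey from the poll response is elided.
func (d *Mediafire) putSketch(ctx context.Context, file *os.File, name, folderKey string, size int64, up driver.UpdateProgress) error {
	hash, err := d.calculateSHA256(file) // content hash drives dedupe + resume
	if err != nil {
		return err
	}
	check, err := d.uploadCheck(ctx, name, size, hash, folderKey) // ask what is already there
	if err != nil {
		return err
	}
	key, err := d.uploadUnits(ctx, file, check, name, hash, folderKey, up) // send missing units
	if err != nil {
		return err
	}
	_, err = d.pollUpload(ctx, key) // wait for server-side assembly to finish
	return err
}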
func (d *Mediafire) uploadSingleUnit(ctx context.Context, file *os.File, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string) (string, error) {
start := int64(unitID) * unitSize
size := unitSize
stat, err := file.Stat()
if err != nil {
return "", err
}
fileSize := stat.Size()
if start+size > fileSize {
size = fileSize - start
}
unitData := make([]byte, size)
if _, err := file.ReadAt(unitData, start); err != nil {
return "", err
}
return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
}
func (d *Mediafire) getActionToken(_ context.Context) (string, error) {
if d.actionToken != "" {
return d.actionToken, nil
}
data := map[string]string{
"type": "upload",
"lifespan": "1440",
"response_format": "json",
"session_token": d.SessionToken,
}
var resp MediafireActionTokenResponse
_, err := d.postForm("/user/get_action_token.php", data, &resp)
if err != nil {
return "", err
}
if resp.Response.Result != "Success" {
return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
}
// Cache the token so the fast path at the top of this function works; previously the
// token was returned without being stored, so every call hit the API again.
d.actionToken = resp.Response.ActionToken
return d.actionToken, nil
}
func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
actionToken, err := d.getActionToken(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get action token: %w", err)
}
//fmt.Printf("Debug Key: %+v\n", key)
query := map[string]string{
"key": key,
"response_format": "json",
"session_token": actionToken, /* d.SessionToken */
}
var resp MediafirePollResponse
_, err = d.postForm("/upload/poll_upload.php", query, &resp)
if err != nil {
return nil, err
}
//fmt.Printf("pollUpload :: Raw response: %s\n", string(body))
//fmt.Printf("pollUpload :: Parsed response: %+v\n", resp)
//fmt.Printf("pollUpload :: Debug Result: %+v\n", resp.Response.Result)
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire poll upload failed: %s", resp.Response.Result)
}
return &resp, nil
}
func (d *Mediafire) sha256Hex(r io.Reader) string {
h := sha256.New()
_, _ = io.Copy(h, r) // callers pass an in-memory bytes.Reader, so the copy cannot fail
return hex.EncodeToString(h.Sum(nil))
}
func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
wordIndex := unitID / 16
bitIndex := unitID % 16
if wordIndex >= len(words) {
return false
}
return (words[wordIndex]>>bitIndex)&1 == 1
}
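// Worked example for the bitmap decoding above: MediaFire reports resumable progress as
// 16-bit words. With words = []int{5} (binary 101), bits 0 and 2 are set, so units 0 and 2
// are already uploaded and unit 1 still needs to be sent:
//
//	d.isUnitUploaded([]int{5}, 0) == true
//	d.isUnitUploaded([]int{5}, 1) == false
//	d.isUnitUploaded([]int{5}, 2) == true
//	d.isUnitUploaded([]int{5}, 16) == false // would need words[1], out of range here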
func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
if fileInfo, err := d.getFileByHash(ctx, fileHash); err == nil && fileInfo != nil {
return fileInfo, nil
}
files, err := d.getFiles(ctx, folderKey)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Name == filename && !file.IsFolder {
return d.fileToObj(file), nil
}
}
return nil, fmt.Errorf("existing file not found")
}
func (d *Mediafire) getFileByHash(_ context.Context, hash string) (*model.ObjThumb, error) {
query := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"hash": hash,
}
var resp MediafireFileSearchResponse
_, err := d.postForm("/file/get_info.php", query, &resp)
if err != nil {
return nil, err
}
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire file search failed: %s", resp.Response.Result)
}
if len(resp.Response.FileInfo) == 0 {
return nil, fmt.Errorf("file not found by hash")
}
file := resp.Response.FileInfo[0]
return d.fileToObj(file), nil
}
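// Note on the two lookups above (assumed rationale): file/get_info.php by hash is an exact
// content match (the basis for instant upload / dedupe), while the folder scan by name is a
// fallback for entries the hash index has not caught up with. getExistingFileInfo is what
// lets an upload whose content already exists on MediaFire complete without sending bytes.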

View File

@@ -11,7 +11,6 @@ type Addition struct {
driver.RootID
OrderBy string `json:"order_by" type:"select" options:"updated_at,title,size" default:"title"`
OrderDesc bool `json:"order_desc"`
DeviceFingerprint string `json:"device_fingerprint" required:"true"`
}
var config = driver.Config{

View File

@@ -17,9 +17,6 @@ import (
func (d *MediaTrack) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if d.DeviceFingerprint != "" {
req.SetHeader("X-Device-Fingerprint", d.DeviceFingerprint)
}
if callback != nil {
callback(req)
}

View File

@@ -11,7 +11,7 @@ type Addition struct {
IsSharepoint bool `json:"is_sharepoint"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://alistgo.com/tool/onedrive/callback"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://alist.nn.ci/tool/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`

View File

@@ -8,7 +8,6 @@ import (
"io"
"net/http"
stdpath "path"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@@ -18,6 +17,7 @@ import (
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
log "github.com/sirupsen/logrus"
)
var onedriveHostMap = map[string]Host{
@@ -204,18 +204,19 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
log.Debugf("upload: %d", finish)
var byteSize int64 = DEFAULT
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
log.Debug(err, n)
if err != nil {
return err
}
@@ -227,31 +228,19 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Onedrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
return nil
}
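// Worked example of the chunk arithmetic above (illustrative): with the default ChunkSize
// of 5, each chunk is 5*1024*1024 = 5242880 bytes. For a 12 MiB (12582912-byte) file the
// loop issues:
//
//	Content-Range: bytes 0-5242879/12582912
//	Content-Range: bytes 5242880-10485759/12582912
//	Content-Range: bytes 10485760-12582911/12582912   (final short chunk, byteSize = left)
//
// OneDrive answers 202 for intermediate fragments and 200/201 once the session completes,
// which is why all three status codes are accepted.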

View File

@@ -8,7 +8,6 @@ import (
"io"
"net/http"
stdpath "path"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@@ -18,6 +17,7 @@ import (
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
log "github.com/sirupsen/logrus"
)
var onedriveHostMap = map[string]Host{
@@ -154,18 +154,19 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
log.Debugf("upload: %d", finish)
var byteSize int64 = DEFAULT
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
log.Debug(err, n)
if err != nil {
return err
}
@@ -177,31 +178,19 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[OnedriveAPP] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
return nil
}

View File

@@ -1,189 +0,0 @@
package pcloud
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
type PCloud struct {
model.Storage
Addition
AccessToken string // Actual access token obtained from refresh token
}
func (d *PCloud) Config() driver.Config {
return config
}
func (d *PCloud) GetAddition() driver.Additional {
return &d.Addition
}
func (d *PCloud) Init(ctx context.Context) error {
// Map hostname selection to actual API endpoints
if d.Hostname == "us" {
d.Hostname = "api.pcloud.com"
} else if d.Hostname == "eu" {
d.Hostname = "eapi.pcloud.com"
}
// Set default root folder ID if not provided
if d.RootFolderID == "" {
d.RootFolderID = "d0"
}
// Use the access token directly (like rclone)
d.AccessToken = d.RefreshToken // RefreshToken field actually contains the access_token
return nil
}
func (d *PCloud) Drop(ctx context.Context) error {
return nil
}
func (d *PCloud) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
folderID := d.RootFolderID
if dir.GetID() != "" {
folderID = dir.GetID()
}
files, err := d.getFiles(folderID)
if err != nil {
return nil, err
}
return utils.SliceConvert(files, func(src FileObject) (model.Obj, error) {
return fileToObj(src), nil
})
}
func (d *PCloud) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
downloadURL, err := d.getDownloadLink(file.GetID())
if err != nil {
return nil, err
}
return &model.Link{
URL: downloadURL,
}, nil
}
// Mkdir implements driver.Mkdir
func (d *PCloud) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
parentID := d.RootFolderID
if parentDir.GetID() != "" {
parentID = parentDir.GetID()
}
return d.createFolder(parentID, dirName)
}
// Move implements driver.Move
func (d *PCloud) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
// pCloud uses renamefile/renamefolder for both rename and move
endpoint := "/renamefile"
paramName := "fileid"
if srcObj.IsDir() {
endpoint = "/renamefolder"
paramName = "folderid"
}
var resp ItemResult
_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
req.SetFormData(map[string]string{
paramName: extractID(srcObj.GetID()),
"tofolderid": extractID(dstDir.GetID()),
"toname": srcObj.GetName(),
})
}, &resp)
if err != nil {
return err
}
if resp.Result != 0 {
return fmt.Errorf("pCloud error: result code %d", resp.Result)
}
return nil
}
// Rename implements driver.Rename
func (d *PCloud) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
endpoint := "/renamefile"
paramName := "fileid"
if srcObj.IsDir() {
endpoint = "/renamefolder"
paramName = "folderid"
}
var resp ItemResult
_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
req.SetFormData(map[string]string{
paramName: extractID(srcObj.GetID()),
"toname": newName,
})
}, &resp)
if err != nil {
return err
}
if resp.Result != 0 {
return fmt.Errorf("pCloud error: result code %d", resp.Result)
}
return nil
}
// Copy implements driver.Copy
func (d *PCloud) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
endpoint := "/copyfile"
paramName := "fileid"
if srcObj.IsDir() {
endpoint = "/copyfolder"
paramName = "folderid"
}
var resp ItemResult
_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
req.SetFormData(map[string]string{
paramName: extractID(srcObj.GetID()),
"tofolderid": extractID(dstDir.GetID()),
"toname": srcObj.GetName(),
})
}, &resp)
if err != nil {
return err
}
if resp.Result != 0 {
return fmt.Errorf("pCloud error: result code %d", resp.Result)
}
return nil
}
// Remove implements driver.Remove
func (d *PCloud) Remove(ctx context.Context, obj model.Obj) error {
return d.delete(obj.GetID(), obj.IsDir())
}
// Put implements driver.Put
func (d *PCloud) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
parentID := d.RootFolderID
if dstDir.GetID() != "" {
parentID = dstDir.GetID()
}
return d.uploadFile(ctx, stream, parentID, stream.GetName(), stream.GetSize())
}

View File

@@ -1,30 +0,0 @@
package pcloud
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Using json tag "access_token" for UI display, but internally it's a refresh token
RefreshToken string `json:"access_token" required:"true" help:"OAuth token from pCloud authorization"`
Hostname string `json:"hostname" type:"select" options:"us,eu" default:"us" help:"Select pCloud server region"`
RootFolderID string `json:"root_folder_id" help:"Get folder ID from URL like https://my.pcloud.com/#/filemanager?folder=12345678901 (leave empty for root folder)"`
ClientID string `json:"client_id" help:"Custom OAuth client ID (optional)"`
ClientSecret string `json:"client_secret" help:"Custom OAuth client secret (optional)"`
}
// Implement IRootId interface
func (a Addition) GetRootId() string {
return a.RootFolderID
}
var config = driver.Config{
Name: "pCloud",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &PCloud{}
})
}

View File

@@ -1,91 +0,0 @@
package pcloud
import (
"strconv"
"time"
"github.com/alist-org/alist/v3/internal/model"
)
// ErrorResult represents a pCloud API error response
type ErrorResult struct {
Result int `json:"result"`
Error string `json:"error"`
}
// TokenResponse represents OAuth token response
type TokenResponse struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
}
// ItemResult represents a common pCloud API response
type ItemResult struct {
Result int `json:"result"`
Metadata *FolderMeta `json:"metadata,omitempty"`
}
// FolderMeta contains folder metadata including contents
type FolderMeta struct {
Contents []FileObject `json:"contents,omitempty"`
}
// DownloadLinkResult represents download link response
type DownloadLinkResult struct {
Result int `json:"result"`
Hosts []string `json:"hosts"`
Path string `json:"path"`
}
// FileObject represents a file or folder object in pCloud
type FileObject struct {
Name string `json:"name"`
Created string `json:"created"` // pCloud returns RFC1123 format string
Modified string `json:"modified"` // pCloud returns RFC1123 format string
IsFolder bool `json:"isfolder"`
FolderID uint64 `json:"folderid,omitempty"`
FileID uint64 `json:"fileid,omitempty"`
Size uint64 `json:"size"`
ParentID uint64 `json:"parentfolderid"`
Icon string `json:"icon,omitempty"`
Hash uint64 `json:"hash,omitempty"`
Category int `json:"category,omitempty"`
ID string `json:"id,omitempty"`
}
// Convert FileObject to model.Obj
func fileToObj(f FileObject) model.Obj {
// Parse RFC1123 format time from pCloud
modTime, _ := time.Parse(time.RFC1123, f.Modified)
obj := model.Object{
Name: f.Name,
Size: int64(f.Size),
Modified: modTime,
IsFolder: f.IsFolder,
}
if f.IsFolder {
obj.ID = "d" + strconv.FormatUint(f.FolderID, 10)
} else {
obj.ID = "f" + strconv.FormatUint(f.FileID, 10)
}
return &obj
}
// Extract numeric ID from string ID (remove 'd' or 'f' prefix)
func extractID(id string) string {
if len(id) > 1 && (id[0] == 'd' || id[0] == 'f') {
return id[1:]
}
return id
}
// Get folder ID from path, return "0" for root
func getFolderID(path string) string {
if path == "/" || path == "" {
return "0"
}
return extractID(path)
}
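// Illustrative round trip for the ID scheme above: pCloud folder 123 is stored as "d123"
// and file 456 as "f456"; the prefix encodes the object kind locally while the API only
// ever receives the numeric part:
//
//	extractID("d123") == "123"
//	extractID("f456") == "456"
//	getFolderID("/") == "0" // root folder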

View File

@@ -1,297 +0,0 @@
package pcloud
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
const (
defaultClientID = "DnONSzyJXpm"
defaultClientSecret = "VKEnd3ze4jsKFGg8TJiznwFG8"
)
// Get API base URL
func (d *PCloud) getAPIURL() string {
return "https://" + d.Hostname
}
// Get OAuth client credentials
func (d *PCloud) getClientCredentials() (string, string) {
clientID := d.ClientID
clientSecret := d.ClientSecret
if clientID == "" {
clientID = defaultClientID
}
if clientSecret == "" {
clientSecret = defaultClientSecret
}
return clientID, clientSecret
}
// Refresh OAuth access token
func (d *PCloud) refreshToken() error {
clientID, clientSecret := d.getClientCredentials()
var resp TokenResponse
_, err := base.RestyClient.R().
SetFormData(map[string]string{
"client_id": clientID,
"client_secret": clientSecret,
"grant_type": "refresh_token",
"refresh_token": d.RefreshToken,
}).
SetResult(&resp).
Post(d.getAPIURL() + "/oauth2_token")
if err != nil {
return err
}
d.AccessToken = resp.AccessToken
return nil
}
// shouldRetry determines if an error should be retried based on pCloud-specific logic
func (d *PCloud) shouldRetry(statusCode int, apiError *ErrorResult) bool {
// HTTP-level retry conditions
if statusCode == 429 || statusCode >= 500 {
return true
}
// pCloud API-specific retry conditions (like rclone)
if apiError != nil && apiError.Result != 0 {
// 4xxx: rate limiting
if apiError.Result/1000 == 4 {
return true
}
// 5xxx: internal errors
if apiError.Result/1000 == 5 {
return true
}
}
return false
}
// requestWithRetry makes authenticated API request with retry logic
func (d *PCloud) requestWithRetry(endpoint string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
maxRetries := 3
baseDelay := 500 * time.Millisecond
for attempt := 0; attempt <= maxRetries; attempt++ {
body, err := d.request(endpoint, method, callback, resp)
if err == nil {
return body, nil
}
// If this is the last attempt, return the error
if attempt == maxRetries {
return nil, err
}
// Check if we should retry based on error type
if !d.shouldRetryError(err) {
return nil, err
}
// Exponential backoff
delay := baseDelay * time.Duration(1<<attempt)
time.Sleep(delay)
}
return nil, fmt.Errorf("max retries exceeded")
}
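// Backoff schedule implied above (baseDelay = 500ms, doubled per attempt): failed calls
// retry after 500ms, 1s, then 2s, i.e. delay = 500ms * 2^attempt, for four attempts in
// total. Combined with shouldRetry, HTTP 429/5xx and pCloud's 4xxx (rate limit) and 5xxx
// (internal error) result-code classes are the retryable cases; anything else fails fast.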
// shouldRetryError checks if an error should trigger a retry
func (d *PCloud) shouldRetryError(err error) bool {
// For now, we'll retry on any error
// In production, you'd want more specific error handling
return true
}
// Make authenticated API request
func (d *PCloud) request(endpoint string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
// Add access token as query parameter (pCloud doesn't use Bearer auth)
req.SetQueryParam("access_token", d.AccessToken)
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
var res *resty.Response
var err error
switch method {
case http.MethodGet:
res, err = req.Get(d.getAPIURL() + endpoint)
case http.MethodPost:
res, err = req.Post(d.getAPIURL() + endpoint)
default:
return nil, fmt.Errorf("unsupported method: %s", method)
}
if err != nil {
return nil, err
}
// Check for API errors with pCloud-specific logic
if res.StatusCode() != 200 {
var errResp ErrorResult
if err := utils.Json.Unmarshal(res.Body(), &errResp); err == nil {
// Check if this error should trigger a retry
if d.shouldRetry(res.StatusCode(), &errResp) {
return nil, fmt.Errorf("pCloud API error (retryable): %s (result: %d)", errResp.Error, errResp.Result)
}
return nil, fmt.Errorf("pCloud API error: %s (result: %d)", errResp.Error, errResp.Result)
}
return nil, fmt.Errorf("HTTP error: %d", res.StatusCode())
}
return res.Body(), nil
}
// List files in a folder
func (d *PCloud) getFiles(folderID string) ([]FileObject, error) {
var resp ItemResult
_, err := d.requestWithRetry("/listfolder", http.MethodGet, func(req *resty.Request) {
req.SetQueryParam("folderid", extractID(folderID))
}, &resp)
if err != nil {
return nil, err
}
if resp.Result != 0 {
return nil, fmt.Errorf("pCloud error: result code %d", resp.Result)
}
if resp.Metadata == nil {
return []FileObject{}, nil
}
return resp.Metadata.Contents, nil
}
// Get download link for a file
func (d *PCloud) getDownloadLink(fileID string) (string, error) {
var resp DownloadLinkResult
_, err := d.requestWithRetry("/getfilelink", http.MethodGet, func(req *resty.Request) {
req.SetQueryParam("fileid", extractID(fileID))
}, &resp)
if err != nil {
return "", err
}
if resp.Result != 0 {
return "", fmt.Errorf("pCloud error: result code %d", resp.Result)
}
if len(resp.Hosts) == 0 {
return "", fmt.Errorf("no download hosts available")
}
return "https://" + resp.Hosts[0] + resp.Path, nil
}
// Create a folder
func (d *PCloud) createFolder(parentID, name string) error {
var resp ItemResult
_, err := d.requestWithRetry("/createfolder", http.MethodPost, func(req *resty.Request) {
req.SetFormData(map[string]string{
"folderid": extractID(parentID),
"name": name,
})
}, &resp)
if err != nil {
return err
}
if resp.Result != 0 {
return fmt.Errorf("pCloud error: result code %d", resp.Result)
}
return nil
}
// Delete a file or folder
func (d *PCloud) delete(objID string, isFolder bool) error {
endpoint := "/deletefile"
paramName := "fileid"
if isFolder {
endpoint = "/deletefolderrecursive"
paramName = "folderid"
}
var resp ItemResult
_, err := d.requestWithRetry(endpoint, http.MethodPost, func(req *resty.Request) {
req.SetFormData(map[string]string{
paramName: extractID(objID),
})
}, &resp)
if err != nil {
return err
}
if resp.Result != 0 {
return fmt.Errorf("pCloud error: result code %d", resp.Result)
}
return nil
}
// Upload a file using direct /uploadfile endpoint like rclone
func (d *PCloud) uploadFile(ctx context.Context, file io.Reader, parentID, name string, size int64) error {
// pCloud requires Content-Length, so we need to know the size
if size <= 0 {
return fmt.Errorf("file size must be provided for pCloud upload")
}
// Upload directly to /uploadfile endpoint like rclone
var resp ItemResult
req := base.RestyClient.R().
SetQueryParam("access_token", d.AccessToken).
SetHeader("Content-Length", strconv.FormatInt(size, 10)).
SetFileReader("content", name, file).
SetFormData(map[string]string{
"filename": name,
"folderid": extractID(parentID),
"nopartial": "1",
})
// Use PUT method like rclone
res, err := req.Put(d.getAPIURL() + "/uploadfile")
if err != nil {
return err
}
// Parse response
if err := utils.Json.Unmarshal(res.Body(), &resp); err != nil {
return err
}
if resp.Result != 0 {
return fmt.Errorf("pCloud upload error: result code %d", resp.Result)
}
return nil
}
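// Note on the upload above (mirroring rclone's observed behavior): /uploadfile is called
// with PUT, the file body travels in the multipart "content" part, and nopartial=1 asks
// pCloud to reject truncated transfers rather than persist a partial file, which is also
// why a known Content-Length is required up front.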

View File

@@ -1,418 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"sync"
"time"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
proton_api_bridge "github.com/henrybear327/Proton-API-Bridge"
"github.com/henrybear327/Proton-API-Bridge/common"
"github.com/henrybear327/go-proton-api"
)
type ProtonDrive struct {
model.Storage
Addition
protonDrive *proton_api_bridge.ProtonDrive
credentials *common.ProtonDriveCredential
apiBase string
appVersion string
protonJson string
userAgent string
sdkVersion string
webDriveAV string
tempServer *http.Server
tempServerPort int
downloadTokens map[string]*downloadInfo
tokenMutex sync.RWMutex
c *proton.Client
//m *proton.Manager
credentialCacheFile string
//userKR *crypto.KeyRing
addrKRs map[string]*crypto.KeyRing
addrData map[string]proton.Address
MainShare *proton.Share
RootLink *proton.Link
DefaultAddrKR *crypto.KeyRing
MainShareKR *crypto.KeyRing
}
func (d *ProtonDrive) Config() driver.Config {
return config
}
func (d *ProtonDrive) GetAddition() driver.Additional {
return &d.Addition
}
func (d *ProtonDrive) Init(ctx context.Context) error {
defer func() {
if r := recover(); r != nil {
fmt.Printf("ProtonDrive initialization panic: %v", r)
}
}()
if d.Username == "" {
return fmt.Errorf("username is required")
}
if d.Password == "" {
return fmt.Errorf("password is required")
}
//fmt.Printf("ProtonDrive Init: Username=%s, TwoFACode=%s", d.Username, d.TwoFACode)
if ctx == nil {
return fmt.Errorf("context cannot be nil")
}
cachedCredentials, err := d.loadCachedCredentials()
useReusableLogin := false
var reusableCredential *common.ReusableCredentialData
if err == nil && cachedCredentials != nil &&
cachedCredentials.UID != "" && cachedCredentials.AccessToken != "" &&
cachedCredentials.RefreshToken != "" && cachedCredentials.SaltedKeyPass != "" {
useReusableLogin = true
reusableCredential = cachedCredentials
} else {
useReusableLogin = false
reusableCredential = &common.ReusableCredentialData{}
}
config := &common.Config{
AppVersion: d.appVersion,
UserAgent: d.userAgent,
FirstLoginCredential: &common.FirstLoginCredentialData{
Username: d.Username,
Password: d.Password,
TwoFA: d.TwoFACode,
},
EnableCaching: true,
ConcurrentBlockUploadCount: 5,
ConcurrentFileCryptoCount: 2,
UseReusableLogin: useReusableLogin, // honor cached credentials when they loaded successfully
ReplaceExistingDraft: true,
ReusableCredential: reusableCredential,
CredentialCacheFile: d.credentialCacheFile,
}
if config.FirstLoginCredential == nil {
return fmt.Errorf("failed to create login credentials, FirstLoginCredential cannot be nil")
}
//fmt.Printf("Calling NewProtonDrive...")
protonDrive, credentials, err := proton_api_bridge.NewProtonDrive(
ctx,
config,
func(auth proton.Auth) {},
func() {},
)
if credentials == nil && !useReusableLogin {
return fmt.Errorf("failed to get credentials from NewProtonDrive")
}
if err != nil {
return fmt.Errorf("failed to initialize ProtonDrive: %w", err)
}
d.protonDrive = protonDrive
var finalCredentials *common.ProtonDriveCredential
if useReusableLogin {
// For reusable login, create credentials from cached data
finalCredentials = &common.ProtonDriveCredential{
UID: reusableCredential.UID,
AccessToken: reusableCredential.AccessToken,
RefreshToken: reusableCredential.RefreshToken,
SaltedKeyPass: reusableCredential.SaltedKeyPass,
}
d.credentials = finalCredentials
} else {
d.credentials = credentials
}
clientOptions := []proton.Option{
proton.WithAppVersion(d.appVersion),
proton.WithUserAgent(d.userAgent),
}
manager := proton.New(clientOptions...)
d.c = manager.NewClient(d.credentials.UID, d.credentials.AccessToken, d.credentials.RefreshToken)
saltedKeyPassBytes, err := base64.StdEncoding.DecodeString(d.credentials.SaltedKeyPass)
if err != nil {
return fmt.Errorf("failed to decode salted key pass: %w", err)
}
_, addrKRs, addrs, _, err := getAccountKRs(ctx, d.c, nil, saltedKeyPassBytes)
if err != nil {
return fmt.Errorf("failed to get account keyrings: %w", err)
}
d.MainShare = protonDrive.MainShare
d.RootLink = protonDrive.RootLink
d.MainShareKR = protonDrive.MainShareKR
d.DefaultAddrKR = protonDrive.DefaultAddrKR
d.addrKRs = addrKRs
d.addrData = addrs
return nil
}
func (d *ProtonDrive) Drop(ctx context.Context) error {
if d.tempServer != nil {
d.tempServer.Shutdown(ctx)
}
return nil
}
func (d *ProtonDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var linkID string
if dir.GetPath() == "/" {
linkID = d.protonDrive.RootLink.LinkID
} else {
link, err := d.searchByPath(ctx, dir.GetPath(), true)
if err != nil {
return nil, err
}
linkID = link.LinkID
}
entries, err := d.protonDrive.ListDirectory(ctx, linkID)
if err != nil {
return nil, fmt.Errorf("failed to list directory: %w", err)
}
//fmt.Printf("Found %d entries for path %s\n", len(entries), dir.GetPath())
//fmt.Printf("Found %d entries\n", len(entries))
if len(entries) == 0 {
emptySlice := []model.Obj{}
//fmt.Printf("Returning empty slice (entries): %+v\n", emptySlice)
return emptySlice, nil
}
var objects []model.Obj
for _, entry := range entries {
obj := &model.Object{
Name: entry.Name,
Size: entry.Link.Size,
Modified: time.Unix(entry.Link.ModifyTime, 0),
IsFolder: entry.IsFolder,
}
objects = append(objects, obj)
}
return objects, nil
}
func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
link, err := d.searchByPath(ctx, file.GetPath(), false)
if err != nil {
return nil, err
}
if err := d.ensureTempServer(); err != nil {
return nil, fmt.Errorf("failed to start temp server: %w", err)
}
token := d.generateDownloadToken(link.LinkID, file.GetName())
/* return &model.Link{
URL: fmt.Sprintf("protondrive://download/%s", link.LinkID),
}, nil */
return &model.Link{
URL: fmt.Sprintf("http://localhost:%d/temp/%s", d.tempServerPort, token),
}, nil
}
func (d *ProtonDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
var parentLinkID string
if parentDir.GetPath() == "/" {
parentLinkID = d.protonDrive.RootLink.LinkID
} else {
link, err := d.searchByPath(ctx, parentDir.GetPath(), true)
if err != nil {
return nil, err
}
parentLinkID = link.LinkID
}
_, err := d.protonDrive.CreateNewFolderByID(ctx, parentLinkID, dirName)
if err != nil {
return nil, fmt.Errorf("failed to create directory: %w", err)
}
newDir := &model.Object{
Name: dirName,
IsFolder: true,
Modified: time.Now(),
}
return newDir, nil
}
func (d *ProtonDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.DirectMove(ctx, srcObj, dstDir)
}
func (d *ProtonDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if d.protonDrive == nil {
return nil, fmt.Errorf("protonDrive bridge is nil")
}
return d.DirectRename(ctx, srcObj, newName)
}
func (d *ProtonDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if srcObj.IsDir() {
return nil, fmt.Errorf("directory copy not supported")
}
srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), false)
if err != nil {
return nil, err
}
reader, linkSize, fileSystemAttrs, err := d.protonDrive.DownloadFile(ctx, srcLink, 0)
if err != nil {
return nil, fmt.Errorf("failed to download source file: %w", err)
}
defer reader.Close()
actualSize := linkSize
if fileSystemAttrs != nil && fileSystemAttrs.Size > 0 {
actualSize = fileSystemAttrs.Size
}
tempFile, err := utils.CreateTempFile(reader, actualSize)
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %w", err)
}
defer tempFile.Close()
updatedObj := &model.Object{
Name: srcObj.GetName(),
// Use the accurate and real size
Size: actualSize,
Modified: srcObj.ModTime(),
IsFolder: false,
}
return d.Put(ctx, dstDir, &fileStreamer{
ReadCloser: tempFile,
obj: updatedObj,
}, nil)
}
func (d *ProtonDrive) Remove(ctx context.Context, obj model.Obj) error {
link, err := d.searchByPath(ctx, obj.GetPath(), obj.IsDir())
if err != nil {
return err
}
if obj.IsDir() {
return d.protonDrive.MoveFolderToTrashByID(ctx, link.LinkID, false)
} else {
return d.protonDrive.MoveFileToTrashByID(ctx, link.LinkID)
}
}
func (d *ProtonDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
var parentLinkID string
if dstDir.GetPath() == "/" {
parentLinkID = d.protonDrive.RootLink.LinkID
} else {
link, err := d.searchByPath(ctx, dstDir.GetPath(), true)
if err != nil {
return nil, err
}
parentLinkID = link.LinkID
}
tempFile, err := utils.CreateTempFile(file, file.GetSize())
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %w", err)
}
defer tempFile.Close()
err = d.uploadFile(ctx, parentLinkID, file.GetName(), tempFile, file.GetSize(), up)
if err != nil {
return nil, err
}
uploadedObj := &model.Object{
Name: file.GetName(),
Size: file.GetSize(),
Modified: file.ModTime(),
IsFolder: false,
}
return uploadedObj, nil
}
func (d *ProtonDrive) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *ProtonDrive) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *ProtonDrive) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *ProtonDrive) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
var _ driver.Driver = (*ProtonDrive)(nil)
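// The blank assignment above is a compile-time assertion: the build breaks if *ProtonDrive
// ever stops satisfying driver.Driver, at zero runtime cost.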

View File

@@ -1,69 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootPath
//driver.RootID
Username string `json:"username" required:"true" type:"string"`
Password string `json:"password" required:"true" type:"string"`
TwoFACode string `json:"two_fa_code,omitempty" type:"string"`
}
type Config struct {
Name string `json:"name"`
LocalSort bool `json:"local_sort"`
OnlyLocal bool `json:"only_local"`
OnlyProxy bool `json:"only_proxy"`
NoCache bool `json:"no_cache"`
NoUpload bool `json:"no_upload"`
NeedMs bool `json:"need_ms"`
DefaultRoot string `json:"default_root"`
}
var config = driver.Config{
Name: "ProtonDrive",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &ProtonDrive{
apiBase: "https://drive.proton.me/api",
appVersion: "windows-drive@1.11.3+rclone+proton",
credentialCacheFile: ".prtcrd",
protonJson: "application/vnd.protonmail.v1+json",
sdkVersion: "js@0.3.0",
userAgent: "ProtonDrive/v1.70.0 (Windows NT 10.0.22000; Win64; x64)",
webDriveAV: "web-drive@5.2.0+0f69f7a8",
}
})
}

View File

@@ -1,124 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
import (
"errors"
"io"
"os"
"time"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/henrybear327/go-proton-api"
)
type ProtonFile struct {
*proton.Link
Name string
IsFolder bool
}
func (p *ProtonFile) GetName() string {
return p.Name
}
func (p *ProtonFile) GetSize() int64 {
return p.Link.Size
}
func (p *ProtonFile) GetPath() string {
return p.Name
}
func (p *ProtonFile) IsDir() bool {
return p.IsFolder
}
func (p *ProtonFile) ModTime() time.Time {
return time.Unix(p.Link.ModifyTime, 0)
}
func (p *ProtonFile) CreateTime() time.Time {
return time.Unix(p.Link.CreateTime, 0)
}
type downloadInfo struct {
LinkID string
FileName string
}
type fileStreamer struct {
io.ReadCloser
obj model.Obj
}
func (fs *fileStreamer) GetMimetype() string { return "" }
func (fs *fileStreamer) NeedStore() bool { return false }
func (fs *fileStreamer) IsForceStreamUpload() bool { return false }
func (fs *fileStreamer) GetExist() model.Obj { return nil }
func (fs *fileStreamer) SetExist(model.Obj) {}
func (fs *fileStreamer) RangeRead(http_range.Range) (io.Reader, error) {
return nil, errors.New("not supported")
}
func (fs *fileStreamer) CacheFullInTempFile() (model.File, error) {
return nil, errors.New("not supported")
}
func (fs *fileStreamer) SetTmpFile(r *os.File) {}
func (fs *fileStreamer) GetFile() model.File { return nil }
func (fs *fileStreamer) GetName() string { return fs.obj.GetName() }
func (fs *fileStreamer) GetSize() int64 { return fs.obj.GetSize() }
func (fs *fileStreamer) GetPath() string { return fs.obj.GetPath() }
func (fs *fileStreamer) IsDir() bool { return fs.obj.IsDir() }
func (fs *fileStreamer) ModTime() time.Time { return fs.obj.ModTime() }
func (fs *fileStreamer) CreateTime() time.Time { return fs.obj.ModTime() }
func (fs *fileStreamer) GetHash() utils.HashInfo { return fs.obj.GetHash() }
func (fs *fileStreamer) GetID() string { return fs.obj.GetID() }
type httpRange struct {
start, end int64
}
type MoveRequest struct {
ParentLinkID string `json:"ParentLinkID"`
NodePassphrase string `json:"NodePassphrase"`
NodePassphraseSignature *string `json:"NodePassphraseSignature"`
Name string `json:"Name"`
NameSignatureEmail string `json:"NameSignatureEmail"`
Hash string `json:"Hash"`
OriginalHash string `json:"OriginalHash"`
ContentHash *string `json:"ContentHash"` // Maybe null
}
type progressReader struct {
reader io.Reader
total int64
current int64
callback driver.UpdateProgress
}
type RenameRequest struct {
Name string `json:"Name"` // PGP encrypted name
NameSignatureEmail string `json:"NameSignatureEmail"` // User's signature email
Hash string `json:"Hash"` // New name hash
OriginalHash string `json:"OriginalHash"` // Current name hash
}
type RenameResponse struct {
Code int `json:"Code"`
}

View File

@@ -1,918 +0,0 @@
package protondrive
/*
Package protondrive
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-18
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
The power of open-source, the force of teamwork and the magic of reverse engineering!
D@' 3z K!7 - The King Of Cracking
Long live the Motherland))
*/
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/henrybear327/Proton-API-Bridge/common"
"github.com/henrybear327/go-proton-api"
)
func (d *ProtonDrive) loadCachedCredentials() (*common.ReusableCredentialData, error) {
if d.credentialCacheFile == "" {
return nil, nil
}
if _, err := os.Stat(d.credentialCacheFile); os.IsNotExist(err) {
return nil, nil
}
data, err := os.ReadFile(d.credentialCacheFile)
if err != nil {
return nil, fmt.Errorf("failed to read credential cache file: %w", err)
}
var credentials common.ReusableCredentialData
if err := json.Unmarshal(data, &credentials); err != nil {
return nil, fmt.Errorf("failed to parse cached credentials: %w", err)
}
if credentials.UID == "" || credentials.AccessToken == "" ||
credentials.RefreshToken == "" || credentials.SaltedKeyPass == "" {
return nil, fmt.Errorf("cached credentials are incomplete")
}
return &credentials, nil
}
func (d *ProtonDrive) searchByPath(ctx context.Context, fullPath string, isFolder bool) (*proton.Link, error) {
if fullPath == "/" {
return d.protonDrive.RootLink, nil
}
cleanPath := strings.Trim(fullPath, "/")
pathParts := strings.Split(cleanPath, "/")
currentLink := d.protonDrive.RootLink
for i, part := range pathParts {
isLastPart := i == len(pathParts)-1
searchForFolder := !isLastPart || isFolder
entries, err := d.protonDrive.ListDirectory(ctx, currentLink.LinkID)
if err != nil {
return nil, fmt.Errorf("failed to list directory: %w", err)
}
found := false
for _, entry := range entries {
// entry.Name is already decrypted!
if entry.Name == part && entry.IsFolder == searchForFolder {
currentLink = entry.Link
found = true
break
}
}
if !found {
return nil, fmt.Errorf("path not found: %s (looking for part: %s)", fullPath, part)
}
}
return currentLink, nil
}
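// Illustrative walk for searchByPath above: resolving "/docs/2025/report.pdf" with
// isFolder=false lists the root to find "docs" (as a folder), lists "docs" to find "2025"
// (as a folder), then lists "2025" to find "report.pdf" (as a file, since only the last
// part honors isFolder). Each hop is one ListDirectory call, so a lookup costs O(depth)
// API round trips.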
func (pr *progressReader) Read(p []byte) (int, error) {
n, err := pr.reader.Read(p)
pr.current += int64(n)
if pr.callback != nil {
percentage := float64(pr.current) / float64(pr.total) * 100
pr.callback(percentage)
}
return n, err
}
func (d *ProtonDrive) uploadFile(ctx context.Context, parentLinkID, fileName string, file *os.File, size int64, up driver.UpdateProgress) error {
fileInfo, err := file.Stat()
if err != nil {
return fmt.Errorf("failed to get file info: %w", err)
}
_, err = d.protonDrive.GetLink(ctx, parentLinkID)
if err != nil {
return fmt.Errorf("failed to get parent link: %w", err)
}
reader := &progressReader{
reader: bufio.NewReader(file),
total: size,
current: 0,
callback: up,
}
_, _, err = d.protonDrive.UploadFileByReader(ctx, parentLinkID, fileName, fileInfo.ModTime(), reader, 0)
if err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
return nil
}
func (d *ProtonDrive) ensureTempServer() error {
if d.tempServer != nil {
// Already running
return nil
}
listener, err := net.Listen("tcp", ":0")
if err != nil {
return err
}
d.tempServerPort = listener.Addr().(*net.TCPAddr).Port
mux := http.NewServeMux()
mux.HandleFunc("/temp/", d.handleTempDownload)
d.tempServer = &http.Server{
Handler: mux,
}
go func() {
d.tempServer.Serve(listener)
}()
return nil
}
func (d *ProtonDrive) handleTempDownload(w http.ResponseWriter, r *http.Request) {
token := strings.TrimPrefix(r.URL.Path, "/temp/")
d.tokenMutex.RLock()
info, exists := d.downloadTokens[token]
d.tokenMutex.RUnlock()
if !exists {
http.Error(w, "Invalid or expired token", http.StatusNotFound)
return
}
link, err := d.protonDrive.GetLink(r.Context(), info.LinkID)
if err != nil {
http.Error(w, "Failed to get file link", http.StatusInternalServerError)
return
}
// Get file size for range calculations
_, _, attrs, err := d.protonDrive.DownloadFile(r.Context(), link, 0)
if err != nil {
http.Error(w, "Failed to get file info", http.StatusInternalServerError)
return
}
fileSize := attrs.Size
rangeHeader := r.Header.Get("Range")
if rangeHeader != "" {
// Parse range header like "bytes=0-1023" or "bytes=1024-"
ranges, err := parseRange(rangeHeader, fileSize)
if err != nil {
http.Error(w, "Invalid range", http.StatusRequestedRangeNotSatisfiable)
return
}
if len(ranges) == 1 {
// Single range request: serve 206 Partial Content for the requested slice
start, end := ranges[0].start, ranges[0].end
contentLength := end - start + 1
// Start download from offset
reader, _, _, err := d.protonDrive.DownloadFile(r.Context(), link, start)
if err != nil {
http.Error(w, "Failed to start download", http.StatusInternalServerError)
return
}
defer reader.Close()
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fileSize))
w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength))
w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(link.Name)))
// Partial content...
// Setting fileName here is mostly cosmetic
//.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", link.Name))
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", info.FileName))
w.Header().Set("Accept-Ranges", "bytes")
w.WriteHeader(http.StatusPartialContent)
io.CopyN(w, reader, contentLength)
return
}
}
// Full file download (non-range request)
reader, _, _, err := d.protonDrive.DownloadFile(r.Context(), link, 0)
if err != nil {
http.Error(w, "Failed to start download", http.StatusInternalServerError)
return
}
defer reader.Close()
// Set headers for full content
w.Header().Set("Content-Length", fmt.Sprintf("%d", fileSize))
w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(link.Name)))
// Setting fileName is needed since ProtonDrive's stored name is effectively a random string
//w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", link.Name))
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", info.FileName))
w.Header().Set("Accept-Ranges", "bytes")
// Stream the full file
io.Copy(w, reader)
}
func (d *ProtonDrive) generateDownloadToken(linkID, fileName string) string {
token := fmt.Sprintf("%d_%s", time.Now().UnixNano(), linkID[:8])
d.tokenMutex.Lock()
if d.downloadTokens == nil {
d.downloadTokens = make(map[string]*downloadInfo)
}
d.downloadTokens[token] = &downloadInfo{
LinkID: linkID,
FileName: fileName,
}
d.tokenMutex.Unlock()
go func() {
// Token expires in 1 hour
time.Sleep(1 * time.Hour)
d.tokenMutex.Lock()
delete(d.downloadTokens, token)
d.tokenMutex.Unlock()
}()
return token
}
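// Token shape produced above (illustrative): "<unixNano>_<first 8 chars of linkID>", e.g.
// "1726650000123456789_aBcD1234", served at http://localhost:<tempServerPort>/temp/<token>
// and expired by the goroutine after one hour. The slice linkID[:8] assumes link IDs are
// at least 8 characters long, which holds for Proton link IDs in practice.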
func parseRange(rangeHeader string, size int64) ([]httpRange, error) {
if !strings.HasPrefix(rangeHeader, "bytes=") {
return nil, fmt.Errorf("invalid range header")
}
rangeSpec := strings.TrimPrefix(rangeHeader, "bytes=")
ranges := strings.Split(rangeSpec, ",")
var result []httpRange
for _, r := range ranges {
r = strings.TrimSpace(r)
if strings.Contains(r, "-") {
parts := strings.Split(r, "-")
if len(parts) != 2 {
return nil, fmt.Errorf("invalid range format")
}
var start, end int64
var err error
if parts[0] == "" {
// Suffix range (e.g., "-500")
if parts[1] == "" {
return nil, fmt.Errorf("invalid range format")
}
end = size - 1
start, err = strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return nil, err
}
start = size - start
if start < 0 {
start = 0
}
} else if parts[1] == "" {
// Prefix range (e.g., "500-")
start, err = strconv.ParseInt(parts[0], 10, 64)
if err != nil {
return nil, err
}
end = size - 1
} else {
// Full range (e.g., "0-1023")
start, err = strconv.ParseInt(parts[0], 10, 64)
if err != nil {
return nil, err
}
end, err = strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return nil, err
}
}
if start >= size || end >= size || start > end {
return nil, fmt.Errorf("range out of bounds")
}
result = append(result, httpRange{start: start, end: end})
}
}
return result, nil
}
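// Worked examples for parseRange above (size = 10000):
//
//	"bytes=0-1023"  -> [{start: 0,    end: 1023}]  // explicit range
//	"bytes=9500-"   -> [{start: 9500, end: 9999}]  // prefix range, to EOF
//	"bytes=-500"    -> [{start: 9500, end: 9999}]  // suffix range, last 500 bytes
//	"bytes=0-99999" -> error                       // end >= size, out of bounds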
func (d *ProtonDrive) encryptFileName(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
// Temporary file (request)
tempReq := proton.CreateFileReq{
SignatureAddress: d.MainShare.Creator,
}
// Encrypt the filename
err = tempReq.SetName(name, d.DefaultAddrKR, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to encrypt filename: %w", err)
}
return tempReq.Name, nil
}
func (d *ProtonDrive) generateFileNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
}
parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
if err != nil {
return "", fmt.Errorf("failed to get parent hash key: %w", err)
}
nameHash, err := proton.GetNameHash(name, parentHashKey)
if err != nil {
return "", fmt.Errorf("failed to generate name hash: %w", err)
}
return nameHash, nil
}
func (d *ProtonDrive) getOriginalNameHash(link *proton.Link) (string, error) {
if link == nil {
return "", fmt.Errorf("link cannot be nil")
}
if link.Hash == "" {
return "", fmt.Errorf("link hash is empty")
}
return link.Hash, nil
}
func (d *ProtonDrive) getLink(ctx context.Context, linkID string) (*proton.Link, error) {
if linkID == "" {
return nil, fmt.Errorf("linkID cannot be empty")
}
link, err := d.c.GetLink(ctx, d.MainShare.ShareID, linkID)
if err != nil {
return nil, err
}
return &link, nil
}
func (d *ProtonDrive) getLinkKR(ctx context.Context, link *proton.Link) (*crypto.KeyRing, error) {
if link == nil {
return nil, fmt.Errorf("link cannot be nil")
}
// Root Link or Root Dir
if link.ParentLinkID == "" {
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
if err != nil {
return nil, err
}
return link.GetKeyRing(d.MainShareKR, signatureVerificationKR)
}
// Get parent keyring recursively
parentLink, err := d.getLink(ctx, link.ParentLinkID)
if err != nil {
return nil, err
}
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return nil, err
}
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
if err != nil {
return nil, err
}
return link.GetKeyRing(parentNodeKR, signatureVerificationKR)
}
var (
ErrKeyPassOrSaltedKeyPassMustBeNotNil = errors.New("either keyPass or saltedKeyPass must be not nil")
ErrFailedToUnlockUserKeys = errors.New("failed to unlock user keys")
)
func getAccountKRs(ctx context.Context, c *proton.Client, keyPass, saltedKeyPass []byte) (*crypto.KeyRing, map[string]*crypto.KeyRing, map[string]proton.Address, []byte, error) {
user, err := c.GetUser(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("user %#v", user)
addrsArr, err := c.GetAddresses(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("addr %#v", addr)
if saltedKeyPass == nil {
if keyPass == nil {
return nil, nil, nil, nil, ErrKeyPassOrSaltedKeyPassMustBeNotNil
}
// Due to limitations, salts are stored using cacheCredentialToFile
salts, err := c.GetSalts(ctx)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("salts %#v", salts)
saltedKeyPass, err = salts.SaltForKey(keyPass, user.Keys.Primary().ID)
if err != nil {
return nil, nil, nil, nil, err
}
// fmt.Printf("saltedKeyPass ok")
}
userKR, addrKRs, err := proton.Unlock(user, addrsArr, saltedKeyPass, nil)
if err != nil {
return nil, nil, nil, nil, err
} else if userKR.CountDecryptionEntities() == 0 {
return nil, nil, nil, nil, ErrFailedToUnlockUserKeys
}
addrs := make(map[string]proton.Address)
for _, addr := range addrsArr {
addrs[addr.Email] = addr
}
return userKR, addrKRs, addrs, saltedKeyPass, nil
}
func (d *ProtonDrive) getSignatureVerificationKeyring(emailAddresses []string, verificationAddrKRs ...*crypto.KeyRing) (*crypto.KeyRing, error) {
ret, err := crypto.NewKeyRing(nil)
if err != nil {
return nil, err
}
for _, emailAddress := range emailAddresses {
if addr, ok := d.addrData[emailAddress]; ok {
if addrKR, exists := d.addrKRs[addr.ID]; exists {
err = d.addKeysFromKR(ret, addrKR)
if err != nil {
return nil, err
}
}
}
}
for _, kr := range verificationAddrKRs {
err = d.addKeysFromKR(ret, kr)
if err != nil {
return nil, err
}
}
if ret.CountEntities() == 0 {
return nil, fmt.Errorf("no keyring for signature verification")
}
return ret, nil
}
func (d *ProtonDrive) addKeysFromKR(kr *crypto.KeyRing, newKRs ...*crypto.KeyRing) error {
for i := range newKRs {
for _, key := range newKRs[i].GetKeys() {
err := kr.AddKey(key)
if err != nil {
return err
}
}
}
return nil
}
func (d *ProtonDrive) DirectRename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
//fmt.Printf("DEBUG DirectRename: path=%s, newName=%s", srcObj.GetPath(), newName)
if d.MainShare == nil || d.DefaultAddrKR == nil {
return nil, fmt.Errorf("missing required fields: MainShare=%v, DefaultAddrKR=%v",
d.MainShare != nil, d.DefaultAddrKR != nil)
}
if d.protonDrive == nil {
return nil, fmt.Errorf("protonDrive bridge is nil")
}
srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), srcObj.IsDir())
if err != nil {
return nil, fmt.Errorf("failed to find source: %w", err)
}
parentLinkID := srcLink.ParentLinkID
if parentLinkID == "" {
return nil, fmt.Errorf("cannot rename root folder")
}
encryptedName, err := d.encryptFileName(ctx, newName, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to encrypt filename: %w", err)
}
newHash, err := d.generateFileNameHash(ctx, newName, parentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to generate new hash: %w", err)
}
originalHash, err := d.getOriginalNameHash(srcLink)
if err != nil {
return nil, fmt.Errorf("failed to get original hash: %w", err)
}
renameReq := RenameRequest{
Name: encryptedName,
NameSignatureEmail: d.MainShare.Creator,
Hash: newHash,
OriginalHash: originalHash,
}
err = d.executeRenameAPI(ctx, srcLink.LinkID, renameReq)
if err != nil {
return nil, fmt.Errorf("rename API call failed: %w", err)
}
return &model.Object{
Name: newName,
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
func (d *ProtonDrive) executeRenameAPI(ctx context.Context, linkID string, req RenameRequest) error {
renameURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/rename",
d.MainShare.VolumeID, linkID)
reqBody, err := json.Marshal(req)
if err != nil {
return fmt.Errorf("failed to marshal rename request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "PUT", renameURL, bytes.NewReader(reqBody))
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
}
httpReq.Header.Set("Content-Type", "application/json")
httpReq.Header.Set("Accept", d.protonJson)
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
httpReq.Header.Set("X-Pm-Uid", d.credentials.UID)
httpReq.Header.Set("Authorization", "Bearer "+d.credentials.AccessToken)
client := &http.Client{}
resp, err := client.Do(httpReq)
if err != nil {
return fmt.Errorf("failed to execute rename request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("rename failed with status %d", resp.StatusCode)
}
var renameResp RenameResponse
if err := json.NewDecoder(resp.Body).Decode(&renameResp); err != nil {
return fmt.Errorf("failed to decode rename response: %w", err)
}
if renameResp.Code != 1000 {
return fmt.Errorf("rename failed with code %d", renameResp.Code)
}
return nil
}
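One design note: executeRenameAPI (and executeMoveAPI below) allocates a fresh http.Client on every call. http.Client is safe for concurrent reuse, so a single shared instance with a timeout is a plausible simplification; the variable below is a sketch, not a field that exists in this diff, and it would also need the time import:
// Hypothetical shared client to replace the per-call `client := &http.Client{}`.
var driveHTTPClient = &http.Client{Timeout: 60 * time.Second}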
func (d *ProtonDrive) executeMoveAPI(ctx context.Context, linkID string, req MoveRequest) error {
//fmt.Printf("DEBUG Move Request - Name: %s\n", req.Name)
//fmt.Printf("DEBUG Move Request - Hash: %s\n", req.Hash)
//fmt.Printf("DEBUG Move Request - OriginalHash: %s\n", req.OriginalHash)
//fmt.Printf("DEBUG Move Request - ParentLinkID: %s\n", req.ParentLinkID)
//fmt.Printf("DEBUG Move Request - Name length: %d\n", len(req.Name))
//fmt.Printf("DEBUG Move Request - NameSignatureEmail: %s\n", req.NameSignatureEmail)
//fmt.Printf("DEBUG Move Request - ContentHash: %v\n", req.ContentHash)
//fmt.Printf("DEBUG Move Request - NodePassphrase length: %d\n", len(req.NodePassphrase))
//fmt.Printf("DEBUG Move Request - NodePassphraseSignature length: %d\n", len(req.NodePassphraseSignature))
//fmt.Printf("DEBUG Move Request - SrcLinkID: %s\n", linkID)
//fmt.Printf("DEBUG Move Request - DstParentLinkID: %s\n", req.ParentLinkID)
//fmt.Printf("DEBUG Move Request - ShareID: %s\n", d.MainShare.ShareID)
// Best-effort guard: a getLink failure is deliberately swallowed here; with
// srcLink == nil the same-parent check is skipped and the API decides.
srcLink, _ := d.getLink(ctx, linkID)
if srcLink != nil && srcLink.ParentLinkID == req.ParentLinkID {
return fmt.Errorf("cannot move to same parent directory")
}
moveURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/move",
d.MainShare.VolumeID, linkID)
reqBody, err := json.Marshal(req)
if err != nil {
return fmt.Errorf("failed to marshal move request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "PUT", moveURL, bytes.NewReader(reqBody))
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
}
httpReq.Header.Set("Authorization", "Bearer "+d.credentials.AccessToken)
httpReq.Header.Set("Accept", d.protonJson)
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
httpReq.Header.Set("X-Pm-Uid", d.credentials.UID)
httpReq.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(httpReq)
if err != nil {
return fmt.Errorf("failed to execute move request: %w", err)
}
defer resp.Body.Close()
var moveResp RenameResponse
if err := json.NewDecoder(resp.Body).Decode(&moveResp); err != nil {
return fmt.Errorf("failed to decode move response: %w", err)
}
if moveResp.Code != 1000 {
return fmt.Errorf("move operation failed with code: %d", moveResp.Code)
}
return nil
}
func (d *ProtonDrive) DirectMove(ctx context.Context, srcObj model.Obj, dstDir model.Obj) (model.Obj, error) {
//fmt.Printf("DEBUG DirectMove: srcPath=%s, dstPath=%s", srcObj.GetPath(), dstDir.GetPath())
srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), srcObj.IsDir())
if err != nil {
return nil, fmt.Errorf("failed to find source: %w", err)
}
var dstParentLinkID string
if dstDir.GetPath() == "/" {
dstParentLinkID = d.RootLink.LinkID
} else {
dstLink, err := d.searchByPath(ctx, dstDir.GetPath(), true)
if err != nil {
return nil, fmt.Errorf("failed to find destination: %w", err)
}
dstParentLinkID = dstLink.LinkID
}
if srcObj.IsDir() {
// Check if destination is a descendant of source
if err := d.checkCircularMove(ctx, srcLink.LinkID, dstParentLinkID); err != nil {
return nil, err
}
}
// Encrypt the filename for the new location
encryptedName, err := d.encryptFileName(ctx, srcObj.GetName(), dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to encrypt filename: %w", err)
}
newHash, err := d.generateNameHash(ctx, srcObj.GetName(), dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to generate new hash: %w", err)
}
originalHash, err := d.getOriginalNameHash(srcLink)
if err != nil {
return nil, fmt.Errorf("failed to get original hash: %w", err)
}
// Re-encrypt node passphrase for new parent context
reencryptedPassphrase, err := d.reencryptNodePassphrase(ctx, srcLink, dstParentLinkID)
if err != nil {
return nil, fmt.Errorf("failed to re-encrypt node passphrase: %w", err)
}
moveReq := MoveRequest{
ParentLinkID: dstParentLinkID,
NodePassphrase: reencryptedPassphrase,
Name: encryptedName,
NameSignatureEmail: d.MainShare.Creator,
Hash: newHash,
OriginalHash: originalHash,
ContentHash: nil,
// *** Causes rejection ***
/* NodePassphraseSignature: srcLink.NodePassphraseSignature, */
}
//fmt.Printf("DEBUG MoveRequest validation:\n")
//fmt.Printf(" Name length: %d\n", len(moveReq.Name))
//fmt.Printf(" Hash: %s\n", moveReq.Hash)
//fmt.Printf(" OriginalHash: %s\n", moveReq.OriginalHash)
//fmt.Printf(" NodePassphrase length: %d\n", len(moveReq.NodePassphrase))
/* fmt.Printf(" NodePassphraseSignature length: %d\n", len(moveReq.NodePassphraseSignature)) */
//fmt.Printf(" NameSignatureEmail: %s\n", moveReq.NameSignatureEmail)
err = d.executeMoveAPI(ctx, srcLink.LinkID, moveReq)
if err != nil {
return nil, fmt.Errorf("move API call failed: %w", err)
}
return &model.Object{
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
func (d *ProtonDrive) reencryptNodePassphrase(ctx context.Context, srcLink *proton.Link, dstParentLinkID string) (string, error) {
// Get source parent link with metadata
srcParentLink, err := d.getLink(ctx, srcLink.ParentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get source parent link: %w", err)
}
// Get source parent keyring using link object
srcParentKR, err := d.getLinkKR(ctx, srcParentLink)
if err != nil {
return "", fmt.Errorf("failed to get source parent keyring: %w", err)
}
// Get destination parent link with metadata
dstParentLink, err := d.getLink(ctx, dstParentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get destination parent link: %w", err)
}
// Get destination parent keyring using link object
dstParentKR, err := d.getLinkKR(ctx, dstParentLink)
if err != nil {
return "", fmt.Errorf("failed to get destination parent keyring: %w", err)
}
// Re-encrypt the node passphrase from source parent context to destination parent context
reencryptedPassphrase, err := reencryptKeyPacket(srcParentKR, dstParentKR, d.DefaultAddrKR, srcLink.NodePassphrase)
if err != nil {
return "", fmt.Errorf("failed to re-encrypt key packet: %w", err)
}
return reencryptedPassphrase, nil
}
func (d *ProtonDrive) generateNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
parentLink, err := d.getLink(ctx, parentLinkID)
if err != nil {
return "", fmt.Errorf("failed to get parent link: %w", err)
}
// Get parent node keyring
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
if err != nil {
return "", fmt.Errorf("failed to get parent keyring: %w", err)
}
// Get signature verification keyring
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
if err != nil {
return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
}
parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
if err != nil {
return "", fmt.Errorf("failed to get parent hash key: %w", err)
}
nameHash, err := proton.GetNameHash(name, parentHashKey)
if err != nil {
return "", fmt.Errorf("failed to generate name hash: %w", err)
}
return nameHash, nil
}
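For context, proton.GetNameHash is believed to compute an HMAC-SHA256 of the plaintext name keyed by the parent folder's hash key, hex-encoded, which lets the server enforce per-folder name uniqueness without ever seeing names. A hedged equivalent (an assumption about go-proton-api internals, not a guaranteed contract; needs crypto/hmac, crypto/sha256 and encoding/hex):
mac := hmac.New(sha256.New, parentHashKey)
mac.Write([]byte(name))
nameHash := hex.EncodeToString(mac.Sum(nil))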
func reencryptKeyPacket(srcKR, dstKR, _ *crypto.KeyRing, passphrase string) (string, error) { // third parameter (addrKR) is currently unused
oldSplitMessage, err := crypto.NewPGPSplitMessageFromArmored(passphrase)
if err != nil {
return "", err
}
sessionKey, err := srcKR.DecryptSessionKey(oldSplitMessage.KeyPacket)
if err != nil {
return "", err
}
newKeyPacket, err := dstKR.EncryptSessionKey(sessionKey)
if err != nil {
return "", err
}
newSplitMessage := crypto.NewPGPSplitMessage(newKeyPacket, oldSplitMessage.DataPacket)
return newSplitMessage.GetArmored()
}
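The armored passphrase is an ordinary PGP message: a key packet (the session key encrypted to the parent keyring) plus a data packet (the node passphrase encrypted under that session key). Only the key packet is re-wrapped, so the node's own key material never changes. A round-trip sketch, assuming srcKR and dstKR are unlocked private keyrings (e.g. test fixtures):
func roundTripSketch(srcKR, dstKR *crypto.KeyRing) error {
msg, err := srcKR.Encrypt(crypto.NewPlainMessage([]byte("node passphrase")), nil)
if err != nil {
return err
}
armored, err := msg.GetArmored()
if err != nil {
return err
}
// Re-wrap the session key from srcKR to dstKR; the data packet is untouched.
rewrapped, err := reencryptKeyPacket(srcKR, dstKR, nil, armored)
if err != nil {
return err
}
pgpMsg, err := crypto.NewPGPMessageFromArmored(rewrapped)
if err != nil {
return err
}
out, err := dstKR.Decrypt(pgpMsg, nil, 0)
if err != nil {
return err
}
if out.GetString() != "node passphrase" {
return fmt.Errorf("payload changed across re-wrap")
}
return nil
}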
func (d *ProtonDrive) checkCircularMove(ctx context.Context, srcLinkID, dstParentLinkID string) error {
currentLinkID := dstParentLinkID
for currentLinkID != "" && currentLinkID != d.RootLink.LinkID {
if currentLinkID == srcLinkID {
return fmt.Errorf("cannot move folder into itself or its subfolder")
}
currentLink, err := d.getLink(ctx, currentLinkID)
if err != nil {
return err
}
currentLinkID = currentLink.ParentLinkID
}
return nil
}

View File

@@ -83,7 +83,7 @@ type Group struct {
Type int `json:"type"`
Name string `json:"name"`
IsAdministrator int `json:"is_administrator"`
Role []int `json:"role"`
Role int `json:"role"`
Avatar string `json:"avatar_url"`
IsStick int `json:"is_stick"`
Nickname string `json:"nickname"`

View File

@@ -15,7 +15,6 @@ import (
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/server/common"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
@@ -33,33 +32,6 @@ type S3 struct {
cron *cron.Cron
}
var storageClassLookup = map[string]string{
"standard": s3.ObjectStorageClassStandard,
"reduced_redundancy": s3.ObjectStorageClassReducedRedundancy,
"glacier": s3.ObjectStorageClassGlacier,
"standard_ia": s3.ObjectStorageClassStandardIa,
"onezone_ia": s3.ObjectStorageClassOnezoneIa,
"intelligent_tiering": s3.ObjectStorageClassIntelligentTiering,
"deep_archive": s3.ObjectStorageClassDeepArchive,
"outposts": s3.ObjectStorageClassOutposts,
"glacier_ir": s3.ObjectStorageClassGlacierIr,
"snow": s3.ObjectStorageClassSnow,
"express_onezone": s3.ObjectStorageClassExpressOnezone,
}
func (d *S3) resolveStorageClass() *string {
value := strings.TrimSpace(d.StorageClass)
if value == "" {
return nil
}
normalized := strings.ToLower(strings.ReplaceAll(value, "-", "_"))
if v, ok := storageClassLookup[normalized]; ok {
return aws.String(v)
}
log.Warnf("s3: unknown storage class %q, using raw value", d.StorageClass)
return aws.String(value)
}
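The normalization is case- and dash-insensitive, so common user spellings resolve to the SDK constants, while unknown values pass through raw (with a warning) so provider-specific classes still reach the API. Illustrative usage, assuming Addition is embedded in S3 as the direct d.StorageClass access implies:
d := &S3{}
d.StorageClass = "Standard-IA"
fmt.Println(aws.StringValue(d.resolveStorageClass())) // "STANDARD_IA"
d.StorageClass = "MAZ_ARCHIVE" // unknown: logged, passed through raw
fmt.Println(aws.StringValue(d.resolveStorageClass())) // "MAZ_ARCHIVE"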
func (d *S3) Config() driver.Config {
return d.config
}
@@ -207,14 +179,8 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
}),
ContentType: &contentType,
}
if storageClass := d.resolveStorageClass(); storageClass != nil {
input.StorageClass = storageClass
}
_, err := uploader.UploadWithContext(ctx, input)
return err
}
var (
_ driver.Driver = (*S3)(nil)
_ driver.Other = (*S3)(nil)
)
var _ driver.Driver = (*S3)(nil)

View File

@@ -21,7 +21,6 @@ type Addition struct {
ListObjectVersion string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
RemoveBucket bool `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
AddFilenameToDisposition bool `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
StorageClass string `json:"storage_class" type:"select" options:",standard,standard_ia,onezone_ia,intelligent_tiering,glacier,glacier_ir,deep_archive,archive" help:"Storage class for new objects. AWS and Tencent COS support different subsets (COS uses ARCHIVE/DEEP_ARCHIVE)."`
}
func init() {

View File

@@ -1,286 +0,0 @@
package s3
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
)
const (
OtherMethodArchive = "archive"
OtherMethodArchiveStatus = "archive_status"
OtherMethodThaw = "thaw"
OtherMethodThawStatus = "thaw_status"
)
type ArchiveRequest struct {
StorageClass string `json:"storage_class"`
}
type ThawRequest struct {
Days int64 `json:"days"`
Tier string `json:"tier"`
}
type ObjectDescriptor struct {
Path string `json:"path"`
Bucket string `json:"bucket"`
Key string `json:"key"`
}
type ArchiveResponse struct {
Action string `json:"action"`
Object ObjectDescriptor `json:"object"`
StorageClass string `json:"storage_class"`
RequestID string `json:"request_id,omitempty"`
VersionID string `json:"version_id,omitempty"`
ETag string `json:"etag,omitempty"`
LastModified string `json:"last_modified,omitempty"`
}
type ThawResponse struct {
Action string `json:"action"`
Object ObjectDescriptor `json:"object"`
RequestID string `json:"request_id,omitempty"`
Status *RestoreStatus `json:"status,omitempty"`
}
type RestoreStatus struct {
Ongoing bool `json:"ongoing"`
Expiry string `json:"expiry,omitempty"`
Raw string `json:"raw"`
}
func (d *S3) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
if args.Obj == nil {
return nil, fmt.Errorf("missing object reference")
}
if args.Obj.IsDir() {
return nil, errs.NotSupport
}
switch strings.ToLower(strings.TrimSpace(args.Method)) {
case OtherMethodArchive:
return d.archive(ctx, args)
case OtherMethodArchiveStatus:
return d.archiveStatus(ctx, args)
case OtherMethodThaw:
return d.thaw(ctx, args)
case OtherMethodThawStatus:
return d.thawStatus(ctx, args)
default:
return nil, errs.NotSupport
}
}
func (d *S3) archive(ctx context.Context, args model.OtherArgs) (interface{}, error) {
key := getKey(args.Obj.GetPath(), false)
payload := ArchiveRequest{}
if err := DecodeOtherArgs(args.Data, &payload); err != nil {
return nil, fmt.Errorf("parse archive request: %w", err)
}
if payload.StorageClass == "" {
return nil, fmt.Errorf("storage_class is required")
}
storageClass := NormalizeStorageClass(payload.StorageClass)
input := &s3.CopyObjectInput{
Bucket: &d.Bucket,
Key: &key,
CopySource: aws.String(url.PathEscape(d.Bucket + "/" + key)),
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
StorageClass: aws.String(storageClass),
}
copyReq, output := d.client.CopyObjectRequest(input)
copyReq.SetContext(ctx)
if err := copyReq.Send(); err != nil {
return nil, err
}
resp := ArchiveResponse{
Action: "archive",
Object: d.describeObject(args.Obj, key),
StorageClass: storageClass,
RequestID: copyReq.RequestID,
}
if output.VersionId != nil {
resp.VersionID = aws.StringValue(output.VersionId)
}
if result := output.CopyObjectResult; result != nil {
resp.ETag = aws.StringValue(result.ETag)
if result.LastModified != nil {
resp.LastModified = result.LastModified.UTC().Format(time.RFC3339)
}
}
if status, err := d.describeObjectStatus(ctx, key); err == nil {
if status.StorageClass != "" {
resp.StorageClass = status.StorageClass
}
}
return resp, nil
}
func (d *S3) archiveStatus(ctx context.Context, args model.OtherArgs) (interface{}, error) {
key := getKey(args.Obj.GetPath(), false)
status, err := d.describeObjectStatus(ctx, key)
if err != nil {
return nil, err
}
return ArchiveResponse{
Action: "archive_status",
Object: d.describeObject(args.Obj, key),
StorageClass: status.StorageClass,
}, nil
}
func (d *S3) thaw(ctx context.Context, args model.OtherArgs) (interface{}, error) {
key := getKey(args.Obj.GetPath(), false)
payload := ThawRequest{Days: 1}
if err := DecodeOtherArgs(args.Data, &payload); err != nil {
return nil, fmt.Errorf("parse thaw request: %w", err)
}
if payload.Days <= 0 {
payload.Days = 1
}
restoreRequest := &s3.RestoreRequest{
Days: aws.Int64(payload.Days),
}
if tier := NormalizeRestoreTier(payload.Tier); tier != "" {
restoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{Tier: aws.String(tier)}
}
input := &s3.RestoreObjectInput{
Bucket: &d.Bucket,
Key: &key,
RestoreRequest: restoreRequest,
}
restoreReq, _ := d.client.RestoreObjectRequest(input)
restoreReq.SetContext(ctx)
if err := restoreReq.Send(); err != nil {
return nil, err
}
status, _ := d.describeObjectStatus(ctx, key)
resp := ThawResponse{
Action: "thaw",
Object: d.describeObject(args.Obj, key),
RequestID: restoreReq.RequestID,
}
if status != nil {
resp.Status = status.Restore
}
return resp, nil
}
func (d *S3) thawStatus(ctx context.Context, args model.OtherArgs) (interface{}, error) {
key := getKey(args.Obj.GetPath(), false)
status, err := d.describeObjectStatus(ctx, key)
if err != nil {
return nil, err
}
return ThawResponse{
Action: "thaw_status",
Object: d.describeObject(args.Obj, key),
Status: status.Restore,
}, nil
}
func (d *S3) describeObject(obj model.Obj, key string) ObjectDescriptor {
return ObjectDescriptor{
Path: obj.GetPath(),
Bucket: d.Bucket,
Key: key,
}
}
type objectStatus struct {
StorageClass string
Restore *RestoreStatus
}
func (d *S3) describeObjectStatus(ctx context.Context, key string) (*objectStatus, error) {
head, err := d.client.HeadObjectWithContext(ctx, &s3.HeadObjectInput{Bucket: &d.Bucket, Key: &key})
if err != nil {
return nil, err
}
status := &objectStatus{
StorageClass: aws.StringValue(head.StorageClass),
Restore: parseRestoreHeader(head.Restore),
}
return status, nil
}
func parseRestoreHeader(header *string) *RestoreStatus {
if header == nil {
return nil
}
value := strings.TrimSpace(*header)
if value == "" {
return nil
}
status := &RestoreStatus{Raw: value}
parts := strings.Split(value, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "" {
continue
}
if strings.HasPrefix(part, "ongoing-request=") {
status.Ongoing = strings.Contains(part, "\"true\"")
}
if strings.HasPrefix(part, "expiry-date=") {
expiry := strings.Trim(part[len("expiry-date="):], "\"")
if expiry != "" {
if t, err := time.Parse(time.RFC1123, expiry); err == nil {
status.Expiry = t.UTC().Format(time.RFC3339)
} else {
status.Expiry = expiry
}
}
}
}
return status
}
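The input is S3's documented x-amz-restore header. An example value and the parsed result:
st := parseRestoreHeader(aws.String(`ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"`))
// st.Ongoing == false
// st.Expiry == "2012-12-21T00:00:00Z" (RFC1123 normalized to RFC3339)
// st.Raw preserves the original header value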
func DecodeOtherArgs(data interface{}, target interface{}) error {
if data == nil {
return nil
}
raw, err := json.Marshal(data)
if err != nil {
return err
}
return json.Unmarshal(raw, target)
}
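The marshal/unmarshal round trip converts the loosely typed args.Data (typically a map decoded from JSON) into a struct, and fields absent from the input keep their pre-set defaults, which is exactly what thaw relies on for Days:
payload := ThawRequest{Days: 1} // default before decoding
err := DecodeOtherArgs(map[string]interface{}{"tier": "expedited"}, &payload)
// err == nil, payload.Days == 1 (kept), payload.Tier == "expedited"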
func NormalizeStorageClass(value string) string {
normalized := strings.ToLower(strings.TrimSpace(strings.ReplaceAll(value, "-", "_")))
if normalized == "" {
return value
}
if v, ok := storageClassLookup[normalized]; ok {
return v
}
return value
}
func NormalizeRestoreTier(value string) string {
normalized := strings.ToLower(strings.TrimSpace(value))
switch normalized {
case "", "default":
return ""
case "bulk":
return s3.TierBulk
case "standard":
return s3.TierStandard
case "expedited":
return s3.TierExpedited
default:
return value
}
}

View File

@@ -109,13 +109,13 @@ func (d *S3) listV1(prefix string, args model.ListArgs) ([]model.Obj, error) {
if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
continue
}
file := &model.Object{
file := model.Object{
//Id: *object.Key,
Name: name,
Size: *object.Size,
Modified: *object.LastModified,
}
files = append(files, model.WrapObjStorageClass(file, aws.StringValue(object.StorageClass)))
files = append(files, &file)
}
if listObjectsResult.IsTruncated == nil {
return nil, errors.New("IsTruncated nil")
@@ -164,13 +164,13 @@ func (d *S3) listV2(prefix string, args model.ListArgs) ([]model.Obj, error) {
if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
continue
}
file := &model.Object{
file := model.Object{
//Id: *object.Key,
Name: name,
Size: *object.Size,
Modified: *object.LastModified,
}
files = append(files, model.WrapObjStorageClass(file, aws.StringValue(object.StorageClass)))
files = append(files, &file)
}
if !aws.BoolValue(listObjectsResult.IsTruncated) {
break
@@ -202,9 +202,6 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error {
CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)),
Key: &dstKey,
}
if storageClass := d.resolveStorageClass(); storageClass != nil {
input.StorageClass = storageClass
}
_, err := d.client.CopyObject(input)
return err
}

29
go.mod
View File

@@ -8,8 +8,7 @@ require (
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
github.com/KirCute/sftpd-alist v0.0.12
github.com/ProtonMail/go-crypto v1.0.0
github.com/ProtonMail/gopenpgp/v2 v2.7.4
github.com/SheltonZhu/115driver v1.1.2
github.com/SheltonZhu/115driver v1.0.34
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21
github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
github.com/alist-org/gofakes3 v0.0.7
@@ -39,8 +38,6 @@ require (
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/hekmon/transmissionrpc/v3 v3.0.0
github.com/henrybear327/Proton-API-Bridge v1.0.0
github.com/henrybear327/go-proton-api v1.0.0
github.com/hirochachacha/go-smb2 v1.1.0
github.com/ipfs/go-ipfs-api v0.7.0
github.com/jlaffaye/ftp v0.2.0
@@ -78,6 +75,7 @@ require (
golang.org/x/time v0.8.0
google.golang.org/appengine v1.6.8
gopkg.in/ldap.v3 v3.1.0
gorm.io/datatypes v1.2.5
gorm.io/driver/mysql v1.5.7
gorm.io/driver/postgres v1.5.9
gorm.io/driver/sqlite v1.5.6
@@ -85,19 +83,8 @@ require (
)
require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
github.com/ProtonMail/go-srp v0.0.7 // indirect
github.com/PuerkitoBio/goquery v1.8.1 // indirect
github.com/andybalholm/cascadia v1.3.2 // indirect
github.com/bradenaw/juniper v0.15.2 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/emersion/go-message v0.18.0 // indirect
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
github.com/relvacode/iso8601 v1.3.0 // indirect
)
require (
@@ -181,7 +168,7 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-sql-driver/mysql v1.8.1 // indirect
github.com/go-webauthn/x v0.1.12 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
@@ -195,7 +182,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/go-cid v0.4.1
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
github.com/jackc/pgx/v5 v5.5.5 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
@@ -282,8 +269,4 @@ require (
lukechampine.com/blake3 v1.1.7 // indirect
)
replace github.com/ProtonMail/go-proton-api => github.com/henrybear327/go-proton-api v1.0.0
replace github.com/cronokirby/saferith => github.com/Da3zKi7/saferith v0.33.0-fixed
replace github.com/SheltonZhu/115driver => github.com/okatu-loli/115driver v1.1.2
// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go

68
go.sum
View File

@@ -19,6 +19,8 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
@@ -34,37 +36,20 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po=
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E=
github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnOuISdg=
github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk=
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
github.com/ProtonMail/gopenpgp/v2 v2.7.4 h1:Vz/8+HViFFnf2A6XX8JOvZMrA6F5puwNvvF21O1mRlo=
github.com/ProtonMail/gopenpgp/v2 v2.7.4/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM=
github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ=
github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
github.com/SheltonZhu/115driver v1.0.34 h1:zhMLp4vgq7GksqvSxQQDOVfK6EOHldQl4b2n8tnZ+EE=
github.com/SheltonZhu/115driver v1.0.34/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A=
github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY=
@@ -86,9 +71,6 @@ github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAP
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -154,9 +136,6 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradenaw/juniper v0.15.2 h1:0JdjBGEF2jP1pOxmlNIrPhAoQN7Ng5IMAY5D0PHMW4U=
github.com/bradenaw/juniper v0.15.2/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
@@ -187,7 +166,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
@@ -223,12 +201,6 @@ github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj6
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
github.com/emersion/go-message v0.18.0 h1:7LxAXHRpSeoO/Wom3ZApVZYG7c3d17yCScYce8WiXA8=
github.com/emersion/go-message v0.18.0/go.mod h1:Zi69ACvzaoV/MBnrxfVBPV3xWEuCmC2nEN39oJF4B8A=
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 h1:IbFBtwoTQyw0fIM5xv1HF+Y+3ZijDR839WMulgxCcUY=
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594/go.mod h1:aqO8z8wPrjkscevZJFVE1wXJrLpC5LtJG7fqLOsPb2U=
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 h1:ATgqloALX6cHCranzkLb8/zjivwQ9DWWDCQRnxTPfaA=
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
@@ -280,8 +252,9 @@ github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBEx
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-resty/resty/v2 v2.14.0 h1:/rhkzsAqGQkozwfKS5aFAbb6TyKd3zyFRWcdRXLPCAU=
github.com/go-resty/resty/v2 v2.14.0/go.mod h1:IW6mekUOsElt9C7oWr0XRt9BNSD6D5rr9mhk6NjmNHg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-webauthn/webauthn v0.11.1 h1:5G/+dg91/VcaJHTtJUfwIlNJkLwbJCcnUc4W8VtkpzA=
github.com/go-webauthn/webauthn v0.11.1/go.mod h1:YXRm1WG0OtUyDFaVAgB5KG7kVqW+6dYCJ7FTQH4SxEE=
github.com/go-webauthn/x v0.1.12 h1:RjQ5cvApzyU/xLCiP+rub0PE4HBZsLggbxGR5ZpUf/A=
@@ -294,6 +267,10 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -369,10 +346,6 @@ github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -386,8 +359,8 @@ github.com/ipfs/go-ipfs-api v0.7.0 h1:CMBNCUl0b45coC+lQCXEVpMhwoqjiaCwUIrM+coYW2
github.com/ipfs/go-ipfs-api v0.7.0/go.mod h1:AIxsTNB0+ZhkqIfTZpdZ0VR/cpX5zrXjATa3prSay3g=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
@@ -477,6 +450,8 @@ github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q=
github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I=
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc=
@@ -524,8 +499,6 @@ github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/okatu-loli/115driver v1.1.2 h1:XZT3r/51SZRQGzre2IeA+0/k4T1FneqArdhE4Wd600Q=
github.com/okatu-loli/115driver v1.1.2/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
@@ -565,8 +538,6 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rclone/rclone v1.67.0 h1:yLRNgHEG2vQ60HCuzFqd0hYwKCRuWuvPUhvhMJ2jI5E=
github.com/rclone/rclone v1.67.0/go.mod h1:Cb3Ar47M/SvwfhAjZTbVXdtrP/JLtPFCq2tkdtBVC6w=
github.com/relvacode/iso8601 v1.3.0 h1:HguUjsGpIMh/zsTczGN3DVJFxTU/GX+MMmzcKoMO7ko=
github.com/relvacode/iso8601 v1.3.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -696,8 +667,6 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
@@ -775,7 +744,6 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -784,7 +752,6 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
@@ -833,7 +800,6 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -846,7 +812,6 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -863,7 +828,6 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
@@ -998,12 +962,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/datatypes v1.2.5 h1:9UogU3jkydFVW1bIVVeoYsTpLRgwDVW3rHfJG6/Ek9I=
gorm.io/datatypes v1.2.5/go.mod h1:I5FUdlKpLb5PMqeMQhm30CQ6jXP8Rj89xkTeCSAaAD4=
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/driver/sqlite v1.5.6 h1:fO/X46qn5NUEEOZtnjJRWRzZMe8nqJiQ9E+0hi+hKQE=
gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDah4=
gorm.io/driver/sqlserver v1.5.4 h1:xA+Y1KDNspv79q43bPyjDMUgHoYHLhXYmdFcYPobg8g=
gorm.io/driver/sqlserver v1.5.4/go.mod h1:+frZ/qYmuna11zHPlh5oc2O6ZA/lS88Keb0XSH1Zh/g=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg=
gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=

View File

@@ -3,10 +3,11 @@ package data
import "github.com/alist-org/alist/v3/cmd/flags"
func InitData() {
initRoles()
initUser()
initSettings()
initTasks()
initPermissions()
initRoles()
if flags.Dev {
initDevData()
initDevDo()

View File

@@ -26,7 +26,8 @@ func initDevData() {
Username: "Noah",
Password: "hsu",
BasePath: "/data",
Role: nil,
//Role: []int{0},
RoleInfo: []uint{0},
Permission: 512,
})
if err != nil {

View File

@@ -0,0 +1,50 @@
package data
import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
"gorm.io/gorm"
"time"
)
func initPermissions() {
_, err := op.GetPermissionByName("guest")
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
pg := &model.Permission{
Name: "guest",
Permission: 0x4000, // bit 14: can access dir
PathPattern: "*",
AllowOpInfo: []string{"upload", "download", "delete"},
CreateTime: time.Now(),
UpdateTime: time.Now(),
}
if err := op.CreatePermission(pg); err != nil {
panic(err)
} else {
utils.Log.Infof("Successfully created the guest permission")
}
}
}
_, err = op.GetPermissionByName("admin")
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
pa := &model.Permission{
Name: "admin",
Permission: 0x70FF, // bits 0-7 and 12-14
PathPattern: "",
AllowOpInfo: []string{"upload", "download", "delete"},
CreateTime: time.Now(),
UpdateTime: time.Now(),
}
if err := op.CreatePermission(pa); err != nil {
panic(err)
} else {
utils.Log.Infof("Successfully created the admin permission")
}
}
}
}
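For reference, the hex constants above are bitmasks over individual permission bits; a sketch of the arithmetic:
const (
guestMask = 1 << 14 // 0x4000: bit 14 only (can access dir)
adminMask = 0x00FF | 1<<12 | 1<<13 | 1<<14 // 0x70FF: bits 0-7 plus 12-14
)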

View File

@@ -1,52 +1,47 @@
package data
// initRoles creates the default admin and guest roles if missing.
// These roles are essential and must not be modified or removed.
import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
"gorm.io/gorm"
"time"
)
func initRoles() {
guestRole, err := op.GetRoleByName("guest")
_, err := op.GetRoleByName("guest")
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
guestRole = &model.Role{
ID: uint(model.GUEST),
roleGuest := &model.Role{
Name: "guest",
Description: "Guest",
PermissionScopes: []model.PermissionEntry{
{Path: "/", Permission: 0},
},
}
if err := op.CreateRole(guestRole); err != nil {
utils.Log.Fatalf("[init role] Failed to create guest role: %v", err)
PermissionInfo: []uint{1},
CreateTime: time.Time{},
UpdateTime: time.Time{},
}
if err := op.CreateRole(roleGuest); err != nil {
panic(err)
} else {
utils.Log.Fatalf("[init role] Failed to get guest role: %v", err)
utils.Log.Infof("Successfully created the guest role")
}
}
}
_, err = op.GetRoleByName("admin")
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
adminRole := &model.Role{
ID: uint(model.ADMIN),
roleAdmin := &model.Role{
Name: "admin",
Description: "Administrator",
PermissionScopes: []model.PermissionEntry{
{Path: "/", Permission: 0xFFFF},
},
}
if err := op.CreateRole(adminRole); err != nil {
utils.Log.Fatalf("[init role] Failed to create admin role: %v", err)
PermissionInfo: []uint{2},
CreateTime: time.Time{},
UpdateTime: time.Time{},
}
if err := op.CreateRole(roleAdmin); err != nil {
panic(err)
} else {
utils.Log.Fatalf("[init role] Failed to get admin role: %v", err)
utils.Log.Infof("Successfully created the admin role")
}
}
}
}

View File

@@ -91,7 +91,6 @@ func InitialSettings() []model.SettingItem {
} else {
token = random.Token()
}
defaultRoleID := strconv.Itoa(model.GUEST)
initialSettingItems = []model.SettingItem{
// site settings
{Key: conf.VERSION, Value: conf.Version, Type: conf.TypeString, Group: model.SITE, Flag: model.READONLY},
@@ -104,10 +103,6 @@ func InitialSettings() []model.SettingItem {
{Key: conf.AllowIndexed, Value: "false", Type: conf.TypeBool, Group: model.SITE},
{Key: conf.AllowMounted, Value: "true", Type: conf.TypeBool, Group: model.SITE},
{Key: conf.RobotsTxt, Value: "User-agent: *\nAllow: /", Type: conf.TypeText, Group: model.SITE},
{Key: conf.AllowRegister, Value: "false", Type: conf.TypeBool, Group: model.SITE},
{Key: conf.DefaultRole, Value: defaultRoleID, Type: conf.TypeSelect, Group: model.SITE},
// newui settings
{Key: conf.UseNewui, Value: "false", Type: conf.TypeBool, Group: model.SITE},
// style settings
{Key: conf.Logo, Value: "https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg", Type: conf.TypeText, Group: model.STYLE},
{Key: conf.Favicon, Value: "https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg", Type: conf.TypeString, Group: model.STYLE},
@@ -160,14 +155,11 @@ func InitialSettings() []model.SettingItem {
([[:xdigit:]]{1,4}(?::[[:xdigit:]]{1,4}){7}|::|:(?::[[:xdigit:]]{1,4}){1,6}|[[:xdigit:]]{1,4}:(?::[[:xdigit:]]{1,4}){1,5}|(?:[[:xdigit:]]{1,4}:){2}(?::[[:xdigit:]]{1,4}){1,4}|(?:[[:xdigit:]]{1,4}:){3}(?::[[:xdigit:]]{1,4}){1,3}|(?:[[:xdigit:]]{1,4}:){4}(?::[[:xdigit:]]{1,4}){1,2}|(?:[[:xdigit:]]{1,4}:){5}:[[:xdigit:]]{1,4}|(?:[[:xdigit:]]{1,4}:){1,6}:)
(?U)access_token=(.*)&`,
Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PRIVATE},
{Key: conf.OcrApi, Value: "https://api.alistgo.com/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL},
{Key: conf.OcrApi, Value: "https://api.nn.ci/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL},
{Key: conf.FilenameCharMapping, Value: `{"/": "|"}`, Type: conf.TypeText, Group: model.GLOBAL},
{Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL},
{Key: conf.IgnoreDirectLinkParams, Value: "sign,alist_ts", Type: conf.TypeString, Group: model.GLOBAL},
{Key: conf.WebauthnLoginEnabled, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
{Key: conf.MaxDevices, Value: "0", Type: conf.TypeNumber, Group: model.GLOBAL},
{Key: conf.DeviceEvictPolicy, Value: "deny", Type: conf.TypeSelect, Options: "deny,evict_oldest", Group: model.GLOBAL},
{Key: conf.DeviceSessionTTL, Value: "86400", Type: conf.TypeNumber, Group: model.GLOBAL},
// single settings
{Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE},

View File

@@ -1,10 +1,10 @@
package data
import (
"github.com/alist-org/alist/v3/internal/db"
"os"
"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
@@ -14,16 +14,47 @@ import (
)
func initUser() {
admin, err := op.GetAdmin()
adminPassword := random.String(8)
envpass := os.Getenv("ALIST_ADMIN_PASSWORD")
if flags.Dev {
adminPassword = "admin"
} else if len(envpass) > 0 {
adminPassword = envpass
}
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
salt := random.String(16)
admin = &model.User{
Username: "admin",
Salt: salt,
PwdHash: model.TwoHashPwd(adminPassword, salt),
//Role: []int{model.ADMIN},
RoleInfo: []uint{model.ADMIN},
BasePath: "/",
Authn: "[]",
// 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives)
Permission: 0x30FF,
}
if err := op.CreateUser(admin); err != nil {
panic(err)
} else {
utils.Log.Infof("Successfully created the admin user and the initial password is: %s", adminPassword)
}
} else {
utils.Log.Fatalf("[init user] Failed to get admin user: %v", err)
}
}
guest, err := op.GetGuest()
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
salt := random.String(16)
guestRole, _ := op.GetRoleByName("guest")
guest = &model.User{
Username: "guest",
PwdHash: model.TwoHashPwd("guest", salt),
Salt: salt,
Role: model.Roles{int(guestRole.ID)},
//Role: []int{model.GUEST},
RoleInfo: []uint{model.GUEST},
BasePath: "/",
Permission: 0,
Disabled: true,
@@ -36,35 +67,4 @@ func initUser() {
utils.Log.Fatalf("[init user] Failed to get guest user: %v", err)
}
}
admin, err := op.GetAdmin()
adminPassword := random.String(8)
envpass := os.Getenv("ALIST_ADMIN_PASSWORD")
if flags.Dev {
adminPassword = "admin"
} else if len(envpass) > 0 {
adminPassword = envpass
}
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
salt := random.String(16)
adminRole, _ := op.GetRoleByName("admin")
admin = &model.User{
Username: "admin",
Salt: salt,
PwdHash: model.TwoHashPwd(adminPassword, salt),
Role: model.Roles{int(adminRole.ID)},
BasePath: "/",
Authn: "[]",
// full permission mask: all permission bits set
Permission: 0xFFFF,
}
if err := op.CreateUser(admin); err != nil {
panic(err)
} else {
utils.Log.Infof("Successfully created the admin user and the initial password is: %s", adminPassword)
}
} else {
utils.Log.Fatalf("[init user] Failed to get admin user: %v", err)
}
}
}

View File

@@ -14,15 +14,11 @@ import (
func init() {
formatter := logrus.TextFormatter{
ForceColors: true,
EnvironmentOverrideColors: true,
TimestampFormat: "2006-01-02 15:04:05",
FullTimestamp: true,
}
if os.Getenv("NO_COLOR") != "" || os.Getenv("ALIST_NO_COLOR") == "1" {
formatter.DisableColors = true
} else {
formatter.ForceColors = true
formatter.EnvironmentOverrideColors = true
}
logrus.SetFormatter(&formatter)
utils.Log.SetFormatter(&formatter)
// logrus.SetLevel(logrus.DebugLevel)

View File

@@ -4,7 +4,6 @@ import (
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_24_0"
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_32_0"
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_41_0"
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_46_0"
)
type VersionPatches struct {
@@ -33,10 +32,4 @@ var UpgradePatches = []VersionPatches{
v3_41_0.GrantAdminPermissions,
},
},
{
Version: "v3.46.0",
Patches: []func(){
v3_46_0.ConvertLegacyRoles,
},
},
}

View File

@@ -1,186 +0,0 @@
package v3_46_0
import (
"database/sql"
"encoding/json"
"errors"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"gorm.io/gorm"
)
// ConvertLegacyRoles migrates old integer role values to a new role model with permission scopes.
func ConvertLegacyRoles() {
guestRole, err := op.GetRoleByName("guest")
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
guestRole = &model.Role{
ID: uint(model.GUEST),
Name: "guest",
Description: "Guest",
PermissionScopes: []model.PermissionEntry{
{
Path: "/",
Permission: 0,
},
},
}
if err = op.CreateRole(guestRole); err != nil {
utils.Log.Errorf("[convert roles] failed to create guest role: %v", err)
return
}
} else {
utils.Log.Errorf("[convert roles] failed to get guest role: %v", err)
return
}
}
adminRole, err := op.GetRoleByName("admin")
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
adminRole = &model.Role{
ID: uint(model.ADMIN),
Name: "admin",
Description: "Administrator",
PermissionScopes: []model.PermissionEntry{
{
Path: "/",
Permission: 0x33FF,
},
},
}
if err = op.CreateRole(adminRole); err != nil {
utils.Log.Errorf("[convert roles] failed to create admin role: %v", err)
return
}
} else {
utils.Log.Errorf("[convert roles] failed to get admin role: %v", err)
return
}
}
generalRole, err := op.GetRoleByName("general")
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
generalRole = &model.Role{
ID: uint(model.NEWGENERAL),
Name: "general",
Description: "General User",
PermissionScopes: []model.PermissionEntry{
{
Path: "/",
Permission: 0,
},
},
}
if err = op.CreateRole(generalRole); err != nil {
utils.Log.Errorf("[convert roles] failed create general role: %v", err)
return
}
} else {
utils.Log.Errorf("[convert roles] failed get general role: %v", err)
return
}
}
rawDb := db.GetDb()
table := conf.Conf.Database.TablePrefix + "users"
rows, err := rawDb.Table(table).Select("id, username, role").Rows()
if err != nil {
utils.Log.Errorf("[convert roles] failed to get users: %v", err)
return
}
defer rows.Close()
var updatedCount int
for rows.Next() {
var id uint
var username string
var rawRole []byte
if err := rows.Scan(&id, &username, &rawRole); err != nil {
utils.Log.Warnf("[convert roles] skipping user: scan failed: %v", err)
continue
}
utils.Log.Debugf("[convert roles] user: %s raw role: %s", username, string(rawRole))
if len(rawRole) == 0 {
continue
}
var oldRoles []int
wasSingleInt := false
if err := json.Unmarshal(rawRole, &oldRoles); err != nil {
var single int
if err := json.Unmarshal(rawRole, &single); err != nil {
utils.Log.Warnf("[convert roles] user %s has invalid role: %s", username, string(rawRole))
continue
}
oldRoles = []int{single}
wasSingleInt = true
}
var newRoles model.Roles
for _, r := range oldRoles {
switch r {
case model.ADMIN:
newRoles = append(newRoles, int(adminRole.ID))
case model.GUEST:
newRoles = append(newRoles, int(guestRole.ID))
case model.GENERAL:
newRoles = append(newRoles, int(generalRole.ID))
default:
newRoles = append(newRoles, r)
}
}
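// Note: the update below runs only when the stored value was a bare int;
// rows that already contain a JSON array are left untouched even though
// newRoles was recomputed above.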
if wasSingleInt {
err := rawDb.Table(table).Where("id = ?", id).Update("role", newRoles).Error
if err != nil {
utils.Log.Errorf("[convert roles] failed to update user %s: %v", username, err)
} else {
updatedCount++
utils.Log.Infof("[convert roles] updated user %s: %v → %v", username, oldRoles, newRoles)
}
}
}
utils.Log.Infof("[convert roles] completed role conversion for %d users", updatedCount)
}
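ConvertLegacyRoles is wired up as a v3.46.0 patch in the hunk above; where IsLegacyRoleDetected (below) gets called is not shown in this diff. Presumably a caller gates a forced migration on it, roughly:

// Hypothetical call site; the real one is outside this diff.
if v3_46_0.IsLegacyRoleDetected() {
	v3_46_0.ConvertLegacyRoles()
}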
func IsLegacyRoleDetected() bool {
rawDb := db.GetDb()
table := conf.Conf.Database.TablePrefix + "users"
rows, err := rawDb.Table(table).Select("role").Rows()
if err != nil {
utils.Log.Errorf("[role check] failed to scan user roles: %v", err)
return false
}
defer rows.Close()
for rows.Next() {
var raw sql.RawBytes
if err := rows.Scan(&raw); err != nil {
continue
}
if len(raw) == 0 {
continue
}
var roles []int
if err := json.Unmarshal(raw, &roles); err == nil {
continue
}
var single int
if err := json.Unmarshal(raw, &single); err == nil {
utils.Log.Infof("[role check] detected legacy int role: %d", single)
return true
}
}
return false
}
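Both functions rely on the same discrimination trick: the legacy encoding is a bare JSON integer (2), the new one an array ([2]), and json.Unmarshal into the wrong Go type fails cleanly. A self-contained sketch:

package main

import (
	"encoding/json"
	"fmt"
)

// classify reports whether raw is a new-style array, a legacy bare int, or invalid.
func classify(raw []byte) string {
	var roles []int
	if json.Unmarshal(raw, &roles) == nil {
		return "array (new format)"
	}
	var single int
	if json.Unmarshal(raw, &single) == nil {
		return "bare int (legacy)"
	}
	return "invalid"
}

func main() {
	fmt.Println(classify([]byte("[2]"))) // array (new format)
	fmt.Println(classify([]byte("2")))   // bare int (legacy)
	fmt.Println(classify([]byte("x")))   // invalid
}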

View File

@@ -37,18 +37,6 @@ func InitTaskManager() {
if len(tool.TransferTaskManager.GetAll()) == 0 { // prevent offline-downloaded files from being deleted
CleanTempDir()
}
workers := conf.Conf.Tasks.S3Transition.Workers
if workers < 0 {
workers = 0
}
fs.S3TransitionTaskManager = tache.NewManager[*fs.S3TransitionTask](
tache.WithWorks(workers),
tache.WithPersistFunction(
db.GetTaskDataFunc("s3_transition", conf.Conf.Tasks.S3Transition.TaskPersistant),
db.UpdateTaskDataFunc("s3_transition", conf.Conf.Tasks.S3Transition.TaskPersistant),
),
tache.WithMaxRetry(conf.Conf.Tasks.S3Transition.MaxRetry),
)
fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](
	tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)),
	tache.WithPersistFunction(
		db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant),
		db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant),
	),
	tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry),
)
op.RegisterSettingChangingCallback(func() {
fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)))
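Both the removed S3Transition block and the surviving callback clamp worker counts so a negative setting never reaches tache. taskFilterNegative is referenced but not defined in this diff; presumably it is the same clamp, roughly:

// Presumed shape of taskFilterNegative; the real definition is outside this diff.
func taskFilterNegative(n int) int {
	if n < 0 {
		return 0
	}
	return n
}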

View File

@@ -60,7 +60,6 @@ type TasksConfig struct {
Copy TaskConfig `json:"copy" envPrefix:"COPY_"`
Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"`
S3Transition TaskConfig `json:"s3_transition" envPrefix:"S3_TRANSITION_"`
AllowRetryCanceled bool `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"`
}
@@ -185,11 +184,6 @@ func DefaultConfig() *Config {
Workers: 5,
MaxRetry: 2,
},
S3Transition: TaskConfig{
Workers: 5,
MaxRetry: 2,
// TaskPersistant: true,
},
AllowRetryCanceled: false,
},
Cors: Cors{
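The env/envPrefix struct tags suggest the caarlos0/env library, under which the removed S3Transition entry would have been settable through S3_TRANSITION_-prefixed variables (nested under whatever prefix TasksConfig itself carries). A minimal sketch of the mechanism, assuming env/v9 and illustrative field names:

package main

import (
	"fmt"

	"github.com/caarlos0/env/v9"
)

// Illustrative subset of the tasks config; tag names mirror the diff.
type TaskConfig struct {
	Workers  int `json:"workers" env:"WORKERS"`
	MaxRetry int `json:"max_retry" env:"MAX_RETRY"`
}

type TasksConfig struct {
	Copy       TaskConfig `json:"copy" envPrefix:"COPY_"`
	Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
}

func main() {
	// e.g. COPY_WORKERS=8 COPY_MAX_RETRY=3 ./app
	var c TasksConfig
	if err := env.Parse(&c); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%+v\n", c)
}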

Some files were not shown because too many files have changed in this diff.