mirror of
https://github.com/AlistGo/alist.git
synced 2025-11-25 03:15:10 +08:00
Compare commits
60 Commits
permission
...
b4d9beb49c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b4d9beb49c | ||
|
|
4c8401855c | ||
|
|
e2016dd031 | ||
|
|
a6bd90a9b2 | ||
|
|
35d322443b | ||
|
|
81a7f28ba2 | ||
|
|
fe564c42da | ||
|
|
d17889bf8e | ||
|
|
4f8bc478d5 | ||
|
|
e1800f18e4 | ||
|
|
16cce37947 | ||
|
|
6e7c7d1dd0 | ||
|
|
28a8428559 | ||
|
|
d0026030cb | ||
|
|
fcbc79cb24 | ||
|
|
930f9f6096 | ||
|
|
23107483a1 | ||
|
|
4b288a08ef | ||
|
|
63391a2091 | ||
|
|
a11e4cfb31 | ||
|
|
9a7c82a71e | ||
|
|
8623da5361 | ||
|
|
84adba3acc | ||
|
|
3bf0af1e68 | ||
|
|
de09ba08b6 | ||
|
|
c64f899a63 | ||
|
|
3319f6ea6a | ||
|
|
d7723c378f | ||
|
|
a9fcd51bc4 | ||
|
|
74e384175b | ||
|
|
eca500861a | ||
|
|
97d4f79b96 | ||
|
|
fcfb3369d1 | ||
|
|
aea3ba1499 | ||
|
|
6b2d81eede | ||
|
|
85fe4e5bb3 | ||
|
|
52da07e8a7 | ||
|
|
46de9e9ebb | ||
|
|
ae90fb579b | ||
|
|
394a18cbd9 | ||
|
|
280960ce3e | ||
|
|
74332e91fb | ||
|
|
540d6c7064 | ||
|
|
55b2bb6b80 | ||
|
|
d5df6fa4cf | ||
|
|
3353055482 | ||
|
|
4d7c2a09ce | ||
|
|
5b8c26510b | ||
|
|
91cc7529a0 | ||
|
|
f61d13d433 | ||
|
|
00120cba27 | ||
|
|
5e15a360b7 | ||
|
|
2bdc5bef9e | ||
|
|
13ea1c1405 | ||
|
|
fd41186679 | ||
|
|
9da56bab4d | ||
|
|
51eeb22465 | ||
|
|
b1586612ca | ||
|
|
7aeb0ab078 | ||
|
|
ffa03bfda1 |
2
.github/FUNDING.yml
vendored
2
.github/FUNDING.yml
vendored
@@ -10,4 +10,4 @@ liberapay: # Replace with a single Liberapay username
|
||||
issuehunt: # Replace with a single IssueHunt username
|
||||
otechie: # Replace with a single Otechie username
|
||||
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
|
||||
custom: ['https://alist.nn.ci/guide/sponsor.html']
|
||||
custom: ['https://alistgo.com/guide/sponsor.html']
|
||||
|
||||
8
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
8
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -16,14 +16,14 @@ body:
|
||||
您必须勾选以下所有内容,否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
|
||||
options:
|
||||
- label: |
|
||||
I have read the [documentation](https://alist.nn.ci).
|
||||
我已经阅读了[文档](https://alist.nn.ci)。
|
||||
I have read the [documentation](https://alistgo.com).
|
||||
我已经阅读了[文档](https://alistgo.com)。
|
||||
- label: |
|
||||
I'm sure there are no duplicate issues or discussions.
|
||||
我确定没有重复的issue或讨论。
|
||||
- label: |
|
||||
I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
|
||||
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
|
||||
I'm sure it's due to `AList` and not something else(such as [Network](https://alistgo.com/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
|
||||
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alistgo.com/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
|
||||
- label: |
|
||||
I'm sure this issue is not fixed in the latest version.
|
||||
我确定这个问题在最新版本中没有被修复。
|
||||
|
||||
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
2
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
@@ -7,7 +7,7 @@ body:
|
||||
label: Please make sure of the following things
|
||||
description: You may select more than one, even select all.
|
||||
options:
|
||||
- label: I have read the [documentation](https://alist.nn.ci).
|
||||
- label: I have read the [documentation](https://alistgo.com).
|
||||
- label: I'm sure there are no duplicate issues or discussions.
|
||||
- label: I'm sure this feature is not implemented.
|
||||
- label: I'm sure it's a reasonable and popular requirement.
|
||||
|
||||
4
.github/workflows/beta_release.yml
vendored
4
.github/workflows/beta_release.yml
vendored
@@ -119,7 +119,7 @@ jobs:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: alist-org/desktop-release
|
||||
repository: AlistGo/desktop-release
|
||||
ref: main
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
@@ -135,4 +135,4 @@ jobs:
|
||||
with:
|
||||
github_token: ${{ secrets.MY_TOKEN }}
|
||||
branch: main
|
||||
repository: alist-org/desktop-release
|
||||
repository: AlistGo/desktop-release
|
||||
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@@ -25,6 +25,8 @@ jobs:
|
||||
- android-arm64
|
||||
name: Build
|
||||
runs-on: ${{ matrix.platform }}
|
||||
env:
|
||||
GOPROXY: https://proxy.golang.org,direct
|
||||
steps:
|
||||
|
||||
- name: Checkout
|
||||
|
||||
4
.github/workflows/release.yml
vendored
4
.github/workflows/release.yml
vendored
@@ -72,7 +72,7 @@ jobs:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: alist-org/desktop-release
|
||||
repository: AlistGo/desktop-release
|
||||
ref: main
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
@@ -89,4 +89,4 @@ jobs:
|
||||
with:
|
||||
github_token: ${{ secrets.MY_TOKEN }}
|
||||
branch: main
|
||||
repository: alist-org/desktop-release
|
||||
repository: AlistGo/desktop-release
|
||||
13
.github/workflows/release_docker.yml
vendored
13
.github/workflows/release_docker.yml
vendored
@@ -18,6 +18,7 @@ env:
|
||||
REGISTRY: 'xhofe/alist'
|
||||
REGISTRY_USERNAME: 'xhofe'
|
||||
REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
GITHUB_CR_REPO: ghcr.io/${{ github.repository }}
|
||||
ARTIFACT_NAME: 'binaries_docker_release'
|
||||
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
|
||||
IMAGE_PUSH: ${{ github.event_name == 'push' }}
|
||||
@@ -114,11 +115,21 @@ jobs:
|
||||
username: ${{ env.REGISTRY_USERNAME }}
|
||||
password: ${{ env.REGISTRY_PASSWORD }}
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
logout: true
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}
|
||||
images: |
|
||||
${{ env.REGISTRY }}
|
||||
${{ env.GITHUB_CR_REPO }}
|
||||
tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }}
|
||||
flavor: |
|
||||
${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM alpine:edge
|
||||
FROM alpine:3.20.7
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG INSTALL_FFMPEG=false
|
||||
@@ -31,4 +31,4 @@ RUN /entrypoint.sh version
|
||||
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
||||
VOLUME /opt/alist/data/
|
||||
EXPOSE 5244 5245
|
||||
CMD [ "/entrypoint.sh" ]
|
||||
CMD [ "/entrypoint.sh" ]
|
||||
|
||||
16
README.md
16
README.md
@@ -1,5 +1,5 @@
|
||||
<div align="center">
|
||||
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||
<p><em>🗂️A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
|
||||
<div>
|
||||
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
||||
@@ -31,7 +31,7 @@
|
||||
<a href="https://hub.docker.com/r/xhofe/alist">
|
||||
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
|
||||
</a>
|
||||
<a href="https://alist.nn.ci/guide/sponsor.html">
|
||||
<a href="https://alistgo.com/guide/sponsor.html">
|
||||
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
|
||||
</a>
|
||||
</div>
|
||||
@@ -57,7 +57,9 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
||||
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
||||
- [x] WebDav(Support OneDrive/SharePoint without API)
|
||||
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
|
||||
- [x] [MediaFire](https://www.mediafire.com)
|
||||
- [x] [Mediatrack](https://www.mediatrack.cn/)
|
||||
- [x] [ProtonDrive](https://proton.me/drive)
|
||||
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
|
||||
- [x] [YandexDisk](https://disk.yandex.com/)
|
||||
- [x] [BaiduNetdisk](http://pan.baidu.com/)
|
||||
@@ -88,7 +90,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
||||
- [x] Dark mode
|
||||
- [x] I18n
|
||||
- [x] Protected routes (password protection and authentication)
|
||||
- [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
|
||||
- [x] WebDav (see https://alistgo.com/guide/webdav.html for details)
|
||||
- [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
|
||||
- [x] Cloudflare Workers proxy
|
||||
- [x] File/Folder package download
|
||||
@@ -101,6 +103,10 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
||||
|
||||
<https://alistgo.com/>
|
||||
|
||||
## API Documentation (via Apifox):
|
||||
|
||||
<https://alist-public.apifox.cn/>
|
||||
|
||||
## Demo
|
||||
|
||||
<https://al.nn.ci>
|
||||
@@ -112,13 +118,11 @@ Please go to our [discussion forum](https://github.com/alist-org/alist/discussio
|
||||
## Sponsor
|
||||
|
||||
AList is an open-source software, if you happen to like this project and want me to keep going, please consider sponsoring me or providing a single donation! Thanks for all the love and support:
|
||||
https://alist.nn.ci/guide/sponsor.html
|
||||
https://alistgo.com/guide/sponsor.html
|
||||
|
||||
### Special sponsors
|
||||
|
||||
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
|
||||
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
|
||||
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
|
||||
|
||||
## Contributors
|
||||
|
||||
|
||||
18
README_cn.md
18
README_cn.md
@@ -1,5 +1,5 @@
|
||||
<div align="center">
|
||||
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||
<p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
|
||||
<div>
|
||||
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
||||
@@ -31,7 +31,7 @@
|
||||
<a href="https://hub.docker.com/r/xhofe/alist">
|
||||
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
|
||||
</a>
|
||||
<a href="https://alist.nn.ci/zh/guide/sponsor.html">
|
||||
<a href="https://alistgo.com/zh/guide/sponsor.html">
|
||||
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
|
||||
</a>
|
||||
</div>
|
||||
@@ -57,7 +57,9 @@
|
||||
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
|
||||
- [x] WebDav(支持无API的OneDrive/SharePoint)
|
||||
- [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
|
||||
- [x] [MediaFire](https://www.mediafire.com)
|
||||
- [x] [分秒帧](https://www.mediatrack.cn/)
|
||||
- [x] [ProtonDrive](https://proton.me/drive)
|
||||
- [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组)
|
||||
- [x] [Yandex.Disk](https://disk.yandex.com/)
|
||||
- [x] [百度网盘](http://pan.baidu.com/)
|
||||
@@ -86,7 +88,7 @@
|
||||
- [x] 黑暗模式
|
||||
- [x] 国际化
|
||||
- [x] 受保护的路由(密码保护和身份验证)
|
||||
- [x] WebDav (具体见 https://alist.nn.ci/zh/guide/webdav.html)
|
||||
- [x] WebDav (具体见 https://alistgo.com/zh/guide/webdav.html)
|
||||
- [x] [Docker 部署](https://hub.docker.com/r/xhofe/alist)
|
||||
- [x] Cloudflare workers 中转
|
||||
- [x] 文件/文件夹打包下载
|
||||
@@ -97,7 +99,11 @@
|
||||
|
||||
## 文档
|
||||
|
||||
<https://alist.nn.ci/zh/>
|
||||
<https://alistgo.com/zh/>
|
||||
|
||||
## API 文档(通过 Apifox 提供)
|
||||
|
||||
<https://alist-public.apifox.cn/>
|
||||
|
||||
## Demo
|
||||
|
||||
@@ -109,13 +115,11 @@
|
||||
|
||||
## 赞助
|
||||
|
||||
AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我继续下去,请考虑赞助我或提供一个单一的捐款!感谢所有的爱和支持:https://alist.nn.ci/zh/guide/sponsor.html
|
||||
AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我继续下去,请考虑赞助我或提供一个单一的捐款!感谢所有的爱和支持:https://alistgo.com/zh/guide/sponsor.html
|
||||
|
||||
### 特别赞助
|
||||
|
||||
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - 苹果生态下优雅的网盘视频播放器,iPhone,iPad,Mac,Apple TV全平台支持。
|
||||
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (国内API服务器赞助)
|
||||
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
|
||||
|
||||
## 贡献者
|
||||
|
||||
|
||||
18
README_ja.md
18
README_ja.md
@@ -1,5 +1,5 @@
|
||||
<div align="center">
|
||||
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||
<p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
|
||||
<div>
|
||||
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
||||
@@ -31,7 +31,7 @@
|
||||
<a href="https://hub.docker.com/r/xhofe/alist">
|
||||
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
|
||||
</a>
|
||||
<a href="https://alist.nn.ci/guide/sponsor.html">
|
||||
<a href="https://alistgo.com/guide/sponsor.html">
|
||||
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
|
||||
</a>
|
||||
</div>
|
||||
@@ -57,7 +57,9 @@
|
||||
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
||||
- [x] WebDav(Support OneDrive/SharePoint without API)
|
||||
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
|
||||
- [x] [MediaFire](https://www.mediafire.com)
|
||||
- [x] [Mediatrack](https://www.mediatrack.cn/)
|
||||
- [x] [ProtonDrive](https://proton.me/drive)
|
||||
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
|
||||
- [x] [YandexDisk](https://disk.yandex.com/)
|
||||
- [x] [BaiduNetdisk](http://pan.baidu.com/)
|
||||
@@ -87,7 +89,7 @@
|
||||
- [x] ダークモード
|
||||
- [x] 国際化
|
||||
- [x] 保護されたルート (パスワード保護と認証)
|
||||
- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
|
||||
- [x] WebDav (詳細は https://alistgo.com/guide/webdav.html を参照)
|
||||
- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
|
||||
- [x] Cloudflare ワーカープロキシ
|
||||
- [x] ファイル/フォルダパッケージのダウンロード
|
||||
@@ -98,7 +100,11 @@
|
||||
|
||||
## ドキュメント
|
||||
|
||||
<https://alist.nn.ci/>
|
||||
<https://alistgo.com/>
|
||||
|
||||
## APIドキュメント(Apifox 提供)
|
||||
|
||||
<https://alist-public.apifox.cn/>
|
||||
|
||||
## デモ
|
||||
|
||||
@@ -111,13 +117,11 @@
|
||||
## スポンサー
|
||||
|
||||
AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討ください!すべての愛とサポートに感謝します:
|
||||
https://alist.nn.ci/guide/sponsor.html
|
||||
https://alistgo.com/guide/sponsor.html
|
||||
|
||||
### スペシャルスポンサー
|
||||
|
||||
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
|
||||
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
|
||||
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
|
||||
|
||||
## コントリビューター
|
||||
|
||||
|
||||
4
build.sh
4
build.sh
@@ -93,7 +93,7 @@ BuildDocker() {
|
||||
|
||||
PrepareBuildDockerMusl() {
|
||||
mkdir -p build/musl-libs
|
||||
BASE="https://musl.cc/"
|
||||
BASE="https://github.com/go-cross/musl-toolchain-archive/releases/latest/download/"
|
||||
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
|
||||
for i in "${FILES[@]}"; do
|
||||
url="${BASE}${i}.tgz"
|
||||
@@ -245,7 +245,7 @@ BuildReleaseFreeBSD() {
|
||||
cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
|
||||
echo building for freebsd-${os_arch}
|
||||
sudo mkdir -p "/opt/freebsd/${os_arch}"
|
||||
wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
|
||||
wget -q https://download.freebsd.org/releases/${os_arch}/14.3-RELEASE/base.txz
|
||||
sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
|
||||
rm base.txz
|
||||
export GOOS=freebsd
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_46_0"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -16,6 +17,12 @@ func Init() {
|
||||
bootstrap.InitConfig()
|
||||
bootstrap.Log()
|
||||
bootstrap.InitDB()
|
||||
|
||||
if v3_46_0.IsLegacyRoleDetected() {
|
||||
utils.Log.Warnf("Detected legacy role format, executing ConvertLegacyRoles patch early...")
|
||||
v3_46_0.ConvertLegacyRoles()
|
||||
}
|
||||
|
||||
data.InitData()
|
||||
bootstrap.InitStreamLimit()
|
||||
bootstrap.InitIndex()
|
||||
|
||||
@@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{
|
||||
Short: "A file list program that supports multiple storage.",
|
||||
Long: `A file list program that supports multiple storage,
|
||||
built with love by Xhofe and friends in Go/Solid.js.
|
||||
Complete documentation is available at https://alist.nn.ci/`,
|
||||
Complete documentation is available at https://alistgo.com/`,
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -28,7 +30,8 @@ import (
|
||||
type Pan123 struct {
|
||||
model.Storage
|
||||
Addition
|
||||
apiRateLimit sync.Map
|
||||
apiRateLimit sync.Map
|
||||
safeBoxUnlocked sync.Map
|
||||
}
|
||||
|
||||
func (d *Pan123) Config() driver.Config {
|
||||
@@ -52,9 +55,26 @@ func (d *Pan123) Drop(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
if f, ok := dir.(File); ok && f.IsLock {
|
||||
if err := d.unlockSafeBox(f.FileId); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
files, err := d.getFiles(ctx, dir.GetID(), dir.GetName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
msg := strings.ToLower(err.Error())
|
||||
if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
|
||||
if id, e := strconv.ParseInt(dir.GetID(), 10, 64); e == nil {
|
||||
if e = d.unlockSafeBox(id); e == nil {
|
||||
files, err = d.getFiles(ctx, dir.GetID(), dir.GetName())
|
||||
} else {
|
||||
return nil, e
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
|
||||
return src, nil
|
||||
|
||||
@@ -6,8 +6,9 @@ import (
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
Username string `json:"username" required:"true"`
|
||||
Password string `json:"password" required:"true"`
|
||||
Username string `json:"username" required:"true"`
|
||||
Password string `json:"password" required:"true"`
|
||||
SafePassword string `json:"safe_password"`
|
||||
driver.RootID
|
||||
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
|
||||
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||
|
||||
@@ -20,6 +20,7 @@ type File struct {
|
||||
Etag string `json:"Etag"`
|
||||
S3KeyFlag string `json:"S3KeyFlag"`
|
||||
DownloadUrl string `json:"DownloadUrl"`
|
||||
IsLock bool `json:"IsLock"`
|
||||
}
|
||||
|
||||
func (f File) CreateTime() time.Time {
|
||||
|
||||
@@ -43,6 +43,7 @@ const (
|
||||
S3Auth = MainApi + "/file/s3_upload_object/auth"
|
||||
UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
|
||||
S3Complete = MainApi + "/file/s3_complete_multipart_upload"
|
||||
SafeBoxUnlock = MainApi + "/restful/goapi/v1/file/safe_box/auth/unlockbox"
|
||||
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
|
||||
)
|
||||
|
||||
@@ -161,12 +162,12 @@ func (d *Pan123) login() error {
|
||||
}
|
||||
res, err := base.RestyClient.R().
|
||||
SetHeaders(map[string]string{
|
||||
"origin": "https://www.123pan.com",
|
||||
"referer": "https://www.123pan.com/",
|
||||
"user-agent": "Dart/2.19(dart:io)-alist",
|
||||
"origin": "https://www.123pan.com",
|
||||
"referer": "https://www.123pan.com/",
|
||||
//"user-agent": "Dart/2.19(dart:io)-alist",
|
||||
"platform": "web",
|
||||
"app-version": "3",
|
||||
//"user-agent": base.UserAgent,
|
||||
"user-agent": base.UserAgent,
|
||||
}).
|
||||
SetBody(body).Post(SignIn)
|
||||
if err != nil {
|
||||
@@ -202,7 +203,7 @@ do:
|
||||
"origin": "https://www.123pan.com",
|
||||
"referer": "https://www.123pan.com/",
|
||||
"authorization": "Bearer " + d.AccessToken,
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
|
||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
|
||||
"platform": "web",
|
||||
"app-version": "3",
|
||||
//"user-agent": base.UserAgent,
|
||||
@@ -238,6 +239,22 @@ do:
|
||||
return body, nil
|
||||
}
|
||||
|
||||
func (d *Pan123) unlockSafeBox(fileId int64) error {
|
||||
if _, ok := d.safeBoxUnlocked.Load(fileId); ok {
|
||||
return nil
|
||||
}
|
||||
data := base.Json{"password": d.SafePassword}
|
||||
url := fmt.Sprintf("%s?fileId=%d", SafeBoxUnlock, fileId)
|
||||
_, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(data)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.safeBoxUnlocked.Store(fileId, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]File, error) {
|
||||
page := 1
|
||||
total := 0
|
||||
@@ -267,6 +284,15 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
|
||||
req.SetQueryParams(query)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
msg := strings.ToLower(err.Error())
|
||||
if strings.Contains(msg, "safe box") || strings.Contains(err.Error(), "保险箱") {
|
||||
if fid, e := strconv.ParseInt(parentId, 10, 64); e == nil {
|
||||
if e = d.unlockSafeBox(fid); e == nil {
|
||||
return d.getFiles(ctx, parentId, name)
|
||||
}
|
||||
return nil, e
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
log.Debug(string(_res))
|
||||
|
||||
191
drivers/123_open/api.go
Normal file
191
drivers/123_open/api.go
Normal file
@@ -0,0 +1,191 @@
|
||||
package _123Open
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/go-resty/resty/v2"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const (
|
||||
// baseurl
|
||||
ApiBaseURL = "https://open-api.123pan.com"
|
||||
|
||||
// auth
|
||||
ApiToken = "/api/v1/access_token"
|
||||
|
||||
// file list
|
||||
ApiFileList = "/api/v2/file/list"
|
||||
|
||||
// direct link
|
||||
ApiGetDirectLink = "/api/v1/direct-link/url"
|
||||
|
||||
// mkdir
|
||||
ApiMakeDir = "/upload/v1/file/mkdir"
|
||||
|
||||
// remove
|
||||
ApiRemove = "/api/v1/file/trash"
|
||||
|
||||
// upload
|
||||
ApiUploadDomainURL = "/upload/v2/file/domain"
|
||||
ApiSingleUploadURL = "/upload/v2/file/single/create"
|
||||
ApiCreateUploadURL = "/upload/v2/file/create"
|
||||
ApiUploadSliceURL = "/upload/v2/file/slice"
|
||||
ApiUploadCompleteURL = "/upload/v2/file/upload_complete"
|
||||
|
||||
// move
|
||||
ApiMove = "/api/v1/file/move"
|
||||
|
||||
// rename
|
||||
ApiRename = "/api/v1/file/name"
|
||||
)
|
||||
|
||||
type Response[T any] struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data T `json:"data"`
|
||||
}
|
||||
|
||||
type TokenResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data TokenData `json:"data"`
|
||||
}
|
||||
|
||||
type TokenData struct {
|
||||
AccessToken string `json:"accessToken"`
|
||||
ExpiredAt string `json:"expiredAt"`
|
||||
}
|
||||
|
||||
type FileListResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data FileListData `json:"data"`
|
||||
}
|
||||
|
||||
type FileListData struct {
|
||||
LastFileId int64 `json:"lastFileId"`
|
||||
FileList []File `json:"fileList"`
|
||||
}
|
||||
|
||||
type DirectLinkResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data DirectLinkData `json:"data"`
|
||||
}
|
||||
|
||||
type DirectLinkData struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
type MakeDirRequest struct {
|
||||
Name string `json:"name"`
|
||||
ParentID int64 `json:"parentID"`
|
||||
}
|
||||
|
||||
type MakeDirResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data MakeDirData `json:"data"`
|
||||
}
|
||||
|
||||
type MakeDirData struct {
|
||||
DirID int64 `json:"dirID"`
|
||||
}
|
||||
|
||||
type RemoveRequest struct {
|
||||
FileIDs []int64 `json:"fileIDs"`
|
||||
}
|
||||
|
||||
type UploadCreateResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data UploadCreateData `json:"data"`
|
||||
}
|
||||
|
||||
type UploadCreateData struct {
|
||||
FileID int64 `json:"fileId"`
|
||||
Reuse bool `json:"reuse"`
|
||||
PreuploadID string `json:"preuploadId"`
|
||||
SliceSize int64 `json:"sliceSize"`
|
||||
Servers []string `json:"servers"`
|
||||
}
|
||||
|
||||
type UploadUrlResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data UploadUrlData `json:"data"`
|
||||
}
|
||||
|
||||
type UploadUrlData struct {
|
||||
PresignedURL string `json:"presignedUrl"`
|
||||
}
|
||||
|
||||
type UploadCompleteResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data UploadCompleteData `json:"data"`
|
||||
}
|
||||
|
||||
type UploadCompleteData struct {
|
||||
FileID int `json:"fileID"`
|
||||
Completed bool `json:"completed"`
|
||||
}
|
||||
|
||||
func (d *Open123) Request(endpoint string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
|
||||
client := resty.New()
|
||||
token, err := d.tm.getToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := client.R().
|
||||
SetHeader("Authorization", "Bearer "+token).
|
||||
SetHeader("Platform", "open_platform").
|
||||
SetHeader("Content-Type", "application/json").
|
||||
SetResult(result)
|
||||
|
||||
if setup != nil {
|
||||
setup(req)
|
||||
}
|
||||
|
||||
switch method {
|
||||
case http.MethodGet:
|
||||
return req.Get(ApiBaseURL + endpoint)
|
||||
case http.MethodPost:
|
||||
return req.Post(ApiBaseURL + endpoint)
|
||||
case http.MethodPut:
|
||||
return req.Put(ApiBaseURL + endpoint)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported method: %s", method)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Open123) RequestTo(fullURL string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
|
||||
client := resty.New()
|
||||
|
||||
token, err := d.tm.getToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := client.R().
|
||||
SetHeader("Authorization", "Bearer "+token).
|
||||
SetHeader("Platform", "open_platform").
|
||||
SetHeader("Content-Type", "application/json").
|
||||
SetResult(result)
|
||||
|
||||
if setup != nil {
|
||||
setup(req)
|
||||
}
|
||||
|
||||
switch method {
|
||||
case http.MethodGet:
|
||||
return req.Get(fullURL)
|
||||
case http.MethodPost:
|
||||
return req.Post(fullURL)
|
||||
case http.MethodPut:
|
||||
return req.Put(fullURL)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported method: %s", method)
|
||||
}
|
||||
}
|
||||
294
drivers/123_open/driver.go
Normal file
294
drivers/123_open/driver.go
Normal file
@@ -0,0 +1,294 @@
|
||||
package _123Open
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Open123 struct {
|
||||
model.Storage
|
||||
Addition
|
||||
|
||||
UploadThread int
|
||||
tm *tokenManager
|
||||
}
|
||||
|
||||
func (d *Open123) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *Open123) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *Open123) Init(ctx context.Context) error {
|
||||
d.tm = newTokenManager(d.ClientID, d.ClientSecret)
|
||||
|
||||
if _, err := d.tm.getToken(); err != nil {
|
||||
return fmt.Errorf("token 初始化失败: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Open123) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
parentFileId, err := strconv.ParseInt(dir.GetID(), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileLastId := int64(0)
|
||||
var results []File
|
||||
|
||||
for fileLastId != -1 {
|
||||
files, err := d.getFiles(parentFileId, 100, fileLastId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range files.Data.FileList {
|
||||
if f.Trashed == 0 {
|
||||
results = append(results, f)
|
||||
}
|
||||
}
|
||||
fileLastId = files.Data.LastFileId
|
||||
}
|
||||
|
||||
objs := make([]model.Obj, 0, len(results))
|
||||
for _, f := range results {
|
||||
objs = append(objs, f)
|
||||
}
|
||||
return objs, nil
|
||||
}
|
||||
|
||||
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
if file.IsDir() {
|
||||
return nil, errs.LinkIsDir
|
||||
}
|
||||
|
||||
fileID := file.GetID()
|
||||
|
||||
var result DirectLinkResp
|
||||
url := fmt.Sprintf("%s?fileID=%s", ApiGetDirectLink, fileID)
|
||||
_, err := d.Request(url, http.MethodGet, nil, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Code != 0 {
|
||||
return nil, fmt.Errorf("get link failed: %s", result.Message)
|
||||
}
|
||||
|
||||
linkURL := result.Data.URL
|
||||
if d.PrivateKey != "" {
|
||||
if d.UID == 0 {
|
||||
return nil, fmt.Errorf("uid is required when private key is set")
|
||||
}
|
||||
duration := time.Duration(d.ValidDuration)
|
||||
if duration <= 0 {
|
||||
duration = 30
|
||||
}
|
||||
signedURL, err := SignURL(linkURL, d.PrivateKey, d.UID, duration*time.Minute)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
linkURL = signedURL
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: linkURL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||
parentID, err := strconv.ParseInt(parentDir.GetID(), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid parent ID: %w", err)
|
||||
}
|
||||
|
||||
var result MakeDirResp
|
||||
reqBody := MakeDirRequest{
|
||||
Name: dirName,
|
||||
ParentID: parentID,
|
||||
}
|
||||
|
||||
_, err = d.Request(ApiMakeDir, http.MethodPost, func(r *resty.Request) {
|
||||
r.SetBody(reqBody)
|
||||
}, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Code != 0 {
|
||||
return nil, fmt.Errorf("mkdir failed: %s", result.Message)
|
||||
}
|
||||
|
||||
newDir := File{
|
||||
FileId: result.Data.DirID,
|
||||
FileName: dirName,
|
||||
Type: 1,
|
||||
ParentFileId: int(parentID),
|
||||
Size: 0,
|
||||
Trashed: 0,
|
||||
}
|
||||
return newDir, nil
|
||||
}
|
||||
|
||||
func (d *Open123) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid src file ID: %w", err)
|
||||
}
|
||||
dstID, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid dest dir ID: %w", err)
|
||||
}
|
||||
|
||||
var result Response[any]
|
||||
reqBody := map[string]interface{}{
|
||||
"fileIDs": []int64{srcID},
|
||||
"toParentFileID": dstID,
|
||||
}
|
||||
|
||||
_, err = d.Request(ApiMove, http.MethodPost, func(r *resty.Request) {
|
||||
r.SetBody(reqBody)
|
||||
}, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Code != 0 {
|
||||
return nil, fmt.Errorf("move failed: %s", result.Message)
|
||||
}
|
||||
|
||||
files, err := d.getFiles(dstID, 100, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move succeed but failed to get target dir: %w", err)
|
||||
}
|
||||
for _, f := range files.Data.FileList {
|
||||
if f.FileId == srcID {
|
||||
return f, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("move succeed but file not found in target dir")
|
||||
}
|
||||
|
||||
func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid file ID: %w", err)
|
||||
}
|
||||
|
||||
var result Response[any]
|
||||
reqBody := map[string]interface{}{
|
||||
"fileId": srcID,
|
||||
"fileName": newName,
|
||||
}
|
||||
|
||||
_, err = d.Request(ApiRename, http.MethodPut, func(r *resty.Request) {
|
||||
r.SetBody(reqBody)
|
||||
}, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Code != 0 {
|
||||
return nil, fmt.Errorf("rename failed: %s", result.Message)
|
||||
}
|
||||
|
||||
parentID := 0
|
||||
if file, ok := srcObj.(File); ok {
|
||||
parentID = file.ParentFileId
|
||||
}
|
||||
files, err := d.getFiles(int64(parentID), 100, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rename succeed but failed to get parent dir: %w", err)
|
||||
}
|
||||
for _, f := range files.Data.FileList {
|
||||
if f.FileId == srcID {
|
||||
return f, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("rename succeed but file not found in parent dir")
|
||||
}
|
||||
|
||||
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
|
||||
idStr := obj.GetID()
|
||||
id, err := strconv.ParseInt(idStr, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid file ID: %w", err)
|
||||
}
|
||||
|
||||
var result Response[any]
|
||||
reqBody := RemoveRequest{
|
||||
FileIDs: []int64{id},
|
||||
}
|
||||
|
||||
_, err = d.Request(ApiRemove, http.MethodPost, func(r *resty.Request) {
|
||||
r.SetBody(reqBody)
|
||||
}, &result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if result.Code != 0 {
|
||||
return fmt.Errorf("remove failed: %s", result.Message)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
|
||||
etag := file.GetHash().GetHash(utils.MD5)
|
||||
|
||||
if len(etag) < utils.MD5.Width {
|
||||
up = model.UpdateProgressWithRange(up, 50, 100)
|
||||
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if createResp.Data.Reuse {
|
||||
return nil
|
||||
}
|
||||
|
||||
return d.Upload(ctx, file, parentFileId, createResp, up)
|
||||
}
|
||||
|
||||
func (d *Open123) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
func (d *Open123) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
func (d *Open123) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
func (d *Open123) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
//func (d *Open123) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
||||
var _ driver.Driver = (*Open123)(nil)
|
||||
36
drivers/123_open/meta.go
Normal file
36
drivers/123_open/meta.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package _123Open
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
driver.RootID
|
||||
|
||||
ClientID string `json:"client_id" required:"true" label:"Client ID"`
|
||||
ClientSecret string `json:"client_secret" required:"true" label:"Client Secret"`
|
||||
PrivateKey string `json:"private_key"`
|
||||
UID uint64 `json:"uid" type:"number"`
|
||||
ValidDuration int64 `json:"valid_duration" type:"number" default:"30" help:"minutes"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "123 Open",
|
||||
LocalSort: false,
|
||||
OnlyLocal: false,
|
||||
OnlyProxy: false,
|
||||
NoCache: false,
|
||||
NoUpload: false,
|
||||
NeedMs: false,
|
||||
DefaultRoot: "0",
|
||||
CheckStatus: false,
|
||||
Alert: "",
|
||||
NoOverwriteUpload: false,
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &Open123{}
|
||||
})
|
||||
}
|
||||
27
drivers/123_open/sign.go
Normal file
27
drivers/123_open/sign.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package _123Open
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (string, error) {
|
||||
if privateKey == "" {
|
||||
return originURL, nil
|
||||
}
|
||||
parsed, err := url.Parse(originURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ts := time.Now().Add(validDuration).Unix()
|
||||
randInt := rand.Int()
|
||||
signature := fmt.Sprintf("%d-%d-%d-%x", ts, randInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
|
||||
parsed.Path, ts, randInt, uid, privateKey))))
|
||||
query := parsed.Query()
|
||||
query.Add("auth_key", signature)
|
||||
parsed.RawQuery = query.Encode()
|
||||
return parsed.String(), nil
|
||||
}
|
||||
85
drivers/123_open/token.go
Normal file
85
drivers/123_open/token.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package _123Open
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const tokenURL = ApiBaseURL + ApiToken
|
||||
|
||||
type tokenManager struct {
|
||||
clientID string
|
||||
clientSecret string
|
||||
|
||||
mu sync.Mutex
|
||||
accessToken string
|
||||
expireTime time.Time
|
||||
}
|
||||
|
||||
func newTokenManager(clientID, clientSecret string) *tokenManager {
|
||||
return &tokenManager{
|
||||
clientID: clientID,
|
||||
clientSecret: clientSecret,
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *tokenManager) getToken() (string, error) {
|
||||
tm.mu.Lock()
|
||||
defer tm.mu.Unlock()
|
||||
|
||||
if tm.accessToken != "" && time.Now().Before(tm.expireTime.Add(-5*time.Minute)) {
|
||||
return tm.accessToken, nil
|
||||
}
|
||||
|
||||
reqBody := map[string]string{
|
||||
"clientID": tm.clientID,
|
||||
"clientSecret": tm.clientSecret,
|
||||
}
|
||||
body, _ := json.Marshal(reqBody)
|
||||
req, err := http.NewRequest("POST", tokenURL, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
req.Header.Set("Platform", "open_platform")
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result TokenResp
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if result.Code != 0 {
|
||||
return "", fmt.Errorf("get token failed: %s", result.Message)
|
||||
}
|
||||
|
||||
tm.accessToken = result.Data.AccessToken
|
||||
expireAt, err := time.Parse(time.RFC3339, result.Data.ExpiredAt)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse expire time failed: %w", err)
|
||||
}
|
||||
tm.expireTime = expireAt
|
||||
|
||||
return tm.accessToken, nil
|
||||
}
|
||||
|
||||
func (tm *tokenManager) buildHeaders() (http.Header, error) {
|
||||
token, err := tm.getToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header := http.Header{}
|
||||
header.Set("Authorization", "Bearer "+token)
|
||||
header.Set("Platform", "open_platform")
|
||||
header.Set("Content-Type", "application/json")
|
||||
return header, nil
|
||||
}
|
||||
70
drivers/123_open/types.go
Normal file
70
drivers/123_open/types.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package _123Open
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"time"
|
||||
)
|
||||
|
||||
type File struct {
|
||||
FileName string `json:"filename"`
|
||||
Size int64 `json:"size"`
|
||||
CreateAt string `json:"createAt"`
|
||||
UpdateAt string `json:"updateAt"`
|
||||
FileId int64 `json:"fileId"`
|
||||
Type int `json:"type"`
|
||||
Etag string `json:"etag"`
|
||||
S3KeyFlag string `json:"s3KeyFlag"`
|
||||
ParentFileId int `json:"parentFileId"`
|
||||
Category int `json:"category"`
|
||||
Status int `json:"status"`
|
||||
Trashed int `json:"trashed"`
|
||||
}
|
||||
|
||||
func (f File) GetID() string {
|
||||
return fmt.Sprint(f.FileId)
|
||||
}
|
||||
|
||||
func (f File) GetName() string {
|
||||
return f.FileName
|
||||
}
|
||||
|
||||
func (f File) GetSize() int64 {
|
||||
return f.Size
|
||||
}
|
||||
|
||||
func (f File) IsDir() bool {
|
||||
return f.Type == 1
|
||||
}
|
||||
|
||||
func (f File) GetModified() string {
|
||||
return f.UpdateAt
|
||||
}
|
||||
|
||||
func (f File) GetThumb() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (f File) ModTime() time.Time {
|
||||
t, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (f File) CreateTime() time.Time {
|
||||
t, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (f File) GetHash() utils.HashInfo {
|
||||
return utils.NewHashInfo(utils.MD5, f.Etag)
|
||||
}
|
||||
|
||||
func (f File) GetPath() string {
|
||||
return ""
|
||||
}
|
||||
282
drivers/123_open/upload.go
Normal file
282
drivers/123_open/upload.go
Normal file
@@ -0,0 +1,282 @@
|
||||
package _123Open
|
||||
|
||||
import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	"golang.org/x/sync/errgroup"
)
|
||||
|
||||
func (d *Open123) create(parentFileID int64, filename, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
|
||||
var resp UploadCreateResp
|
||||
|
||||
_, err := d.Request(ApiCreateUploadURL, http.MethodPost, func(req *resty.Request) {
|
||||
body := base.Json{
|
||||
"parentFileID": parentFileID,
|
||||
"filename": filename,
|
||||
"etag": etag,
|
||||
"size": size,
|
||||
}
|
||||
if duplicate > 0 {
|
||||
body["duplicate"] = duplicate
|
||||
}
|
||||
if containDir {
|
||||
body["containDir"] = true
|
||||
}
|
||||
req.SetBody(body)
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Open123) GetUploadDomains() ([]string, error) {
|
||||
var resp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data []string `json:"data"`
|
||||
}
|
||||
|
||||
_, err := d.Request(ApiUploadDomainURL, http.MethodGet, nil, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.Code != 0 {
|
||||
return nil, fmt.Errorf("get upload domain failed: %s", resp.Message)
|
||||
}
|
||||
return resp.Data, nil
|
||||
}
|
||||
|
||||
func (d *Open123) UploadSingle(ctx context.Context, createResp *UploadCreateResp, file model.FileStreamer, parentID int64) error {
|
||||
domain := createResp.Data.Servers[0]
|
||||
|
||||
etag := file.GetHash().GetHash(utils.MD5)
|
||||
if len(etag) < utils.MD5.Width {
|
||||
_, _, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reader, err := file.RangeRead(http_range.Range{Start: 0, Length: file.GetSize()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reader = driver.NewLimitedUploadStream(ctx, reader)
|
||||
|
||||
var b bytes.Buffer
|
||||
mw := multipart.NewWriter(&b)
|
||||
mw.WriteField("parentFileID", fmt.Sprint(parentID))
|
||||
mw.WriteField("filename", file.GetName())
|
||||
mw.WriteField("etag", etag)
|
||||
mw.WriteField("size", fmt.Sprint(file.GetSize()))
|
||||
fw, _ := mw.CreateFormFile("file", file.GetName())
|
||||
_, err = io.Copy(fw, reader)
|
||||
mw.Close()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", domain+ApiSingleUploadURL, &b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
|
||||
req.Header.Set("Platform", "open_platform")
|
||||
req.Header.Set("Content-Type", mw.FormDataContentType())
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data struct {
|
||||
FileID int64 `json:"fileID"`
|
||||
Completed bool `json:"completed"`
|
||||
} `json:"data"`
|
||||
}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if err := json.Unmarshal(body, &result); err != nil {
|
||||
return fmt.Errorf("unmarshal response error: %v, body: %s", err, string(body))
|
||||
}
|
||||
if result.Code != 0 {
|
||||
return fmt.Errorf("upload failed: %s", result.Message)
|
||||
}
|
||||
if !result.Data.Completed || result.Data.FileID == 0 {
|
||||
return fmt.Errorf("upload incomplete or missing fileID")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, parentID int64, createResp *UploadCreateResp, up driver.UpdateProgress) error {
|
||||
if cacher, ok := file.(interface{ CacheFullInTempFile() (model.File, error) }); ok {
|
||||
if _, err := cacher.CacheFullInTempFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
size := file.GetSize()
|
||||
chunkSize := createResp.Data.SliceSize
|
||||
uploadNums := (size + chunkSize - 1) / chunkSize
|
||||
uploadDomain := createResp.Data.Servers[0]
|
||||
|
||||
if d.UploadThread <= 0 {
|
||||
cpuCores := runtime.NumCPU()
|
||||
threads := cpuCores * 2
|
||||
if threads < 4 {
|
||||
threads = 4
|
||||
}
|
||||
if threads > 16 {
|
||||
threads = 16
|
||||
}
|
||||
d.UploadThread = threads
|
||||
fmt.Printf("[Upload] Auto set upload concurrency: %d (CPU cores=%d)\n", d.UploadThread, cpuCores)
|
||||
}
|
||||
|
||||
fmt.Printf("[Upload] File size: %d bytes, chunk size: %d bytes, total slices: %d, concurrency: %d\n",
|
||||
size, chunkSize, uploadNums, d.UploadThread)
|
||||
|
||||
if size <= 1<<30 {
|
||||
return d.UploadSingle(ctx, createResp, file, parentID)
|
||||
}
|
||||
|
||||
if createResp.Data.Reuse {
|
||||
up(100)
|
||||
return nil
|
||||
}
|
||||
|
||||
client := resty.New()
|
||||
semaphore := make(chan struct{}, d.UploadThread)
|
||||
threadG, _ := errgroup.WithContext(ctx)
|
||||
|
||||
var progressArr = make([]int64, uploadNums)
|
||||
|
||||
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
|
||||
partIndex := partIndex
|
||||
semaphore <- struct{}{}
|
||||
|
||||
threadG.Go(func() error {
|
||||
defer func() { <-semaphore }()
|
||||
offset := partIndex * chunkSize
|
||||
length := min(chunkSize, size-offset)
|
||||
partNumber := partIndex + 1
|
||||
|
||||
fmt.Printf("[Slice %d] Starting read from offset %d, length %d\n", partNumber, offset, length)
|
||||
reader, err := file.RangeRead(http_range.Range{Start: offset, Length: length})
|
||||
if err != nil {
|
||||
return fmt.Errorf("[Slice %d] RangeRead error: %v", partNumber, err)
|
||||
}
|
||||
|
||||
buf := make([]byte, length)
|
||||
n, err := io.ReadFull(reader, buf)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("[Slice %d] Read error: %v", partNumber, err)
|
||||
}
|
||||
buf = buf[:n]
|
||||
hash := md5.Sum(buf)
|
||||
sliceMD5Str := hex.EncodeToString(hash[:])
|
||||
|
||||
body := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(body)
|
||||
writer.WriteField("preuploadID", createResp.Data.PreuploadID)
|
||||
writer.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
|
||||
writer.WriteField("sliceMD5", sliceMD5Str)
|
||||
partName := fmt.Sprintf("%s.part%d", file.GetName(), partNumber)
|
||||
fw, _ := writer.CreateFormFile("slice", partName)
|
||||
fw.Write(buf)
|
||||
writer.Close()
|
||||
|
||||
resp, err := client.R().
|
||||
SetHeader("Authorization", "Bearer "+d.tm.accessToken).
|
||||
SetHeader("Platform", "open_platform").
|
||||
SetHeader("Content-Type", writer.FormDataContentType()).
|
||||
SetBody(body.Bytes()).
|
||||
Post(uploadDomain + ApiUploadSliceURL)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("[Slice %d] Upload HTTP error: %v", partNumber, err)
|
||||
}
|
||||
if resp.StatusCode() != 200 {
|
||||
return fmt.Errorf("[Slice %d] Upload failed with status: %s, resp: %s", partNumber, resp.Status(), resp.String())
|
||||
}
|
||||
|
||||
progressArr[partIndex] = length
|
||||
var totalUploaded int64 = 0
|
||||
for _, v := range progressArr {
|
||||
totalUploaded += v
|
||||
}
|
||||
if up != nil {
|
||||
percent := float64(totalUploaded) / float64(size) * 100
|
||||
up(percent)
|
||||
}
|
||||
|
||||
fmt.Printf("[Slice %d] MD5: %s\n", partNumber, sliceMD5Str)
|
||||
fmt.Printf("[Slice %d] Upload finished\n", partNumber)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := threadG.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var completeResp struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data struct {
|
||||
Completed bool `json:"completed"`
|
||||
FileID int64 `json:"fileID"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
for {
|
||||
reqBody := fmt.Sprintf(`{"preuploadID":"%s"}`, createResp.Data.PreuploadID)
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", uploadDomain+ApiUploadCompleteURL, bytes.NewBufferString(reqBody))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
|
||||
req.Header.Set("Platform", "open_platform")
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
|
||||
if err := json.Unmarshal(body, &completeResp); err != nil {
|
||||
return fmt.Errorf("completion response unmarshal error: %v, body: %s", err, string(body))
|
||||
}
|
||||
if completeResp.Code != 0 {
|
||||
return fmt.Errorf("completion API returned error code %d: %s", completeResp.Code, completeResp.Message)
|
||||
}
|
||||
if completeResp.Data.Completed && completeResp.Data.FileID != 0 {
|
||||
fmt.Printf("[Upload] Upload completed successfully. FileID: %d\n", completeResp.Data.FileID)
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
up(100)
|
||||
return nil
|
||||
}
|
||||
20
drivers/123_open/util.go
Normal file
20
drivers/123_open/util.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package _123Open
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
|
||||
var result FileListResp
|
||||
url := fmt.Sprintf("%s?parentFileId=%d&limit=%d&lastFileId=%d", ApiFileList, parentFileId, limit, lastFileId)
|
||||
|
||||
_, err := d.Request(url, http.MethodGet, nil, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Code != 0 {
|
||||
return nil, fmt.Errorf("list error: %s", result.Message)
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
@@ -324,7 +324,7 @@ func (y *Cloud189PC) login() (err error) {
|
||||
_, err = y.client.R().
|
||||
SetResult(&tokenInfo).SetError(&erron).
|
||||
SetQueryParams(clientSuffix()).
|
||||
SetQueryParam("redirectURL", url.QueryEscape(loginresp.ToUrl)).
|
||||
SetQueryParam("redirectURL", loginresp.ToUrl).
|
||||
Post(API_URL + "/getSessionForPC.action")
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
@@ -56,7 +56,7 @@ func (d *AListV3) Init(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.Data.Role == model.GUEST {
|
||||
if utils.SliceContains(resp.Data.Role, model.GUEST) {
|
||||
u := d.Address + "/api/public/settings"
|
||||
res, err := base.RestyClient.R().Get(u)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package alist_v3
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
@@ -72,15 +73,15 @@ type LoginResp struct {
|
||||
}
|
||||
|
||||
type MeResp struct {
|
||||
Id int `json:"id"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
BasePath string `json:"base_path"`
|
||||
Role int `json:"role"`
|
||||
Disabled bool `json:"disabled"`
|
||||
Permission int `json:"permission"`
|
||||
SsoId string `json:"sso_id"`
|
||||
Otp bool `json:"otp"`
|
||||
Id int `json:"id"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
BasePath string `json:"base_path"`
|
||||
Role IntSlice `json:"role"`
|
||||
Disabled bool `json:"disabled"`
|
||||
Permission int `json:"permission"`
|
||||
SsoId string `json:"sso_id"`
|
||||
Otp bool `json:"otp"`
|
||||
}
|
||||
|
||||
type ArchiveMetaReq struct {
|
||||
@@ -168,3 +169,17 @@ type DecompressReq struct {
|
||||
PutIntoNewDir bool `json:"put_into_new_dir"`
|
||||
SrcDir string `json:"src_dir"`
|
||||
}
|
||||
|
||||
type IntSlice []int
|
||||
|
||||
func (s *IntSlice) UnmarshalJSON(data []byte) error {
|
||||
if len(data) > 0 && data[0] == '[' {
|
||||
return json.Unmarshal(data, (*[]int)(s))
|
||||
}
|
||||
var single int
|
||||
if err := json.Unmarshal(data, &single); err != nil {
|
||||
return err
|
||||
}
|
||||
*s = []int{single}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
|
||||
d.DriveId = d.Addition.DeviceID
|
||||
d.UserID = utils.Json.Get(res, "user_id").ToString()
|
||||
d.cron = cron.NewCron(time.Hour * 2)
|
||||
d.cron.Do(func() {
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
type Addition struct {
|
||||
driver.RootID
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
//DeviceID string `json:"device_id" required:"true"`
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
DeviceID string `json:"device_id" required:"true"`
|
||||
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
|
||||
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
|
||||
RapidUpload bool `json:"rapid_upload"`
|
||||
|
||||
@@ -11,7 +11,7 @@ type Addition struct {
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
|
||||
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
|
||||
OauthTokenURL string `json:"oauth_token_url" default:"https://api.nn.ci/alist/ali_open/token"`
|
||||
OauthTokenURL string `json:"oauth_token_url" default:"https://api.alistgo.com/alist/ali_open/token"`
|
||||
ClientID string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
|
||||
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
|
||||
RemoveWay string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/drivers/115_share"
|
||||
_ "github.com/alist-org/alist/v3/drivers/123"
|
||||
_ "github.com/alist-org/alist/v3/drivers/123_link"
|
||||
_ "github.com/alist-org/alist/v3/drivers/123_open"
|
||||
_ "github.com/alist-org/alist/v3/drivers/123_share"
|
||||
_ "github.com/alist-org/alist/v3/drivers/139"
|
||||
_ "github.com/alist-org/alist/v3/drivers/189"
|
||||
@@ -20,8 +21,10 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
|
||||
_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
|
||||
_ "github.com/alist-org/alist/v3/drivers/baidu_share"
|
||||
_ "github.com/alist-org/alist/v3/drivers/bitqiu"
|
||||
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
|
||||
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
|
||||
_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
|
||||
_ "github.com/alist-org/alist/v3/drivers/crypt"
|
||||
_ "github.com/alist-org/alist/v3/drivers/doubao"
|
||||
_ "github.com/alist-org/alist/v3/drivers/doubao_share"
|
||||
@@ -30,6 +33,7 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/drivers/ftp"
|
||||
_ "github.com/alist-org/alist/v3/drivers/github"
|
||||
_ "github.com/alist-org/alist/v3/drivers/github_releases"
|
||||
_ "github.com/alist-org/alist/v3/drivers/gofile"
|
||||
_ "github.com/alist-org/alist/v3/drivers/google_drive"
|
||||
_ "github.com/alist-org/alist/v3/drivers/google_photo"
|
||||
_ "github.com/alist-org/alist/v3/drivers/halalcloud"
|
||||
@@ -39,6 +43,7 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/drivers/lanzou"
|
||||
_ "github.com/alist-org/alist/v3/drivers/lenovonas_share"
|
||||
_ "github.com/alist-org/alist/v3/drivers/local"
|
||||
_ "github.com/alist-org/alist/v3/drivers/mediafire"
|
||||
_ "github.com/alist-org/alist/v3/drivers/mediatrack"
|
||||
_ "github.com/alist-org/alist/v3/drivers/mega"
|
||||
_ "github.com/alist-org/alist/v3/drivers/misskey"
|
||||
@@ -47,8 +52,10 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/drivers/onedrive"
|
||||
_ "github.com/alist-org/alist/v3/drivers/onedrive_app"
|
||||
_ "github.com/alist-org/alist/v3/drivers/onedrive_sharelink"
|
||||
_ "github.com/alist-org/alist/v3/drivers/pcloud"
|
||||
_ "github.com/alist-org/alist/v3/drivers/pikpak"
|
||||
_ "github.com/alist-org/alist/v3/drivers/pikpak_share"
|
||||
_ "github.com/alist-org/alist/v3/drivers/proton_drive"
|
||||
_ "github.com/alist-org/alist/v3/drivers/quark_uc"
|
||||
_ "github.com/alist-org/alist/v3/drivers/quark_uc_tv"
|
||||
_ "github.com/alist-org/alist/v3/drivers/quqi"
|
||||
|
||||
@@ -11,8 +11,8 @@ type Addition struct {
|
||||
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
|
||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||
DownloadAPI string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
|
||||
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
|
||||
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
|
||||
ClientID string `json:"client_id" required:"true" default:"hq9yQ9w9kR4YHj1kyYafLygVocobh7Sf"`
|
||||
ClientSecret string `json:"client_secret" required:"true" default:"YH2VpZcFJHYNnV6vLfHQXDBhcE7ZChyE"`
|
||||
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
|
||||
AccessToken string
|
||||
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
||||
|
||||
767
drivers/bitqiu/driver.go
Normal file
767
drivers/bitqiu/driver.go
Normal file
@@ -0,0 +1,767 @@
|
||||
package bitqiu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http/cookiejar"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
streamPkg "github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Endpoint and protocol constants for the BitQiu web API.
const (
	baseURL             = "https://pan.bitqiu.com"
	loginURL            = baseURL + "/loginServer/login"
	userInfoURL         = baseURL + "/user/getInfo"
	listURL             = baseURL + "/apiToken/cfi/fs/resources/pages"
	uploadInitializeURL = baseURL + "/apiToken/cfi/fs/upload/v2/initialize"
	uploadCompleteURL   = baseURL + "/apiToken/cfi/fs/upload/v2/complete"
	downloadURL         = baseURL + "/download/getUrl"
	createDirURL        = baseURL + "/resource/create"
	moveResourceURL     = baseURL + "/resource/remove"
	renameResourceURL   = baseURL + "/resource/rename"
	copyResourceURL     = baseURL + "/apiToken/cfi/fs/async/copy"
	copyManagerURL      = baseURL + "/apiToken/cfi/fs/async/manager"
	deleteResourceURL   = baseURL + "/resource/delete"

	// Status codes returned in API response bodies.
	successCode       = "10200"
	uploadSuccessCode = "30010"
	copySubmittedCode = "10300"
	orgChannel        = "default|default|default"
)

// Async-copy polling and upload chunking parameters.
const (
	copyPollInterval    = time.Second
	copyPollMaxAttempts = 60
	chunkSize           = int64(1 << 20)
)

// defaultUserAgent is sent when the user has not configured one.
const defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
|
||||
|
||||
// BitQiu is the driver implementation for the BitQiu (pan.bitqiu.com) cloud
// storage service. It authenticates with username/password and talks to the
// web API with a cookie-backed resty client.
type BitQiu struct {
	model.Storage
	Addition

	// client is the shared HTTP client; it carries the session cookies
	// obtained by login and is created lazily in Init.
	client *resty.Client
	// userID is the numeric account id returned by login, as a string.
	// Empty means "not logged in yet".
	userID string
}
|
||||
|
||||
// Config returns the static driver configuration registered in meta.go.
func (d *BitQiu) Config() driver.Config {
	return config
}
|
||||
|
||||
// GetAddition exposes the user-configurable driver options.
func (d *BitQiu) GetAddition() driver.Additional {
	return &d.Addition
}
|
||||
|
||||
func (d *BitQiu) Init(ctx context.Context) error {
|
||||
if d.Addition.UserPlatform == "" {
|
||||
d.Addition.UserPlatform = uuid.NewString()
|
||||
op.MustSaveDriverStorage(d)
|
||||
}
|
||||
|
||||
if d.client == nil {
|
||||
jar, err := cookiejar.New(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.client = base.NewRestyClient()
|
||||
d.client.SetBaseURL(baseURL)
|
||||
d.client.SetCookieJar(jar)
|
||||
}
|
||||
d.client.SetHeader("user-agent", d.userAgent())
|
||||
|
||||
return d.login(ctx)
|
||||
}
|
||||
|
||||
func (d *BitQiu) Drop(ctx context.Context) error {
|
||||
d.client = nil
|
||||
d.userID = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *BitQiu) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
if d.userID == "" {
|
||||
if err := d.login(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
parentID := d.resolveParentID(dir)
|
||||
dirPath := ""
|
||||
if dir != nil {
|
||||
dirPath = dir.GetPath()
|
||||
}
|
||||
pageSize := d.pageSize()
|
||||
orderType := d.orderType()
|
||||
desc := d.orderDesc()
|
||||
|
||||
var results []model.Obj
|
||||
page := 1
|
||||
for {
|
||||
form := map[string]string{
|
||||
"parentId": parentID,
|
||||
"limit": strconv.Itoa(pageSize),
|
||||
"orderType": orderType,
|
||||
"desc": desc,
|
||||
"model": "1",
|
||||
"userId": d.userID,
|
||||
"currentPage": strconv.Itoa(page),
|
||||
"page": strconv.Itoa(page),
|
||||
"org_channel": orgChannel,
|
||||
}
|
||||
var resp Response[ResourcePage]
|
||||
if err := d.postForm(ctx, listURL, form, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.Code != successCode {
|
||||
if resp.Code == "10401" || resp.Code == "10404" {
|
||||
if err := d.login(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("list failed: %s", resp.Message)
|
||||
}
|
||||
|
||||
objs, err := utils.SliceConvert(resp.Data.Data, func(item Resource) (model.Obj, error) {
|
||||
return item.toObject(parentID, dirPath)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, objs...)
|
||||
|
||||
if !resp.Data.HasNext || len(resp.Data.Data) == 0 {
|
||||
break
|
||||
}
|
||||
page++
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// Link resolves a direct download URL for file.
// Directories are rejected up front. On session-expiry codes the call
// re-logins and retries, with at most two attempts overall.
func (d *BitQiu) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if file.IsDir() {
		return nil, errs.NotFile
	}
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	form := map[string]string{
		"fileIds":     file.GetID(),
		"org_channel": orgChannel,
	}
	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[DownloadData]
		if err := d.postForm(ctx, downloadURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode:
			// Success with an empty URL is treated as an error rather than
			// returning an unusable link.
			if resp.Data.URL == "" {
				return nil, fmt.Errorf("empty download url returned")
			}
			return &model.Link{URL: resp.Data.URL}, nil
		case "10401", "10404":
			// Session expired: refresh the session and retry once.
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("get link failed: %s", resp.Message)
		}
	}
	return nil, fmt.Errorf("get link failed: retry limit reached")
}
|
||||
|
||||
// MakeDir creates a folder named dirName under parentDir and returns the
// created object. The server response may omit the name or parent id, so
// both fall back to the request values. At most two attempts are made,
// re-logging-in on session-expiry codes.
func (d *BitQiu) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	parentID := d.resolveParentID(parentDir)
	parentPath := ""
	if parentDir != nil {
		parentPath = parentDir.GetPath()
	}
	form := map[string]string{
		"parentId":    parentID,
		"name":        dirName,
		"org_channel": orgChannel,
	}
	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[CreateDirData]
		if err := d.postForm(ctx, createDirURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode:
			// Prefer the parent id reported by the server, if any.
			newParentID := parentID
			if resp.Data.ParentID != "" {
				newParentID = resp.Data.ParentID
			}
			// Likewise prefer the server-reported name.
			name := resp.Data.Name
			if name == "" {
				name = dirName
			}
			// Synthesize a Resource so the shared toObject conversion builds
			// the model object (ResourceType 1 marks a directory).
			resource := Resource{
				ResourceID:   resp.Data.DirID,
				ResourceType: 1,
				Name:         name,
				ParentID:     newParentID,
			}
			obj, err := resource.toObject(newParentID, parentPath)
			if err != nil {
				return nil, err
			}
			if o, ok := obj.(*Object); ok {
				o.ParentID = newParentID
			}
			return obj, nil
		case "10401", "10404":
			// Session expired: refresh the session and retry once.
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("create folder failed: %s", resp.Message)
		}
	}
	return nil, fmt.Errorf("create folder failed: retry limit reached")
}
|
||||
|
||||
// Move relocates srcObj into dstDir. The API addresses directories and files
// through separate form fields ("dirIds" vs "fileIds"); the unused one is
// sent empty. On success the in-memory object's path and parent id are
// updated in place and srcObj itself is returned.
func (d *BitQiu) Move(ctx context.Context, srcObj, dstDir model.Obj) (*model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	targetParentID := d.resolveParentID(dstDir)
	form := map[string]string{
		"dirIds":      "",
		"fileIds":     "",
		"parentId":    targetParentID,
		"org_channel": orgChannel,
	}
	if srcObj.IsDir() {
		form["dirIds"] = srcObj.GetID()
	} else {
		form["fileIds"] = srcObj.GetID()
	}

	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[any]
		if err := d.postForm(ctx, moveResourceURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode:
			dstPath := ""
			if dstDir != nil {
				dstPath = dstDir.GetPath()
			}
			// Keep the cached object consistent with its new location.
			if setter, ok := srcObj.(model.SetPath); ok {
				setter.SetPath(path.Join(dstPath, srcObj.GetName()))
			}
			if o, ok := srcObj.(*Object); ok {
				o.ParentID = targetParentID
			}
			return srcObj, nil
		case "10401", "10404":
			// Session expired: refresh the session and retry once.
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("move failed: %s", resp.Message)
		}
	}
	return nil, fmt.Errorf("move failed: retry limit reached")
}
|
||||
|
||||
func (d *BitQiu) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||
if d.userID == "" {
|
||||
if err := d.login(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
form := map[string]string{
|
||||
"resourceId": srcObj.GetID(),
|
||||
"name": newName,
|
||||
"type": "0",
|
||||
"org_channel": orgChannel,
|
||||
}
|
||||
if srcObj.IsDir() {
|
||||
form["type"] = "1"
|
||||
}
|
||||
|
||||
for attempt := 0; attempt < 2; attempt++ {
|
||||
var resp Response[any]
|
||||
if err := d.postForm(ctx, renameResourceURL, form, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch resp.Code {
|
||||
case successCode:
|
||||
return updateObjectName(srcObj, newName), nil
|
||||
case "10401", "10404":
|
||||
if err := d.login(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("rename failed: %s", resp.Message)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("rename failed: retry limit reached")
|
||||
}
|
||||
|
||||
// Copy submits an asynchronous server-side copy of srcObj into dstDir and
// then polls (via waitForCopiedObject) until the copy materializes in the
// destination listing. Both successCode and copySubmittedCode count as an
// accepted submission. At most two submission attempts are made.
func (d *BitQiu) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	targetParentID := d.resolveParentID(dstDir)
	form := map[string]string{
		"dirIds":      "",
		"fileIds":     "",
		"parentId":    targetParentID,
		"org_channel": orgChannel,
	}
	if srcObj.IsDir() {
		form["dirIds"] = srcObj.GetID()
	} else {
		form["fileIds"] = srcObj.GetID()
	}

	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[any]
		if err := d.postForm(ctx, copyResourceURL, form, &resp); err != nil {
			return nil, err
		}
		switch resp.Code {
		case successCode, copySubmittedCode:
			// The copy runs asynchronously server-side; block until it shows
			// up in the destination (or fails / times out).
			return d.waitForCopiedObject(ctx, srcObj, dstDir)
		case "10401", "10404":
			// Session expired: refresh the session and retry once.
			if err := d.login(ctx); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("copy failed: %s", resp.Message)
		}
	}

	return nil, fmt.Errorf("copy failed: retry limit reached")
}
|
||||
|
||||
func (d *BitQiu) Remove(ctx context.Context, obj model.Obj) error {
|
||||
if d.userID == "" {
|
||||
if err := d.login(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
form := map[string]string{
|
||||
"dirIds": "",
|
||||
"fileIds": "",
|
||||
"org_channel": orgChannel,
|
||||
}
|
||||
if obj.IsDir() {
|
||||
form["dirIds"] = obj.GetID()
|
||||
} else {
|
||||
form["fileIds"] = obj.GetID()
|
||||
}
|
||||
|
||||
for attempt := 0; attempt < 2; attempt++ {
|
||||
var resp Response[any]
|
||||
if err := d.postForm(ctx, deleteResourceURL, form, &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
switch resp.Code {
|
||||
case successCode:
|
||||
return nil
|
||||
case "10401", "10404":
|
||||
if err := d.login(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("remove failed: %s", resp.Message)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("remove failed: retry limit reached")
|
||||
}
|
||||
|
||||
// Put uploads file into dstDir.
//
// The stream is first fully cached to a temp file while its MD5 is computed,
// because the upload-initialize endpoint needs size and hash up front. The
// initialize call then yields one of two paths:
//   - uploadSuccessCode: instant ("rapid") upload — the server already has
//     this content and returns the finished Resource directly;
//   - successCode: a real upload is required — the payload is an
//     UploadInitData and the file is sent chunk by chunk, followed by a
//     completion call.
//
// Progress is reported through up (0 at start, 100 on completion; chunked
// uploads also report intermediate progress).
func (d *BitQiu) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	if d.userID == "" {
		if err := d.login(ctx); err != nil {
			return nil, err
		}
	}

	up(0)
	// Cache the whole stream so we know the MD5 before initializing.
	tmpFile, md5sum, err := streamPkg.CacheFullInTempFileAndHash(file, utils.MD5)
	if err != nil {
		return nil, err
	}
	defer tmpFile.Close()

	parentID := d.resolveParentID(dstDir)
	parentPath := ""
	if dstDir != nil {
		parentPath = dstDir.GetPath()
	}
	form := map[string]string{
		"parentId":    parentID,
		"name":        file.GetName(),
		"size":        strconv.FormatInt(file.GetSize(), 10),
		"hash":        md5sum,
		"sampleMd5":   md5sum,
		"org_channel": orgChannel,
	}
	// Data shape depends on the response code, so decode lazily.
	var resp Response[json.RawMessage]
	if err = d.postForm(ctx, uploadInitializeURL, form, &resp); err != nil {
		return nil, err
	}
	if resp.Code != uploadSuccessCode {
		switch resp.Code {
		case successCode:
			// Chunked-upload path: data is the upload session descriptor.
			var initData UploadInitData
			if err := json.Unmarshal(resp.Data, &initData); err != nil {
				return nil, fmt.Errorf("parse upload init response failed: %w", err)
			}
			serverCode, err := d.uploadFileInChunks(ctx, tmpFile, file.GetSize(), md5sum, initData, up)
			if err != nil {
				return nil, err
			}
			obj, err := d.completeChunkUpload(ctx, initData, parentID, parentPath, file.GetName(), file.GetSize(), md5sum, serverCode)
			if err != nil {
				return nil, err
			}
			up(100)
			return obj, nil
		default:
			return nil, fmt.Errorf("upload failed: %s", resp.Message)
		}
	}

	// Rapid-upload path: data is the finished Resource.
	var resource Resource
	if err := json.Unmarshal(resp.Data, &resource); err != nil {
		return nil, fmt.Errorf("parse upload response failed: %w", err)
	}
	obj, err := resource.toObject(parentID, parentPath)
	if err != nil {
		return nil, err
	}
	up(100)
	return obj, nil
}
|
||||
|
||||
// uploadFileInChunks streams tmpFile to the upload endpoint in fixed-size
// chunks (chunkSize, 1 MiB) and returns the server completion code
// ("finishedFlag") reported with the final chunk, which the completion call
// needs. Upload session metadata (app id, token, serial number) rides in the
// request headers of every chunk; progress is reported via up after each
// chunk.
func (d *BitQiu) uploadFileInChunks(ctx context.Context, tmpFile model.File, size int64, md5sum string, initData UploadInitData, up driver.UpdateProgress) (string, error) {
	if d.client == nil {
		return "", fmt.Errorf("client not initialized")
	}
	if size <= 0 {
		return "", fmt.Errorf("invalid file size")
	}
	// One reusable buffer for all chunks; the last chunk may be shorter.
	buf := make([]byte, chunkSize)
	offset := int64(0)
	var finishedFlag string

	for offset < size {
		chunkLen := chunkSize
		remaining := size - offset
		if remaining < chunkLen {
			chunkLen = remaining
		}

		// Read exactly this chunk from the cached temp file.
		reader := io.NewSectionReader(tmpFile, offset, chunkLen)
		chunkBuf := buf[:chunkLen]
		if _, err := io.ReadFull(reader, chunkBuf); err != nil {
			return "", fmt.Errorf("read chunk failed: %w", err)
		}

		// The upload server expects session credentials and chunk placement
		// ("offset"/"len") as headers on an octet-stream POST.
		headers := map[string]string{
			"accept":       "*/*",
			"content-type": "application/octet-stream",
			"appid":        initData.AppID,
			"token":        initData.Token,
			"userid":       strconv.FormatInt(initData.UserID, 10),
			"serialnumber": initData.SerialNumber,
			"hash":         md5sum,
			"len":          strconv.FormatInt(chunkLen, 10),
			"offset":       strconv.FormatInt(offset, 10),
			"user-agent":   d.userAgent(),
		}

		var chunkResp ChunkUploadResponse
		req := d.client.R().
			SetContext(ctx).
			SetHeaders(headers).
			SetBody(chunkBuf).
			SetResult(&chunkResp)

		if _, err := req.Post(initData.UploadURL); err != nil {
			return "", err
		}
		if chunkResp.ErrCode != 0 {
			return "", fmt.Errorf("chunk upload failed with code %d", chunkResp.ErrCode)
		}
		// The flag from the last accepted chunk is the completion code.
		finishedFlag = chunkResp.FinishedFlag
		offset += chunkLen
		up(float64(offset) * 100 / float64(size))
	}

	if finishedFlag == "" {
		return "", fmt.Errorf("upload finished without server code")
	}
	return finishedFlag, nil
}
|
||||
|
||||
// completeChunkUpload finalizes a chunked upload by reporting the serverCode
// (finishedFlag from the last chunk) together with the file metadata, and
// converts the Resource the server returns into a model object.
func (d *BitQiu) completeChunkUpload(ctx context.Context, initData UploadInitData, parentID, parentPath, name string, size int64, md5sum, serverCode string) (model.Obj, error) {
	form := map[string]string{
		"currentPage": "1",
		"limit":       "1",
		"userId":      strconv.FormatInt(initData.UserID, 10),
		"status":      "0",
		"parentId":    parentID,
		"name":        name,
		"fileUid":     initData.FileUID,
		"fileSid":     initData.FileSID,
		"size":        strconv.FormatInt(size, 10),
		"serverCode":  serverCode,
		"snapTime":    "",
		"hash":        md5sum,
		"sampleMd5":   md5sum,
		"org_channel": orgChannel,
	}

	var resp Response[Resource]
	if err := d.postForm(ctx, uploadCompleteURL, form, &resp); err != nil {
		return nil, err
	}
	if resp.Code != successCode {
		return nil, fmt.Errorf("complete upload failed: %s", resp.Message)
	}

	return resp.Data.toObject(parentID, parentPath)
}
|
||||
|
||||
// login authenticates with the configured username/password (the password is
// sent MD5-hashed), records the numeric user id, and ensures the real root
// folder id is known. Session cookies are captured by the client's cookie
// jar as a side effect.
func (d *BitQiu) login(ctx context.Context) error {
	if d.client == nil {
		return fmt.Errorf("client not initialized")
	}

	form := map[string]string{
		"passport":    d.Username,
		"password":    utils.GetMD5EncodeStr(d.Password),
		"remember":    "0",
		"captcha":     "",
		"org_channel": orgChannel,
	}
	var resp Response[LoginData]
	if err := d.postForm(ctx, loginURL, form, &resp); err != nil {
		return err
	}
	if resp.Code != successCode {
		return fmt.Errorf("login failed: %s", resp.Message)
	}
	d.userID = strconv.FormatInt(resp.Data.UserID, 10)
	return d.ensureRootFolderID(ctx)
}
|
||||
|
||||
// ensureRootFolderID resolves the account's real root directory id when the
// configured root is empty or the placeholder "0", and persists it into the
// driver settings so later sessions skip the lookup.
func (d *BitQiu) ensureRootFolderID(ctx context.Context) error {
	rootID := d.Addition.GetRootId()
	// A concrete root id is already configured; nothing to do.
	if rootID != "" && rootID != "0" {
		return nil
	}

	form := map[string]string{
		"org_channel": orgChannel,
	}
	var resp Response[UserInfoData]
	if err := d.postForm(ctx, userInfoURL, form, &resp); err != nil {
		return err
	}
	if resp.Code != successCode {
		return fmt.Errorf("get user info failed: %s", resp.Message)
	}
	if resp.Data.RootDirID == "" {
		return fmt.Errorf("get user info failed: empty root dir id")
	}
	// Persist only on change to avoid unnecessary storage writes.
	if d.Addition.RootFolderID != resp.Data.RootDirID {
		d.Addition.RootFolderID = resp.Data.RootDirID
		op.MustSaveDriverStorage(d)
	}
	return nil
}
|
||||
|
||||
func (d *BitQiu) postForm(ctx context.Context, url string, form map[string]string, result interface{}) error {
|
||||
if d.client == nil {
|
||||
return fmt.Errorf("client not initialized")
|
||||
}
|
||||
req := d.client.R().
|
||||
SetContext(ctx).
|
||||
SetHeaders(d.commonHeaders()).
|
||||
SetFormData(form)
|
||||
if result != nil {
|
||||
req = req.SetResult(result)
|
||||
}
|
||||
_, err := req.Post(url)
|
||||
return err
|
||||
}
|
||||
|
||||
// waitForCopiedObject polls for the result of an asynchronous copy: it checks
// the async task manager for a reported failure, then lists dstDir looking
// for an entry matching the source's name and kind. It polls up to
// copyPollMaxAttempts times with copyPollInterval between attempts, honoring
// ctx cancellation, and times out with an error if the copy never appears.
func (d *BitQiu) waitForCopiedObject(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	expectedName := srcObj.GetName()
	expectedIsDir := srcObj.IsDir()
	// Remember the most recent listing error so a timeout can surface it.
	var lastListErr error

	for attempt := 0; attempt < copyPollMaxAttempts; attempt++ {
		// No delay before the first probe; the copy may already be done.
		if attempt > 0 {
			if err := waitWithContext(ctx, copyPollInterval); err != nil {
				return nil, err
			}
		}

		// Fail fast if the server reports the copy task as failed.
		if err := d.checkCopyFailure(ctx); err != nil {
			return nil, err
		}

		obj, err := d.findObjectInDir(ctx, dstDir, expectedName, expectedIsDir)
		if err != nil {
			// Listing hiccups are tolerated; keep polling.
			lastListErr = err
			continue
		}
		if obj != nil {
			return obj, nil
		}
	}
	if lastListErr != nil {
		return nil, lastListErr
	}
	return nil, fmt.Errorf("copy task timed out waiting for completion")
}
|
||||
|
||||
// checkCopyFailure queries the async task manager and returns an error if
// any copy task is reported as failed (surfacing the first failure's
// message). A nil return means no failures are currently reported. At most
// two attempts are made, re-logging-in on session-expiry codes.
func (d *BitQiu) checkCopyFailure(ctx context.Context) error {
	form := map[string]string{
		"org_channel": orgChannel,
	}
	for attempt := 0; attempt < 2; attempt++ {
		var resp Response[AsyncManagerData]
		if err := d.postForm(ctx, copyManagerURL, form, &resp); err != nil {
			return err
		}
		switch resp.Code {
		case successCode:
			if len(resp.Data.FailTasks) > 0 {
				return fmt.Errorf("copy failed: %s", resp.Data.FailTasks[0].ErrorMessage())
			}
			return nil
		case "10401", "10404":
			// Session expired: refresh the session and retry once.
			if err := d.login(ctx); err != nil {
				return err
			}
		default:
			return fmt.Errorf("query copy status failed: %s", resp.Message)
		}
	}
	return fmt.Errorf("query copy status failed: retry limit reached")
}
|
||||
|
||||
func (d *BitQiu) findObjectInDir(ctx context.Context, dir model.Obj, name string, isDir bool) (model.Obj, error) {
|
||||
objs, err := d.List(ctx, dir, model.ListArgs{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, obj := range objs {
|
||||
if obj.GetName() == name && obj.IsDir() == isDir {
|
||||
return obj, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func waitWithContext(ctx context.Context, d time.Duration) error {
|
||||
timer := time.NewTimer(d)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-timer.C:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (d *BitQiu) commonHeaders() map[string]string {
|
||||
headers := map[string]string{
|
||||
"accept": "application/json, text/plain, */*",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"cache-control": "no-cache",
|
||||
"pragma": "no-cache",
|
||||
"user-platform": d.Addition.UserPlatform,
|
||||
"x-kl-saas-ajax-request": "Ajax_Request",
|
||||
"x-requested-with": "XMLHttpRequest",
|
||||
"referer": baseURL + "/",
|
||||
"origin": baseURL,
|
||||
"user-agent": d.userAgent(),
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
func (d *BitQiu) userAgent() string {
|
||||
if ua := strings.TrimSpace(d.Addition.UserAgent); ua != "" {
|
||||
return ua
|
||||
}
|
||||
return defaultUserAgent
|
||||
}
|
||||
|
||||
// resolveParentID picks the directory id to use as a request's parent:
// the given dir's id when present, else the configured root id, else the
// driver's default root ("0").
func (d *BitQiu) resolveParentID(dir model.Obj) string {
	if dir != nil && dir.GetID() != "" {
		return dir.GetID()
	}
	if root := d.Addition.GetRootId(); root != "" {
		return root
	}
	return config.DefaultRoot
}
|
||||
|
||||
// pageSize returns the configured listing page size, falling back to 24
// when the setting is absent, non-numeric, or non-positive.
func (d *BitQiu) pageSize() int {
	if size, err := strconv.Atoi(d.Addition.PageSize); err == nil && size > 0 {
		return size
	}
	return 24
}
|
||||
|
||||
// orderType returns the configured listing sort key, defaulting to
// "updateTime" when unset.
func (d *BitQiu) orderType() string {
	if d.Addition.OrderType != "" {
		return d.Addition.OrderType
	}
	return "updateTime"
}
|
||||
|
||||
// orderDesc encodes the descending-sort flag as the string form the API
// expects: "1" for descending, "0" for ascending.
func (d *BitQiu) orderDesc() string {
	if d.Addition.OrderDesc {
		return "1"
	}
	return "0"
}
|
||||
|
||||
var _ driver.Driver = (*BitQiu)(nil)
|
||||
var _ driver.PutResult = (*BitQiu)(nil)
|
||||
28
drivers/bitqiu/meta.go
Normal file
28
drivers/bitqiu/meta.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package bitqiu
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
// Addition holds the user-configurable settings for the BitQiu driver.
type Addition struct {
	driver.RootID
	// Username and Password are the BitQiu account credentials.
	Username string `json:"username" required:"true"`
	Password string `json:"password" required:"true"`
	// UserPlatform is the device identifier sent as the "user-platform"
	// header; Init generates and persists one when left empty.
	UserPlatform string `json:"user_platform" help:"Optional device identifier; auto-generated if empty."`
	// OrderType and OrderDesc control listing sort key and direction.
	OrderType string `json:"order_type" type:"select" options:"updateTime,createTime,name,size" default:"updateTime"`
	OrderDesc bool   `json:"order_desc"`
	// PageSize is the listing page size (kept as a string for the settings
	// UI; parsed and defaulted to 24 by pageSize()).
	PageSize string `json:"page_size" default:"24" help:"Number of entries to request per page."`
	// UserAgent overrides the HTTP user agent; empty falls back to the
	// built-in default.
	UserAgent string `json:"user_agent" default:"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"`
}
|
||||
|
||||
// config is the static driver metadata; DefaultRoot "0" is a placeholder
// that login replaces with the account's real root directory id.
var config = driver.Config{
	Name:        "BitQiu",
	DefaultRoot: "0",
}

// init registers the BitQiu driver with the driver registry.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &BitQiu{}
	})
}
|
||||
107
drivers/bitqiu/types.go
Normal file
107
drivers/bitqiu/types.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package bitqiu
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
// Response is the generic envelope of every BitQiu API reply; Code is a
// string status (see successCode and friends) and Data carries the payload.
type Response[T any] struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Data    T      `json:"data"`
}

// LoginData is the payload of a successful login.
type LoginData struct {
	UserID int64 `json:"userId"`
}

// ResourcePage is one page of a directory listing.
type ResourcePage struct {
	CurrentPage    int        `json:"currentPage"`
	PageSize       int        `json:"pageSize"`
	TotalCount     int        `json:"totalCount"`
	TotalPageCount int        `json:"totalPageCount"`
	Data           []Resource `json:"data"`
	HasNext        bool       `json:"hasNext"`
}

// Resource is a file or directory entry as returned by the API.
// ResourceType 1 denotes a directory. Size and the timestamps are pointers
// because the server may omit them (e.g. for directories).
type Resource struct {
	ResourceID   string       `json:"resourceId"`
	ResourceUID  string       `json:"resourceUid"`
	ResourceType int          `json:"resourceType"`
	ParentID     string       `json:"parentId"`
	Name         string       `json:"name"`
	ExtName      string       `json:"extName"`
	Size         *json.Number `json:"size"`
	CreateTime   *string      `json:"createTime"`
	UpdateTime   *string      `json:"updateTime"`
	FileMD5      string       `json:"fileMd5"`
}

// DownloadData is the payload of the download-URL endpoint.
type DownloadData struct {
	URL  string `json:"url"`
	MD5  string `json:"md5"`
	Size int64  `json:"size"`
}

// UserInfoData is the payload of the user-info endpoint; only the root
// directory id is consumed.
type UserInfoData struct {
	RootDirID string `json:"rootDirId"`
}

// CreateDirData is the payload of the folder-creation endpoint.
type CreateDirData struct {
	DirID    string `json:"dirId"`
	Name     string `json:"name"`
	ParentID string `json:"parentId"`
}

// AsyncManagerData groups the async (copy) tasks by state.
type AsyncManagerData struct {
	WaitTasks    []AsyncTask `json:"waitTaskList"`
	RunningTasks []AsyncTask `json:"runningTaskList"`
	SuccessTasks []AsyncTask `json:"successTaskList"`
	FailTasks    []AsyncTask `json:"failTaskList"`
	TaskList     []AsyncTask `json:"taskList"`
}

// AsyncTask describes one asynchronous server-side task.
type AsyncTask struct {
	TaskID      string         `json:"taskId"`
	Status      int            `json:"status"`
	ErrorMsg    string         `json:"errorMsg"`
	Message     string         `json:"message"`
	Result      *AsyncTaskInfo `json:"result"`
	TargetName  string         `json:"targetName"`
	TargetDirID string         `json:"parentId"`
}

// AsyncTaskInfo is the optional result payload of a finished async task.
type AsyncTaskInfo struct {
	Resource Resource `json:"resource"`
	DirID    string   `json:"dirId"`
	FileID   string   `json:"fileId"`
	Name     string   `json:"name"`
	ParentID string   `json:"parentId"`
}

// ErrorMessage returns the most specific human-readable failure text for a
// task, preferring ErrorMsg over Message, with a generic fallback.
func (t AsyncTask) ErrorMessage() string {
	if t.ErrorMsg != "" {
		return t.ErrorMsg
	}
	if t.Message != "" {
		return t.Message
	}
	return "unknown error"
}

// UploadInitData is the chunked-upload session descriptor returned by the
// upload-initialize endpoint; its fields ride as headers on every chunk.
type UploadInitData struct {
	Name         string `json:"name"`
	Size         int64  `json:"size"`
	Token        string `json:"token"`
	FileUID      string `json:"fileUid"`
	FileSID      string `json:"fileSid"`
	ParentID     string `json:"parentId"`
	UserID       int64  `json:"userId"`
	SerialNumber string `json:"serialNumber"`
	UploadURL    string `json:"uploadUrl"`
	AppID        string `json:"appId"`
}

// ChunkUploadResponse is the per-chunk reply from the upload server;
// FinishedFlag from the final chunk is the completion code.
type ChunkUploadResponse struct {
	ErrCode      int    `json:"errCode"`
	Offset       int64  `json:"offset"`
	Finished     int    `json:"finished"`
	FinishedFlag string `json:"finishedFlag"`
}
|
||||
102
drivers/bitqiu/util.go
Normal file
102
drivers/bitqiu/util.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package bitqiu
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
)
|
||||
|
||||
// Object extends the generic model.Object with the BitQiu parent directory
// id, which move/create operations keep up to date.
type Object struct {
	model.Object
	ParentID string
}
|
||||
|
||||
// toObject converts an API Resource into a model object rooted at
// parentPath. ResourceUID is used when ResourceID is empty; optional fields
// (size, timestamps, md5) are applied only when present. Never returns a
// non-nil error in the current implementation, but keeps the (obj, error)
// shape for the SliceConvert callers.
func (r Resource) toObject(parentID, parentPath string) (model.Obj, error) {
	id := r.ResourceID
	if id == "" {
		id = r.ResourceUID
	}
	obj := &Object{
		Object: model.Object{
			ID:   id,
			Name: r.Name,
			// ResourceType 1 marks a directory.
			IsFolder: r.ResourceType == 1,
		},
		ParentID: parentID,
	}
	// Size arrives as an optional json.Number; ignore unparseable values.
	if r.Size != nil {
		if size, err := (*r.Size).Int64(); err == nil {
			obj.Size = size
		}
	}
	if ct := parseBitQiuTime(r.CreateTime); !ct.IsZero() {
		obj.Ctime = ct
	}
	if mt := parseBitQiuTime(r.UpdateTime); !mt.IsZero() {
		obj.Modified = mt
	}
	if r.FileMD5 != "" {
		obj.HashInfo = utils.NewHashInfo(utils.MD5, strings.ToLower(r.FileMD5))
	}
	obj.SetPath(path.Join(parentPath, obj.Name))
	return obj, nil
}
|
||||
|
||||
func parseBitQiuTime(value *string) time.Time {
|
||||
if value == nil {
|
||||
return time.Time{}
|
||||
}
|
||||
trimmed := strings.TrimSpace(*value)
|
||||
if trimmed == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
if ts, err := time.ParseInLocation("2006-01-02 15:04:05", trimmed, time.Local); err == nil {
|
||||
return ts
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// updateObjectName returns obj renamed to newName with its path rebased onto
// its parent directory. Known concrete types (*Object, *model.Object) are
// mutated in place and returned; any other implementation gets its path set
// (when it supports model.SetPath) and is copied into a fresh model.Object.
func updateObjectName(obj model.Obj, newName string) model.Obj {
	newPath := path.Join(parentPathOf(obj.GetPath()), newName)

	switch o := obj.(type) {
	case *Object:
		o.Name = newName
		// NOTE(review): o.Name and o.Object.Name refer to the same embedded
		// field, so this second assignment is redundant but harmless.
		o.Object.Name = newName
		o.SetPath(newPath)
		return o
	case *model.Object:
		o.Name = newName
		o.SetPath(newPath)
		return o
	}

	// Unknown implementation: update its path if possible…
	if setter, ok := obj.(model.SetPath); ok {
		setter.SetPath(newPath)
	}

	// …and return a detached copy carrying the new name.
	return &model.Object{
		ID:       obj.GetID(),
		Path:     newPath,
		Name:     newName,
		Size:     obj.GetSize(),
		Modified: obj.ModTime(),
		Ctime:    obj.CreateTime(),
		IsFolder: obj.IsDir(),
		HashInfo: obj.GetHash(),
	}
}
|
||||
|
||||
// parentPathOf returns the directory portion of p, or "" when p is empty or
// has no directory component (path.Dir would report ".").
func parentPathOf(p string) string {
	if p == "" {
		return ""
	}
	if dir := path.Dir(p); dir != "." {
		return dir
	}
	return ""
}
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
type Cloudreve struct {
	model.Storage
	Addition
	// ref, when non-nil, is another Cloudreve storage whose session this one
	// delegates to (see InitReference); requests are forwarded to it.
	ref *Cloudreve
}
|
||||
|
||||
func (d *Cloudreve) Config() driver.Config {
|
||||
@@ -37,8 +38,18 @@ func (d *Cloudreve) Init(ctx context.Context) error {
|
||||
return d.login()
|
||||
}
|
||||
|
||||
// InitReference links this storage to another Cloudreve storage so API calls
// reuse its session. Returns errs.NotSupport when storage is not a
// Cloudreve driver.
func (d *Cloudreve) InitReference(storage driver.Driver) error {
	refStorage, ok := storage.(*Cloudreve)
	if ok {
		d.ref = refStorage
		return nil
	}
	return errs.NotSupport
}
|
||||
|
||||
// Drop clears the session cookie and releases the reference storage link.
func (d *Cloudreve) Drop(ctx context.Context) error {
	d.Cookie = ""
	d.ref = nil
	return nil
}
|
||||
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
@@ -19,7 +21,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/cookie"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
json "github.com/json-iterator/go"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
@@ -35,6 +36,9 @@ func (d *Cloudreve) getUA() string {
|
||||
}
|
||||
|
||||
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
|
||||
if d.ref != nil {
|
||||
return d.ref.request(method, path, callback, out)
|
||||
}
|
||||
u := d.Address + "/api/v3" + path
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
@@ -79,11 +83,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac
|
||||
}
|
||||
if out != nil && r.Data != nil {
|
||||
var marshal []byte
|
||||
marshal, err = json.Marshal(r.Data)
|
||||
marshal, err = jsoniter.Marshal(r.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(marshal, out)
|
||||
err = jsoniter.Unmarshal(marshal, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -187,12 +191,9 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@@ -205,9 +206,26 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
|
||||
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
|
||||
req.SetHeader("User-Agent", d.getUA())
|
||||
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
req.AddRetryCondition(func(r *resty.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
if r.IsError() {
|
||||
return true
|
||||
}
|
||||
var retryResp Resp
|
||||
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
|
||||
if jErr != nil {
|
||||
return true
|
||||
}
|
||||
if retryResp.Code != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
}, nil)
|
||||
if err != nil {
|
||||
break
|
||||
return err
|
||||
}
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
@@ -222,16 +240,15 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@@ -248,14 +265,43 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
err = func() error {
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var up Resp
|
||||
err = json.Unmarshal(body, &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if up.Code != 0 {
|
||||
return errors.New(up.Msg)
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err == nil {
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
} else {
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
_ = res.Body.Close()
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -264,16 +310,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@@ -295,22 +340,31 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
_ = res.Body.Close()
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
_ = res.Body.Close()
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
// 上传成功发送回调请求
|
||||
err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
|
||||
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
|
||||
req.SetBody("{}")
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
@@ -318,16 +372,15 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
|
||||
var chunk int = 0
|
||||
var etags []string
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
@@ -346,10 +399,26 @@ func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u Uploa
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = res.Body.Close()
|
||||
etags = append(etags, res.Header.Get("ETag"))
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
etag := res.Header.Get("ETag")
|
||||
res.Body.Close()
|
||||
switch {
|
||||
case res.StatusCode != 200:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case etag == "":
|
||||
return errors.New("faild to get ETag from header")
|
||||
default:
|
||||
retryCount = 0
|
||||
etags = append(etags, etag)
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
}
|
||||
|
||||
// s3LikeFinishUpload
|
||||
|
||||
305
drivers/cloudreve_v4/driver.go
Normal file
305
drivers/cloudreve_v4/driver.go
Normal file
@@ -0,0 +1,305 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
type CloudreveV4 struct {
|
||||
model.Storage
|
||||
Addition
|
||||
ref *CloudreveV4
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Config() driver.Config {
|
||||
if d.ref != nil {
|
||||
return d.ref.Config()
|
||||
}
|
||||
if d.EnableVersionUpload {
|
||||
config.NoOverwriteUpload = false
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Init(ctx context.Context) error {
|
||||
// removing trailing slash
|
||||
d.Address = strings.TrimSuffix(d.Address, "/")
|
||||
op.MustSaveDriverStorage(d)
|
||||
if d.ref != nil {
|
||||
return nil
|
||||
}
|
||||
if d.AccessToken == "" && d.RefreshToken != "" {
|
||||
return d.refreshToken()
|
||||
}
|
||||
if d.Username != "" {
|
||||
return d.login()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
|
||||
refStorage, ok := storage.(*CloudreveV4)
|
||||
if ok {
|
||||
d.ref = refStorage
|
||||
return nil
|
||||
}
|
||||
return errs.NotSupport
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Drop(ctx context.Context) error {
|
||||
d.ref = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
const pageSize int = 100
|
||||
var f []File
|
||||
var r FileResp
|
||||
params := map[string]string{
|
||||
"page_size": strconv.Itoa(pageSize),
|
||||
"uri": dir.GetPath(),
|
||||
"order_by": d.OrderBy,
|
||||
"order_direction": d.OrderDirection,
|
||||
"page": "0",
|
||||
}
|
||||
|
||||
for {
|
||||
err := d.request(http.MethodGet, "/file", func(req *resty.Request) {
|
||||
req.SetQueryParams(params)
|
||||
}, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f = append(f, r.Files...)
|
||||
if r.Pagination.NextToken == "" || len(r.Files) < pageSize {
|
||||
break
|
||||
}
|
||||
params["next_page_token"] = r.Pagination.NextToken
|
||||
}
|
||||
|
||||
return utils.SliceConvert(f, func(src File) (model.Obj, error) {
|
||||
if d.EnableFolderSize && src.Type == 1 {
|
||||
var ds FolderSummaryResp
|
||||
err := d.request(http.MethodGet, "/file/info", func(req *resty.Request) {
|
||||
req.SetQueryParam("uri", src.Path)
|
||||
req.SetQueryParam("folder_summary", "true")
|
||||
}, &ds)
|
||||
if err == nil && ds.FolderSummary.Size > 0 {
|
||||
src.Size = ds.FolderSummary.Size
|
||||
}
|
||||
}
|
||||
var thumb model.Thumbnail
|
||||
if d.EnableThumb && src.Type == 0 {
|
||||
var t FileThumbResp
|
||||
err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) {
|
||||
req.SetQueryParam("uri", src.Path)
|
||||
}, &t)
|
||||
if err == nil && t.URL != "" {
|
||||
thumb = model.Thumbnail{
|
||||
Thumbnail: t.URL,
|
||||
}
|
||||
}
|
||||
}
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: src.ID,
|
||||
Path: src.Path,
|
||||
Name: src.Name,
|
||||
Size: src.Size,
|
||||
Modified: src.UpdatedAt,
|
||||
Ctime: src.CreatedAt,
|
||||
IsFolder: src.Type == 1,
|
||||
},
|
||||
Thumbnail: thumb,
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
var url FileUrlResp
|
||||
err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{file.GetPath()},
|
||||
"download": true,
|
||||
})
|
||||
}, &url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(url.Urls) == 0 {
|
||||
return nil, errors.New("server returns no url")
|
||||
}
|
||||
exp := time.Until(url.Expires)
|
||||
return &model.Link{
|
||||
URL: url.Urls[0].URL,
|
||||
Expiration: &exp,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"type": "folder",
|
||||
"uri": parentDir.GetPath() + "/" + dirName,
|
||||
"error_on_conflict": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{srcObj.GetPath()},
|
||||
"dst": dstDir.GetPath(),
|
||||
"copy": false,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"new_name": newName,
|
||||
"uri": srcObj.GetPath(),
|
||||
})
|
||||
}, nil)
|
||||
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{srcObj.GetPath()},
|
||||
"dst": dstDir.GetPath(),
|
||||
"copy": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error {
|
||||
return d.request(http.MethodDelete, "/file", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"uris": []string{obj.GetPath()},
|
||||
"unlink": false,
|
||||
"skip_soft_delete": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||
if file.GetSize() == 0 {
|
||||
// 空文件使用新建文件方法,避免上传卡锁
|
||||
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"type": "file",
|
||||
"uri": dstDir.GetPath() + "/" + file.GetName(),
|
||||
"error_on_conflict": true,
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
var p StoragePolicy
|
||||
var r FileResp
|
||||
var u FileUploadResp
|
||||
var err error
|
||||
params := map[string]string{
|
||||
"page_size": "10",
|
||||
"uri": dstDir.GetPath(),
|
||||
"order_by": "created_at",
|
||||
"order_direction": "asc",
|
||||
"page": "0",
|
||||
}
|
||||
err = d.request(http.MethodGet, "/file", func(req *resty.Request) {
|
||||
req.SetQueryParams(params)
|
||||
}, &r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p = r.StoragePolicy
|
||||
body := base.Json{
|
||||
"uri": dstDir.GetPath() + "/" + file.GetName(),
|
||||
"size": file.GetSize(),
|
||||
"policy_id": p.ID,
|
||||
"last_modified": file.ModTime().UnixMilli(),
|
||||
"mime_type": "",
|
||||
}
|
||||
if d.EnableVersionUpload {
|
||||
body["entity_type"] = "version"
|
||||
}
|
||||
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
|
||||
req.SetBody(body)
|
||||
}, &u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u.StoragePolicy.Relay {
|
||||
err = d.upLocal(ctx, file, u, up)
|
||||
} else {
|
||||
switch u.StoragePolicy.Type {
|
||||
case "local":
|
||||
err = d.upLocal(ctx, file, u, up)
|
||||
case "remote":
|
||||
err = d.upRemote(ctx, file, u, up)
|
||||
case "onedrive":
|
||||
err = d.upOneDrive(ctx, file, u, up)
|
||||
case "s3":
|
||||
err = d.upS3(ctx, file, u, up)
|
||||
default:
|
||||
return errs.NotImplement
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
// 删除失败的会话
|
||||
_ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"id": u.SessionID,
|
||||
"uri": u.URI,
|
||||
})
|
||||
}, nil)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
||||
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
|
||||
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
|
||||
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
|
||||
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
|
||||
// return errs.NotImplement to use an internal archive tool
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
||||
var _ driver.Driver = (*CloudreveV4)(nil)
|
||||
44
drivers/cloudreve_v4/meta.go
Normal file
44
drivers/cloudreve_v4/meta.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
// Usually one of two
|
||||
driver.RootPath
|
||||
// driver.RootID
|
||||
// define other
|
||||
Address string `json:"address" required:"true"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
CustomUA string `json:"custom_ua"`
|
||||
EnableFolderSize bool `json:"enable_folder_size"`
|
||||
EnableThumb bool `json:"enable_thumb"`
|
||||
EnableVersionUpload bool `json:"enable_version_upload"`
|
||||
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"`
|
||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "Cloudreve V4",
|
||||
LocalSort: false,
|
||||
OnlyLocal: false,
|
||||
OnlyProxy: false,
|
||||
NoCache: false,
|
||||
NoUpload: false,
|
||||
NeedMs: false,
|
||||
DefaultRoot: "cloudreve://my",
|
||||
CheckStatus: true,
|
||||
Alert: "",
|
||||
NoOverwriteUpload: true,
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &CloudreveV4{}
|
||||
})
|
||||
}
|
||||
164
drivers/cloudreve_v4/types.go
Normal file
164
drivers/cloudreve_v4/types.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
)
|
||||
|
||||
type Object struct {
|
||||
model.Object
|
||||
StoragePolicy StoragePolicy
|
||||
}
|
||||
|
||||
type Resp struct {
|
||||
Code int `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
Data any `json:"data"`
|
||||
}
|
||||
|
||||
type BasicConfigResp struct {
|
||||
InstanceID string `json:"instance_id"`
|
||||
// Title string `json:"title"`
|
||||
// Themes string `json:"themes"`
|
||||
// DefaultTheme string `json:"default_theme"`
|
||||
User struct {
|
||||
ID string `json:"id"`
|
||||
// Nickname string `json:"nickname"`
|
||||
// CreatedAt time.Time `json:"created_at"`
|
||||
// Anonymous bool `json:"anonymous"`
|
||||
Group struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Permission string `json:"permission"`
|
||||
} `json:"group"`
|
||||
} `json:"user"`
|
||||
// Logo string `json:"logo"`
|
||||
// LogoLight string `json:"logo_light"`
|
||||
// CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"`
|
||||
CaptchaType string `json:"captcha_type"` // support 'normal' only
|
||||
// AppPromotion bool `json:"app_promotion"`
|
||||
}
|
||||
|
||||
type SiteLoginConfigResp struct {
|
||||
LoginCaptcha bool `json:"login_captcha"`
|
||||
Authn bool `json:"authn"`
|
||||
}
|
||||
|
||||
type PrepareLoginResp struct {
|
||||
WebauthnEnabled bool `json:"webauthn_enabled"`
|
||||
PasswordEnabled bool `json:"password_enabled"`
|
||||
}
|
||||
|
||||
type CaptchaResp struct {
|
||||
Image string `json:"image"`
|
||||
Ticket string `json:"ticket"`
|
||||
}
|
||||
|
||||
type Token struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
AccessExpires time.Time `json:"access_expires"`
|
||||
RefreshExpires time.Time `json:"refresh_expires"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
User struct {
|
||||
ID string `json:"id"`
|
||||
// Email string `json:"email"`
|
||||
// Nickname string `json:"nickname"`
|
||||
Status string `json:"status"`
|
||||
// CreatedAt time.Time `json:"created_at"`
|
||||
Group struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Permission string `json:"permission"`
|
||||
// DirectLinkBatchSize int `json:"direct_link_batch_size"`
|
||||
// TrashRetention int `json:"trash_retention"`
|
||||
} `json:"group"`
|
||||
// Language string `json:"language"`
|
||||
} `json:"user"`
|
||||
Token Token `json:"token"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Type int `json:"type"` // 0: file, 1: folder
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
Size int64 `json:"size"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
Path string `json:"path"`
|
||||
Capability string `json:"capability"`
|
||||
Owned bool `json:"owned"`
|
||||
PrimaryEntity string `json:"primary_entity"`
|
||||
}
|
||||
|
||||
type StoragePolicy struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
MaxSize int64 `json:"max_size"`
|
||||
Relay bool `json:"relay,omitempty"`
|
||||
}
|
||||
|
||||
type Pagination struct {
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"page_size"`
|
||||
IsCursor bool `json:"is_cursor"`
|
||||
NextToken string `json:"next_token,omitempty"`
|
||||
}
|
||||
|
||||
type Props struct {
|
||||
Capability string `json:"capability"`
|
||||
MaxPageSize int `json:"max_page_size"`
|
||||
OrderByOptions []string `json:"order_by_options"`
|
||||
OrderDirectionOptions []string `json:"order_direction_options"`
|
||||
}
|
||||
|
||||
type FileResp struct {
|
||||
Files []File `json:"files"`
|
||||
Parent File `json:"parent"`
|
||||
Pagination Pagination `json:"pagination"`
|
||||
Props Props `json:"props"`
|
||||
ContextHint string `json:"context_hint"`
|
||||
MixedType bool `json:"mixed_type"`
|
||||
StoragePolicy StoragePolicy `json:"storage_policy"`
|
||||
}
|
||||
|
||||
type FileUrlResp struct {
|
||||
Urls []struct {
|
||||
URL string `json:"url"`
|
||||
} `json:"urls"`
|
||||
Expires time.Time `json:"expires"`
|
||||
}
|
||||
|
||||
type FileUploadResp struct {
|
||||
// UploadID string `json:"upload_id"`
|
||||
SessionID string `json:"session_id"`
|
||||
ChunkSize int64 `json:"chunk_size"`
|
||||
Expires int64 `json:"expires"`
|
||||
StoragePolicy StoragePolicy `json:"storage_policy"`
|
||||
URI string `json:"uri"`
|
||||
CompleteURL string `json:"completeURL,omitempty"` // for S3-like
|
||||
CallbackSecret string `json:"callback_secret,omitempty"` // for S3-like, OneDrive
|
||||
UploadUrls []string `json:"upload_urls,omitempty"` // for not-local
|
||||
Credential string `json:"credential,omitempty"` // for local
|
||||
}
|
||||
|
||||
type FileThumbResp struct {
|
||||
URL string `json:"url"`
|
||||
Expires time.Time `json:"expires"`
|
||||
}
|
||||
|
||||
type FolderSummaryResp struct {
|
||||
File
|
||||
FolderSummary struct {
|
||||
Size int64 `json:"size"`
|
||||
Files int64 `json:"files"`
|
||||
Folders int64 `json:"folders"`
|
||||
Completed bool `json:"completed"`
|
||||
CalculatedAt time.Time `json:"calculated_at"`
|
||||
} `json:"folder_summary"`
|
||||
}
|
||||
476
drivers/cloudreve_v4/util.go
Normal file
476
drivers/cloudreve_v4/util.go
Normal file
@@ -0,0 +1,476 @@
|
||||
package cloudreve_v4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/setting"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// do others that not defined in Driver interface
|
||||
|
||||
func (d *CloudreveV4) getUA() string {
|
||||
if d.CustomUA != "" {
|
||||
return d.CustomUA
|
||||
}
|
||||
return base.UserAgent
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
|
||||
if d.ref != nil {
|
||||
return d.ref.request(method, path, callback, out)
|
||||
}
|
||||
u := d.Address + "/api/v4" + path
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"User-Agent": d.getUA(),
|
||||
})
|
||||
if d.AccessToken != "" {
|
||||
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
||||
}
|
||||
|
||||
var r Resp
|
||||
req.SetResult(&r)
|
||||
|
||||
if callback != nil {
|
||||
callback(req)
|
||||
}
|
||||
|
||||
resp, err := req.Execute(method, u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !resp.IsSuccess() {
|
||||
return errors.New(resp.String())
|
||||
}
|
||||
|
||||
if r.Code != 0 {
|
||||
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
|
||||
// try to refresh token
|
||||
err = d.refreshToken()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return d.request(method, path, callback, out)
|
||||
}
|
||||
return errors.New(r.Msg)
|
||||
}
|
||||
|
||||
if out != nil && r.Data != nil {
|
||||
var marshal []byte
|
||||
marshal, err = json.Marshal(r.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(marshal, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) login() error {
|
||||
var siteConfig SiteLoginConfigResp
|
||||
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !siteConfig.Authn {
|
||||
return errors.New("authn not support")
|
||||
}
|
||||
var prepareLogin PrepareLoginResp
|
||||
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !prepareLogin.PasswordEnabled {
|
||||
return errors.New("password not enabled")
|
||||
}
|
||||
if prepareLogin.WebauthnEnabled {
|
||||
return errors.New("webauthn not support")
|
||||
}
|
||||
for range 5 {
|
||||
err = d.doLogin(siteConfig.LoginCaptcha)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if err.Error() != "CAPTCHA not match." {
|
||||
break
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
||||
var err error
|
||||
loginBody := base.Json{
|
||||
"email": d.Username,
|
||||
"password": d.Password,
|
||||
}
|
||||
if needCaptcha {
|
||||
var config BasicConfigResp
|
||||
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if config.CaptchaType != "normal" {
|
||||
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
|
||||
}
|
||||
var captcha CaptchaResp
|
||||
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") {
|
||||
return errors.New("can not get captcha")
|
||||
}
|
||||
loginBody["ticket"] = captcha.Ticket
|
||||
i := strings.Index(captcha.Image, ",")
|
||||
dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:]))
|
||||
vRes, err := base.RestyClient.R().SetMultipartField(
|
||||
"image", "validateCode.png", "image/png", dec).
|
||||
Post(setting.GetStr(conf.OcrApi))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 {
|
||||
return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString())
|
||||
}
|
||||
captchaCode := jsoniter.Get(vRes.Body(), "result").ToString()
|
||||
if captchaCode == "" {
|
||||
return errors.New("ocr error: empty result")
|
||||
}
|
||||
loginBody["captcha"] = captchaCode
|
||||
}
|
||||
var token TokenResponse
|
||||
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
|
||||
req.SetBody(loginBody)
|
||||
}, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) refreshToken() error {
|
||||
var token Token
|
||||
if token.RefreshToken == "" {
|
||||
if d.Username != "" {
|
||||
err := d.login()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"refresh_token": d.RefreshToken,
|
||||
})
|
||||
}, &token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
if DEFAULT == 0 {
|
||||
// support relay
|
||||
DEFAULT = file.GetSize()
|
||||
}
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
|
||||
req.SetHeader("Content-Type", "application/octet-stream")
|
||||
req.SetContentLength(true)
|
||||
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
|
||||
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
req.AddRetryCondition(func(r *resty.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
if r.IsError() {
|
||||
return true
|
||||
}
|
||||
var retryResp Resp
|
||||
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
|
||||
if jErr != nil {
|
||||
return true
|
||||
}
|
||||
if retryResp.Code != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadUrls[0]
|
||||
credential := u.Credential
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
|
||||
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
err = func() error {
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return errors.New(res.Status)
|
||||
}
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var up Resp
|
||||
err = json.Unmarshal(body, &up)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if up.Code != 0 {
|
||||
return errors.New(up.Msg)
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err == nil {
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
chunk++
|
||||
} else {
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadUrls[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
}
|
||||
}
|
||||
// 上传成功发送回调请求
|
||||
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
|
||||
req.SetBody("{}")
|
||||
}, nil)
|
||||
}
|
||||
|
||||
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
var etags []string
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < file.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
left := file.GetSize() - finish
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(file, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
|
||||
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.ContentLength = byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
etag := res.Header.Get("ETag")
|
||||
res.Body.Close()
|
||||
switch {
|
||||
case res.StatusCode != 200:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case etag == "":
|
||||
return errors.New("faild to get ETag from header")
|
||||
default:
|
||||
retryCount = 0
|
||||
etags = append(etags, etag)
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(file.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
}
|
||||
|
||||
// s3LikeFinishUpload
|
||||
bodyBuilder := &strings.Builder{}
|
||||
bodyBuilder.WriteString("<CompleteMultipartUpload>")
|
||||
for i, etag := range etags {
|
||||
bodyBuilder.WriteString(fmt.Sprintf(
|
||||
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
|
||||
i+1, // PartNumber 从 1 开始
|
||||
etag,
|
||||
))
|
||||
}
|
||||
bodyBuilder.WriteString("</CompleteMultipartUpload>")
|
||||
req, err := http.NewRequest(
|
||||
"POST",
|
||||
u.CompleteURL,
|
||||
strings.NewReader(bodyBuilder.String()),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/xml")
|
||||
req.Header.Set("User-Agent", d.getUA())
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(res.Body)
|
||||
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
|
||||
}
|
||||
|
||||
// 上传成功发送回调请求
|
||||
return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
|
||||
req.SetBody("{}")
|
||||
}, nil)
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
@@ -36,88 +37,130 @@ func (d *GithubReleases) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// processPoint 处理单个挂载点的文件列表
|
||||
func (d *GithubReleases) processPoint(point *MountPoint, path string, args model.ListArgs) []File {
|
||||
var pointFiles []File
|
||||
|
||||
if !d.Addition.ShowAllVersion { // latest
|
||||
point.RequestLatestRelease(d.GetRequest, args.Refresh)
|
||||
pointFiles = d.processLatestVersion(point, path)
|
||||
} else { // all version
|
||||
point.RequestReleases(d.GetRequest, args.Refresh)
|
||||
pointFiles = d.processAllVersions(point, path)
|
||||
}
|
||||
|
||||
return pointFiles
|
||||
}
|
||||
|
||||
// processLatestVersion builds the file entries for a mount point when only
// the latest release is exposed. Returns the assets when path is the mount
// point itself, or a synthetic directory entry when path is an ancestor.
func (d *GithubReleases) processLatestVersion(point *MountPoint, path string) []File {
	var pointFiles []File

	if point.Point == path { // the listed path is the mount point itself
		pointFiles = append(pointFiles, point.GetLatestRelease()...)
		if d.Addition.ShowReadme {
			// NOTE(review): refresh is hard-coded to false here, unlike the
			// release fetch in processPoint — confirm this is intentional.
			files := point.GetOtherFile(d.GetRequest, false)
			pointFiles = append(pointFiles, files...)
		}
	} else if strings.HasPrefix(point.Point, path) { // listing an ancestor of the mount point
		nextDir := GetNextDir(point.Point, path)
		if nextDir != "" {
			// Expose the next path component as a synthetic directory sized
			// and timestamped from the latest release.
			dirFile := File{
				Path:     path + "/" + nextDir,
				FileName: nextDir,
				Size:     point.GetLatestSize(),
				UpdateAt: point.Release.PublishedAt,
				CreateAt: point.Release.CreatedAt,
				Type:     "dir",
				Url:      "",
			}
			pointFiles = append(pointFiles, dirFile)
		}
	}

	return pointFiles
}
|
||||
|
||||
// processAllVersions builds the file entries for a mount point when every
// release version is exposed. Handles three cases: path equals the mount
// point (list version directories), path is an ancestor (synthetic dir), or
// path is a tag directory inside the mount point (list that tag's assets).
func (d *GithubReleases) processAllVersions(point *MountPoint, path string) []File {
	var pointFiles []File

	if point.Point == path { // the listed path is the mount point itself
		pointFiles = append(pointFiles, point.GetAllVersion()...)
		if d.Addition.ShowReadme {
			// NOTE(review): refresh is hard-coded to false here — confirm.
			files := point.GetOtherFile(d.GetRequest, false)
			pointFiles = append(pointFiles, files...)
		}
	} else if strings.HasPrefix(point.Point, path) { // listing an ancestor of the mount point
		nextDir := GetNextDir(point.Point, path)
		if nextDir != "" {
			// Synthetic directory for the next path component; timestamps
			// come from the first (newest) release.
			// NOTE(review): (*point.Releases)[0] panics if the release list
			// is nil or empty — confirm RequestReleases guarantees entries.
			dirFile := File{
				FileName: nextDir,
				Path:     path + "/" + nextDir,
				Size:     point.GetAllVersionSize(),
				UpdateAt: (*point.Releases)[0].PublishedAt,
				CreateAt: (*point.Releases)[0].CreatedAt,
				Type:     "dir",
				Url:      "",
			}
			pointFiles = append(pointFiles, dirFile)
		}
	} else if strings.HasPrefix(path, point.Point) { // listing a tag directory inside the mount point
		tagName := GetNextDir(path, point.Point)
		if tagName != "" {
			pointFiles = append(pointFiles, point.GetReleaseByTagName(tagName)...)
		}
	}

	return pointFiles
}
|
||||
|
||||
// mergeFiles 合并文件列表,处理重复目录
|
||||
func (d *GithubReleases) mergeFiles(files *[]File, newFiles []File) {
|
||||
for _, newFile := range newFiles {
|
||||
if newFile.Type == "dir" {
|
||||
hasSameDir := false
|
||||
for index := range *files {
|
||||
if (*files)[index].GetName() == newFile.GetName() && (*files)[index].Type == "dir" {
|
||||
hasSameDir = true
|
||||
(*files)[index].Size += newFile.Size
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasSameDir {
|
||||
*files = append(*files, newFile)
|
||||
}
|
||||
} else {
|
||||
*files = append(*files, newFile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
files := make([]File, 0)
|
||||
path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))
|
||||
|
||||
for i := range d.points {
|
||||
point := &d.points[i]
|
||||
if d.Addition.ConcurrentRequests && d.Addition.Token != "" { // 并发处理
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
|
||||
if !d.Addition.ShowAllVersion { // latest
|
||||
point.RequestRelease(d.GetRequest, args.Refresh)
|
||||
for i := range d.points {
|
||||
wg.Add(1)
|
||||
go func(point *MountPoint) {
|
||||
defer wg.Done()
|
||||
pointFiles := d.processPoint(point, path, args)
|
||||
|
||||
if point.Point == path { // 与仓库路径相同
|
||||
files = append(files, point.GetLatestRelease()...)
|
||||
if d.Addition.ShowReadme {
|
||||
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
|
||||
}
|
||||
} else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录
|
||||
nextDir := GetNextDir(point.Point, path)
|
||||
if nextDir == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
hasSameDir := false
|
||||
for index := range files {
|
||||
if files[index].GetName() == nextDir {
|
||||
hasSameDir = true
|
||||
files[index].Size += point.GetLatestSize()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasSameDir {
|
||||
files = append(files, File{
|
||||
Path: path + "/" + nextDir,
|
||||
FileName: nextDir,
|
||||
Size: point.GetLatestSize(),
|
||||
UpdateAt: point.Release.PublishedAt,
|
||||
CreateAt: point.Release.CreatedAt,
|
||||
Type: "dir",
|
||||
Url: "",
|
||||
})
|
||||
}
|
||||
}
|
||||
} else { // all version
|
||||
point.RequestReleases(d.GetRequest, args.Refresh)
|
||||
|
||||
if point.Point == path { // 与仓库路径相同
|
||||
files = append(files, point.GetAllVersion()...)
|
||||
if d.Addition.ShowReadme {
|
||||
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
|
||||
}
|
||||
} else if strings.HasPrefix(point.Point, path) { // 仓库目录的父目录
|
||||
nextDir := GetNextDir(point.Point, path)
|
||||
if nextDir == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
hasSameDir := false
|
||||
for index := range files {
|
||||
if files[index].GetName() == nextDir {
|
||||
hasSameDir = true
|
||||
files[index].Size += point.GetAllVersionSize()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasSameDir {
|
||||
files = append(files, File{
|
||||
FileName: nextDir,
|
||||
Path: path + "/" + nextDir,
|
||||
Size: point.GetAllVersionSize(),
|
||||
UpdateAt: (*point.Releases)[0].PublishedAt,
|
||||
CreateAt: (*point.Releases)[0].CreatedAt,
|
||||
Type: "dir",
|
||||
Url: "",
|
||||
})
|
||||
}
|
||||
} else if strings.HasPrefix(path, point.Point) { // 仓库目录的子目录
|
||||
tagName := GetNextDir(path, point.Point)
|
||||
if tagName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
files = append(files, point.GetReleaseByTagName(tagName)...)
|
||||
}
|
||||
mu.Lock()
|
||||
d.mergeFiles(&files, pointFiles)
|
||||
mu.Unlock()
|
||||
}(&d.points[i])
|
||||
}
|
||||
wg.Wait()
|
||||
} else { // 串行处理
|
||||
for i := range d.points {
|
||||
point := &d.points[i]
|
||||
pointFiles := d.processPoint(point, path, args)
|
||||
d.mergeFiles(&files, pointFiles)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,11 +7,12 @@ import (
|
||||
|
||||
type Addition struct {
|
||||
driver.RootID
|
||||
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
|
||||
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
|
||||
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
|
||||
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
|
||||
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
|
||||
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
|
||||
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
|
||||
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
|
||||
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
|
||||
ConcurrentRequests bool `json:"concurrent_requests" type:"bool" default:"false" help:"To concurrently request the GitHub API, you must enter a GitHub token"`
|
||||
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
||||
@@ -18,7 +18,7 @@ type MountPoint struct {
|
||||
}
|
||||
|
||||
// 请求最新版本
|
||||
func (m *MountPoint) RequestRelease(get func(url string) (*resty.Response, error), refresh bool) {
|
||||
func (m *MountPoint) RequestLatestRelease(get func(url string) (*resty.Response, error), refresh bool) {
|
||||
if m.Repo == "" {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// 发送 GET 请求
|
||||
@@ -23,7 +23,7 @@ func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) {
|
||||
return nil, err
|
||||
}
|
||||
if res.StatusCode() != 200 {
|
||||
log.Warn("failed to get request: ", res.StatusCode(), res.String())
|
||||
utils.Log.Warnf("failed to get request: %s %d %s", url, res.StatusCode(), res.String())
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
271
drivers/gofile/driver.go
Normal file
271
drivers/gofile/driver.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package gofile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
// Gofile is the AList driver for the gofile.io storage service.
type Gofile struct {
	model.Storage
	Addition

	accountId string // resolved once in Init via the accounts API
}

// Config returns the static driver configuration.
func (d *Gofile) Config() driver.Config {
	return config
}

// GetAddition exposes the user-configurable driver options.
func (d *Gofile) GetAddition() driver.Additional {
	return &d.Addition
}
|
||||
|
||||
// Init validates the API token, resolves the account ID for this token, and
// defaults the root folder ID from the account profile when the user did
// not configure one. The (possibly updated) storage config is persisted.
func (d *Gofile) Init(ctx context.Context) error {
	if d.APIToken == "" {
		return fmt.Errorf("API token is required")
	}

	// Get account ID — later API calls need it.
	accountId, err := d.getAccountId(ctx)
	if err != nil {
		return fmt.Errorf("failed to get account ID: %w", err)
	}
	d.accountId = accountId

	// Get account info to set root folder if not specified.
	if d.RootFolderID == "" {
		accountInfo, err := d.getAccountInfo(ctx, accountId)
		if err != nil {
			return fmt.Errorf("failed to get account info: %w", err)
		}
		d.RootFolderID = accountInfo.Data.RootFolder
	}

	// Save driver storage so the discovered root folder survives restarts.
	op.MustSaveDriverStorage(d)
	return nil
}
|
||||
|
||||
// Drop releases driver resources; the Gofile driver holds no state that
// needs cleanup.
func (d *Gofile) Drop(ctx context.Context) error {
	return nil
}
|
||||
|
||||
// List returns the entries of dir by fetching /contents/{folderId}.
// An empty object ID means the configured root folder.
func (d *Gofile) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var folderId string
	if dir.GetID() == "" {
		folderId = d.GetRootId()
	} else {
		folderId = dir.GetID()
	}

	endpoint := fmt.Sprintf("/contents/%s", folderId)

	var response ContentsResponse
	err := d.getJSON(ctx, endpoint, &response)
	if err != nil {
		return nil, err
	}

	var objects []model.Obj

	// The API reports entries under either "children" or "contents"
	// depending on the response shape; accept both.
	contents := response.Data.Children
	if contents == nil {
		contents = response.Data.Contents
	}

	for _, content := range contents {
		objects = append(objects, d.convertContentToObj(content))
	}

	return objects, nil
}
|
||||
|
||||
// Link creates a direct-download URL for file via Gofile's direct-link API.
// Folders cannot be linked. The link is cached by AList for LinkExpiry days;
// 0 disables caching so a fresh link is created per request.
func (d *Gofile) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if file.IsDir() {
		return nil, errs.NotFile
	}

	// Create a direct link for the file.
	directLink, err := d.createDirectLink(ctx, file.GetID())
	if err != nil {
		return nil, fmt.Errorf("failed to create direct link: %w", err)
	}

	// Configure cache expiration based on user setting.
	link := &model.Link{
		URL: directLink,
	}

	// Only set expiration if LinkExpiry > 0 (0 means no caching).
	if d.LinkExpiry > 0 {
		expiration := time.Duration(d.LinkExpiry) * 24 * time.Hour
		link.Expiration = &expiration
	}

	return link, nil
}
|
||||
|
||||
// MakeDir creates dirName under parentDir (root when the ID is empty) via
// /contents/createFolder and returns the newly created folder object.
func (d *Gofile) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	var parentId string
	if parentDir.GetID() == "" {
		parentId = d.GetRootId()
	} else {
		parentId = parentDir.GetID()
	}

	data := map[string]interface{}{
		"parentFolderId": parentId,
		"folderName":     dirName,
	}

	var response CreateFolderResponse
	err := d.postJSON(ctx, "/contents/createFolder", data, &response)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       response.Data.ID,
		Name:     response.Data.Name,
		IsFolder: true,
	}, nil
}
|
||||
|
||||
// Move relocates srcObj into dstDir (root when the ID is empty) via
// /contents/move. The returned object keeps the source's ID and metadata;
// the API response body is discarded.
func (d *Gofile) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var dstId string
	if dstDir.GetID() == "" {
		dstId = d.GetRootId()
	} else {
		dstId = dstDir.GetID()
	}

	data := map[string]interface{}{
		"contentsId": srcObj.GetID(),
		"folderId":   dstId,
	}

	err := d.putJSON(ctx, "/contents/move", data, nil)
	if err != nil {
		return nil, err
	}

	// Return updated object (same ID, same metadata, new parent on server).
	return &model.Object{
		ID:       srcObj.GetID(),
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}
|
||||
|
||||
// Rename sets the "name" attribute of srcObj via /contents/{id}/update and
// returns the object with the new name applied locally.
func (d *Gofile) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	data := map[string]interface{}{
		"attribute":      "name",
		"attributeValue": newName,
	}

	var response UpdateResponse
	err := d.putJSON(ctx, fmt.Sprintf("/contents/%s/update", srcObj.GetID()), data, &response)
	if err != nil {
		return nil, err
	}

	// Name comes from the requested value; the decoded response is only used
	// to confirm the call succeeded.
	return &model.Object{
		ID:       srcObj.GetID(),
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}
|
||||
|
||||
// Copy duplicates srcObj into dstDir (root when the ID is empty) via
// /contents/copy and returns an object pointing at the new copy.
func (d *Gofile) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var dstId string
	if dstDir.GetID() == "" {
		dstId = d.GetRootId()
	} else {
		dstId = dstDir.GetID()
	}

	data := map[string]interface{}{
		"contentsId": srcObj.GetID(),
		"folderId":   dstId,
	}

	var response CopyResponse
	err := d.postJSON(ctx, "/contents/copy", data, &response)
	if err != nil {
		return nil, err
	}

	// The response maps source IDs to the IDs of their copies; fall back to
	// the source ID when the mapping is absent.
	newId := srcObj.GetID()
	if response.Data.CopiedContents != nil {
		if id, ok := response.Data.CopiedContents[srcObj.GetID()]; ok {
			newId = id
		}
	}

	return &model.Object{
		ID:       newId,
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}
|
||||
|
||||
func (d *Gofile) Remove(ctx context.Context, obj model.Obj) error {
|
||||
data := map[string]interface{}{
|
||||
"contentsId": obj.GetID(),
|
||||
}
|
||||
|
||||
return d.deleteJSON(ctx, "/contents", data)
|
||||
}
|
||||
|
||||
// Put uploads fileStreamer into dstDir (root when the ID is empty) through
// the Gofile upload host and returns the stored file's identity as reported
// by the upload API.
func (d *Gofile) Put(ctx context.Context, dstDir model.Obj, fileStreamer model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	var folderId string
	if dstDir.GetID() == "" {
		folderId = d.GetRootId()
	} else {
		folderId = dstDir.GetID()
	}

	response, err := d.uploadFile(ctx, folderId, fileStreamer, up)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:       response.Data.FileId,
		Name:     response.Data.FileName,
		Size:     fileStreamer.GetSize(),
		IsFolder: false,
	}, nil
}
|
||||
|
||||
// Archive-related operations are not supported by the Gofile API; all four
// return errs.NotImplement.

func (d *Gofile) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	return nil, errs.NotImplement
}

func (d *Gofile) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	return nil, errs.NotImplement
}

func (d *Gofile) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	return nil, errs.NotImplement
}

func (d *Gofile) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	return nil, errs.NotImplement
}

// Compile-time assertion that Gofile satisfies the driver interface.
var _ driver.Driver = (*Gofile)(nil)
|
||||
28
drivers/gofile/meta.go
Normal file
28
drivers/gofile/meta.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package gofile
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
// Addition holds the user-configurable options of the Gofile driver.
type Addition struct {
	driver.RootID
	// APIToken authenticates every API call.
	APIToken string `json:"api_token" required:"true" help:"Get your API token from your Gofile profile page"`
	// LinkExpiry is how long AList caches direct links, in days; 0 disables caching.
	LinkExpiry int `json:"link_expiry" type:"number" default:"30" help:"Direct link cache duration in days. Set to 0 to disable caching"`
	// DirectLinkExpiry is the server-side link lifetime in hours; 0 means no expiration.
	DirectLinkExpiry int `json:"direct_link_expiry" type:"number" default:"0" help:"Direct link expiration time in hours on Gofile server. Set to 0 for no expiration"`
}

// config is the static driver descriptor registered with AList.
var config = driver.Config{
	Name:        "Gofile",
	DefaultRoot: "",
	LocalSort:   false,
	OnlyProxy:   false,
	NoCache:     false,
	NoUpload:    false,
}

// init registers the Gofile driver factory with the driver registry.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Gofile{}
	})
}
|
||||
124
drivers/gofile/types.go
Normal file
124
drivers/gofile/types.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package gofile
|
||||
|
||||
import "time"
|
||||
|
||||
type APIResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
type AccountResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type AccountInfoResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Email string `json:"email"`
|
||||
RootFolder string `json:"rootFolder"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type Content struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"` // "file" or "folder"
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
CreateTime int64 `json:"createTime"`
|
||||
ModTime int64 `json:"modTime,omitempty"`
|
||||
DirectLink string `json:"directLink,omitempty"`
|
||||
Children map[string]Content `json:"children,omitempty"`
|
||||
ParentFolder string `json:"parentFolder,omitempty"`
|
||||
MD5 string `json:"md5,omitempty"`
|
||||
MimeType string `json:"mimeType,omitempty"`
|
||||
Link string `json:"link,omitempty"`
|
||||
}
|
||||
|
||||
type ContentsResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
IsOwner bool `json:"isOwner"`
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
ParentFolder string `json:"parentFolder"`
|
||||
CreateTime int64 `json:"createTime"`
|
||||
ChildrenList []string `json:"childrenList,omitempty"`
|
||||
Children map[string]Content `json:"children,omitempty"`
|
||||
Contents map[string]Content `json:"contents,omitempty"`
|
||||
Public bool `json:"public,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Tags string `json:"tags,omitempty"`
|
||||
Expiry int64 `json:"expiry,omitempty"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type UploadResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
DownloadPage string `json:"downloadPage"`
|
||||
Code string `json:"code"`
|
||||
ParentFolder string `json:"parentFolder"`
|
||||
FileId string `json:"fileId"`
|
||||
FileName string `json:"fileName"`
|
||||
GuestToken string `json:"guestToken,omitempty"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type DirectLinkResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
DirectLink string `json:"directLink"`
|
||||
ID string `json:"id"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type CreateFolderResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
ParentFolder string `json:"parentFolder"`
|
||||
CreateTime int64 `json:"createTime"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type CopyResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
CopiedContents map[string]string `json:"copiedContents"` // oldId -> newId mapping
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type UpdateResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type ErrorResponse struct {
|
||||
Status string `json:"status"`
|
||||
Error struct {
|
||||
Message string `json:"message"`
|
||||
Code string `json:"code"`
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
func (c *Content) ModifiedTime() time.Time {
|
||||
if c.ModTime > 0 {
|
||||
return time.Unix(c.ModTime, 0)
|
||||
}
|
||||
return time.Unix(c.CreateTime, 0)
|
||||
}
|
||||
|
||||
func (c *Content) IsDir() bool {
|
||||
return c.Type == "folder"
|
||||
}
|
||||
265
drivers/gofile/util.go
Normal file
265
drivers/gofile/util.go
Normal file
@@ -0,0 +1,265 @@
|
||||
package gofile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Gofile API hosts: metadata operations go to the main API host, file
// uploads to a dedicated upload host.
const (
	baseAPI   = "https://api.gofile.io"
	uploadAPI = "https://upload.gofile.io"
)
|
||||
|
||||
// request issues an authenticated HTTP request against the Gofile API.
// endpoint may be a path (prefixed with baseAPI) or an absolute URL (used
// for the separate upload host). The caller must close the response body.
func (d *Gofile) request(ctx context.Context, method, endpoint string, body io.Reader, headers map[string]string) (*http.Response, error) {
	var url string
	if strings.HasPrefix(endpoint, "http") {
		url = endpoint
	} else {
		url = baseAPI + endpoint
	}

	req, err := http.NewRequestWithContext(ctx, method, url, body)
	if err != nil {
		return nil, err
	}

	// Bearer-token auth plus a stable UA on every call.
	req.Header.Set("Authorization", "Bearer "+d.APIToken)
	req.Header.Set("User-Agent", "AList/3.0")

	// Caller-supplied headers (e.g. Content-Type) override the defaults.
	for k, v := range headers {
		req.Header.Set(k, v)
	}

	return base.HttpClient.Do(req)
}
|
||||
|
||||
func (d *Gofile) getJSON(ctx context.Context, endpoint string, result interface{}) error {
|
||||
resp, err := d.request(ctx, "GET", endpoint, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return d.handleError(resp)
|
||||
}
|
||||
|
||||
return json.NewDecoder(resp.Body).Decode(result)
|
||||
}
|
||||
|
||||
func (d *Gofile) postJSON(ctx context.Context, endpoint string, data interface{}, result interface{}) error {
|
||||
jsonData, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := map[string]string{
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
resp, err := d.request(ctx, "POST", endpoint, bytes.NewBuffer(jsonData), headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return d.handleError(resp)
|
||||
}
|
||||
|
||||
if result != nil {
|
||||
return json.NewDecoder(resp.Body).Decode(result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Gofile) putJSON(ctx context.Context, endpoint string, data interface{}, result interface{}) error {
|
||||
jsonData, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := map[string]string{
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
resp, err := d.request(ctx, "PUT", endpoint, bytes.NewBuffer(jsonData), headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return d.handleError(resp)
|
||||
}
|
||||
|
||||
if result != nil {
|
||||
return json.NewDecoder(resp.Body).Decode(result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Gofile) deleteJSON(ctx context.Context, endpoint string, data interface{}) error {
|
||||
jsonData, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := map[string]string{
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
resp, err := d.request(ctx, "DELETE", endpoint, bytes.NewBuffer(jsonData), headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return d.handleError(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Gofile) handleError(resp *http.Response) error {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
log.Debugf("Gofile API error (HTTP %d): %s", resp.StatusCode, string(body))
|
||||
|
||||
var errorResp ErrorResponse
|
||||
if err := json.Unmarshal(body, &errorResp); err == nil && errorResp.Status == "error" {
|
||||
return fmt.Errorf("gofile API error: %s (code: %s)", errorResp.Error.Message, errorResp.Error.Code)
|
||||
}
|
||||
|
||||
return fmt.Errorf("gofile API error: HTTP %d - %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
func (d *Gofile) uploadFile(ctx context.Context, folderId string, file model.FileStreamer, up driver.UpdateProgress) (*UploadResponse, error) {
|
||||
var body bytes.Buffer
|
||||
writer := multipart.NewWriter(&body)
|
||||
|
||||
if folderId != "" {
|
||||
writer.WriteField("folderId", folderId)
|
||||
}
|
||||
|
||||
part, err := writer.CreateFormFile("file", filepath.Base(file.GetName()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Copy with progress tracking if available
|
||||
if up != nil {
|
||||
reader := &progressReader{
|
||||
reader: file,
|
||||
total: file.GetSize(),
|
||||
up: up,
|
||||
}
|
||||
_, err = io.Copy(part, reader)
|
||||
} else {
|
||||
_, err = io.Copy(part, file)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
writer.Close()
|
||||
|
||||
headers := map[string]string{
|
||||
"Content-Type": writer.FormDataContentType(),
|
||||
}
|
||||
|
||||
resp, err := d.request(ctx, "POST", uploadAPI+"/uploadfile", &body, headers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, d.handleError(resp)
|
||||
}
|
||||
|
||||
var result UploadResponse
|
||||
err = json.NewDecoder(resp.Body).Decode(&result)
|
||||
return &result, err
|
||||
}
|
||||
|
||||
func (d *Gofile) createDirectLink(ctx context.Context, contentId string) (string, error) {
|
||||
data := map[string]interface{}{}
|
||||
|
||||
if d.DirectLinkExpiry > 0 {
|
||||
expireTime := time.Now().Add(time.Duration(d.DirectLinkExpiry) * time.Hour).Unix()
|
||||
data["expireTime"] = expireTime
|
||||
}
|
||||
|
||||
var result DirectLinkResponse
|
||||
err := d.postJSON(ctx, fmt.Sprintf("/contents/%s/directlinks", contentId), data, &result)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return result.Data.DirectLink, nil
|
||||
}
|
||||
|
||||
func (d *Gofile) convertContentToObj(content Content) model.Obj {
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: content.ID,
|
||||
Name: content.Name,
|
||||
Size: content.Size,
|
||||
Modified: content.ModifiedTime(),
|
||||
IsFolder: content.IsDir(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Gofile) getAccountId(ctx context.Context) (string, error) {
|
||||
var result AccountResponse
|
||||
err := d.getJSON(ctx, "/accounts/getid", &result)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result.Data.ID, nil
|
||||
}
|
||||
|
||||
func (d *Gofile) getAccountInfo(ctx context.Context, accountId string) (*AccountInfoResponse, error) {
|
||||
var result AccountInfoResponse
|
||||
err := d.getJSON(ctx, fmt.Sprintf("/accounts/%s", accountId), &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// progressReader wraps an io.Reader and reports cumulative read progress
// (as a percentage of total) through an AList driver.UpdateProgress
// callback on every Read.
type progressReader struct {
	reader io.Reader             // underlying data source
	total  int64                 // expected total bytes; <= 0 disables reporting
	read   int64                 // bytes consumed so far
	up     driver.UpdateProgress // progress callback; may be nil
}
|
||||
|
||||
func (pr *progressReader) Read(p []byte) (n int, err error) {
|
||||
n, err = pr.reader.Read(p)
|
||||
pr.read += int64(n)
|
||||
if pr.up != nil && pr.total > 0 {
|
||||
progress := float64(pr.read) * 100 / float64(pr.total)
|
||||
pr.up(progress)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@@ -94,6 +94,7 @@ func RemoveJSComment(data string) string {
|
||||
}
|
||||
if inComment && v == '*' && i+1 < len(data) && data[i+1] == '/' {
|
||||
inComment = false
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if v == '/' && i+1 < len(data) {
|
||||
@@ -108,6 +109,9 @@ func RemoveJSComment(data string) string {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if inComment || inSingleLineComment {
|
||||
continue
|
||||
}
|
||||
result.WriteByte(v)
|
||||
}
|
||||
|
||||
|
||||
@@ -146,13 +146,14 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
|
||||
thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name()))
|
||||
}
|
||||
}
|
||||
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
|
||||
filePath := filepath.Join(fullPath, f.Name())
|
||||
isFolder := f.IsDir() || isLinkedDir(f, filePath)
|
||||
var size int64
|
||||
if !isFolder {
|
||||
size = f.Size()
|
||||
}
|
||||
var ctime time.Time
|
||||
t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
|
||||
t, err := times.Stat(filePath)
|
||||
if err == nil {
|
||||
if t.HasBirthTime() {
|
||||
ctime = t.BirthTime()
|
||||
@@ -161,7 +162,7 @@ func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string
|
||||
|
||||
file := model.ObjThumb{
|
||||
Object: model.Object{
|
||||
Path: filepath.Join(fullPath, f.Name()),
|
||||
Path: filePath,
|
||||
Name: f.Name(),
|
||||
Modified: f.ModTime(),
|
||||
Size: size,
|
||||
@@ -197,7 +198,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
isFolder := f.IsDir() || isSymlinkDir(f, path)
|
||||
isFolder := f.IsDir() || isLinkedDir(f, path)
|
||||
size := f.Size()
|
||||
if isFolder {
|
||||
size = 0
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -18,14 +19,18 @@ import (
|
||||
ffmpeg "github.com/u2takey/ffmpeg-go"
|
||||
)
|
||||
|
||||
func isSymlinkDir(f fs.FileInfo, path string) bool {
|
||||
if f.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
dst, err := os.Readlink(filepath.Join(path, f.Name()))
|
||||
func isLinkedDir(f fs.FileInfo, path string) bool {
|
||||
if f.Mode()&os.ModeSymlink == os.ModeSymlink || (runtime.GOOS == "windows" && f.Mode()&os.ModeIrregular != 0) {
|
||||
dst, err := os.Readlink(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if !filepath.IsAbs(dst) {
|
||||
dst = filepath.Join(path, dst)
|
||||
dst = filepath.Join(filepath.Dir(path), dst)
|
||||
}
|
||||
dst, err = filepath.Abs(dst)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
stat, err := os.Stat(dst)
|
||||
if err != nil {
|
||||
|
||||
433
drivers/mediafire/driver.go
Normal file
433
drivers/mediafire/driver.go
Normal file
@@ -0,0 +1,433 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/cron"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
)
|
||||
|
||||
// Mediafire implements an AList storage driver backed by the MediaFire
// REST API, authenticating with a session token plus browser cookie.
type Mediafire struct {
	model.Storage
	Addition
	cron *cron.Cron // periodic session-token renewal, started in Init

	actionToken string // cached upload action token

	appBase    string // web-app origin, used for Origin/Referer headers
	apiBase    string // REST API base URL
	hostBase   string // main site base URL
	maxRetries int    // retry budget for API calls

	// Browser client-hint values sent so requests mimic a real browser.
	secChUa         string
	secChUaPlatform string
	userAgent       string
}
|
||||
|
||||
// Config returns the static driver configuration registered for MediaFire.
func (d *Mediafire) Config() driver.Config {
	return config
}

// GetAddition exposes the user-configurable driver options.
func (d *Mediafire) GetAddition() driver.Additional {
	return &d.Addition
}
|
||||
|
||||
// Init validates the required credentials and verifies the stored session
// token. If the token check fails, it renews the token immediately and
// starts a cron job that keeps re-renewing it on a randomized interval.
func (d *Mediafire) Init(ctx context.Context) error {
	if d.SessionToken == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
	}

	if d.Cookie == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
	}

	if _, err := d.getSessionToken(ctx); err != nil {
		// NOTE(review): the renewal result is not checked; if renewToken also
		// fails, Init still reports success — confirm this is intentional.
		d.renewToken(ctx)

		// Randomize the renewal period (6-9 minutes) so multiple instances
		// do not hit the API in lockstep.
		num := rand.Intn(4) + 6

		d.cron = cron.NewCron(time.Minute * time.Duration(num))
		d.cron.Do(func() {
			d.renewToken(ctx)
		})

	}

	return nil
}
|
||||
|
||||
func (d *Mediafire) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
files, err := d.getFiles(ctx, dir.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
|
||||
return d.fileToObj(src), nil
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
|
||||
downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Get(downloadUrl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
_ = res.RawBody().Close()
|
||||
}()
|
||||
|
||||
if res.StatusCode() == 302 {
|
||||
downloadUrl = res.Header().Get("location")
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: downloadUrl,
|
||||
Header: http.Header{
|
||||
"Origin": []string{d.appBase},
|
||||
"Referer": []string{d.appBase + "/"},
|
||||
"sec-ch-ua": []string{d.secChUa},
|
||||
"sec-ch-ua-platform": []string{d.secChUaPlatform},
|
||||
"User-Agent": []string{d.userAgent},
|
||||
//"User-Agent": []string{base.UserAgent},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"parent_key": parentDir.GetID(),
|
||||
"foldername": dirName,
|
||||
}
|
||||
|
||||
var resp MediafireFolderCreateResponse
|
||||
_, err := d.postForm("/folder/create.php", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)
|
||||
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: resp.Response.FolderKey,
|
||||
Name: resp.Response.Name,
|
||||
Size: 0,
|
||||
Modified: created,
|
||||
Ctime: created,
|
||||
IsFolder: true,
|
||||
},
|
||||
Thumbnail: model.Thumbnail{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if srcObj.IsDir() {
|
||||
|
||||
endpoint = "/folder/move.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key_src": srcObj.GetID(),
|
||||
"folder_key_dst": dstDir.GetID(),
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/move.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": srcObj.GetID(),
|
||||
"folder_key": dstDir.GetID(),
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireMoveResponse
|
||||
_, err := d.postForm(endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return srcObj, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if srcObj.IsDir() {
|
||||
|
||||
endpoint = "/folder/update.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key": srcObj.GetID(),
|
||||
"foldername": newName,
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/update.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": srcObj.GetID(),
|
||||
"filename": newName,
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireRenameResponse
|
||||
_, err := d.postForm(endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: srcObj.GetID(),
|
||||
Name: newName,
|
||||
Size: srcObj.GetSize(),
|
||||
Modified: srcObj.ModTime(),
|
||||
Ctime: srcObj.CreateTime(),
|
||||
IsFolder: srcObj.IsDir(),
|
||||
},
|
||||
Thumbnail: model.Thumbnail{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if srcObj.IsDir() {
|
||||
|
||||
endpoint = "/folder/copy.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key_src": srcObj.GetID(),
|
||||
"folder_key_dst": dstDir.GetID(),
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/copy.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": srcObj.GetID(),
|
||||
"folder_key": dstDir.GetID(),
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireCopyResponse
|
||||
_, err := d.postForm(endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
var newID string
|
||||
if srcObj.IsDir() {
|
||||
if len(resp.Response.NewFolderKeys) > 0 {
|
||||
newID = resp.Response.NewFolderKeys[0]
|
||||
}
|
||||
} else {
|
||||
if len(resp.Response.NewQuickKeys) > 0 {
|
||||
newID = resp.Response.NewQuickKeys[0]
|
||||
}
|
||||
}
|
||||
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: newID,
|
||||
Name: srcObj.GetName(),
|
||||
Size: srcObj.GetSize(),
|
||||
Modified: srcObj.ModTime(),
|
||||
Ctime: srcObj.CreateTime(),
|
||||
IsFolder: srcObj.IsDir(),
|
||||
},
|
||||
Thumbnail: model.Thumbnail{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if obj.IsDir() {
|
||||
|
||||
endpoint = "/folder/delete.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key": obj.GetID(),
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/delete.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": obj.GetID(),
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireRemoveResponse
|
||||
_, err := d.postForm(endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put uploads file into dstDir, reporting progress through up.
// It delegates to PutResult and discards the created object.
func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	_, err := d.PutResult(ctx, dstDir, file, up)
	return err
}
|
||||
|
||||
// PutResult uploads file into dstDir using MediaFire's resumable upload
// protocol and returns the resulting object.
//
// Flow: cache the stream to a temp file, hash it with SHA-256, then ask
// the API (upload/check) whether the content already exists. If the hash
// is already in the account the existing file is returned without
// re-uploading; otherwise any missing units are uploaded and the upload
// is polled until a quick key is issued.
func (d *Mediafire) PutResult(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {

	tempFile, err := file.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}
	defer tempFile.Close()

	// The hashing and unit-upload helpers need seekable *os.File access.
	osFile, ok := tempFile.(*os.File)
	if !ok {
		return nil, fmt.Errorf("expected *os.File, got %T", tempFile)
	}

	fileHash, err := d.calculateSHA256(osFile)
	if err != nil {
		return nil, err
	}

	checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
	if err != nil {
		return nil, err
	}

	// All units already present server-side: nothing left to transfer.
	if checkResp.Response.ResumableUpload.AllUnitsReady == "yes" {
		up(100.0)
	}

	// Instant upload: identical content already exists in this account.
	// NOTE(review): if getExistingFileInfo fails, we silently fall through
	// to a normal upload — confirm that is the intended fallback.
	if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
		up(100.0)
		existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
		if err == nil {
			return existingFile, nil
		}
	}

	var pollKey string

	if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {

		var err error

		// Upload the missing units; the returned key identifies the
		// server-side assembly job to poll below.
		pollKey, err = d.uploadUnits(ctx, osFile, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
		if err != nil {
			return nil, err
		}
	} else {

		pollKey = checkResp.Response.ResumableUpload.UploadKey
	}

	// Wait for MediaFire to finish assembling the file.
	pollResp, err := d.pollUpload(ctx, pollKey)
	if err != nil {
		return nil, err
	}

	quickKey := pollResp.Response.Doupload.QuickKey

	return &model.ObjThumb{
		Object: model.Object{
			ID:   quickKey,
			Name: file.GetName(),
			Size: file.GetSize(),
		},
		Thumbnail: model.Thumbnail{},
	}, nil
}
|
||||
|
||||
// GetArchiveMeta is not implemented; returning errs.NotImplement makes
// AList fall back to its internal archive tooling.
func (d *Mediafire) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	return nil, errs.NotImplement
}

// ListArchive is not implemented; AList's internal archive tool is used.
func (d *Mediafire) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	return nil, errs.NotImplement
}

// Extract is not implemented; AList's internal archive tool is used.
func (d *Mediafire) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	return nil, errs.NotImplement
}

// ArchiveDecompress is not implemented; AList's internal archive tool is
// used. (If implemented, args.PutIntoNewDir would require creating a
// folder named after the archive to hold the extracted results.)
func (d *Mediafire) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	return nil, errs.NotImplement
}

//func (d *Mediafire) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

// Compile-time check that Mediafire satisfies the driver interface.
var _ driver.Driver = (*Mediafire)(nil)
|
||||
54
drivers/mediafire/meta.go
Normal file
54
drivers/mediafire/meta.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
*/
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
// Addition holds the user-configurable options for the MediaFire driver.
type Addition struct {
	driver.RootPath
	//driver.RootID

	// SessionToken authenticates MediaFire REST API calls.
	SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
	// Cookie is the browser cookie used for site navigation requests.
	Cookie string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`

	OrderBy        string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	// ChunkSize is the upload unit size (MB) for resumable uploads.
	ChunkSize int64 `json:"chunk_size" type:"number" default:"100"`
}

// config is the static driver registration metadata for MediaFire.
var config = driver.Config{
	Name:              "MediaFire",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
}

// init registers the MediaFire driver with preset endpoints and
// browser-impersonation headers.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Mediafire{
			appBase:         "https://app.mediafire.com",
			apiBase:         "https://www.mediafire.com/api/1.5",
			hostBase:        "https://www.mediafire.com",
			maxRetries:      3,
			secChUa:         "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
			secChUaPlatform: "Windows",
			userAgent:       "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
		}
	})
}
|
||||
232
drivers/mediafire/types.go
Normal file
232
drivers/mediafire/types.go
Normal file
@@ -0,0 +1,232 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
*/
|
||||
|
||||
// MediafireRenewTokenResponse is the payload of user/renew_session_token.
type MediafireRenewTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		SessionToken      string `json:"session_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

// MediafireResponse is the folder/get_content listing payload. Folder
// listings are chunked; MoreChunks signals further pages.
type MediafireResponse struct {
	Response struct {
		Action        string `json:"action"`
		FolderContent struct {
			ChunkSize   string            `json:"chunk_size"`
			ContentType string            `json:"content_type"`
			ChunkNumber string            `json:"chunk_number"`
			FolderKey   string            `json:"folderkey"`
			Folders     []MediafireFolder `json:"folders,omitempty"`
			Files       []MediafireFile   `json:"files,omitempty"`
			MoreChunks  string            `json:"more_chunks"`
		} `json:"folder_content"`
		Result string `json:"result"`
	} `json:"response"`
}

// MediafireFolder is one folder entry inside a listing chunk.
type MediafireFolder struct {
	FolderKey  string `json:"folderkey"`
	Name       string `json:"name"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
}

// MediafireFile is one file entry inside a listing chunk. Note that the
// API returns Size as a string.
type MediafireFile struct {
	QuickKey   string `json:"quickkey"`
	Filename   string `json:"filename"`
	Size       string `json:"size"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
	MimeType   string `json:"mimetype"`
}

// File is the driver's normalized directory entry (folders and files).
type File struct {
	ID         string
	Name       string
	Size       int64
	CreatedUTC string
	IsFolder   bool
}

// FolderContentResponse aggregates one listing chunk after normalization.
type FolderContentResponse struct {
	Folders    []MediafireFolder
	Files      []MediafireFile
	MoreChunks bool
}

// MediafireLinksResponse is the file/get_links payload (view/download URLs,
// including single-use "one time" links).
type MediafireLinksResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			View           string `json:"view"`
			NormalDownload string `json:"normal_download"`
			OneTime        struct {
				Download string `json:"download"`
				View     string `json:"view"`
			} `json:"one_time"`
		} `json:"links"`
		OneTimeKeyRequestCount    string `json:"one_time_key_request_count"`
		OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
		Result                    string `json:"result"`
		CurrentAPIVersion         string `json:"current_api_version"`
	} `json:"response"`
}

// MediafireDirectDownloadResponse carries direct-download URLs per quick key.
type MediafireDirectDownloadResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			DirectDownload string `json:"direct_download"`
		} `json:"links"`
		DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
		Result                      string `json:"result"`
		CurrentAPIVersion           string `json:"current_api_version"`
	} `json:"response"`
}

// MediafireFolderCreateResponse is the folder/create payload.
type MediafireFolderCreateResponse struct {
	Response struct {
		Action            string `json:"action"`
		FolderKey         string `json:"folder_key"`
		UploadKey         string `json:"upload_key"`
		ParentFolderKey   string `json:"parent_folderkey"`
		Name              string `json:"name"`
		Description       string `json:"description"`
		Created           string `json:"created"`
		CreatedUTC        string `json:"created_utc"`
		Privacy           string `json:"privacy"`
		FileCount         string `json:"file_count"`
		FolderCount       string `json:"folder_count"`
		Revision          string `json:"revision"`
		DropboxEnabled    string `json:"dropbox_enabled"`
		Flag              string `json:"flag"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

// MediafireMoveResponse is the folder/move and file/move payload.
type MediafireMoveResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewNames          []string `json:"new_names"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

// MediafireRenameResponse is the folder/update and file/update payload.
type MediafireRenameResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

// MediafireCopyResponse is the folder/copy and file/copy payload; the new
// keys arrive in NewFolderKeys or NewQuickKeys depending on source type.
type MediafireCopyResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewQuickKeys      []string `json:"new_quickkeys,omitempty"`
		NewFolderKeys     []string `json:"new_folderkeys,omitempty"`
		SkippedCount      string   `json:"skipped_count,omitempty"`
		OtherCount        string   `json:"other_count,omitempty"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

// MediafireRemoveResponse is the folder/delete and file/delete payload.
type MediafireRemoveResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}

// MediafireCheckResponse is the upload/check payload used to drive
// resumable uploads: hash/file existence, per-unit readiness bitmap,
// quota information, and the upload URLs to use.
type MediafireCheckResponse struct {
	Response struct {
		Action          string `json:"action"`
		HashExists      string `json:"hash_exists"`
		InAccount       string `json:"in_account"`
		InFolder        string `json:"in_folder"`
		FileExists      string `json:"file_exists"`
		ResumableUpload struct {
			AllUnitsReady string `json:"all_units_ready"`
			NumberOfUnits string `json:"number_of_units"`
			UnitSize      string `json:"unit_size"`
			Bitmap        struct {
				Count string   `json:"count"`
				Words []string `json:"words"`
			} `json:"bitmap"`
			UploadKey string `json:"upload_key"`
		} `json:"resumable_upload"`
		AvailableSpace       string `json:"available_space"`
		UsedStorageSize      string `json:"used_storage_size"`
		StorageLimit         string `json:"storage_limit"`
		StorageLimitExceeded string `json:"storage_limit_exceeded"`
		UploadURL            struct {
			Simple            string `json:"simple"`
			SimpleFallback    string `json:"simple_fallback"`
			Resumable         string `json:"resumable"`
			ResumableFallback string `json:"resumable_fallback"`
		} `json:"upload_url"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

// MediafireActionTokenResponse is the user/get_action_token payload.
type MediafireActionTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		ActionToken       string `json:"action_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

// MediafirePollResponse is the upload/poll_upload payload reporting the
// assembly status and, on success, the new file's quick key.
type MediafirePollResponse struct {
	Response struct {
		Action   string `json:"action"`
		Doupload struct {
			Result      string `json:"result"`
			Status      string `json:"status"`
			Description string `json:"description"`
			QuickKey    string `json:"quickkey"`
			Hash        string `json:"hash"`
			Filename    string `json:"filename"`
			Size        string `json:"size"`
			Created     string `json:"created"`
			CreatedUTC  string `json:"created_utc"`
			Revision    string `json:"revision"`
		} `json:"doupload"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}

// MediafireFileSearchResponse is the file search payload used to locate
// an already-uploaded file by hash/name.
type MediafireFileSearchResponse struct {
	Response struct {
		Action            string `json:"action"`
		FileInfo          []File `json:"file_info"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
626
drivers/mediafire/util.go
Normal file
626
drivers/mediafire/util.go
Normal file
@@ -0,0 +1,626 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
)
|
||||
|
||||
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
|
||||
tokenURL := d.hostBase + "/application/get_session_token.php"
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
req.Header.Set("Accept", "*/*")
|
||||
req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
|
||||
req.Header.Set("Accept-Language", "en-US,en;q=0.9")
|
||||
req.Header.Set("Content-Length", "0")
|
||||
req.Header.Set("Cookie", d.Cookie)
|
||||
req.Header.Set("DNT", "1")
|
||||
req.Header.Set("Origin", d.hostBase)
|
||||
req.Header.Set("Priority", "u=1, i")
|
||||
req.Header.Set("Referer", (d.hostBase + "/"))
|
||||
req.Header.Set("Sec-Ch-Ua", d.secChUa)
|
||||
req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
|
||||
req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
|
||||
req.Header.Set("Sec-Fetch-Dest", "empty")
|
||||
req.Header.Set("Sec-Fetch-Mode", "cors")
|
||||
req.Header.Set("Sec-Fetch-Site", "same-site")
|
||||
req.Header.Set("User-Agent", d.userAgent)
|
||||
//req.Header.Set("Connection", "keep-alive")
|
||||
|
||||
resp, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
//fmt.Printf("getSessionToken :: Raw response: %s\n", string(body))
|
||||
//fmt.Printf("getSessionToken :: Parsed response: %+v\n", resp)
|
||||
|
||||
var tokenResp struct {
|
||||
Response struct {
|
||||
SessionToken string `json:"session_token"`
|
||||
} `json:"response"`
|
||||
}
|
||||
|
||||
if resp.StatusCode == 200 {
|
||||
if err := json.Unmarshal(body, &tokenResp); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if tokenResp.Response.SessionToken == "" {
|
||||
return "", fmt.Errorf("empty session token received")
|
||||
}
|
||||
|
||||
cookieMap := make(map[string]string)
|
||||
for _, cookie := range resp.Cookies() {
|
||||
cookieMap[cookie.Name] = cookie.Value
|
||||
}
|
||||
|
||||
if len(cookieMap) > 0 {
|
||||
|
||||
var cookies []string
|
||||
for name, value := range cookieMap {
|
||||
cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
|
||||
}
|
||||
d.Cookie = strings.Join(cookies, "; ")
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
//fmt.Printf("getSessionToken :: Captured cookies: %s\n", d.Cookie)
|
||||
}
|
||||
|
||||
} else {
|
||||
return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
d.SessionToken = tokenResp.Response.SessionToken
|
||||
|
||||
//fmt.Printf("Init :: Obtain Session Token %v", d.SessionToken)
|
||||
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
return d.SessionToken, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) renewToken(_ context.Context) error {
|
||||
query := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
}
|
||||
|
||||
var resp MediafireRenewTokenResponse
|
||||
_, err := d.postForm("/user/renew_session_token.php", query, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to renew token: %w", err)
|
||||
}
|
||||
|
||||
//fmt.Printf("getInfo :: Raw response: %s\n", string(body))
|
||||
//fmt.Printf("getInfo :: Parsed response: %+v\n", resp)
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
d.SessionToken = resp.Response.SessionToken
|
||||
|
||||
//fmt.Printf("Init :: Renew Session Token: %s", resp.Response.Result)
|
||||
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
|
||||
files := make([]File, 0)
|
||||
hasMore := true
|
||||
chunkNumber := 1
|
||||
|
||||
for hasMore {
|
||||
resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, folder := range resp.Folders {
|
||||
files = append(files, File{
|
||||
ID: folder.FolderKey,
|
||||
Name: folder.Name,
|
||||
Size: 0,
|
||||
CreatedUTC: folder.CreatedUTC,
|
||||
IsFolder: true,
|
||||
})
|
||||
}
|
||||
|
||||
for _, file := range resp.Files {
|
||||
size, _ := strconv.ParseInt(file.Size, 10, 64)
|
||||
files = append(files, File{
|
||||
ID: file.QuickKey,
|
||||
Name: file.Filename,
|
||||
Size: size,
|
||||
CreatedUTC: file.CreatedUTC,
|
||||
IsFolder: false,
|
||||
})
|
||||
}
|
||||
|
||||
hasMore = resp.MoreChunks
|
||||
chunkNumber++
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
|
||||
|
||||
foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &FolderContentResponse{
|
||||
Folders: foldersResp.Response.FolderContent.Folders,
|
||||
Files: filesResp.Response.FolderContent.Files,
|
||||
MoreChunks: foldersResp.Response.FolderContent.MoreChunks == "yes" || filesResp.Response.FolderContent.MoreChunks == "yes",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFolderContentByType(_ context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key": folderKey,
|
||||
"content_type": contentType,
|
||||
"chunk": strconv.Itoa(chunkNumber),
|
||||
"chunk_size": strconv.FormatInt(d.ChunkSize, 10),
|
||||
"details": "yes",
|
||||
"order_direction": d.OrderDirection,
|
||||
"order_by": d.OrderBy,
|
||||
"filter": "",
|
||||
}
|
||||
|
||||
var resp MediafireResponse
|
||||
_, err := d.postForm("/folder/get_content.php", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
|
||||
created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)
|
||||
|
||||
var thumbnailURL string
|
||||
if !f.IsFolder && f.ID != "" {
|
||||
thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
|
||||
}
|
||||
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: f.ID,
|
||||
//Path: "",
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Modified: created,
|
||||
Ctime: created,
|
||||
IsFolder: f.IsFolder,
|
||||
},
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumbnailURL,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Mediafire) getForm(endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
|
||||
req.SetQueryParams(query)
|
||||
|
||||
req.SetHeaders(map[string]string{
|
||||
"Cookie": d.Cookie,
|
||||
//"User-Agent": base.UserAgent,
|
||||
"User-Agent": d.userAgent,
|
||||
"Origin": d.appBase,
|
||||
"Referer": d.appBase + "/",
|
||||
})
|
||||
|
||||
// If response OK
|
||||
if resp != nil {
|
||||
req.SetResult(resp)
|
||||
}
|
||||
|
||||
// Targets MediaFire API
|
||||
res, err := req.Get(d.apiBase + endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Body(), nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) postForm(endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
|
||||
req.SetFormData(data)
|
||||
|
||||
req.SetHeaders(map[string]string{
|
||||
"Cookie": d.Cookie,
|
||||
"Content-Type": "application/x-www-form-urlencoded",
|
||||
//"User-Agent": base.UserAgent,
|
||||
"User-Agent": d.userAgent,
|
||||
"Origin": d.appBase,
|
||||
"Referer": d.appBase + "/",
|
||||
})
|
||||
|
||||
// If response OK
|
||||
if resp != nil {
|
||||
req.SetResult(resp)
|
||||
}
|
||||
|
||||
// Targets MediaFire API
|
||||
res, err := req.Post(d.apiBase + endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Body(), nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getDirectDownloadLink(_ context.Context, fileID string) (string, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"quick_key": fileID,
|
||||
"link_type": "direct_download",
|
||||
"response_format": "json",
|
||||
}
|
||||
|
||||
var resp MediafireDirectDownloadResponse
|
||||
_, err := d.getForm("/file/get_links.php", data, &resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return "", fmt.Errorf("MediaFire API error: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
if len(resp.Response.Links) == 0 {
|
||||
return "", fmt.Errorf("no download links found")
|
||||
}
|
||||
|
||||
return resp.Response.Links[0].DirectDownload, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) calculateSHA256(file *os.File) (string, error) {
|
||||
hasher := sha256.New()
|
||||
if _, err := file.Seek(0, 0); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := io.Copy(hasher, file); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hex.EncodeToString(hasher.Sum(nil)), nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
|
||||
|
||||
actionToken, err := d.getActionToken(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get action token: %w", err)
|
||||
}
|
||||
|
||||
query := map[string]string{
|
||||
"session_token": actionToken, /* d.SessionToken */
|
||||
"filename": filename,
|
||||
"size": strconv.FormatInt(filesize, 10),
|
||||
"hash": filehash,
|
||||
"folder_key": folderKey,
|
||||
"resumable": "yes",
|
||||
"response_format": "json",
|
||||
}
|
||||
|
||||
var resp MediafireCheckResponse
|
||||
_, err = d.postForm("/upload/check.php", query, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//fmt.Printf("uploadCheck :: Raw response: %s\n", string(body))
|
||||
//fmt.Printf("uploadCheck :: Parsed response: %+v\n", resp)
|
||||
|
||||
//fmt.Printf("uploadCheck :: ResumableUpload section: %+v\n", resp.Response.ResumableUpload)
|
||||
//fmt.Printf("uploadCheck :: Upload key specifically: '%s'\n", resp.Response.ResumableUpload.UploadKey)
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire upload check failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) resumableUpload(ctx context.Context, folderKey, uploadKey string, unitData []byte, unitID int, fileHash, filename string, totalFileSize int64) (string, error) {
|
||||
actionToken, err := d.getActionToken(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
url := d.apiBase + "/upload/resumable.php"
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(unitData))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
q := req.URL.Query()
|
||||
q.Add("folder_key", folderKey)
|
||||
q.Add("response_format", "json")
|
||||
q.Add("session_token", actionToken)
|
||||
q.Add("key", uploadKey)
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
req.Header.Set("x-filehash", fileHash)
|
||||
req.Header.Set("x-filesize", strconv.FormatInt(totalFileSize, 10))
|
||||
req.Header.Set("x-unit-id", strconv.Itoa(unitID))
|
||||
req.Header.Set("x-unit-size", strconv.FormatInt(int64(len(unitData)), 10))
|
||||
req.Header.Set("x-unit-hash", d.sha256Hex(bytes.NewReader(unitData)))
|
||||
req.Header.Set("x-filename", filename)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.ContentLength = int64(len(unitData))
|
||||
|
||||
/* fmt.Printf("Debug resumable upload request:\n")
|
||||
fmt.Printf(" URL: %s\n", req.URL.String())
|
||||
fmt.Printf(" Headers: %+v\n", req.Header)
|
||||
fmt.Printf(" Unit ID: %d\n", unitID)
|
||||
fmt.Printf(" Unit Size: %d\n", len(unitData))
|
||||
fmt.Printf(" Upload Key: %s\n", uploadKey)
|
||||
fmt.Printf(" Action Token: %s\n", actionToken) */
|
||||
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read response body: %v", err)
|
||||
}
|
||||
|
||||
//fmt.Printf("MediaFire resumable upload response (status %d): %s\n", res.StatusCode, string(body))
|
||||
|
||||
var uploadResp struct {
|
||||
Response struct {
|
||||
Doupload struct {
|
||||
Key string `json:"key"`
|
||||
} `json:"doupload"`
|
||||
Result string `json:"result"`
|
||||
} `json:"response"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &uploadResp); err != nil {
|
||||
return "", fmt.Errorf("failed to parse response: %v", err)
|
||||
}
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
return "", fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
|
||||
}
|
||||
|
||||
return uploadResp.Response.Doupload.Key, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) uploadUnits(ctx context.Context, file *os.File, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
|
||||
unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
|
||||
numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
|
||||
uploadKey := checkResp.Response.ResumableUpload.UploadKey
|
||||
|
||||
stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
|
||||
intWords := make([]int, len(stringWords))
|
||||
for i, word := range stringWords {
|
||||
intWords[i], _ = strconv.Atoi(word)
|
||||
}
|
||||
|
||||
var finalUploadKey string
|
||||
|
||||
for unitID := 0; unitID < numUnits; unitID++ {
|
||||
|
||||
if utils.IsCanceled(ctx) {
|
||||
return "", ctx.Err()
|
||||
}
|
||||
|
||||
if d.isUnitUploaded(intWords, unitID) {
|
||||
up(float64(unitID+1) * 100 / float64(numUnits))
|
||||
continue
|
||||
}
|
||||
|
||||
uploadKey, err := d.uploadSingleUnit(ctx, file, unitID, unitSize, fileHash, filename, uploadKey, folderKey)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
finalUploadKey = uploadKey
|
||||
|
||||
up(float64(unitID+1) * 100 / float64(numUnits))
|
||||
}
|
||||
|
||||
return finalUploadKey, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) uploadSingleUnit(ctx context.Context, file *os.File, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string) (string, error) {
|
||||
start := int64(unitID) * unitSize
|
||||
size := unitSize
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
fileSize := stat.Size()
|
||||
|
||||
if start+size > fileSize {
|
||||
size = fileSize - start
|
||||
}
|
||||
|
||||
unitData := make([]byte, size)
|
||||
if _, err := file.ReadAt(unitData, start); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
|
||||
}
|
||||
|
||||
func (d *Mediafire) getActionToken(_ context.Context) (string, error) {
|
||||
|
||||
if d.actionToken != "" {
|
||||
return d.actionToken, nil
|
||||
}
|
||||
|
||||
data := map[string]string{
|
||||
"type": "upload",
|
||||
"lifespan": "1440",
|
||||
"response_format": "json",
|
||||
"session_token": d.SessionToken,
|
||||
}
|
||||
|
||||
var resp MediafireActionTokenResponse
|
||||
_, err := d.postForm("/user/get_action_token.php", data, &resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return resp.Response.ActionToken, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
|
||||
|
||||
actionToken, err := d.getActionToken(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get action token: %w", err)
|
||||
}
|
||||
|
||||
//fmt.Printf("Debug Key: %+v\n", key)
|
||||
|
||||
query := map[string]string{
|
||||
"key": key,
|
||||
"response_format": "json",
|
||||
"session_token": actionToken, /* d.SessionToken */
|
||||
}
|
||||
|
||||
var resp MediafirePollResponse
|
||||
_, err = d.postForm("/upload/poll_upload.php", query, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//fmt.Printf("pollUpload :: Raw response: %s\n", string(body))
|
||||
//fmt.Printf("pollUpload :: Parsed response: %+v\n", resp)
|
||||
|
||||
//fmt.Printf("pollUpload :: Debug Result: %+v\n", resp.Response.Result)
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire poll upload failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) sha256Hex(r io.Reader) string {
|
||||
h := sha256.New()
|
||||
io.Copy(h, r)
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
|
||||
wordIndex := unitID / 16
|
||||
bitIndex := unitID % 16
|
||||
if wordIndex >= len(words) {
|
||||
return false
|
||||
}
|
||||
return (words[wordIndex]>>bitIndex)&1 == 1
|
||||
}
|
||||
|
||||
func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
|
||||
|
||||
if fileInfo, err := d.getFileByHash(ctx, fileHash); err == nil && fileInfo != nil {
|
||||
return fileInfo, nil
|
||||
}
|
||||
|
||||
files, err := d.getFiles(ctx, folderKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if file.Name == filename && !file.IsFolder {
|
||||
return d.fileToObj(file), nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("existing file not found")
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFileByHash(_ context.Context, hash string) (*model.ObjThumb, error) {
|
||||
query := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"hash": hash,
|
||||
}
|
||||
|
||||
var resp MediafireFileSearchResponse
|
||||
_, err := d.postForm("/file/get_info.php", query, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire file search failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
if len(resp.Response.FileInfo) == 0 {
|
||||
return nil, fmt.Errorf("file not found by hash")
|
||||
}
|
||||
|
||||
file := resp.Response.FileInfo[0]
|
||||
return d.fileToObj(file), nil
|
||||
}
|
||||
@@ -9,8 +9,9 @@ type Addition struct {
|
||||
AccessToken string `json:"access_token" required:"true"`
|
||||
ProjectID string `json:"project_id"`
|
||||
driver.RootID
|
||||
OrderBy string `json:"order_by" type:"select" options:"updated_at,title,size" default:"title"`
|
||||
OrderDesc bool `json:"order_desc"`
|
||||
OrderBy string `json:"order_by" type:"select" options:"updated_at,title,size" default:"title"`
|
||||
OrderDesc bool `json:"order_desc"`
|
||||
DeviceFingerprint string `json:"device_fingerprint" required:"true"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
||||
@@ -17,6 +17,9 @@ import (
|
||||
func (d *MediaTrack) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
||||
if d.DeviceFingerprint != "" {
|
||||
req.SetHeader("X-Device-Fingerprint", d.DeviceFingerprint)
|
||||
}
|
||||
if callback != nil {
|
||||
callback(req)
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ type Addition struct {
|
||||
IsSharepoint bool `json:"is_sharepoint"`
|
||||
ClientID string `json:"client_id" required:"true"`
|
||||
ClientSecret string `json:"client_secret" required:"true"`
|
||||
RedirectUri string `json:"redirect_uri" required:"true" default:"https://alist.nn.ci/tool/onedrive/callback"`
|
||||
RedirectUri string `json:"redirect_uri" required:"true" default:"https://alistgo.com/tool/onedrive/callback"`
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
SiteId string `json:"site_id"`
|
||||
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
stdpath "path"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
@@ -17,7 +18,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var onedriveHostMap = map[string]Host{
|
||||
@@ -204,19 +204,18 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
log.Debugf("upload: %d", finish)
|
||||
var byteSize int64 = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
log.Debug(err, n)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -228,19 +227,31 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[Onedrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
res.Body.Close()
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
stdpath "path"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
@@ -17,7 +18,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var onedriveHostMap = map[string]Host{
|
||||
@@ -154,19 +154,18 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
||||
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
|
||||
var finish int64 = 0
|
||||
DEFAULT := d.ChunkSize * 1024 * 1024
|
||||
retryCount := 0
|
||||
maxRetries := 3
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
log.Debugf("upload: %d", finish)
|
||||
var byteSize int64 = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteSize := min(left, DEFAULT)
|
||||
utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
log.Debug(err, n)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -178,19 +177,31 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
|
||||
req.ContentLength = byteSize
|
||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
||||
switch {
|
||||
case res.StatusCode >= 500 && res.StatusCode <= 504:
|
||||
retryCount++
|
||||
if retryCount > maxRetries {
|
||||
res.Body.Close()
|
||||
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
|
||||
}
|
||||
backoff := time.Duration(1<<retryCount) * time.Second
|
||||
utils.Log.Warnf("[OnedriveAPP] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
|
||||
time.Sleep(backoff)
|
||||
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
default:
|
||||
res.Body.Close()
|
||||
retryCount = 0
|
||||
finish += byteSize
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
res.Body.Close()
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
189
drivers/pcloud/driver.go
Normal file
189
drivers/pcloud/driver.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package pcloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
type PCloud struct {
|
||||
model.Storage
|
||||
Addition
|
||||
AccessToken string // Actual access token obtained from refresh token
|
||||
}
|
||||
|
||||
func (d *PCloud) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *PCloud) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *PCloud) Init(ctx context.Context) error {
|
||||
// Map hostname selection to actual API endpoints
|
||||
if d.Hostname == "us" {
|
||||
d.Hostname = "api.pcloud.com"
|
||||
} else if d.Hostname == "eu" {
|
||||
d.Hostname = "eapi.pcloud.com"
|
||||
}
|
||||
|
||||
// Set default root folder ID if not provided
|
||||
if d.RootFolderID == "" {
|
||||
d.RootFolderID = "d0"
|
||||
}
|
||||
|
||||
// Use the access token directly (like rclone)
|
||||
d.AccessToken = d.RefreshToken // RefreshToken field actually contains the access_token
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *PCloud) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *PCloud) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
folderID := d.RootFolderID
|
||||
if dir.GetID() != "" {
|
||||
folderID = dir.GetID()
|
||||
}
|
||||
|
||||
files, err := d.getFiles(folderID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return utils.SliceConvert(files, func(src FileObject) (model.Obj, error) {
|
||||
return fileToObj(src), nil
|
||||
})
|
||||
}
|
||||
|
||||
func (d *PCloud) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
downloadURL, err := d.getDownloadLink(file.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: downloadURL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Mkdir implements driver.Mkdir
|
||||
func (d *PCloud) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
parentID := d.RootFolderID
|
||||
if parentDir.GetID() != "" {
|
||||
parentID = parentDir.GetID()
|
||||
}
|
||||
|
||||
return d.createFolder(parentID, dirName)
|
||||
}
|
||||
|
||||
// Move implements driver.Move
|
||||
func (d *PCloud) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
// pCloud uses renamefile/renamefolder for both rename and move
|
||||
endpoint := "/renamefile"
|
||||
paramName := "fileid"
|
||||
|
||||
if srcObj.IsDir() {
|
||||
endpoint = "/renamefolder"
|
||||
paramName = "folderid"
|
||||
}
|
||||
|
||||
var resp ItemResult
|
||||
_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
|
||||
req.SetFormData(map[string]string{
|
||||
paramName: extractID(srcObj.GetID()),
|
||||
"tofolderid": extractID(dstDir.GetID()),
|
||||
"toname": srcObj.GetName(),
|
||||
})
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return fmt.Errorf("pCloud error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rename implements driver.Rename
|
||||
func (d *PCloud) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
endpoint := "/renamefile"
|
||||
paramName := "fileid"
|
||||
|
||||
if srcObj.IsDir() {
|
||||
endpoint = "/renamefolder"
|
||||
paramName = "folderid"
|
||||
}
|
||||
|
||||
var resp ItemResult
|
||||
_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
|
||||
req.SetFormData(map[string]string{
|
||||
paramName: extractID(srcObj.GetID()),
|
||||
"toname": newName,
|
||||
})
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return fmt.Errorf("pCloud error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy implements driver.Copy
|
||||
func (d *PCloud) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
endpoint := "/copyfile"
|
||||
paramName := "fileid"
|
||||
|
||||
if srcObj.IsDir() {
|
||||
endpoint = "/copyfolder"
|
||||
paramName = "folderid"
|
||||
}
|
||||
|
||||
var resp ItemResult
|
||||
_, err := d.requestWithRetry(endpoint, "POST", func(req *resty.Request) {
|
||||
req.SetFormData(map[string]string{
|
||||
paramName: extractID(srcObj.GetID()),
|
||||
"tofolderid": extractID(dstDir.GetID()),
|
||||
"toname": srcObj.GetName(),
|
||||
})
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return fmt.Errorf("pCloud error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove implements driver.Remove.
// Deletes obj from pCloud; folders are removed recursively (delete()
// selects /deletefolderrecursive for directories).
func (d *PCloud) Remove(ctx context.Context, obj model.Obj) error {
	return d.delete(obj.GetID(), obj.IsDir())
}
|
||||
|
||||
// Put implements driver.Put.
// Uploads the stream into dstDir, falling back to the configured root
// folder when the destination has no ID.
// NOTE(review): the `up` progress callback is never forwarded to
// uploadFile, so upload progress is not reported — confirm intended.
func (d *PCloud) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	parentID := d.RootFolderID
	if dstDir.GetID() != "" {
		parentID = dstDir.GetID()
	}

	return d.uploadFile(ctx, stream, parentID, stream.GetName(), stream.GetSize())
}
|
||||
30
drivers/pcloud/meta.go
Normal file
30
drivers/pcloud/meta.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package pcloud
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
// Addition holds the user-configurable options for the pCloud driver.
type Addition struct {
	// Using json tag "access_token" for UI display, but internally it's a refresh token
	RefreshToken string `json:"access_token" required:"true" help:"OAuth token from pCloud authorization"`
	// Hostname selects the account region ("us" or "eu"); pCloud accounts
	// are region-bound and must use the matching API host.
	Hostname     string `json:"hostname" type:"select" options:"us,eu" default:"us" help:"Select pCloud server region"`
	RootFolderID string `json:"root_folder_id" help:"Get folder ID from URL like https://my.pcloud.com/#/filemanager?folder=12345678901 (leave empty for root folder)"`
	// Optional custom OAuth app; defaults in util.go are used when empty.
	ClientID     string `json:"client_id" help:"Custom OAuth client ID (optional)"`
	ClientSecret string `json:"client_secret" help:"Custom OAuth client secret (optional)"`
}

// GetRootId implements the driver.IRootId interface, exposing the
// configured root folder ID to the framework.
func (a Addition) GetRootId() string {
	return a.RootFolderID
}
|
||||
|
||||
// config is the static driver descriptor registered with the op registry.
var config = driver.Config{
	Name: "pCloud",
}

// init registers the pCloud driver factory at package load time.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &PCloud{}
	})
}
|
||||
91
drivers/pcloud/types.go
Normal file
91
drivers/pcloud/types.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package pcloud
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
)
|
||||
|
||||
// ErrorResult represents a pCloud API error response.
// pCloud signals errors through a non-zero "result" code in the body.
type ErrorResult struct {
	Result int    `json:"result"`
	Error  string `json:"error"`
}

// TokenResponse represents an OAuth token response from /oauth2_token.
type TokenResponse struct {
	AccessToken string `json:"access_token"`
	TokenType   string `json:"token_type"`
}

// ItemResult represents the common pCloud API response envelope.
type ItemResult struct {
	Result   int         `json:"result"`
	Metadata *FolderMeta `json:"metadata,omitempty"`
}

// FolderMeta contains folder metadata including its directory listing.
type FolderMeta struct {
	Contents []FileObject `json:"contents,omitempty"`
}

// DownloadLinkResult represents a /getfilelink response; callers build
// the URL as "https://" + Hosts[0] + Path.
type DownloadLinkResult struct {
	Result int      `json:"result"`
	Hosts  []string `json:"hosts"`
	Path   string   `json:"path"`
}

// FileObject represents a file or folder object in pCloud.
type FileObject struct {
	Name     string `json:"name"`
	Created  string `json:"created"`  // pCloud returns RFC1123 format string
	Modified string `json:"modified"` // pCloud returns RFC1123 format string
	IsFolder bool   `json:"isfolder"`
	FolderID uint64 `json:"folderid,omitempty"` // populated for folders (see fileToObj)
	FileID   uint64 `json:"fileid,omitempty"`   // populated for files (see fileToObj)
	Size     uint64 `json:"size"`
	ParentID uint64 `json:"parentfolderid"`
	Icon     string `json:"icon,omitempty"`
	Hash     uint64 `json:"hash,omitempty"`
	Category int    `json:"category,omitempty"`
	ID       string `json:"id,omitempty"`
}
|
||||
|
||||
// Convert FileObject to model.Obj
|
||||
func fileToObj(f FileObject) model.Obj {
|
||||
// Parse RFC1123 format time from pCloud
|
||||
modTime, _ := time.Parse(time.RFC1123, f.Modified)
|
||||
|
||||
obj := model.Object{
|
||||
Name: f.Name,
|
||||
Size: int64(f.Size),
|
||||
Modified: modTime,
|
||||
IsFolder: f.IsFolder,
|
||||
}
|
||||
|
||||
if f.IsFolder {
|
||||
obj.ID = "d" + strconv.FormatUint(f.FolderID, 10)
|
||||
} else {
|
||||
obj.ID = "f" + strconv.FormatUint(f.FileID, 10)
|
||||
}
|
||||
|
||||
return &obj
|
||||
}
|
||||
|
||||
// extractID strips the single-character type prefix ('d' for folders,
// 'f' for files) that fileToObj attaches to numeric pCloud IDs.
// IDs without a recognized prefix are returned unchanged.
func extractID(id string) string {
	if len(id) < 2 {
		return id
	}
	switch id[0] {
	case 'd', 'f':
		return id[1:]
	}
	return id
}
|
||||
|
||||
// Get folder ID from path, return "0" for root
|
||||
func getFolderID(path string) string {
|
||||
if path == "/" || path == "" {
|
||||
return "0"
|
||||
}
|
||||
return extractID(path)
|
||||
}
|
||||
297
drivers/pcloud/util.go
Normal file
297
drivers/pcloud/util.go
Normal file
@@ -0,0 +1,297 @@
|
||||
package pcloud
|
||||
|
||||
import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
)
|
||||
|
||||
// Default OAuth application credentials used when the user does not
// configure a custom client_id/client_secret in the driver options.
const (
	defaultClientID     = "DnONSzyJXpm"
	defaultClientSecret = "VKEnd3ze4jsKFGg8TJiznwFG8"
)
|
||||
|
||||
// Get API base URL
|
||||
func (d *PCloud) getAPIURL() string {
|
||||
return "https://" + d.Hostname
|
||||
}
|
||||
|
||||
// Get OAuth client credentials
|
||||
func (d *PCloud) getClientCredentials() (string, string) {
|
||||
clientID := d.ClientID
|
||||
clientSecret := d.ClientSecret
|
||||
|
||||
if clientID == "" {
|
||||
clientID = defaultClientID
|
||||
}
|
||||
if clientSecret == "" {
|
||||
clientSecret = defaultClientSecret
|
||||
}
|
||||
|
||||
return clientID, clientSecret
|
||||
}
|
||||
|
||||
// Refresh OAuth access token
|
||||
func (d *PCloud) refreshToken() error {
|
||||
clientID, clientSecret := d.getClientCredentials()
|
||||
|
||||
var resp TokenResponse
|
||||
_, err := base.RestyClient.R().
|
||||
SetFormData(map[string]string{
|
||||
"client_id": clientID,
|
||||
"client_secret": clientSecret,
|
||||
"grant_type": "refresh_token",
|
||||
"refresh_token": d.RefreshToken,
|
||||
}).
|
||||
SetResult(&resp).
|
||||
Post(d.getAPIURL() + "/oauth2_token")
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.AccessToken = resp.AccessToken
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldRetry determines if an error should be retried based on pCloud-specific logic
|
||||
func (d *PCloud) shouldRetry(statusCode int, apiError *ErrorResult) bool {
|
||||
// HTTP-level retry conditions
|
||||
if statusCode == 429 || statusCode >= 500 {
|
||||
return true
|
||||
}
|
||||
|
||||
// pCloud API-specific retry conditions (like rclone)
|
||||
if apiError != nil && apiError.Result != 0 {
|
||||
// 4xxx: rate limiting
|
||||
if apiError.Result/1000 == 4 {
|
||||
return true
|
||||
}
|
||||
// 5xxx: internal errors
|
||||
if apiError.Result/1000 == 5 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// requestWithRetry makes authenticated API request with retry logic
|
||||
func (d *PCloud) requestWithRetry(endpoint string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||
maxRetries := 3
|
||||
baseDelay := 500 * time.Millisecond
|
||||
|
||||
for attempt := 0; attempt <= maxRetries; attempt++ {
|
||||
body, err := d.request(endpoint, method, callback, resp)
|
||||
if err == nil {
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// If this is the last attempt, return the error
|
||||
if attempt == maxRetries {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if we should retry based on error type
|
||||
if !d.shouldRetryError(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Exponential backoff
|
||||
delay := baseDelay * time.Duration(1<<attempt)
|
||||
time.Sleep(delay)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("max retries exceeded")
|
||||
}
|
||||
|
||||
// shouldRetryError checks if an error should trigger a retry
|
||||
func (d *PCloud) shouldRetryError(err error) bool {
|
||||
// For now, we'll retry on any error
|
||||
// In production, you'd want more specific error handling
|
||||
return true
|
||||
}
|
||||
|
||||
// request performs a single authenticated pCloud API call and returns the
// raw response body.
//
// Only GET and POST are supported. Errors are classified on non-200
// responses: bodies that parse as an ErrorResult become "pCloud API
// error" messages, tagged "(retryable)" when shouldRetry says so (this
// tag is what shouldRetryError keys on); unparseable bodies become plain
// "HTTP error" messages.
// NOTE(review): pCloud can also report errors with a non-zero result code
// inside an HTTP 200 body; those pass through here and must be checked by
// the caller (all callers in this file do) — confirm no caller skips it.
func (d *PCloud) request(endpoint string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()

	// Add access token as query parameter (pCloud doesn't use Bearer auth)
	req.SetQueryParam("access_token", d.AccessToken)

	if callback != nil {
		callback(req)
	}

	if resp != nil {
		req.SetResult(resp)
	}

	var res *resty.Response
	var err error

	switch method {
	case http.MethodGet:
		res, err = req.Get(d.getAPIURL() + endpoint)
	case http.MethodPost:
		res, err = req.Post(d.getAPIURL() + endpoint)
	default:
		return nil, fmt.Errorf("unsupported method: %s", method)
	}

	if err != nil {
		return nil, err
	}

	// Check for API errors with pCloud-specific logic
	if res.StatusCode() != 200 {
		var errResp ErrorResult
		if err := utils.Json.Unmarshal(res.Body(), &errResp); err == nil {
			// Check if this error should trigger a retry
			if d.shouldRetry(res.StatusCode(), &errResp) {
				return nil, fmt.Errorf("pCloud API error (retryable): %s (result: %d)", errResp.Error, errResp.Result)
			}
			return nil, fmt.Errorf("pCloud API error: %s (result: %d)", errResp.Error, errResp.Result)
		}
		return nil, fmt.Errorf("HTTP error: %d", res.StatusCode())
	}

	return res.Body(), nil
}
|
||||
|
||||
// List files in a folder
|
||||
func (d *PCloud) getFiles(folderID string) ([]FileObject, error) {
|
||||
var resp ItemResult
|
||||
_, err := d.requestWithRetry("/listfolder", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParam("folderid", extractID(folderID))
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return nil, fmt.Errorf("pCloud error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
if resp.Metadata == nil {
|
||||
return []FileObject{}, nil
|
||||
}
|
||||
|
||||
return resp.Metadata.Contents, nil
|
||||
}
|
||||
|
||||
// Get download link for a file
|
||||
func (d *PCloud) getDownloadLink(fileID string) (string, error) {
|
||||
var resp DownloadLinkResult
|
||||
_, err := d.requestWithRetry("/getfilelink", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParam("fileid", extractID(fileID))
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return "", fmt.Errorf("pCloud error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
if len(resp.Hosts) == 0 {
|
||||
return "", fmt.Errorf("no download hosts available")
|
||||
}
|
||||
|
||||
return "https://" + resp.Hosts[0] + resp.Path, nil
|
||||
}
|
||||
|
||||
// Create a folder
|
||||
func (d *PCloud) createFolder(parentID, name string) error {
|
||||
var resp ItemResult
|
||||
_, err := d.requestWithRetry("/createfolder", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetFormData(map[string]string{
|
||||
"folderid": extractID(parentID),
|
||||
"name": name,
|
||||
})
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return fmt.Errorf("pCloud error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete a file or folder
|
||||
func (d *PCloud) delete(objID string, isFolder bool) error {
|
||||
endpoint := "/deletefile"
|
||||
paramName := "fileid"
|
||||
|
||||
if isFolder {
|
||||
endpoint = "/deletefolderrecursive"
|
||||
paramName = "folderid"
|
||||
}
|
||||
|
||||
var resp ItemResult
|
||||
_, err := d.requestWithRetry(endpoint, http.MethodPost, func(req *resty.Request) {
|
||||
req.SetFormData(map[string]string{
|
||||
paramName: extractID(objID),
|
||||
})
|
||||
}, &resp)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return fmt.Errorf("pCloud error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upload a file using direct /uploadfile endpoint like rclone
|
||||
func (d *PCloud) uploadFile(ctx context.Context, file io.Reader, parentID, name string, size int64) error {
|
||||
// pCloud requires Content-Length, so we need to know the size
|
||||
if size <= 0 {
|
||||
return fmt.Errorf("file size must be provided for pCloud upload")
|
||||
}
|
||||
|
||||
// Upload directly to /uploadfile endpoint like rclone
|
||||
var resp ItemResult
|
||||
req := base.RestyClient.R().
|
||||
SetQueryParam("access_token", d.AccessToken).
|
||||
SetHeader("Content-Length", strconv.FormatInt(size, 10)).
|
||||
SetFileReader("content", name, file).
|
||||
SetFormData(map[string]string{
|
||||
"filename": name,
|
||||
"folderid": extractID(parentID),
|
||||
"nopartial": "1",
|
||||
})
|
||||
|
||||
// Use PUT method like rclone
|
||||
res, err := req.Put(d.getAPIURL() + "/uploadfile")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse response
|
||||
if err := utils.Json.Unmarshal(res.Body(), &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Result != 0 {
|
||||
return fmt.Errorf("pCloud upload error: result code %d", resp.Result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
418
drivers/proton_drive/driver.go
Normal file
418
drivers/proton_drive/driver.go
Normal file
@@ -0,0 +1,418 @@
|
||||
package protondrive
|
||||
|
||||
/*
|
||||
Package protondrive
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-18
|
||||
|
||||
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
|
||||
|
||||
The power of open-source, the force of teamwork and the magic of reverse engineering!
|
||||
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Да здравствует Родина))
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ProtonMail/gopenpgp/v2/crypto"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
proton_api_bridge "github.com/henrybear327/Proton-API-Bridge"
|
||||
"github.com/henrybear327/Proton-API-Bridge/common"
|
||||
"github.com/henrybear327/go-proton-api"
|
||||
)
|
||||
|
||||
// ProtonDrive is the driver state for a mounted Proton Drive storage.
type ProtonDrive struct {
	model.Storage
	Addition

	// High-level bridge used for most file operations.
	protonDrive *proton_api_bridge.ProtonDrive
	// Session credentials (UID, access/refresh tokens, salted key pass).
	credentials *common.ProtonDriveCredential

	// Client identification strings set by the factory in meta.go.
	apiBase    string
	appVersion string
	protonJson string
	userAgent  string
	sdkVersion string
	webDriveAV string

	// Local HTTP server that proxies downloads (see Link); downloadTokens
	// maps one-off tokens to pending downloads, guarded by tokenMutex.
	tempServer     *http.Server
	tempServerPort int
	downloadTokens map[string]*downloadInfo
	tokenMutex     sync.RWMutex

	// Low-level Proton API client, created in Init.
	c *proton.Client
	//m *proton.Manager

	credentialCacheFile string

	//userKR *crypto.KeyRing
	// Per-address keyrings and address metadata, keyed by address ID
	// (populated via getAccountKRs in Init).
	addrKRs  map[string]*crypto.KeyRing
	addrData map[string]proton.Address

	// Share/link roots copied from the bridge after login.
	MainShare *proton.Share
	RootLink  *proton.Link

	DefaultAddrKR *crypto.KeyRing
	MainShareKR   *crypto.KeyRing
}
|
||||
|
||||
// Config returns the static driver configuration declared in meta.go.
func (d *ProtonDrive) Config() driver.Config {
	return config
}
|
||||
|
||||
// GetAddition exposes the user-configurable options to the framework.
func (d *ProtonDrive) GetAddition() driver.Additional {
	return &d.Addition
}
|
||||
|
||||
// Init implements driver.Init: logs into Proton, builds the API bridge
// and low-level client, and caches share/link roots plus address keyrings
// on the driver.
//
// Flow: validate options → try cached session credentials → call
// NewProtonDrive (full login or token reuse) → build proton.Client →
// decode the salted key pass → fetch per-address keyrings.
func (d *ProtonDrive) Init(ctx context.Context) error {

	// NOTE(review): this recover swallows any initialization panic and
	// lets Init return nil, leaving the driver half-initialized — confirm
	// this is intended rather than returning an error from the recover.
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("ProtonDrive initialization panic: %v", r)
		}
	}()

	if d.Username == "" {
		return fmt.Errorf("username is required")
	}
	if d.Password == "" {
		return fmt.Errorf("password is required")
	}

	//fmt.Printf("ProtonDrive Init: Username=%s, TwoFACode=%s", d.Username, d.TwoFACode)

	if ctx == nil {
		return fmt.Errorf("context cannot be nil")
	}

	// A cached session is usable only when all four fields are present.
	cachedCredentials, err := d.loadCachedCredentials()
	useReusableLogin := false
	var reusableCredential *common.ReusableCredentialData

	if err == nil && cachedCredentials != nil &&
		cachedCredentials.UID != "" && cachedCredentials.AccessToken != "" &&
		cachedCredentials.RefreshToken != "" && cachedCredentials.SaltedKeyPass != "" {
		useReusableLogin = true
		reusableCredential = cachedCredentials
	} else {
		useReusableLogin = false
		reusableCredential = &common.ReusableCredentialData{}
	}

	// NOTE(review): UseReusableLogin is hard-coded to false even when
	// useReusableLogin is true, so the bridge always performs a fresh
	// first login — confirm whether this is deliberate.
	config := &common.Config{
		AppVersion: d.appVersion,
		UserAgent:  d.userAgent,
		FirstLoginCredential: &common.FirstLoginCredentialData{
			Username: d.Username,
			Password: d.Password,
			TwoFA:    d.TwoFACode,
		},
		EnableCaching:              true,
		ConcurrentBlockUploadCount: 5,
		ConcurrentFileCryptoCount:  2,
		UseReusableLogin:           false,
		ReplaceExistingDraft:       true,
		ReusableCredential:         reusableCredential,
		CredentialCacheFile:        d.credentialCacheFile,
	}

	if config.FirstLoginCredential == nil {
		return fmt.Errorf("failed to create login credentials, FirstLoginCredential cannot be nil")
	}

	//fmt.Printf("Calling NewProtonDrive...")

	protonDrive, credentials, err := proton_api_bridge.NewProtonDrive(
		ctx,
		config,
		func(auth proton.Auth) {},
		func() {},
	)

	// NOTE(review): credentials is inspected before err; when the call
	// failed this masks the underlying error with a generic message in
	// the !useReusableLogin case — consider checking err first.
	if credentials == nil && !useReusableLogin {
		return fmt.Errorf("failed to get credentials from NewProtonDrive")
	}

	if err != nil {
		return fmt.Errorf("failed to initialize ProtonDrive: %w", err)
	}

	d.protonDrive = protonDrive

	var finalCredentials *common.ProtonDriveCredential

	if useReusableLogin {

		// For reusable login, create credentials from cached data
		finalCredentials = &common.ProtonDriveCredential{
			UID:           reusableCredential.UID,
			AccessToken:   reusableCredential.AccessToken,
			RefreshToken:  reusableCredential.RefreshToken,
			SaltedKeyPass: reusableCredential.SaltedKeyPass,
		}

		d.credentials = finalCredentials
	} else {
		d.credentials = credentials
	}

	// Build the low-level client used for raw API calls (move/rename).
	clientOptions := []proton.Option{
		proton.WithAppVersion(d.appVersion),
		proton.WithUserAgent(d.userAgent),
	}
	manager := proton.New(clientOptions...)
	d.c = manager.NewClient(d.credentials.UID, d.credentials.AccessToken, d.credentials.RefreshToken)

	saltedKeyPassBytes, err := base64.StdEncoding.DecodeString(d.credentials.SaltedKeyPass)
	if err != nil {
		return fmt.Errorf("failed to decode salted key pass: %w", err)
	}

	_, addrKRs, addrs, _, err := getAccountKRs(ctx, d.c, nil, saltedKeyPassBytes)
	if err != nil {
		return fmt.Errorf("failed to get account keyrings: %w", err)
	}

	// Cache share/link roots and keyrings for later operations.
	d.MainShare = protonDrive.MainShare
	d.RootLink = protonDrive.RootLink
	d.MainShareKR = protonDrive.MainShareKR
	d.DefaultAddrKR = protonDrive.DefaultAddrKR
	d.addrKRs = addrKRs
	d.addrData = addrs

	return nil
}
|
||||
|
||||
func (d *ProtonDrive) Drop(ctx context.Context) error {
|
||||
if d.tempServer != nil {
|
||||
d.tempServer.Shutdown(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// List implements driver.List: resolves dir to a Proton link ID (root is
// special-cased) and converts the bridge's directory listing to model
// objects. Returns an empty, non-nil slice for an empty directory.
func (d *ProtonDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var linkID string

	if dir.GetPath() == "/" {
		linkID = d.protonDrive.RootLink.LinkID
	} else {

		// Non-root paths are resolved by walking the tree (searchByPath).
		link, err := d.searchByPath(ctx, dir.GetPath(), true)
		if err != nil {
			return nil, err
		}
		linkID = link.LinkID
	}

	entries, err := d.protonDrive.ListDirectory(ctx, linkID)
	if err != nil {
		return nil, fmt.Errorf("failed to list directory: %w", err)
	}

	//fmt.Printf("Found %d entries for path %s\n", len(entries), dir.GetPath())
	//fmt.Printf("Found %d entries\n", len(entries))

	if len(entries) == 0 {
		emptySlice := []model.Obj{}

		//fmt.Printf("Returning empty slice (entries): %+v\n", emptySlice)

		return emptySlice, nil
	}

	var objects []model.Obj
	for _, entry := range entries {
		obj := &model.Object{
			Name:     entry.Name,
			Size:     entry.Link.Size,
			Modified: time.Unix(entry.Link.ModifyTime, 0),
			IsFolder: entry.IsFolder,
		}
		objects = append(objects, obj)
	}

	return objects, nil
}
|
||||
|
||||
// Link implements driver.Link: starts the local proxy server on demand,
// mints a one-off download token for the file, and returns a URL pointing
// at the proxy.
// NOTE(review): the URL is hard-coded to localhost, so it only works when
// AList itself proxies the request or the client runs on the same host —
// confirm the driver is meant to be proxy-only.
func (d *ProtonDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	link, err := d.searchByPath(ctx, file.GetPath(), false)
	if err != nil {
		return nil, err
	}

	if err := d.ensureTempServer(); err != nil {
		return nil, fmt.Errorf("failed to start temp server: %w", err)
	}

	token := d.generateDownloadToken(link.LinkID, file.GetName())

	/* return &model.Link{
		URL: fmt.Sprintf("protondrive://download/%s", link.LinkID),
	}, nil */

	return &model.Link{
		URL: fmt.Sprintf("http://localhost:%d/temp/%s", d.tempServerPort, token),
	}, nil
}
|
||||
|
||||
func (d *ProtonDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||
var parentLinkID string
|
||||
|
||||
if parentDir.GetPath() == "/" {
|
||||
parentLinkID = d.protonDrive.RootLink.LinkID
|
||||
} else {
|
||||
link, err := d.searchByPath(ctx, parentDir.GetPath(), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parentLinkID = link.LinkID
|
||||
}
|
||||
|
||||
_, err := d.protonDrive.CreateNewFolderByID(ctx, parentLinkID, dirName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create directory: %w", err)
|
||||
}
|
||||
|
||||
newDir := &model.Object{
|
||||
Name: dirName,
|
||||
IsFolder: true,
|
||||
Modified: time.Now(),
|
||||
}
|
||||
return newDir, nil
|
||||
}
|
||||
|
||||
// Move implements driver.Move by delegating to the raw-API DirectMove
// implementation (see util.go).
func (d *ProtonDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return d.DirectMove(ctx, srcObj, dstDir)
}
|
||||
|
||||
// Rename implements driver.Rename by delegating to the raw-API
// DirectRename implementation after guarding against an uninitialized
// bridge.
func (d *ProtonDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {

	if d.protonDrive == nil {
		return nil, fmt.Errorf("protonDrive bridge is nil")
	}

	return d.DirectRename(ctx, srcObj, newName)
}
|
||||
|
||||
// Copy implements driver.Copy as download-then-reupload: Proton Drive
// has no server-side copy here, so the source file is decrypted into a
// temp file and re-uploaded into dstDir via Put. Directory copies are
// rejected.
func (d *ProtonDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if srcObj.IsDir() {
		return nil, fmt.Errorf("directory copy not supported")
	}

	srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), false)
	if err != nil {
		return nil, err
	}

	reader, linkSize, fileSystemAttrs, err := d.protonDrive.DownloadFile(ctx, srcLink, 0)
	if err != nil {
		return nil, fmt.Errorf("failed to download source file: %w", err)
	}
	defer reader.Close()

	// Prefer the decrypted size from file attributes; the link size may
	// reflect the encrypted payload.
	actualSize := linkSize
	if fileSystemAttrs != nil && fileSystemAttrs.Size > 0 {
		actualSize = fileSystemAttrs.Size
	}

	tempFile, err := utils.CreateTempFile(reader, actualSize)
	if err != nil {
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}
	defer tempFile.Close()

	updatedObj := &model.Object{
		Name: srcObj.GetName(),
		// Use the accurate and real size
		Size:     actualSize,
		Modified: srcObj.ModTime(),
		IsFolder: false,
	}

	// Wrap the temp file in a FileStreamer so Put can consume it.
	return d.Put(ctx, dstDir, &fileStreamer{
		ReadCloser: tempFile,
		obj:        updatedObj,
	}, nil)
}
|
||||
|
||||
func (d *ProtonDrive) Remove(ctx context.Context, obj model.Obj) error {
|
||||
link, err := d.searchByPath(ctx, obj.GetPath(), obj.IsDir())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if obj.IsDir() {
|
||||
return d.protonDrive.MoveFolderToTrashByID(ctx, link.LinkID, false)
|
||||
} else {
|
||||
return d.protonDrive.MoveFileToTrashByID(ctx, link.LinkID)
|
||||
}
|
||||
}
|
||||
|
||||
// Put implements driver.Put: resolves the destination folder's link ID,
// buffers the incoming stream into a temp file (the upload path needs a
// seekable source), then uploads it with progress reporting via up.
func (d *ProtonDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	var parentLinkID string

	if dstDir.GetPath() == "/" {
		parentLinkID = d.protonDrive.RootLink.LinkID
	} else {
		link, err := d.searchByPath(ctx, dstDir.GetPath(), true)
		if err != nil {
			return nil, err
		}
		parentLinkID = link.LinkID
	}

	// NOTE(review): this buffers the whole stream to disk before upload —
	// confirm acceptable for large files.
	tempFile, err := utils.CreateTempFile(file, file.GetSize())
	if err != nil {
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}
	defer tempFile.Close()

	err = d.uploadFile(ctx, parentLinkID, file.GetName(), tempFile, file.GetSize(), up)
	if err != nil {
		return nil, err
	}

	uploadedObj := &model.Object{
		Name:     file.GetName(),
		Size:     file.GetSize(),
		Modified: file.ModTime(),
		IsFolder: false,
	}
	return uploadedObj, nil
}
|
||||
|
||||
// GetArchiveMeta is not implemented; returning errs.NotImplement makes
// AList fall back to its internal archive tooling.
func (d *ProtonDrive) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

// ListArchive is not implemented; AList's internal archive tool is used.
func (d *ProtonDrive) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

// Extract is not implemented; AList's internal archive tool is used.
func (d *ProtonDrive) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

// ArchiveDecompress is not implemented; AList's internal archive tool is
// used.
func (d *ProtonDrive) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}

// Compile-time check that ProtonDrive satisfies the driver interface.
var _ driver.Driver = (*ProtonDrive)(nil)
|
||||
69
drivers/proton_drive/meta.go
Normal file
69
drivers/proton_drive/meta.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package protondrive
|
||||
|
||||
/*
|
||||
Package protondrive
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-18
|
||||
|
||||
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
|
||||
|
||||
The power of open-source, the force of teamwork and the magic of reverse engineering!
|
||||
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Да здравствует Родина))
|
||||
*/
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
// Addition holds the user-configurable options for the Proton Drive
// driver: account credentials plus an optional TOTP code for accounts
// with 2FA enabled.
type Addition struct {
	driver.RootPath
	//driver.RootID

	Username  string `json:"username" required:"true" type:"string"`
	Password  string `json:"password" required:"true" type:"string"`
	TwoFACode string `json:"two_fa_code,omitempty" type:"string"`
}
|
||||
|
||||
// Config mirrors a subset of driver.Config fields.
// NOTE(review): nothing in the visible code references this type — the
// package-level `config` below uses driver.Config directly — so it looks
// like dead code; confirm and consider removing.
type Config struct {
	Name        string `json:"name"`
	LocalSort   bool   `json:"local_sort"`
	OnlyLocal   bool   `json:"only_local"`
	OnlyProxy   bool   `json:"only_proxy"`
	NoCache     bool   `json:"no_cache"`
	NoUpload    bool   `json:"no_upload"`
	NeedMs      bool   `json:"need_ms"`
	DefaultRoot string `json:"default_root"`
}
|
||||
|
||||
// config is the static driver descriptor registered with the op registry;
// all toggles are left at their defaults and the root path is "/".
var config = driver.Config{
	Name:              "ProtonDrive",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}
|
||||
|
||||
// init registers the Proton Drive driver factory. The factory pre-fills
// the client-identification strings (API base, app/SDK versions, user
// agent) that Init later passes to the Proton API.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &ProtonDrive{
			apiBase:             "https://drive.proton.me/api",
			appVersion:          "windows-drive@1.11.3+rclone+proton",
			credentialCacheFile: ".prtcrd",
			protonJson:          "application/vnd.protonmail.v1+json",
			sdkVersion:          "js@0.3.0",
			userAgent:           "ProtonDrive/v1.70.0 (Windows NT 10.0.22000; Win64; x64)",
			webDriveAV:          "web-drive@5.2.0+0f69f7a8",
		}
	})
}
|
||||
124
drivers/proton_drive/types.go
Normal file
124
drivers/proton_drive/types.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package protondrive
|
||||
|
||||
/*
|
||||
Package protondrive
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-18
|
||||
|
||||
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
|
||||
|
||||
The power of open-source, the force of teamwork and the magic of reverse engineering!
|
||||
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Да здравствует Родина))
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/henrybear327/go-proton-api"
|
||||
)
|
||||
|
||||
// ProtonFile adapts a proton.Link (plus its decrypted name and folder
// flag) to AList's model.Obj interface.
type ProtonFile struct {
	*proton.Link
	Name     string
	IsFolder bool
}

// GetName returns the decrypted file/folder name.
func (p *ProtonFile) GetName() string {
	return p.Name
}

// GetSize returns the size recorded on the underlying link.
func (p *ProtonFile) GetSize() int64 {
	return p.Link.Size
}

// GetPath returns the name only.
// NOTE(review): this is not a full path — confirm callers only need the
// leaf name here.
func (p *ProtonFile) GetPath() string {
	return p.Name
}

// IsDir reports whether the entry is a folder.
func (p *ProtonFile) IsDir() bool {
	return p.IsFolder
}

// ModTime converts the link's Unix modify time to time.Time.
func (p *ProtonFile) ModTime() time.Time {
	return time.Unix(p.Link.ModifyTime, 0)
}

// CreateTime converts the link's Unix create time to time.Time.
func (p *ProtonFile) CreateTime() time.Time {
	return time.Unix(p.Link.CreateTime, 0)
}
|
||||
|
||||
// downloadInfo is the payload behind a temp-server download token: the
// Proton link to stream and the filename to present.
type downloadInfo struct {
	LinkID   string
	FileName string
}
|
||||
|
||||
// fileStreamer wraps an io.ReadCloser plus object metadata to satisfy
// model.FileStreamer; used by Copy to re-upload a downloaded file via
// Put. Range reads and temp-file caching are unsupported.
type fileStreamer struct {
	io.ReadCloser
	obj model.Obj
}

func (fs *fileStreamer) GetMimetype() string        { return "" }
func (fs *fileStreamer) NeedStore() bool            { return false }
func (fs *fileStreamer) IsForceStreamUpload() bool  { return false }
func (fs *fileStreamer) GetExist() model.Obj        { return nil }
func (fs *fileStreamer) SetExist(model.Obj)         {}

// RangeRead is unsupported; the stream can only be read sequentially.
func (fs *fileStreamer) RangeRead(http_range.Range) (io.Reader, error) {
	return nil, errors.New("not supported")
}

// CacheFullInTempFile is unsupported for this wrapper.
func (fs *fileStreamer) CacheFullInTempFile() (model.File, error) {
	return nil, errors.New("not supported")
}
func (fs *fileStreamer) SetTmpFile(r *os.File)   {}
func (fs *fileStreamer) GetFile() model.File     { return nil }

// Metadata accessors delegate to the wrapped model.Obj.
func (fs *fileStreamer) GetName() string       { return fs.obj.GetName() }
func (fs *fileStreamer) GetSize() int64        { return fs.obj.GetSize() }
func (fs *fileStreamer) GetPath() string       { return fs.obj.GetPath() }
func (fs *fileStreamer) IsDir() bool           { return fs.obj.IsDir() }
func (fs *fileStreamer) ModTime() time.Time    { return fs.obj.ModTime() }
// CreateTime falls back to the object's modify time (no create time kept).
func (fs *fileStreamer) CreateTime() time.Time { return fs.obj.ModTime() }
func (fs *fileStreamer) GetHash() utils.HashInfo { return fs.obj.GetHash() }
func (fs *fileStreamer) GetID() string           { return fs.obj.GetID() }
||||
|
||||
// httpRange is one resolved byte range from an HTTP Range header;
// start and end are inclusive offsets within the file.
type httpRange struct {
	start, end int64
}
|
||||
|
||||
// MoveRequest is the JSON body for Proton's link-move endpoint
// (/drive/v2/volumes/{vol}/links/{link}/move). Field names must match the
// API's expected casing exactly.
type MoveRequest struct {
	ParentLinkID            string  `json:"ParentLinkID"`
	NodePassphrase          string  `json:"NodePassphrase"`
	NodePassphraseSignature *string `json:"NodePassphraseSignature"`
	Name                    string  `json:"Name"`
	NameSignatureEmail      string  `json:"NameSignatureEmail"`
	Hash                    string  `json:"Hash"`
	OriginalHash            string  `json:"OriginalHash"`
	ContentHash             *string `json:"ContentHash"` // Maybe null
}
|
||||
|
||||
// progressReader wraps an io.Reader and reports cumulative progress (as a
// percentage of total) through callback on every Read.
type progressReader struct {
	reader   io.Reader
	total    int64 // expected total bytes; used as the percentage denominator
	current  int64 // bytes read so far
	callback driver.UpdateProgress
}
|
||||
|
||||
// RenameRequest is the JSON body for Proton's link-rename endpoint.
type RenameRequest struct {
	Name               string `json:"Name"`               // PGP encrypted name
	NameSignatureEmail string `json:"NameSignatureEmail"` // User's signature email
	Hash               string `json:"Hash"`               // New name hash
	OriginalHash       string `json:"OriginalHash"`       // Current name hash
}
|
||||
|
||||
// RenameResponse is the minimal response envelope returned by Proton's
// rename (and move) endpoints; Code == 1000 indicates success.
type RenameResponse struct {
	Code int `json:"Code"`
}
|
||||
918
drivers/proton_drive/util.go
Normal file
918
drivers/proton_drive/util.go
Normal file
@@ -0,0 +1,918 @@
|
||||
package protondrive
|
||||
|
||||
/*
|
||||
Package protondrive
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-18
|
||||
|
||||
Thanks to @henrybear327 for modded go-proton-api & Proton-API-Bridge
|
||||
|
||||
The power of open-source, the force of teamwork and the magic of reverse engineering!
|
||||
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Да здравствует Родина))
|
||||
*/
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ProtonMail/gopenpgp/v2/crypto"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/henrybear327/Proton-API-Bridge/common"
|
||||
"github.com/henrybear327/go-proton-api"
|
||||
)
|
||||
|
||||
func (d *ProtonDrive) loadCachedCredentials() (*common.ReusableCredentialData, error) {
|
||||
if d.credentialCacheFile == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if _, err := os.Stat(d.credentialCacheFile); os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(d.credentialCacheFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read credential cache file: %w", err)
|
||||
}
|
||||
|
||||
var credentials common.ReusableCredentialData
|
||||
if err := json.Unmarshal(data, &credentials); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse cached credentials: %w", err)
|
||||
}
|
||||
|
||||
if credentials.UID == "" || credentials.AccessToken == "" ||
|
||||
credentials.RefreshToken == "" || credentials.SaltedKeyPass == "" {
|
||||
return nil, fmt.Errorf("cached credentials are incomplete")
|
||||
}
|
||||
|
||||
return &credentials, nil
|
||||
}
|
||||
|
||||
// searchByPath resolves a slash-separated path to its proton.Link by walking
// the directory tree from the root, one component at a time. isFolder states
// whether the final component is expected to be a directory; every
// intermediate component is always matched as a directory.
func (d *ProtonDrive) searchByPath(ctx context.Context, fullPath string, isFolder bool) (*proton.Link, error) {
	if fullPath == "/" {
		return d.protonDrive.RootLink, nil
	}

	cleanPath := strings.Trim(fullPath, "/")
	pathParts := strings.Split(cleanPath, "/")

	currentLink := d.protonDrive.RootLink

	for i, part := range pathParts {
		// Only the last component may be a file; all others must be folders.
		isLastPart := i == len(pathParts)-1
		searchForFolder := !isLastPart || isFolder

		entries, err := d.protonDrive.ListDirectory(ctx, currentLink.LinkID)
		if err != nil {
			return nil, fmt.Errorf("failed to list directory: %w", err)
		}

		found := false
		for _, entry := range entries {
			// entry.Name is already decrypted!
			if entry.Name == part && entry.IsFolder == searchForFolder {
				currentLink = entry.Link
				found = true
				break
			}
		}

		if !found {
			return nil, fmt.Errorf("path not found: %s (looking for part: %s)", fullPath, part)
		}
	}

	return currentLink, nil
}
|
||||
|
||||
func (pr *progressReader) Read(p []byte) (int, error) {
|
||||
n, err := pr.reader.Read(p)
|
||||
pr.current += int64(n)
|
||||
|
||||
if pr.callback != nil {
|
||||
percentage := float64(pr.current) / float64(pr.total) * 100
|
||||
pr.callback(percentage)
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// uploadFile streams a local file to Proton Drive under parentLinkID,
// reporting progress through up.
//
// NOTE(review): the final argument to UploadFileByReader is hard-coded to 0
// even though size is known — confirm against the bridge API what that
// parameter means before relying on it.
func (d *ProtonDrive) uploadFile(ctx context.Context, parentLinkID, fileName string, file *os.File, size int64, up driver.UpdateProgress) error {

	fileInfo, err := file.Stat()
	if err != nil {
		return fmt.Errorf("failed to get file info: %w", err)
	}

	// Validate the parent link exists before starting the upload.
	_, err = d.protonDrive.GetLink(ctx, parentLinkID)
	if err != nil {
		return fmt.Errorf("failed to get parent link: %w", err)
	}

	// Buffered, progress-reporting wrapper around the source file.
	reader := &progressReader{
		reader:   bufio.NewReader(file),
		total:    size,
		current:  0,
		callback: up,
	}

	_, _, err = d.protonDrive.UploadFileByReader(ctx, parentLinkID, fileName, fileInfo.ModTime(), reader, 0)
	if err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}

	return nil
}
|
||||
|
||||
// ensureTempServer lazily starts a local HTTP server used to proxy decrypted
// Proton content downloads. It listens on a kernel-assigned free port
// (recorded in d.tempServerPort) and serves /temp/<token> via
// handleTempDownload.
//
// NOTE(review): the nil check and the field assignments below are not
// synchronized; concurrent callers could race and start two servers — confirm
// callers serialize this, or guard it with a mutex / sync.Once.
func (d *ProtonDrive) ensureTempServer() error {
	if d.tempServer != nil {

		// Already running
		return nil
	}

	// ":0" requests any free TCP port.
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		return err
	}
	d.tempServerPort = listener.Addr().(*net.TCPAddr).Port

	mux := http.NewServeMux()
	mux.HandleFunc("/temp/", d.handleTempDownload)

	d.tempServer = &http.Server{
		Handler: mux,
	}

	go func() {
		// Serve blocks until shutdown; its error (including
		// http.ErrServerClosed) is intentionally discarded here.
		d.tempServer.Serve(listener)
	}()

	return nil
}
|
||||
|
||||
func (d *ProtonDrive) handleTempDownload(w http.ResponseWriter, r *http.Request) {
|
||||
token := strings.TrimPrefix(r.URL.Path, "/temp/")
|
||||
|
||||
d.tokenMutex.RLock()
|
||||
info, exists := d.downloadTokens[token]
|
||||
d.tokenMutex.RUnlock()
|
||||
|
||||
if !exists {
|
||||
http.Error(w, "Invalid or expired token", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
link, err := d.protonDrive.GetLink(r.Context(), info.LinkID)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to get file link", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Get file size for range calculations
|
||||
_, _, attrs, err := d.protonDrive.DownloadFile(r.Context(), link, 0)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to get file info", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
fileSize := attrs.Size
|
||||
|
||||
rangeHeader := r.Header.Get("Range")
|
||||
if rangeHeader != "" {
|
||||
|
||||
// Parse range header like "bytes=0-1023" or "bytes=1024-"
|
||||
ranges, err := parseRange(rangeHeader, fileSize)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid range", http.StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
|
||||
if len(ranges) == 1 {
|
||||
|
||||
// Single range request, small
|
||||
start, end := ranges[0].start, ranges[0].end
|
||||
contentLength := end - start + 1
|
||||
|
||||
// Start download from offset
|
||||
reader, _, _, err := d.protonDrive.DownloadFile(r.Context(), link, start)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to start download", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, fileSize))
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", contentLength))
|
||||
w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(link.Name)))
|
||||
|
||||
// Partial content...
|
||||
// Setting fileName is more cosmetical here
|
||||
//.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", link.Name))
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", info.FileName))
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
|
||||
w.WriteHeader(http.StatusPartialContent)
|
||||
|
||||
io.CopyN(w, reader, contentLength)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Full file download (non-range request)
|
||||
reader, _, _, err := d.protonDrive.DownloadFile(r.Context(), link, 0)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to start download", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
// Set headers for full content
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", fileSize))
|
||||
w.Header().Set("Content-Type", mime.TypeByExtension(filepath.Ext(link.Name)))
|
||||
|
||||
// Setting fileName is needed since ProtonDrive fileName is more like a random string
|
||||
//w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", link.Name))
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", info.FileName))
|
||||
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
|
||||
// Stream the full file
|
||||
io.Copy(w, reader)
|
||||
}
|
||||
|
||||
func (d *ProtonDrive) generateDownloadToken(linkID, fileName string) string {
|
||||
token := fmt.Sprintf("%d_%s", time.Now().UnixNano(), linkID[:8])
|
||||
|
||||
d.tokenMutex.Lock()
|
||||
if d.downloadTokens == nil {
|
||||
d.downloadTokens = make(map[string]*downloadInfo)
|
||||
}
|
||||
|
||||
d.downloadTokens[token] = &downloadInfo{
|
||||
LinkID: linkID,
|
||||
FileName: fileName,
|
||||
}
|
||||
|
||||
d.tokenMutex.Unlock()
|
||||
|
||||
go func() {
|
||||
|
||||
// Token expires in 1 hour
|
||||
time.Sleep(1 * time.Hour)
|
||||
d.tokenMutex.Lock()
|
||||
|
||||
delete(d.downloadTokens, token)
|
||||
d.tokenMutex.Unlock()
|
||||
}()
|
||||
|
||||
return token
|
||||
}
|
||||
|
||||
func parseRange(rangeHeader string, size int64) ([]httpRange, error) {
|
||||
if !strings.HasPrefix(rangeHeader, "bytes=") {
|
||||
return nil, fmt.Errorf("invalid range header")
|
||||
}
|
||||
|
||||
rangeSpec := strings.TrimPrefix(rangeHeader, "bytes=")
|
||||
ranges := strings.Split(rangeSpec, ",")
|
||||
|
||||
var result []httpRange
|
||||
for _, r := range ranges {
|
||||
r = strings.TrimSpace(r)
|
||||
if strings.Contains(r, "-") {
|
||||
parts := strings.Split(r, "-")
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid range format")
|
||||
}
|
||||
|
||||
var start, end int64
|
||||
var err error
|
||||
|
||||
if parts[0] == "" {
|
||||
|
||||
// Suffix range (e.g., "-500")
|
||||
if parts[1] == "" {
|
||||
return nil, fmt.Errorf("invalid range format")
|
||||
}
|
||||
end = size - 1
|
||||
start, err = strconv.ParseInt(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
start = size - start
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
} else if parts[1] == "" {
|
||||
|
||||
// Prefix range (e.g., "500-")
|
||||
start, err = strconv.ParseInt(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
end = size - 1
|
||||
} else {
|
||||
// Full range (e.g., "0-1023")
|
||||
start, err = strconv.ParseInt(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
end, err = strconv.ParseInt(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if start >= size || end >= size || start > end {
|
||||
return nil, fmt.Errorf("range out of bounds")
|
||||
}
|
||||
|
||||
result = append(result, httpRange{start: start, end: end})
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// encryptFileName PGP-encrypts a cleartext name in the context of its parent
// folder's keyring, as required by Proton's rename/move APIs. It builds a
// throwaway CreateFileReq solely to reuse its SetName encryption logic.
func (d *ProtonDrive) encryptFileName(ctx context.Context, name string, parentLinkID string) (string, error) {

	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	// Temporary file (request)
	tempReq := proton.CreateFileReq{
		SignatureAddress: d.MainShare.Creator,
	}

	// Encrypt the filename
	err = tempReq.SetName(name, d.DefaultAddrKR, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to encrypt filename: %w", err)
	}

	return tempReq.Name, nil
}
|
||||
|
||||
// generateFileNameHash computes the Proton name hash of name under the
// parent folder's hash key. The hash key is decrypted from the parent link
// using the parent's node keyring and a signature-verification keyring built
// from the parent's signature email.
func (d *ProtonDrive) generateFileNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {

	parentLink, err := d.getLink(ctx, parentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get parent link: %w", err)
	}

	// Get parent node keyring
	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get parent keyring: %w", err)
	}

	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
	if err != nil {
		return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
	}

	parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
	if err != nil {
		return "", fmt.Errorf("failed to get parent hash key: %w", err)
	}

	nameHash, err := proton.GetNameHash(name, parentHashKey)
	if err != nil {
		return "", fmt.Errorf("failed to generate name hash: %w", err)
	}

	return nameHash, nil
}
|
||||
|
||||
func (d *ProtonDrive) getOriginalNameHash(link *proton.Link) (string, error) {
|
||||
if link == nil {
|
||||
return "", fmt.Errorf("link cannot be nil")
|
||||
}
|
||||
|
||||
if link.Hash == "" {
|
||||
return "", fmt.Errorf("link hash is empty")
|
||||
}
|
||||
|
||||
return link.Hash, nil
|
||||
}
|
||||
|
||||
func (d *ProtonDrive) getLink(ctx context.Context, linkID string) (*proton.Link, error) {
|
||||
if linkID == "" {
|
||||
return nil, fmt.Errorf("linkID cannot be empty")
|
||||
}
|
||||
|
||||
link, err := d.c.GetLink(ctx, d.MainShare.ShareID, linkID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &link, nil
|
||||
}
|
||||
|
||||
// getLinkKR derives the node keyring for a link. The root link (empty
// ParentLinkID) is unlocked with the main share keyring; any other link is
// unlocked with its parent's keyring, obtained by walking up the tree
// recursively. In both cases the link's signature email provides the
// verification keyring.
func (d *ProtonDrive) getLinkKR(ctx context.Context, link *proton.Link) (*crypto.KeyRing, error) {
	if link == nil {
		return nil, fmt.Errorf("link cannot be nil")
	}

	// Root Link or Root Dir
	if link.ParentLinkID == "" {
		signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
		if err != nil {
			return nil, err
		}
		return link.GetKeyRing(d.MainShareKR, signatureVerificationKR)
	}

	// Get parent keyring recursively
	parentLink, err := d.getLink(ctx, link.ParentLinkID)
	if err != nil {
		return nil, err
	}

	parentNodeKR, err := d.getLinkKR(ctx, parentLink)
	if err != nil {
		return nil, err
	}

	signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{link.SignatureEmail})
	if err != nil {
		return nil, err
	}

	return link.GetKeyRing(parentNodeKR, signatureVerificationKR)
}
|
||||
|
||||
var (
	// ErrKeyPassOrSaltedKeyPassMustBeNotNil is returned by getAccountKRs when
	// neither a key password nor a pre-salted key password is supplied.
	ErrKeyPassOrSaltedKeyPassMustBeNotNil = errors.New("either keyPass or saltedKeyPass must be not nil")
	// ErrFailedToUnlockUserKeys is returned when unlocking the account yields
	// no usable decryption entities.
	ErrFailedToUnlockUserKeys = errors.New("failed to unlock user keys")
)
|
||||
|
||||
// getAccountKRs unlocks the Proton account's keyrings. It returns the user
// keyring, per-address keyrings keyed by address ID, the address records
// keyed by email, and the salted key password (derived here if only the raw
// keyPass was supplied, so callers can cache it).
func getAccountKRs(ctx context.Context, c *proton.Client, keyPass, saltedKeyPass []byte) (*crypto.KeyRing, map[string]*crypto.KeyRing, map[string]proton.Address, []byte, error) {

	user, err := c.GetUser(ctx)
	if err != nil {
		return nil, nil, nil, nil, err
	}

	addrsArr, err := c.GetAddresses(ctx)
	if err != nil {
		return nil, nil, nil, nil, err
	}

	// Derive the salted password from keyPass when no cached value exists.
	if saltedKeyPass == nil {
		if keyPass == nil {
			return nil, nil, nil, nil, ErrKeyPassOrSaltedKeyPassMustBeNotNil
		}

		// Due to limitations, salts are stored using cacheCredentialToFile
		salts, err := c.GetSalts(ctx)
		if err != nil {
			return nil, nil, nil, nil, err
		}

		saltedKeyPass, err = salts.SaltForKey(keyPass, user.Keys.Primary().ID)
		if err != nil {
			return nil, nil, nil, nil, err
		}
	}

	userKR, addrKRs, err := proton.Unlock(user, addrsArr, saltedKeyPass, nil)
	if err != nil {
		return nil, nil, nil, nil, err

	} else if userKR.CountDecryptionEntities() == 0 {
		// Unlock "succeeded" but produced nothing usable.
		return nil, nil, nil, nil, ErrFailedToUnlockUserKeys
	}

	// Index addresses by email for quick lookup by callers.
	addrs := make(map[string]proton.Address)
	for _, addr := range addrsArr {
		addrs[addr.Email] = addr
	}

	return userKR, addrKRs, addrs, saltedKeyPass, nil
}
|
||||
|
||||
// getSignatureVerificationKeyring assembles a keyring for verifying
// signatures: it merges the address keyrings of the given emails (when known
// to this drive) plus any explicitly supplied keyrings. It fails if the
// result contains no keys at all.
func (d *ProtonDrive) getSignatureVerificationKeyring(emailAddresses []string, verificationAddrKRs ...*crypto.KeyRing) (*crypto.KeyRing, error) {
	ret, err := crypto.NewKeyRing(nil)
	if err != nil {
		return nil, err
	}

	// Add keys for every email we have address data for; unknown emails are
	// silently skipped.
	for _, emailAddress := range emailAddresses {
		if addr, ok := d.addrData[emailAddress]; ok {
			if addrKR, exists := d.addrKRs[addr.ID]; exists {
				err = d.addKeysFromKR(ret, addrKR)
				if err != nil {
					return nil, err
				}
			}
		}
	}

	for _, kr := range verificationAddrKRs {
		err = d.addKeysFromKR(ret, kr)
		if err != nil {
			return nil, err
		}
	}

	if ret.CountEntities() == 0 {
		return nil, fmt.Errorf("no keyring for signature verification")
	}

	return ret, nil
}
|
||||
|
||||
func (d *ProtonDrive) addKeysFromKR(kr *crypto.KeyRing, newKRs ...*crypto.KeyRing) error {
|
||||
for i := range newKRs {
|
||||
for _, key := range newKRs[i].GetKeys() {
|
||||
err := kr.AddKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DirectRename renames srcObj in place via Proton's rename API, bypassing
// the bridge. It encrypts the new name under the parent keyring, computes
// the new and original name hashes, and submits the rename request.
// The returned object mirrors srcObj with only the name changed.
func (d *ProtonDrive) DirectRename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	if d.MainShare == nil || d.DefaultAddrKR == nil {
		return nil, fmt.Errorf("missing required fields: MainShare=%v, DefaultAddrKR=%v",
			d.MainShare != nil, d.DefaultAddrKR != nil)
	}

	if d.protonDrive == nil {
		return nil, fmt.Errorf("protonDrive bridge is nil")
	}

	srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), srcObj.IsDir())
	if err != nil {
		return nil, fmt.Errorf("failed to find source: %w", err)
	}

	parentLinkID := srcLink.ParentLinkID
	if parentLinkID == "" {
		// Only the root has no parent, and the root cannot be renamed.
		return nil, fmt.Errorf("cannot rename root folder")
	}

	encryptedName, err := d.encryptFileName(ctx, newName, parentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt filename: %w", err)
	}

	newHash, err := d.generateFileNameHash(ctx, newName, parentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to generate new hash: %w", err)
	}

	originalHash, err := d.getOriginalNameHash(srcLink)
	if err != nil {
		return nil, fmt.Errorf("failed to get original hash: %w", err)
	}

	renameReq := RenameRequest{
		Name:               encryptedName,
		NameSignatureEmail: d.MainShare.Creator,
		Hash:               newHash,
		OriginalHash:       originalHash,
	}

	err = d.executeRenameAPI(ctx, srcLink.LinkID, renameReq)
	if err != nil {
		return nil, fmt.Errorf("rename API call failed: %w", err)
	}

	return &model.Object{
		Name:     newName,
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}
|
||||
|
||||
// executeRenameAPI performs the raw HTTP PUT against Proton's link-rename
// endpoint, using the drive's cached session credentials for auth. Both the
// HTTP status and the API-level response code (1000 = success) are checked.
func (d *ProtonDrive) executeRenameAPI(ctx context.Context, linkID string, req RenameRequest) error {

	renameURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/rename",
		d.MainShare.VolumeID, linkID)

	reqBody, err := json.Marshal(req)
	if err != nil {
		return fmt.Errorf("failed to marshal rename request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "PUT", renameURL, bytes.NewReader(reqBody))
	if err != nil {
		return fmt.Errorf("failed to create HTTP request: %w", err)
	}

	// Proton-specific headers plus the bearer token of the cached session.
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Accept", d.protonJson)
	httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
	httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
	httpReq.Header.Set("X-Pm-Uid", d.credentials.UID)
	httpReq.Header.Set("Authorization", "Bearer "+d.credentials.AccessToken)

	client := &http.Client{}
	resp, err := client.Do(httpReq)
	if err != nil {
		return fmt.Errorf("failed to execute rename request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("rename failed with status %d", resp.StatusCode)
	}

	var renameResp RenameResponse
	if err := json.NewDecoder(resp.Body).Decode(&renameResp); err != nil {
		return fmt.Errorf("failed to decode rename response: %w", err)
	}

	if renameResp.Code != 1000 {
		return fmt.Errorf("rename failed with code %d", renameResp.Code)
	}

	return nil
}
|
||||
|
||||
func (d *ProtonDrive) executeMoveAPI(ctx context.Context, linkID string, req MoveRequest) error {
|
||||
//fmt.Printf("DEBUG Move Request - Name: %s\n", req.Name)
|
||||
//fmt.Printf("DEBUG Move Request - Hash: %s\n", req.Hash)
|
||||
//fmt.Printf("DEBUG Move Request - OriginalHash: %s\n", req.OriginalHash)
|
||||
//fmt.Printf("DEBUG Move Request - ParentLinkID: %s\n", req.ParentLinkID)
|
||||
|
||||
//fmt.Printf("DEBUG Move Request - Name length: %d\n", len(req.Name))
|
||||
//fmt.Printf("DEBUG Move Request - NameSignatureEmail: %s\n", req.NameSignatureEmail)
|
||||
//fmt.Printf("DEBUG Move Request - ContentHash: %v\n", req.ContentHash)
|
||||
//fmt.Printf("DEBUG Move Request - NodePassphrase length: %d\n", len(req.NodePassphrase))
|
||||
//fmt.Printf("DEBUG Move Request - NodePassphraseSignature length: %d\n", len(req.NodePassphraseSignature))
|
||||
|
||||
//fmt.Printf("DEBUG Move Request - SrcLinkID: %s\n", linkID)
|
||||
//fmt.Printf("DEBUG Move Request - DstParentLinkID: %s\n", req.ParentLinkID)
|
||||
//fmt.Printf("DEBUG Move Request - ShareID: %s\n", d.MainShare.ShareID)
|
||||
|
||||
srcLink, _ := d.getLink(ctx, linkID)
|
||||
if srcLink != nil && srcLink.ParentLinkID == req.ParentLinkID {
|
||||
return fmt.Errorf("cannot move to same parent directory")
|
||||
}
|
||||
|
||||
moveURL := fmt.Sprintf(d.apiBase+"/drive/v2/volumes/%s/links/%s/move",
|
||||
d.MainShare.VolumeID, linkID)
|
||||
|
||||
reqBody, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal move request: %w", err)
|
||||
}
|
||||
|
||||
httpReq, err := http.NewRequestWithContext(ctx, "PUT", moveURL, bytes.NewReader(reqBody))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create HTTP request: %w", err)
|
||||
}
|
||||
|
||||
httpReq.Header.Set("Authorization", "Bearer "+d.credentials.AccessToken)
|
||||
httpReq.Header.Set("Accept", d.protonJson)
|
||||
httpReq.Header.Set("X-Pm-Appversion", d.webDriveAV)
|
||||
httpReq.Header.Set("X-Pm-Drive-Sdk-Version", d.sdkVersion)
|
||||
httpReq.Header.Set("X-Pm-Uid", d.credentials.UID)
|
||||
httpReq.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(httpReq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute move request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var moveResp RenameResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&moveResp); err != nil {
|
||||
return fmt.Errorf("failed to decode move response: %w", err)
|
||||
}
|
||||
|
||||
if moveResp.Code != 1000 {
|
||||
return fmt.Errorf("move operation failed with code: %d", moveResp.Code)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DirectMove moves srcObj into dstDir via Proton's move API, bypassing the
// bridge. It resolves both links, guards against moving a folder into its
// own subtree, re-encrypts the name and node passphrase for the new parent
// context, and submits the move request. The returned object mirrors srcObj.
func (d *ProtonDrive) DirectMove(ctx context.Context, srcObj model.Obj, dstDir model.Obj) (model.Obj, error) {
	srcLink, err := d.searchByPath(ctx, srcObj.GetPath(), srcObj.IsDir())
	if err != nil {
		return nil, fmt.Errorf("failed to find source: %w", err)
	}

	// Resolve the destination parent; "/" maps directly to the root link.
	var dstParentLinkID string
	if dstDir.GetPath() == "/" {
		dstParentLinkID = d.RootLink.LinkID
	} else {
		dstLink, err := d.searchByPath(ctx, dstDir.GetPath(), true)
		if err != nil {
			return nil, fmt.Errorf("failed to find destination: %w", err)
		}
		dstParentLinkID = dstLink.LinkID
	}

	if srcObj.IsDir() {

		// Check if destination is a descendant of source
		if err := d.checkCircularMove(ctx, srcLink.LinkID, dstParentLinkID); err != nil {
			return nil, err
		}
	}

	// Encrypt the filename for the new location
	encryptedName, err := d.encryptFileName(ctx, srcObj.GetName(), dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt filename: %w", err)
	}

	newHash, err := d.generateNameHash(ctx, srcObj.GetName(), dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to generate new hash: %w", err)
	}

	originalHash, err := d.getOriginalNameHash(srcLink)
	if err != nil {
		return nil, fmt.Errorf("failed to get original hash: %w", err)
	}

	// Re-encrypt node passphrase for new parent context
	reencryptedPassphrase, err := d.reencryptNodePassphrase(ctx, srcLink, dstParentLinkID)
	if err != nil {
		return nil, fmt.Errorf("failed to re-encrypt node passphrase: %w", err)
	}

	moveReq := MoveRequest{
		ParentLinkID:       dstParentLinkID,
		NodePassphrase:     reencryptedPassphrase,
		Name:               encryptedName,
		NameSignatureEmail: d.MainShare.Creator,
		Hash:               newHash,
		OriginalHash:       originalHash,
		ContentHash:        nil,

		// NOTE: sending NodePassphraseSignature here causes the API to
		// reject the request, so it is intentionally omitted.
		/* NodePassphraseSignature: srcLink.NodePassphraseSignature, */
	}

	err = d.executeMoveAPI(ctx, srcLink.LinkID, moveReq)
	if err != nil {
		return nil, fmt.Errorf("move API call failed: %w", err)
	}

	return &model.Object{
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: srcObj.ModTime(),
		IsFolder: srcObj.IsDir(),
	}, nil
}
|
||||
|
||||
// reencryptNodePassphrase re-wraps a link's node passphrase from its current
// parent's keyring to the destination parent's keyring, as required when
// moving a link: only the key packet changes, the data packet is preserved.
func (d *ProtonDrive) reencryptNodePassphrase(ctx context.Context, srcLink *proton.Link, dstParentLinkID string) (string, error) {
	// Get source parent link with metadata
	srcParentLink, err := d.getLink(ctx, srcLink.ParentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get source parent link: %w", err)
	}

	// Get source parent keyring using link object
	srcParentKR, err := d.getLinkKR(ctx, srcParentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get source parent keyring: %w", err)
	}

	// Get destination parent link with metadata
	dstParentLink, err := d.getLink(ctx, dstParentLinkID)
	if err != nil {
		return "", fmt.Errorf("failed to get destination parent link: %w", err)
	}

	// Get destination parent keyring using link object
	dstParentKR, err := d.getLinkKR(ctx, dstParentLink)
	if err != nil {
		return "", fmt.Errorf("failed to get destination parent keyring: %w", err)
	}

	// Re-encrypt the node passphrase from source parent context to destination parent context
	reencryptedPassphrase, err := reencryptKeyPacket(srcParentKR, dstParentKR, d.DefaultAddrKR, srcLink.NodePassphrase)
	if err != nil {
		return "", fmt.Errorf("failed to re-encrypt key packet: %w", err)
	}

	return reencryptedPassphrase, nil
}
|
||||
|
||||
func (d *ProtonDrive) generateNameHash(ctx context.Context, name string, parentLinkID string) (string, error) {
|
||||
|
||||
parentLink, err := d.getLink(ctx, parentLinkID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get parent link: %w", err)
|
||||
}
|
||||
|
||||
// Get parent node keyring
|
||||
parentNodeKR, err := d.getLinkKR(ctx, parentLink)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get parent keyring: %w", err)
|
||||
}
|
||||
|
||||
// Get signature verification keyring
|
||||
signatureVerificationKR, err := d.getSignatureVerificationKeyring([]string{parentLink.SignatureEmail}, parentNodeKR)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get signature verification keyring: %w", err)
|
||||
}
|
||||
|
||||
parentHashKey, err := parentLink.GetHashKey(parentNodeKR, signatureVerificationKR)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get parent hash key: %w", err)
|
||||
}
|
||||
|
||||
nameHash, err := proton.GetNameHash(name, parentHashKey)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate name hash: %w", err)
|
||||
}
|
||||
|
||||
return nameHash, nil
|
||||
}
|
||||
|
||||
// reencryptKeyPacket rewraps an armored PGP split message (a node
// passphrase) from the source keyring's context into the destination
// keyring's context. Only the key packet is re-encrypted; the data
// packet (the passphrase ciphertext itself) is reused unchanged.
// The third parameter (an address keyring) is accepted but unused.
func reencryptKeyPacket(srcKR, dstKR, _ *crypto.KeyRing, passphrase string) (string, error) { // addrKR (3)
	oldSplitMessage, err := crypto.NewPGPSplitMessageFromArmored(passphrase)
	if err != nil {
		return "", err
	}

	// Recover the symmetric session key with the source keyring.
	sessionKey, err := srcKR.DecryptSessionKey(oldSplitMessage.KeyPacket)
	if err != nil {
		return "", err
	}

	// Wrap the same session key for the destination keyring.
	newKeyPacket, err := dstKR.EncryptSessionKey(sessionKey)
	if err != nil {
		return "", err
	}

	// New key packet + original data packet = re-encrypted message.
	newSplitMessage := crypto.NewPGPSplitMessage(newKeyPacket, oldSplitMessage.DataPacket)

	return newSplitMessage.GetArmored()
}
|
||||
|
||||
func (d *ProtonDrive) checkCircularMove(ctx context.Context, srcLinkID, dstParentLinkID string) error {
|
||||
currentLinkID := dstParentLinkID
|
||||
|
||||
for currentLinkID != "" && currentLinkID != d.RootLink.LinkID {
|
||||
if currentLinkID == srcLinkID {
|
||||
return fmt.Errorf("cannot move folder into itself or its subfolder")
|
||||
}
|
||||
|
||||
currentLink, err := d.getLink(ctx, currentLinkID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentLinkID = currentLink.ParentLinkID
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -83,7 +83,7 @@ type Group struct {
|
||||
Type int `json:"type"`
|
||||
Name string `json:"name"`
|
||||
IsAdministrator int `json:"is_administrator"`
|
||||
Role int `json:"role"`
|
||||
Role []int `json:"role"`
|
||||
Avatar string `json:"avatar_url"`
|
||||
IsStick int `json:"is_stick"`
|
||||
Nickname string `json:"nickname"`
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/pkg/cron"
|
||||
"github.com/alist-org/alist/v3/server/common"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
@@ -32,6 +33,33 @@ type S3 struct {
|
||||
cron *cron.Cron
|
||||
}
|
||||
|
||||
// storageClassLookup maps the normalized (lowercase, underscore-separated)
// storage class names accepted in the driver configuration to the
// canonical S3 API storage class constants.
var storageClassLookup = map[string]string{
	"standard":            s3.ObjectStorageClassStandard,
	"reduced_redundancy":  s3.ObjectStorageClassReducedRedundancy,
	"glacier":             s3.ObjectStorageClassGlacier,
	"standard_ia":         s3.ObjectStorageClassStandardIa,
	"onezone_ia":          s3.ObjectStorageClassOnezoneIa,
	"intelligent_tiering": s3.ObjectStorageClassIntelligentTiering,
	"deep_archive":        s3.ObjectStorageClassDeepArchive,
	"outposts":            s3.ObjectStorageClassOutposts,
	"glacier_ir":          s3.ObjectStorageClassGlacierIr,
	"snow":                s3.ObjectStorageClassSnow,
	"express_onezone":     s3.ObjectStorageClassExpressOnezone,
}
|
||||
|
||||
func (d *S3) resolveStorageClass() *string {
|
||||
value := strings.TrimSpace(d.StorageClass)
|
||||
if value == "" {
|
||||
return nil
|
||||
}
|
||||
normalized := strings.ToLower(strings.ReplaceAll(value, "-", "_"))
|
||||
if v, ok := storageClassLookup[normalized]; ok {
|
||||
return aws.String(v)
|
||||
}
|
||||
log.Warnf("s3: unknown storage class %q, using raw value", d.StorageClass)
|
||||
return aws.String(value)
|
||||
}
|
||||
|
||||
// Config returns the static driver configuration registered for this
// storage implementation.
func (d *S3) Config() driver.Config {
	return d.config
}
|
||||
@@ -179,8 +207,14 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
|
||||
}),
|
||||
ContentType: &contentType,
|
||||
}
|
||||
if storageClass := d.resolveStorageClass(); storageClass != nil {
|
||||
input.StorageClass = storageClass
|
||||
}
|
||||
_, err := uploader.UploadWithContext(ctx, input)
|
||||
return err
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*S3)(nil)
|
||||
var (
|
||||
_ driver.Driver = (*S3)(nil)
|
||||
_ driver.Other = (*S3)(nil)
|
||||
)
|
||||
|
||||
@@ -21,6 +21,7 @@ type Addition struct {
|
||||
ListObjectVersion string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
|
||||
RemoveBucket bool `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
|
||||
AddFilenameToDisposition bool `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
|
||||
StorageClass string `json:"storage_class" type:"select" options:",standard,standard_ia,onezone_ia,intelligent_tiering,glacier,glacier_ir,deep_archive,archive" help:"Storage class for new objects. AWS and Tencent COS support different subsets (COS uses ARCHIVE/DEEP_ARCHIVE)."`
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
286
drivers/s3/other.go
Normal file
286
drivers/s3/other.go
Normal file
@@ -0,0 +1,286 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
)
|
||||
|
||||
// Method names accepted by (*S3).Other for archive/restore management.
const (
	OtherMethodArchive       = "archive"        // change an object's storage class
	OtherMethodArchiveStatus = "archive_status" // report the current storage class
	OtherMethodThaw          = "thaw"           // request a restore of an archived object
	OtherMethodThawStatus    = "thaw_status"    // report restore progress
)

// ArchiveRequest is the payload for the "archive" method.
type ArchiveRequest struct {
	// StorageClass is the target class; it is normalized via
	// NormalizeStorageClass before use and must be non-empty.
	StorageClass string `json:"storage_class"`
}

// ThawRequest is the payload for the "thaw" method.
type ThawRequest struct {
	// Days the restored copy stays available; values <= 0 fall back to 1.
	Days int64 `json:"days"`
	// Tier selects the Glacier retrieval tier (bulk/standard/expedited);
	// empty or "default" omits the tier.
	Tier string `json:"tier"`
}

// ObjectDescriptor identifies the object an operation acted on.
type ObjectDescriptor struct {
	Path   string `json:"path"`   // driver path of the object
	Bucket string `json:"bucket"` // S3 bucket name
	Key    string `json:"key"`    // object key within the bucket
}

// ArchiveResponse is returned by the "archive" and "archive_status"
// methods.
type ArchiveResponse struct {
	Action       string           `json:"action"`
	Object       ObjectDescriptor `json:"object"`
	StorageClass string           `json:"storage_class"`
	RequestID    string           `json:"request_id,omitempty"`
	VersionID    string           `json:"version_id,omitempty"`
	ETag         string           `json:"etag,omitempty"`
	LastModified string           `json:"last_modified,omitempty"`
}

// ThawResponse is returned by the "thaw" and "thaw_status" methods.
type ThawResponse struct {
	Action    string           `json:"action"`
	Object    ObjectDescriptor `json:"object"`
	RequestID string           `json:"request_id,omitempty"`
	Status    *RestoreStatus   `json:"status,omitempty"`
}

// RestoreStatus is the parsed form of the x-amz-restore response header.
type RestoreStatus struct {
	Ongoing bool   `json:"ongoing"`          // restore still in progress
	Expiry  string `json:"expiry,omitempty"` // expiry time when present in the header
	Raw     string `json:"raw"`              // original header value, verbatim
}
|
||||
|
||||
func (d *S3) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
if args.Obj == nil {
|
||||
return nil, fmt.Errorf("missing object reference")
|
||||
}
|
||||
if args.Obj.IsDir() {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
|
||||
switch strings.ToLower(strings.TrimSpace(args.Method)) {
|
||||
case "archive":
|
||||
return d.archive(ctx, args)
|
||||
case "archive_status":
|
||||
return d.archiveStatus(ctx, args)
|
||||
case "thaw":
|
||||
return d.thaw(ctx, args)
|
||||
case "thaw_status":
|
||||
return d.thawStatus(ctx, args)
|
||||
default:
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
}
|
||||
|
||||
// archive changes the storage class of an existing object by issuing a
// CopyObject of the object onto itself with the requested class, the
// standard S3 technique for in-place class transitions. Object metadata
// is preserved via MetadataDirectiveCopy. The response echoes the class
// actually recorded by the service when a follow-up HEAD succeeds.
func (d *S3) archive(ctx context.Context, args model.OtherArgs) (interface{}, error) {
	key := getKey(args.Obj.GetPath(), false)
	payload := ArchiveRequest{}
	if err := DecodeOtherArgs(args.Data, &payload); err != nil {
		return nil, fmt.Errorf("parse archive request: %w", err)
	}
	if payload.StorageClass == "" {
		return nil, fmt.Errorf("storage_class is required")
	}
	storageClass := NormalizeStorageClass(payload.StorageClass)
	input := &s3.CopyObjectInput{
		Bucket: &d.Bucket,
		Key:    &key,
		// CopySource must be the URL-escaped "bucket/key" of the source.
		CopySource:        aws.String(url.PathEscape(d.Bucket + "/" + key)),
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
		StorageClass:      aws.String(storageClass),
	}
	copyReq, output := d.client.CopyObjectRequest(input)
	copyReq.SetContext(ctx)
	if err := copyReq.Send(); err != nil {
		return nil, err
	}

	resp := ArchiveResponse{
		Action:       "archive",
		Object:       d.describeObject(args.Obj, key),
		StorageClass: storageClass,
		RequestID:    copyReq.RequestID,
	}
	if output.VersionId != nil {
		resp.VersionID = aws.StringValue(output.VersionId)
	}
	if result := output.CopyObjectResult; result != nil {
		resp.ETag = aws.StringValue(result.ETag)
		if result.LastModified != nil {
			resp.LastModified = result.LastModified.UTC().Format(time.RFC3339)
		}
	}
	// Best effort: report the class the service actually recorded. A HEAD
	// failure is deliberately ignored so the successful copy is still
	// reported with the requested class.
	if status, err := d.describeObjectStatus(ctx, key); err == nil {
		if status.StorageClass != "" {
			resp.StorageClass = status.StorageClass
		}
	}
	return resp, nil
}
|
||||
|
||||
func (d *S3) archiveStatus(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
key := getKey(args.Obj.GetPath(), false)
|
||||
status, err := d.describeObjectStatus(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ArchiveResponse{
|
||||
Action: "archive_status",
|
||||
Object: d.describeObject(args.Obj, key),
|
||||
StorageClass: status.StorageClass,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// thaw issues a RestoreObject request to make an archived object
// temporarily readable. Days defaults to 1 when missing or non-positive;
// a Glacier retrieval tier is attached only when NormalizeRestoreTier
// recognizes one. The response includes the current restore status on a
// best-effort basis.
func (d *S3) thaw(ctx context.Context, args model.OtherArgs) (interface{}, error) {
	key := getKey(args.Obj.GetPath(), false)
	payload := ThawRequest{Days: 1}
	if err := DecodeOtherArgs(args.Data, &payload); err != nil {
		return nil, fmt.Errorf("parse thaw request: %w", err)
	}
	if payload.Days <= 0 {
		payload.Days = 1
	}
	restoreRequest := &s3.RestoreRequest{
		Days: aws.Int64(payload.Days),
	}
	if tier := NormalizeRestoreTier(payload.Tier); tier != "" {
		restoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{Tier: aws.String(tier)}
	}
	input := &s3.RestoreObjectInput{
		Bucket:         &d.Bucket,
		Key:            &key,
		RestoreRequest: restoreRequest,
	}
	restoreReq, _ := d.client.RestoreObjectRequest(input)
	restoreReq.SetContext(ctx)
	if err := restoreReq.Send(); err != nil {
		return nil, err
	}
	// Best effort: a HEAD failure here is deliberately ignored because the
	// restore request itself already succeeded.
	status, _ := d.describeObjectStatus(ctx, key)
	resp := ThawResponse{
		Action:    "thaw",
		Object:    d.describeObject(args.Obj, key),
		RequestID: restoreReq.RequestID,
	}
	if status != nil {
		resp.Status = status.Restore
	}
	return resp, nil
}
|
||||
|
||||
func (d *S3) thawStatus(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
key := getKey(args.Obj.GetPath(), false)
|
||||
status, err := d.describeObjectStatus(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ThawResponse{
|
||||
Action: "thaw_status",
|
||||
Object: d.describeObject(args.Obj, key),
|
||||
Status: status.Restore,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *S3) describeObject(obj model.Obj, key string) ObjectDescriptor {
|
||||
return ObjectDescriptor{
|
||||
Path: obj.GetPath(),
|
||||
Bucket: d.Bucket,
|
||||
Key: key,
|
||||
}
|
||||
}
|
||||
|
||||
// objectStatus captures the archive-related state of an object as read
// from a HEAD response.
type objectStatus struct {
	StorageClass string         // raw StorageClass header value ("" when the header is absent)
	Restore      *RestoreStatus // parsed x-amz-restore header; nil when the header is absent
}
|
||||
|
||||
// describeObjectStatus HEADs the object and returns its storage class
// and parsed restore status. aws.StringValue maps a missing
// StorageClass header to the empty string.
func (d *S3) describeObjectStatus(ctx context.Context, key string) (*objectStatus, error) {
	head, err := d.client.HeadObjectWithContext(ctx, &s3.HeadObjectInput{Bucket: &d.Bucket, Key: &key})
	if err != nil {
		return nil, err
	}
	status := &objectStatus{
		StorageClass: aws.StringValue(head.StorageClass),
		Restore:      parseRestoreHeader(head.Restore),
	}
	return status, nil
}
|
||||
|
||||
func parseRestoreHeader(header *string) *RestoreStatus {
|
||||
if header == nil {
|
||||
return nil
|
||||
}
|
||||
value := strings.TrimSpace(*header)
|
||||
if value == "" {
|
||||
return nil
|
||||
}
|
||||
status := &RestoreStatus{Raw: value}
|
||||
parts := strings.Split(value, ",")
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(part, "ongoing-request=") {
|
||||
status.Ongoing = strings.Contains(part, "\"true\"")
|
||||
}
|
||||
if strings.HasPrefix(part, "expiry-date=") {
|
||||
expiry := strings.Trim(part[len("expiry-date="):], "\"")
|
||||
if expiry != "" {
|
||||
if t, err := time.Parse(time.RFC1123, expiry); err == nil {
|
||||
status.Expiry = t.UTC().Format(time.RFC3339)
|
||||
} else {
|
||||
status.Expiry = expiry
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
// DecodeOtherArgs re-marshals the loosely typed request payload through
// JSON into target, so that map-shaped and struct-shaped inputs both
// decode into the expected request type. A nil payload is a no-op and
// leaves target untouched.
func DecodeOtherArgs(data interface{}, target interface{}) error {
	if data == nil {
		return nil
	}
	encoded, err := json.Marshal(data)
	if err != nil {
		return err
	}
	return json.Unmarshal(encoded, target)
}
|
||||
|
||||
func NormalizeStorageClass(value string) string {
|
||||
normalized := strings.ToLower(strings.TrimSpace(strings.ReplaceAll(value, "-", "_")))
|
||||
if normalized == "" {
|
||||
return value
|
||||
}
|
||||
if v, ok := storageClassLookup[normalized]; ok {
|
||||
return v
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func NormalizeRestoreTier(value string) string {
|
||||
normalized := strings.ToLower(strings.TrimSpace(value))
|
||||
switch normalized {
|
||||
case "", "default":
|
||||
return ""
|
||||
case "bulk":
|
||||
return s3.TierBulk
|
||||
case "standard":
|
||||
return s3.TierStandard
|
||||
case "expedited":
|
||||
return s3.TierExpedited
|
||||
default:
|
||||
return value
|
||||
}
|
||||
}
|
||||
@@ -109,13 +109,13 @@ func (d *S3) listV1(prefix string, args model.ListArgs) ([]model.Obj, error) {
|
||||
if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
|
||||
continue
|
||||
}
|
||||
file := model.Object{
|
||||
file := &model.Object{
|
||||
//Id: *object.Key,
|
||||
Name: name,
|
||||
Size: *object.Size,
|
||||
Modified: *object.LastModified,
|
||||
}
|
||||
files = append(files, &file)
|
||||
files = append(files, model.WrapObjStorageClass(file, aws.StringValue(object.StorageClass)))
|
||||
}
|
||||
if listObjectsResult.IsTruncated == nil {
|
||||
return nil, errors.New("IsTruncated nil")
|
||||
@@ -164,13 +164,13 @@ func (d *S3) listV2(prefix string, args model.ListArgs) ([]model.Obj, error) {
|
||||
if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
|
||||
continue
|
||||
}
|
||||
file := model.Object{
|
||||
file := &model.Object{
|
||||
//Id: *object.Key,
|
||||
Name: name,
|
||||
Size: *object.Size,
|
||||
Modified: *object.LastModified,
|
||||
}
|
||||
files = append(files, &file)
|
||||
files = append(files, model.WrapObjStorageClass(file, aws.StringValue(object.StorageClass)))
|
||||
}
|
||||
if !aws.BoolValue(listObjectsResult.IsTruncated) {
|
||||
break
|
||||
@@ -202,6 +202,9 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error {
|
||||
CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)),
|
||||
Key: &dstKey,
|
||||
}
|
||||
if storageClass := d.resolveStorageClass(); storageClass != nil {
|
||||
input.StorageClass = storageClass
|
||||
}
|
||||
_, err := d.client.CopyObject(input)
|
||||
return err
|
||||
}
|
||||
|
||||
28
go.mod
28
go.mod
@@ -3,10 +3,13 @@ module github.com/alist-org/alist/v3
|
||||
go 1.23.4
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
|
||||
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
|
||||
github.com/KirCute/sftpd-alist v0.0.12
|
||||
github.com/ProtonMail/go-crypto v1.0.0
|
||||
github.com/SheltonZhu/115driver v1.0.34
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.4
|
||||
github.com/SheltonZhu/115driver v1.1.2
|
||||
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21
|
||||
github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
|
||||
github.com/alist-org/gofakes3 v0.0.7
|
||||
@@ -36,6 +39,8 @@ require (
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/hekmon/transmissionrpc/v3 v3.0.0
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0
|
||||
github.com/henrybear327/go-proton-api v1.0.0
|
||||
github.com/hirochachacha/go-smb2 v1.1.0
|
||||
github.com/ipfs/go-ipfs-api v0.7.0
|
||||
github.com/jlaffaye/ftp v0.2.0
|
||||
@@ -80,9 +85,19 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
|
||||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
|
||||
github.com/ProtonMail/go-srp v0.0.7 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.8.1 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||
github.com/bradenaw/juniper v0.15.2 // indirect
|
||||
github.com/cronokirby/saferith v0.33.0 // indirect
|
||||
github.com/emersion/go-message v0.18.0 // indirect
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
|
||||
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
|
||||
github.com/relvacode/iso8601 v1.3.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -109,7 +124,6 @@ require (
|
||||
github.com/ipfs/boxo v0.12.0 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
|
||||
github.com/microcosm-cc/bluemonday v1.0.27
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78
|
||||
@@ -268,4 +282,8 @@ require (
|
||||
lukechampine.com/blake3 v1.1.7 // indirect
|
||||
)
|
||||
|
||||
// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go
|
||||
replace github.com/ProtonMail/go-proton-api => github.com/henrybear327/go-proton-api v1.0.0
|
||||
|
||||
replace github.com/cronokirby/saferith => github.com/Da3zKi7/saferith v0.33.0-fixed
|
||||
|
||||
replace github.com/SheltonZhu/115driver => github.com/okatu-loli/115driver v1.1.2
|
||||
|
||||
62
go.sum
62
go.sum
@@ -21,27 +21,50 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Da3zKi7/saferith v0.33.0-fixed h1:fnIWTk7EP9mZAICf7aQjeoAwpfrlCrkOvqmi6CbWdTk=
|
||||
github.com/Da3zKi7/saferith v0.33.0-fixed/go.mod h1:QKJhjoqUtBsXCAVEjw38mFqoi7DebT7kthcD7UzbnoA=
|
||||
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po=
|
||||
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E=
|
||||
github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnOuISdg=
|
||||
github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
|
||||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcMC83lg5rTo9Y0PnPItE61JSfvMyIcANwk=
|
||||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
|
||||
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
||||
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
|
||||
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.4 h1:Vz/8+HViFFnf2A6XX8JOvZMrA6F5puwNvvF21O1mRlo=
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.4/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
|
||||
github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM=
|
||||
github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ=
|
||||
github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
|
||||
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
|
||||
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
|
||||
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
|
||||
github.com/SheltonZhu/115driver v1.0.34 h1:zhMLp4vgq7GksqvSxQQDOVfK6EOHldQl4b2n8tnZ+EE=
|
||||
github.com/SheltonZhu/115driver v1.0.34/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
|
||||
github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A=
|
||||
github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
|
||||
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY=
|
||||
@@ -63,6 +86,9 @@ github.com/andreburgaud/crypt2go v1.8.0/go.mod h1:L5nfShQ91W78hOWhUH2tlGRPO+POAP
|
||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
|
||||
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
|
||||
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
|
||||
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
@@ -128,6 +154,9 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
|
||||
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bradenaw/juniper v0.15.2 h1:0JdjBGEF2jP1pOxmlNIrPhAoQN7Ng5IMAY5D0PHMW4U=
|
||||
github.com/bradenaw/juniper v0.15.2/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
|
||||
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
||||
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
||||
@@ -158,6 +187,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
|
||||
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
|
||||
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
|
||||
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
|
||||
@@ -172,7 +202,6 @@ github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03V
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg=
|
||||
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
@@ -194,6 +223,12 @@ github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707/go.mod h1:qssHWj6
|
||||
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
|
||||
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM=
|
||||
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
|
||||
github.com/emersion/go-message v0.18.0 h1:7LxAXHRpSeoO/Wom3ZApVZYG7c3d17yCScYce8WiXA8=
|
||||
github.com/emersion/go-message v0.18.0/go.mod h1:Zi69ACvzaoV/MBnrxfVBPV3xWEuCmC2nEN39oJF4B8A=
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 h1:IbFBtwoTQyw0fIM5xv1HF+Y+3ZijDR839WMulgxCcUY=
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594/go.mod h1:aqO8z8wPrjkscevZJFVE1wXJrLpC5LtJG7fqLOsPb2U=
|
||||
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 h1:ATgqloALX6cHCranzkLb8/zjivwQ9DWWDCQRnxTPfaA=
|
||||
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
@@ -334,6 +369,10 @@ github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI
|
||||
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
|
||||
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
|
||||
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
|
||||
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
|
||||
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
|
||||
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
|
||||
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
@@ -398,6 +437,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/larksuite/oapi-sdk-go/v3 v3.3.1 h1:DLQQEgHUAGZB6RVlceB1f6A94O206exxW2RIMH+gMUc=
|
||||
github.com/larksuite/oapi-sdk-go/v3 v3.3.1/go.mod h1:ZEplY+kwuIrj/nqw5uSCINNATcH3KdxSN7y+UxYY5fI=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
@@ -483,6 +524,8 @@ github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
|
||||
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 h1:MYzLheyVx1tJVDqfu3YnN4jtnyALNzLvwl+f58TcvQY=
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
|
||||
github.com/okatu-loli/115driver v1.1.2 h1:XZT3r/51SZRQGzre2IeA+0/k4T1FneqArdhE4Wd600Q=
|
||||
github.com/okatu-loli/115driver v1.1.2/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
|
||||
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
|
||||
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
|
||||
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
|
||||
@@ -492,6 +535,8 @@ github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -520,6 +565,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/rclone/rclone v1.67.0 h1:yLRNgHEG2vQ60HCuzFqd0hYwKCRuWuvPUhvhMJ2jI5E=
|
||||
github.com/rclone/rclone v1.67.0/go.mod h1:Cb3Ar47M/SvwfhAjZTbVXdtrP/JLtPFCq2tkdtBVC6w=
|
||||
github.com/relvacode/iso8601 v1.3.0 h1:HguUjsGpIMh/zsTczGN3DVJFxTU/GX+MMmzcKoMO7ko=
|
||||
github.com/relvacode/iso8601 v1.3.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
|
||||
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
|
||||
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
@@ -649,6 +696,8 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX
|
||||
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
|
||||
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
|
||||
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
|
||||
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
|
||||
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
|
||||
@@ -726,6 +775,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
@@ -734,13 +784,12 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
|
||||
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -784,6 +833,7 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -796,6 +846,7 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -812,6 +863,7 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
|
||||
@@ -3,6 +3,7 @@ package data
|
||||
import "github.com/alist-org/alist/v3/cmd/flags"
|
||||
|
||||
func InitData() {
|
||||
initRoles()
|
||||
initUser()
|
||||
initSettings()
|
||||
initTasks()
|
||||
|
||||
@@ -26,7 +26,7 @@ func initDevData() {
|
||||
Username: "Noah",
|
||||
Password: "hsu",
|
||||
BasePath: "/data",
|
||||
Role: 0,
|
||||
Role: nil,
|
||||
Permission: 512,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
52
internal/bootstrap/data/role.go
Normal file
52
internal/bootstrap/data/role.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package data
|
||||
|
||||
// initRoles creates the default admin and guest roles if missing.
|
||||
// These roles are essential and must not be modified or removed.
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
func initRoles() {
|
||||
guestRole, err := op.GetRoleByName("guest")
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
guestRole = &model.Role{
|
||||
ID: uint(model.GUEST),
|
||||
Name: "guest",
|
||||
Description: "Guest",
|
||||
PermissionScopes: []model.PermissionEntry{
|
||||
{Path: "/", Permission: 0},
|
||||
},
|
||||
}
|
||||
if err := op.CreateRole(guestRole); err != nil {
|
||||
utils.Log.Fatalf("[init role] Failed to create guest role: %v", err)
|
||||
}
|
||||
} else {
|
||||
utils.Log.Fatalf("[init role] Failed to get guest role: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = op.GetRoleByName("admin")
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
adminRole := &model.Role{
|
||||
ID: uint(model.ADMIN),
|
||||
Name: "admin",
|
||||
Description: "Administrator",
|
||||
PermissionScopes: []model.PermissionEntry{
|
||||
{Path: "/", Permission: 0xFFFF},
|
||||
},
|
||||
}
|
||||
if err := op.CreateRole(adminRole); err != nil {
|
||||
utils.Log.Fatalf("[init role] Failed to create admin role: %v", err)
|
||||
}
|
||||
} else {
|
||||
utils.Log.Fatalf("[init role] Failed to get admin role: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -91,6 +91,7 @@ func InitialSettings() []model.SettingItem {
|
||||
} else {
|
||||
token = random.Token()
|
||||
}
|
||||
defaultRoleID := strconv.Itoa(model.GUEST)
|
||||
initialSettingItems = []model.SettingItem{
|
||||
// site settings
|
||||
{Key: conf.VERSION, Value: conf.Version, Type: conf.TypeString, Group: model.SITE, Flag: model.READONLY},
|
||||
@@ -103,6 +104,10 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: conf.AllowIndexed, Value: "false", Type: conf.TypeBool, Group: model.SITE},
|
||||
{Key: conf.AllowMounted, Value: "true", Type: conf.TypeBool, Group: model.SITE},
|
||||
{Key: conf.RobotsTxt, Value: "User-agent: *\nAllow: /", Type: conf.TypeText, Group: model.SITE},
|
||||
{Key: conf.AllowRegister, Value: "false", Type: conf.TypeBool, Group: model.SITE},
|
||||
{Key: conf.DefaultRole, Value: defaultRoleID, Type: conf.TypeSelect, Group: model.SITE},
|
||||
// newui settings
|
||||
{Key: conf.UseNewui, Value: "false", Type: conf.TypeBool, Group: model.SITE},
|
||||
// style settings
|
||||
{Key: conf.Logo, Value: "https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg", Type: conf.TypeText, Group: model.STYLE},
|
||||
{Key: conf.Favicon, Value: "https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg", Type: conf.TypeString, Group: model.STYLE},
|
||||
@@ -155,11 +160,14 @@ func InitialSettings() []model.SettingItem {
|
||||
([[:xdigit:]]{1,4}(?::[[:xdigit:]]{1,4}){7}|::|:(?::[[:xdigit:]]{1,4}){1,6}|[[:xdigit:]]{1,4}:(?::[[:xdigit:]]{1,4}){1,5}|(?:[[:xdigit:]]{1,4}:){2}(?::[[:xdigit:]]{1,4}){1,4}|(?:[[:xdigit:]]{1,4}:){3}(?::[[:xdigit:]]{1,4}){1,3}|(?:[[:xdigit:]]{1,4}:){4}(?::[[:xdigit:]]{1,4}){1,2}|(?:[[:xdigit:]]{1,4}:){5}:[[:xdigit:]]{1,4}|(?:[[:xdigit:]]{1,4}:){1,6}:)
|
||||
(?U)access_token=(.*)&`,
|
||||
Type: conf.TypeText, Group: model.GLOBAL, Flag: model.PRIVATE},
|
||||
{Key: conf.OcrApi, Value: "https://api.nn.ci/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL},
|
||||
{Key: conf.OcrApi, Value: "https://api.alistgo.com/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL},
|
||||
{Key: conf.FilenameCharMapping, Value: `{"/": "|"}`, Type: conf.TypeText, Group: model.GLOBAL},
|
||||
{Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL},
|
||||
{Key: conf.IgnoreDirectLinkParams, Value: "sign,alist_ts", Type: conf.TypeString, Group: model.GLOBAL},
|
||||
{Key: conf.WebauthnLoginEnabled, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},
|
||||
{Key: conf.MaxDevices, Value: "0", Type: conf.TypeNumber, Group: model.GLOBAL},
|
||||
{Key: conf.DeviceEvictPolicy, Value: "deny", Type: conf.TypeSelect, Options: "deny,evict_oldest", Group: model.GLOBAL},
|
||||
{Key: conf.DeviceSessionTTL, Value: "86400", Type: conf.TypeNumber, Group: model.GLOBAL},
|
||||
|
||||
// single settings
|
||||
{Key: conf.Token, Value: token, Type: conf.TypeString, Group: model.SINGLE, Flag: model.PRIVATE},
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/db"
|
||||
"os"
|
||||
|
||||
"github.com/alist-org/alist/v3/cmd/flags"
|
||||
"github.com/alist-org/alist/v3/internal/db"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
@@ -14,45 +14,16 @@ import (
|
||||
)
|
||||
|
||||
func initUser() {
|
||||
admin, err := op.GetAdmin()
|
||||
adminPassword := random.String(8)
|
||||
envpass := os.Getenv("ALIST_ADMIN_PASSWORD")
|
||||
if flags.Dev {
|
||||
adminPassword = "admin"
|
||||
} else if len(envpass) > 0 {
|
||||
adminPassword = envpass
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
salt := random.String(16)
|
||||
admin = &model.User{
|
||||
Username: "admin",
|
||||
Salt: salt,
|
||||
PwdHash: model.TwoHashPwd(adminPassword, salt),
|
||||
Role: model.ADMIN,
|
||||
BasePath: "/",
|
||||
Authn: "[]",
|
||||
// 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives)
|
||||
Permission: 0x30FF,
|
||||
}
|
||||
if err := op.CreateUser(admin); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
utils.Log.Infof("Successfully created the admin user and the initial password is: %s", adminPassword)
|
||||
}
|
||||
} else {
|
||||
utils.Log.Fatalf("[init user] Failed to get admin user: %v", err)
|
||||
}
|
||||
}
|
||||
guest, err := op.GetGuest()
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
salt := random.String(16)
|
||||
guestRole, _ := op.GetRoleByName("guest")
|
||||
guest = &model.User{
|
||||
Username: "guest",
|
||||
PwdHash: model.TwoHashPwd("guest", salt),
|
||||
Salt: salt,
|
||||
Role: model.GUEST,
|
||||
Role: model.Roles{int(guestRole.ID)},
|
||||
BasePath: "/",
|
||||
Permission: 0,
|
||||
Disabled: true,
|
||||
@@ -65,4 +36,35 @@ func initUser() {
|
||||
utils.Log.Fatalf("[init user] Failed to get guest user: %v", err)
|
||||
}
|
||||
}
|
||||
admin, err := op.GetAdmin()
|
||||
adminPassword := random.String(8)
|
||||
envpass := os.Getenv("ALIST_ADMIN_PASSWORD")
|
||||
if flags.Dev {
|
||||
adminPassword = "admin"
|
||||
} else if len(envpass) > 0 {
|
||||
adminPassword = envpass
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
salt := random.String(16)
|
||||
adminRole, _ := op.GetRoleByName("admin")
|
||||
admin = &model.User{
|
||||
Username: "admin",
|
||||
Salt: salt,
|
||||
PwdHash: model.TwoHashPwd(adminPassword, salt),
|
||||
Role: model.Roles{int(adminRole.ID)},
|
||||
BasePath: "/",
|
||||
Authn: "[]",
|
||||
// 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives)
|
||||
Permission: 0xFFFF,
|
||||
}
|
||||
if err := op.CreateUser(admin); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
utils.Log.Infof("Successfully created the admin user and the initial password is: %s", adminPassword)
|
||||
}
|
||||
} else {
|
||||
utils.Log.Fatalf("[init user] Failed to get admin user: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,10 +14,14 @@ import (
|
||||
|
||||
func init() {
|
||||
formatter := logrus.TextFormatter{
|
||||
ForceColors: true,
|
||||
EnvironmentOverrideColors: true,
|
||||
TimestampFormat: "2006-01-02 15:04:05",
|
||||
FullTimestamp: true,
|
||||
TimestampFormat: "2006-01-02 15:04:05",
|
||||
FullTimestamp: true,
|
||||
}
|
||||
if os.Getenv("NO_COLOR") != "" || os.Getenv("ALIST_NO_COLOR") == "1" {
|
||||
formatter.DisableColors = true
|
||||
} else {
|
||||
formatter.ForceColors = true
|
||||
formatter.EnvironmentOverrideColors = true
|
||||
}
|
||||
logrus.SetFormatter(&formatter)
|
||||
utils.Log.SetFormatter(&formatter)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_24_0"
|
||||
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_32_0"
|
||||
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_41_0"
|
||||
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_46_0"
|
||||
)
|
||||
|
||||
type VersionPatches struct {
|
||||
@@ -32,4 +33,10 @@ var UpgradePatches = []VersionPatches{
|
||||
v3_41_0.GrantAdminPermissions,
|
||||
},
|
||||
},
|
||||
{
|
||||
Version: "v3.46.0",
|
||||
Patches: []func(){
|
||||
v3_46_0.ConvertLegacyRoles,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
186
internal/bootstrap/patch/v3_46_0/convert_role.go
Normal file
186
internal/bootstrap/patch/v3_46_0/convert_role.go
Normal file
@@ -0,0 +1,186 @@
|
||||
package v3_46_0
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/db"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// ConvertLegacyRoles migrates old integer role values to a new role model with permission scopes.
|
||||
func ConvertLegacyRoles() {
|
||||
guestRole, err := op.GetRoleByName("guest")
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
guestRole = &model.Role{
|
||||
ID: uint(model.GUEST),
|
||||
Name: "guest",
|
||||
Description: "Guest",
|
||||
PermissionScopes: []model.PermissionEntry{
|
||||
{
|
||||
Path: "/",
|
||||
Permission: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err = op.CreateRole(guestRole); err != nil {
|
||||
utils.Log.Errorf("[convert roles] failed to create guest role: %v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
utils.Log.Errorf("[convert roles] failed to get guest role: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
adminRole, err := op.GetRoleByName("admin")
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
adminRole = &model.Role{
|
||||
ID: uint(model.ADMIN),
|
||||
Name: "admin",
|
||||
Description: "Administrator",
|
||||
PermissionScopes: []model.PermissionEntry{
|
||||
{
|
||||
Path: "/",
|
||||
Permission: 0x33FF,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err = op.CreateRole(adminRole); err != nil {
|
||||
utils.Log.Errorf("[convert roles] failed to create admin role: %v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
utils.Log.Errorf("[convert roles] failed to get admin role: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
generalRole, err := op.GetRoleByName("general")
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
generalRole = &model.Role{
|
||||
ID: uint(model.NEWGENERAL),
|
||||
Name: "general",
|
||||
Description: "General User",
|
||||
PermissionScopes: []model.PermissionEntry{
|
||||
{
|
||||
Path: "/",
|
||||
Permission: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err = op.CreateRole(generalRole); err != nil {
|
||||
utils.Log.Errorf("[convert roles] failed create general role: %v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
utils.Log.Errorf("[convert roles] failed get general role: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
rawDb := db.GetDb()
|
||||
table := conf.Conf.Database.TablePrefix + "users"
|
||||
rows, err := rawDb.Table(table).Select("id, username, role").Rows()
|
||||
if err != nil {
|
||||
utils.Log.Errorf("[convert roles] failed to get users: %v", err)
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var updatedCount int
|
||||
for rows.Next() {
|
||||
var id uint
|
||||
var username string
|
||||
var rawRole []byte
|
||||
|
||||
if err := rows.Scan(&id, &username, &rawRole); err != nil {
|
||||
utils.Log.Warnf("[convert roles] skip user scan err: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
utils.Log.Debugf("[convert roles] user: %s raw role: %s", username, string(rawRole))
|
||||
|
||||
if len(rawRole) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var oldRoles []int
|
||||
wasSingleInt := false
|
||||
if err := json.Unmarshal(rawRole, &oldRoles); err != nil {
|
||||
var single int
|
||||
if err := json.Unmarshal(rawRole, &single); err != nil {
|
||||
utils.Log.Warnf("[convert roles] user %s has invalid role: %s", username, string(rawRole))
|
||||
continue
|
||||
}
|
||||
oldRoles = []int{single}
|
||||
wasSingleInt = true
|
||||
}
|
||||
|
||||
var newRoles model.Roles
|
||||
for _, r := range oldRoles {
|
||||
switch r {
|
||||
case model.ADMIN:
|
||||
newRoles = append(newRoles, int(adminRole.ID))
|
||||
case model.GUEST:
|
||||
newRoles = append(newRoles, int(guestRole.ID))
|
||||
case model.GENERAL:
|
||||
newRoles = append(newRoles, int(generalRole.ID))
|
||||
default:
|
||||
newRoles = append(newRoles, r)
|
||||
}
|
||||
}
|
||||
|
||||
if wasSingleInt {
|
||||
err := rawDb.Table(table).Where("id = ?", id).Update("role", newRoles).Error
|
||||
if err != nil {
|
||||
utils.Log.Errorf("[convert roles] failed to update user %s: %v", username, err)
|
||||
} else {
|
||||
updatedCount++
|
||||
utils.Log.Infof("[convert roles] updated user %s: %v → %v", username, oldRoles, newRoles)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
utils.Log.Infof("[convert roles] completed role conversion for %d users", updatedCount)
|
||||
}
|
||||
|
||||
func IsLegacyRoleDetected() bool {
|
||||
rawDb := db.GetDb()
|
||||
table := conf.Conf.Database.TablePrefix + "users"
|
||||
rows, err := rawDb.Table(table).Select("role").Rows()
|
||||
if err != nil {
|
||||
utils.Log.Errorf("[role check] failed to scan user roles: %v", err)
|
||||
return false
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var raw sql.RawBytes
|
||||
if err := rows.Scan(&raw); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(raw) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var roles []int
|
||||
if err := json.Unmarshal(raw, &roles); err == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var single int
|
||||
if err := json.Unmarshal(raw, &single); err == nil {
|
||||
utils.Log.Infof("[role check] detected legacy int role: %d", single)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -37,6 +37,18 @@ func InitTaskManager() {
|
||||
if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted
|
||||
CleanTempDir()
|
||||
}
|
||||
workers := conf.Conf.Tasks.S3Transition.Workers
|
||||
if workers < 0 {
|
||||
workers = 0
|
||||
}
|
||||
fs.S3TransitionTaskManager = tache.NewManager[*fs.S3TransitionTask](
|
||||
tache.WithWorks(workers),
|
||||
tache.WithPersistFunction(
|
||||
db.GetTaskDataFunc("s3_transition", conf.Conf.Tasks.S3Transition.TaskPersistant),
|
||||
db.UpdateTaskDataFunc("s3_transition", conf.Conf.Tasks.S3Transition.TaskPersistant),
|
||||
),
|
||||
tache.WithMaxRetry(conf.Conf.Tasks.S3Transition.MaxRetry),
|
||||
)
|
||||
fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry))
|
||||
op.RegisterSettingChangingCallback(func() {
|
||||
fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)))
|
||||
|
||||
@@ -60,6 +60,7 @@ type TasksConfig struct {
|
||||
Copy TaskConfig `json:"copy" envPrefix:"COPY_"`
|
||||
Decompress TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
|
||||
DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"`
|
||||
S3Transition TaskConfig `json:"s3_transition" envPrefix:"S3_TRANSITION_"`
|
||||
AllowRetryCanceled bool `json:"allow_retry_canceled" env:"ALLOW_RETRY_CANCELED"`
|
||||
}
|
||||
|
||||
@@ -184,6 +185,11 @@ func DefaultConfig() *Config {
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
},
|
||||
S3Transition: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
AllowRetryCanceled: false,
|
||||
},
|
||||
Cors: Cors{
|
||||
|
||||
@@ -10,12 +10,15 @@ const (
|
||||
|
||||
const (
|
||||
// site
|
||||
VERSION = "version"
|
||||
SiteTitle = "site_title"
|
||||
Announcement = "announcement"
|
||||
AllowIndexed = "allow_indexed"
|
||||
AllowMounted = "allow_mounted"
|
||||
RobotsTxt = "robots_txt"
|
||||
VERSION = "version"
|
||||
SiteTitle = "site_title"
|
||||
Announcement = "announcement"
|
||||
AllowIndexed = "allow_indexed"
|
||||
AllowMounted = "allow_mounted"
|
||||
RobotsTxt = "robots_txt"
|
||||
AllowRegister = "allow_register"
|
||||
DefaultRole = "default_role"
|
||||
UseNewui = "use_newui"
|
||||
|
||||
Logo = "logo"
|
||||
Favicon = "favicon"
|
||||
@@ -45,6 +48,9 @@ const (
|
||||
ForwardDirectLinkParams = "forward_direct_link_params"
|
||||
IgnoreDirectLinkParams = "ignore_direct_link_params"
|
||||
WebauthnLoginEnabled = "webauthn_login_enabled"
|
||||
MaxDevices = "max_devices"
|
||||
DeviceEvictPolicy = "device_evict_policy"
|
||||
DeviceSessionTTL = "device_session_ttl"
|
||||
|
||||
// index
|
||||
SearchIndex = "search_index"
|
||||
|
||||
@@ -12,7 +12,7 @@ var db *gorm.DB
|
||||
|
||||
func Init(d *gorm.DB) {
|
||||
db = d
|
||||
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey))
|
||||
err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.Role), new(model.Label), new(model.LabelFileBinding), new(model.ObjFile), new(model.Session))
|
||||
if err != nil {
|
||||
log.Fatalf("failed migrate database: %s", err.Error())
|
||||
}
|
||||
|
||||
79
internal/db/label.go
Normal file
79
internal/db/label.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/gorm"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GetLabels Get all label from database order by id
|
||||
func GetLabels(pageIndex, pageSize int) ([]model.Label, int64, error) {
|
||||
labelDB := db.Model(&model.Label{})
|
||||
var count int64
|
||||
if err := labelDB.Count(&count).Error; err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed get label count")
|
||||
}
|
||||
var labels []model.Label
|
||||
if err := labelDB.Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&labels).Error; err != nil {
|
||||
return nil, 0, errors.WithStack(err)
|
||||
}
|
||||
return labels, count, nil
|
||||
}
|
||||
|
||||
// GetLabelById Get Label by id, used to update label usually
|
||||
func GetLabelById(id uint) (*model.Label, error) {
|
||||
var label model.Label
|
||||
label.ID = id
|
||||
if err := db.First(&label).Error; err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return &label, nil
|
||||
}
|
||||
|
||||
// CreateLabel just insert label to database
|
||||
func CreateLabel(label model.Label) (uint, error) {
|
||||
label.CreateTime = time.Now()
|
||||
err := errors.WithStack(db.Create(&label).Error)
|
||||
if err != nil {
|
||||
return label.ID, errors.WithMessage(err, "failed create label in database")
|
||||
}
|
||||
return label.ID, nil
|
||||
}
|
||||
|
||||
// UpdateLabel just update storage in database
|
||||
func UpdateLabel(label *model.Label) (*model.Label, error) {
|
||||
label.CreateTime = time.Now()
|
||||
_, err := GetLabelById(label.ID)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "failed get old label")
|
||||
}
|
||||
err = errors.WithStack(db.Save(label).Error)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "failed create label in database")
|
||||
}
|
||||
return label, nil
|
||||
}
|
||||
|
||||
// DeleteLabelById just delete label from database by id
|
||||
func DeleteLabelById(id uint) error {
|
||||
return errors.WithStack(db.Delete(&model.Label{}, id).Error)
|
||||
}
|
||||
|
||||
// GetLabelByIds Get label from database order by ids
|
||||
func GetLabelByIds(ids []uint) ([]model.Label, error) {
|
||||
labelDB := db.Model(&model.Label{})
|
||||
var labels []model.Label
|
||||
if err := labelDB.Where(ids).Find(&labels).Error; err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
// GetLabelByName Get Label by name
|
||||
func GetLabelByName(name string) bool {
|
||||
var label model.Label
|
||||
result := db.Where("name = ?", name).First(&label)
|
||||
exists := !errors.Is(result.Error, gorm.ErrRecordNotFound)
|
||||
return exists
|
||||
}
|
||||
192
internal/db/label_file_binding.go
Normal file
192
internal/db/label_file_binding.go
Normal file
@@ -0,0 +1,192 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GetLabelIds Get all label_ids from database order by file_name
|
||||
func GetLabelIds(userId uint, fileName string) ([]uint, error) {
|
||||
//fmt.Printf(">>> [GetLabelIds] userId: %d, fileName: %s\n", userId, fileName)
|
||||
labelFileBinDingDB := db.Model(&model.LabelFileBinding{})
|
||||
var labelIds []uint
|
||||
if err := labelFileBinDingDB.Where("file_name = ?", fileName).Where("user_id = ?", userId).Pluck("label_id", &labelIds).Error; err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return labelIds, nil
|
||||
}
|
||||
|
||||
func CreateLabelFileBinDing(fileName string, labelId, userId uint) error {
|
||||
var labelFileBinDing model.LabelFileBinding
|
||||
labelFileBinDing.UserId = userId
|
||||
labelFileBinDing.LabelId = labelId
|
||||
labelFileBinDing.FileName = fileName
|
||||
labelFileBinDing.CreateTime = time.Now()
|
||||
err := errors.WithStack(db.Create(&labelFileBinDing).Error)
|
||||
if err != nil {
|
||||
return errors.WithMessage(err, "failed create label in database")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetLabelFileBinDingByLabelIdExists Get Label by label_id, used to del label usually
|
||||
func GetLabelFileBinDingByLabelIdExists(labelId, userId uint) bool {
|
||||
var labelFileBinDing model.LabelFileBinding
|
||||
result := db.Where("label_id = ?", labelId).Where("user_id = ?", userId).First(&labelFileBinDing)
|
||||
exists := !errors.Is(result.Error, gorm.ErrRecordNotFound)
|
||||
return exists
|
||||
}
|
||||
|
||||
// DelLabelFileBinDingByFileName used to del usually
|
||||
func DelLabelFileBinDingByFileName(userId uint, fileName string) error {
|
||||
return errors.WithStack(db.Where("file_name = ?", fileName).Where("user_id = ?", userId).Delete(model.LabelFileBinding{}).Error)
|
||||
}
|
||||
|
||||
// DelLabelFileBinDingById used to del usually
|
||||
func DelLabelFileBinDingById(labelId, userId uint, fileName string) error {
|
||||
return errors.WithStack(db.Where("label_id = ?", labelId).Where("file_name = ?", fileName).Where("user_id = ?", userId).Delete(model.LabelFileBinding{}).Error)
|
||||
}
|
||||
|
||||
func GetLabelFileBinDingByLabelId(labelIds []uint, userId uint) (result []model.LabelFileBinding, err error) {
|
||||
if err := db.Where("label_id in (?)", labelIds).Where("user_id = ?", userId).Find(&result).Error; err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func GetLabelBindingsByFileNamesPublic(fileNames []string) (map[string][]uint, error) {
|
||||
var binds []model.LabelFileBinding
|
||||
if err := db.Where("file_name IN ?", fileNames).Find(&binds).Error; err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
out := make(map[string][]uint, len(fileNames))
|
||||
seen := make(map[string]struct{}, len(binds))
|
||||
for _, b := range binds {
|
||||
key := fmt.Sprintf("%s-%d", b.FileName, b.LabelId)
|
||||
if _, ok := seen[key]; ok {
|
||||
continue
|
||||
}
|
||||
seen[key] = struct{}{}
|
||||
out[b.FileName] = append(out[b.FileName], b.LabelId)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func GetLabelsByFileNamesPublic(fileNames []string) (map[string][]model.Label, error) {
|
||||
bindMap, err := GetLabelBindingsByFileNamesPublic(fileNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
idSet := make(map[uint]struct{})
|
||||
for _, ids := range bindMap {
|
||||
for _, id := range ids {
|
||||
idSet[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
if len(idSet) == 0 {
|
||||
return make(map[string][]model.Label, 0), nil
|
||||
}
|
||||
allIDs := make([]uint, 0, len(idSet))
|
||||
for id := range idSet {
|
||||
allIDs = append(allIDs, id)
|
||||
}
|
||||
labels, err := GetLabelByIds(allIDs) // 你已有的函数
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
labelByID := make(map[uint]model.Label, len(labels))
|
||||
for _, l := range labels {
|
||||
labelByID[l.ID] = l
|
||||
}
|
||||
|
||||
out := make(map[string][]model.Label, len(bindMap))
|
||||
for fname, ids := range bindMap {
|
||||
for _, id := range ids {
|
||||
if lab, ok := labelByID[id]; ok {
|
||||
out[fname] = append(out[fname], lab)
|
||||
}
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func ListLabelFileBinDing(userId uint, labelIDs []uint, fileName string, page, pageSize int) ([]model.LabelFileBinding, int64, error) {
|
||||
q := db.Model(&model.LabelFileBinding{}).Where("user_id = ?", userId)
|
||||
|
||||
if len(labelIDs) > 0 {
|
||||
q = q.Where("label_id IN ?", labelIDs)
|
||||
}
|
||||
if fileName != "" {
|
||||
q = q.Where("file_name LIKE ?", "%"+fileName+"%")
|
||||
}
|
||||
|
||||
var total int64
|
||||
if err := q.Count(&total).Error; err != nil {
|
||||
return nil, 0, errors.WithStack(err)
|
||||
}
|
||||
|
||||
var rows []model.LabelFileBinding
|
||||
if err := q.
|
||||
Order("id DESC").
|
||||
Offset((page - 1) * pageSize).
|
||||
Limit(pageSize).
|
||||
Find(&rows).Error; err != nil {
|
||||
return nil, 0, errors.WithStack(err)
|
||||
}
|
||||
return rows, total, nil
|
||||
}
|
||||
|
||||
// RestoreLabelFileBindings re-inserts a set of label/file bindings inside a
// single transaction (e.g. when restoring from a backup).
//
// keepIDs:  when false, primary keys are zeroed so the database assigns
//           fresh ones.
// override: when true, all existing bindings for each affected
//           (user_id, file_name) pair are deleted first and the incoming
//           rows are inserted unconditionally; when false, rows are inserted
//           with ON CONFLICT DO NOTHING so existing rows win.
func RestoreLabelFileBindings(bindings []model.LabelFileBinding, keepIDs bool, override bool) error {
	if len(bindings) == 0 {
		return nil
	}
	tx := db.Begin()

	if override {
		// Collect the distinct (user, file) pairs so each pair is wiped
		// exactly once, no matter how many bindings reference it.
		type key struct {
			uid  uint
			name string
		}
		toDel := make(map[key]struct{}, len(bindings))
		for i := range bindings {
			k := key{uid: bindings[i].UserId, name: bindings[i].FileName}
			toDel[k] = struct{}{}
		}
		for k := range toDel {
			if err := tx.Where("user_id = ? AND file_name = ?", k.uid, k.name).
				Delete(&model.LabelFileBinding{}).Error; err != nil {
				tx.Rollback()
				return errors.WithStack(err)
			}
		}
	}

	for i := range bindings {
		// Work on a copy so mutations below never touch the caller's slice.
		b := bindings[i]
		if !keepIDs {
			b.ID = 0 // let the database assign a fresh primary key
		}
		if b.CreateTime.IsZero() {
			b.CreateTime = time.Now()
		}
		if override {
			if err := tx.Create(&b).Error; err != nil {
				tx.Rollback()
				return errors.WithStack(err)
			}
		} else {
			// Keep existing rows: silently skip conflicting inserts.
			if err := tx.Clauses(clause.OnConflict{DoNothing: true}).Create(&b).Error; err != nil {
				tx.Rollback()
				return errors.WithStack(err)
			}
		}
	}

	return errors.WithStack(tx.Commit().Error)
}
|
||||
31
internal/db/obj_file.go
Normal file
31
internal/db/obj_file.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// GetFileByNameExists Get file by name
|
||||
func GetFileByNameExists(name string) bool {
|
||||
var label model.ObjFile
|
||||
result := db.Where("name = ?", name).First(&label)
|
||||
exists := !errors.Is(result.Error, gorm.ErrRecordNotFound)
|
||||
return exists
|
||||
}
|
||||
|
||||
// GetFileByName Get file by name
|
||||
func GetFileByName(name string, userId uint) (objFile model.ObjFile, err error) {
|
||||
if err = db.Where("name = ?", name).Where("user_id = ?", userId).First(&objFile).Error; err != nil {
|
||||
return objFile, errors.WithStack(err)
|
||||
}
|
||||
return objFile, nil
|
||||
}
|
||||
|
||||
func CreateObjFile(obj model.ObjFile) error {
|
||||
err := errors.WithStack(db.Create(&obj).Error)
|
||||
if err != nil {
|
||||
return errors.WithMessage(err, "failed create file in database")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
103
internal/db/role.go
Normal file
103
internal/db/role.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/pkg/errors"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func GetRole(id uint) (*model.Role, error) {
|
||||
var r model.Role
|
||||
if err := db.First(&r, id).Error; err != nil {
|
||||
return nil, errors.Wrapf(err, "failed get role")
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
func GetRoleByName(name string) (*model.Role, error) {
|
||||
r := model.Role{Name: name}
|
||||
if err := db.Where(r).First(&r).Error; err != nil {
|
||||
return nil, errors.Wrapf(err, "failed get role")
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
func GetRoles(pageIndex, pageSize int) (roles []model.Role, count int64, err error) {
|
||||
roleDB := db.Model(&model.Role{})
|
||||
if err = roleDB.Count(&count).Error; err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed get roles count")
|
||||
}
|
||||
if err = roleDB.Order(columnName("id")).Offset((pageIndex - 1) * pageSize).Limit(pageSize).Find(&roles).Error; err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed get find roles")
|
||||
}
|
||||
return roles, count, nil
|
||||
}
|
||||
|
||||
func GetAllRoles() ([]model.Role, error) {
|
||||
var roles []model.Role
|
||||
if err := db.Find(&roles).Error; err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return roles, nil
|
||||
}
|
||||
|
||||
func CreateRole(r *model.Role) error {
|
||||
if err := db.Create(r).Error; err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if r.Default {
|
||||
if err := db.Model(&model.Role{}).Where("id <> ?", r.ID).Update("default", false).Error; err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func UpdateRole(r *model.Role) error {
|
||||
if err := db.Save(r).Error; err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if r.Default {
|
||||
if err := db.Model(&model.Role{}).Where("id <> ?", r.ID).Update("default", false).Error; err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeleteRole(id uint) error {
|
||||
return errors.WithStack(db.Delete(&model.Role{}, id).Error)
|
||||
}
|
||||
|
||||
func UpdateRolePermissionsPathPrefix(oldPath, newPath string) ([]uint, error) {
|
||||
var roles []model.Role
|
||||
var modifiedRoleIDs []uint
|
||||
|
||||
if err := db.Find(&roles).Error; err != nil {
|
||||
return nil, errors.WithMessage(err, "failed to load roles")
|
||||
}
|
||||
|
||||
for _, role := range roles {
|
||||
updated := false
|
||||
for i, entry := range role.PermissionScopes {
|
||||
entryPath := path.Clean(entry.Path)
|
||||
oldPathClean := path.Clean(oldPath)
|
||||
|
||||
if entryPath == oldPathClean {
|
||||
role.PermissionScopes[i].Path = newPath
|
||||
updated = true
|
||||
} else if strings.HasPrefix(entryPath, oldPathClean+"/") {
|
||||
role.PermissionScopes[i].Path = newPath + entryPath[len(oldPathClean):]
|
||||
updated = true
|
||||
}
|
||||
}
|
||||
if updated {
|
||||
if err := UpdateRole(&role); err != nil {
|
||||
return nil, errors.WithMessagef(err, "failed to update role ID %d", role.ID)
|
||||
}
|
||||
modifiedRoleIDs = append(modifiedRoleIDs, role.ID)
|
||||
}
|
||||
}
|
||||
return modifiedRoleIDs, nil
|
||||
}
|
||||
69
internal/db/session.go
Normal file
69
internal/db/session.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
func GetSession(userID uint, deviceKey string) (*model.Session, error) {
|
||||
s := model.Session{UserID: userID, DeviceKey: deviceKey}
|
||||
if err := db.Select("user_id, device_key, last_active, status, user_agent, ip").Where(&s).First(&s).Error; err != nil {
|
||||
return nil, errors.Wrap(err, "failed find session")
|
||||
}
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
func CreateSession(s *model.Session) error {
|
||||
return errors.WithStack(db.Create(s).Error)
|
||||
}
|
||||
|
||||
func UpsertSession(s *model.Session) error {
|
||||
return errors.WithStack(db.Clauses(clause.OnConflict{UpdateAll: true}).Create(s).Error)
|
||||
}
|
||||
|
||||
func DeleteSession(userID uint, deviceKey string) error {
|
||||
return errors.WithStack(db.Where("user_id = ? AND device_key = ?", userID, deviceKey).Delete(&model.Session{}).Error)
|
||||
}
|
||||
|
||||
func CountActiveSessionsByUser(userID uint) (int64, error) {
|
||||
var count int64
|
||||
err := db.Model(&model.Session{}).
|
||||
Where("user_id = ? AND status = ?", userID, model.SessionActive).
|
||||
Count(&count).Error
|
||||
return count, errors.WithStack(err)
|
||||
}
|
||||
|
||||
func DeleteSessionsBefore(ts int64) error {
|
||||
return errors.WithStack(db.Where("last_active < ?", ts).Delete(&model.Session{}).Error)
|
||||
}
|
||||
|
||||
// GetOldestActiveSession returns the oldest active session for the specified user.
|
||||
func GetOldestActiveSession(userID uint) (*model.Session, error) {
|
||||
var s model.Session
|
||||
if err := db.Where("user_id = ? AND status = ?", userID, model.SessionActive).
|
||||
Order("last_active ASC").First(&s).Error; err != nil {
|
||||
return nil, errors.Wrap(err, "failed get oldest active session")
|
||||
}
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
func UpdateSessionLastActive(userID uint, deviceKey string, lastActive int64) error {
|
||||
return errors.WithStack(db.Model(&model.Session{}).Where("user_id = ? AND device_key = ?", userID, deviceKey).Update("last_active", lastActive).Error)
|
||||
}
|
||||
|
||||
func ListSessionsByUser(userID uint) ([]model.Session, error) {
|
||||
var sessions []model.Session
|
||||
err := db.Select("user_id, device_key, last_active, status, user_agent, ip").Where("user_id = ? AND status = ?", userID, model.SessionActive).Find(&sessions).Error
|
||||
return sessions, errors.WithStack(err)
|
||||
}
|
||||
|
||||
func ListSessions() ([]model.Session, error) {
|
||||
var sessions []model.Session
|
||||
err := db.Select("user_id, device_key, last_active, status, user_agent, ip").Where("status = ?", model.SessionActive).Find(&sessions).Error
|
||||
return sessions, errors.WithStack(err)
|
||||
}
|
||||
|
||||
func MarkInactive(sessionID string) error {
|
||||
return errors.WithStack(db.Model(&model.Session{}).Where("device_key = ?", sessionID).Update("status", model.SessionInactive).Error)
|
||||
}
|
||||
@@ -2,19 +2,42 @@ package db
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-webauthn/webauthn/webauthn"
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/gorm"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func GetUserByRole(role int) (*model.User, error) {
|
||||
user := model.User{Role: role}
|
||||
if err := db.Where(user).Take(&user).Error; err != nil {
|
||||
var users []model.User
|
||||
if err := db.Find(&users).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &user, nil
|
||||
for i := range users {
|
||||
if users[i].Role.Contains(role) {
|
||||
return &users[i], nil
|
||||
}
|
||||
}
|
||||
return nil, gorm.ErrRecordNotFound
|
||||
}
|
||||
|
||||
func GetUsersByRole(roleID int) ([]model.User, error) {
|
||||
var users []model.User
|
||||
if err := db.Find(&users).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var result []model.User
|
||||
for _, u := range users {
|
||||
if slices.Contains(u.Role, roleID) {
|
||||
result = append(result, u)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func GetUserByName(username string) (*model.User, error) {
|
||||
@@ -60,6 +83,14 @@ func GetUsers(pageIndex, pageSize int) (users []model.User, count int64, err err
|
||||
return users, count, nil
|
||||
}
|
||||
|
||||
func GetAllUsers() ([]model.User, error) {
|
||||
var users []model.User
|
||||
if err := db.Find(&users).Error; err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return users, nil
|
||||
}
|
||||
|
||||
func DeleteUserById(id uint) error {
|
||||
return errors.WithStack(db.Delete(&model.User{}, id).Error)
|
||||
}
|
||||
@@ -100,3 +131,50 @@ func RemoveAuthn(u *model.User, id string) error {
|
||||
}
|
||||
return UpdateAuthn(u.ID, string(res))
|
||||
}
|
||||
|
||||
func UpdateUserBasePathPrefix(oldPath, newPath string, usersOpt ...[]model.User) ([]string, error) {
|
||||
var users []model.User
|
||||
var modifiedUsernames []string
|
||||
|
||||
oldPathClean := path.Clean(oldPath)
|
||||
|
||||
if len(usersOpt) > 0 {
|
||||
users = usersOpt[0]
|
||||
} else {
|
||||
if err := db.Find(&users).Error; err != nil {
|
||||
return nil, errors.WithMessage(err, "failed to load users")
|
||||
}
|
||||
}
|
||||
|
||||
for _, user := range users {
|
||||
basePath := path.Clean(user.BasePath)
|
||||
updated := false
|
||||
|
||||
if basePath == oldPathClean {
|
||||
user.BasePath = path.Clean(newPath)
|
||||
updated = true
|
||||
} else if strings.HasPrefix(basePath, oldPathClean+"/") {
|
||||
user.BasePath = path.Clean(newPath + basePath[len(oldPathClean):])
|
||||
updated = true
|
||||
}
|
||||
|
||||
if updated {
|
||||
if err := UpdateUser(&user); err != nil {
|
||||
return nil, errors.WithMessagef(err, "failed to update user ID %d", user.ID)
|
||||
}
|
||||
modifiedUsernames = append(modifiedUsernames, user.Username)
|
||||
}
|
||||
}
|
||||
|
||||
return modifiedUsernames, nil
|
||||
}
|
||||
|
||||
// CountUsersByRoleAndEnabledExclude counts enabled (not disabled) users,
// other than excludeUserID, whose JSON role array contains roleID.
//
// NOTE(review): JSON_CONTAINS is a MySQL function; this query will fail on
// other backends (e.g. SQLite/PostgreSQL) — confirm which databases must be
// supported here.
func CountUsersByRoleAndEnabledExclude(roleID uint, excludeUserID uint) (int64, error) {
	var count int64
	// Roles are stored as a JSON array; build the needle value once, e.g. "[3]".
	jsonValue := fmt.Sprintf("[%d]", roleID)
	err := db.Model(&model.User{}).
		Where("disabled = ? AND id != ?", false, excludeUserID).
		Where("JSON_CONTAINS(role, ?)", jsonValue).
		Count(&count).Error
	return count, err
}
|
||||
|
||||
138
internal/device/session.go
Normal file
138
internal/device/session.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package device
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/db"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/setting"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// Handle verifies device sessions for a user and upserts current session.
|
||||
func Handle(userID uint, deviceKey, ua, ip string) error {
|
||||
ttl := setting.GetInt(conf.DeviceSessionTTL, 86400)
|
||||
if ttl > 0 {
|
||||
_ = db.DeleteSessionsBefore(time.Now().Unix() - int64(ttl))
|
||||
}
|
||||
|
||||
ip = utils.MaskIP(ip)
|
||||
|
||||
now := time.Now().Unix()
|
||||
sess, err := db.GetSession(userID, deviceKey)
|
||||
if err == nil {
|
||||
if sess.Status == model.SessionInactive {
|
||||
return errors.WithStack(errs.SessionInactive)
|
||||
}
|
||||
sess.Status = model.SessionActive
|
||||
sess.LastActive = now
|
||||
sess.UserAgent = ua
|
||||
sess.IP = ip
|
||||
return db.UpsertSession(sess)
|
||||
}
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return err
|
||||
}
|
||||
|
||||
max := setting.GetInt(conf.MaxDevices, 0)
|
||||
if max > 0 {
|
||||
count, err := db.CountActiveSessionsByUser(userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if count >= int64(max) {
|
||||
policy := setting.GetStr(conf.DeviceEvictPolicy, "deny")
|
||||
if policy == "evict_oldest" {
|
||||
if oldest, err := db.GetOldestActiveSession(userID); err == nil {
|
||||
if err := db.MarkInactive(oldest.DeviceKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return errors.WithStack(errs.TooManyDevices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s := &model.Session{UserID: userID, DeviceKey: deviceKey, UserAgent: ua, IP: ip, LastActive: now, Status: model.SessionActive}
|
||||
return db.CreateSession(s)
|
||||
}
|
||||
|
||||
// EnsureActiveOnLogin is used only in login flow:
|
||||
// - If session exists (even Inactive): reactivate and refresh fields.
|
||||
// - If not exists: apply max-devices policy, then create Active session.
|
||||
func EnsureActiveOnLogin(userID uint, deviceKey, ua, ip string) error {
|
||||
ip = utils.MaskIP(ip)
|
||||
now := time.Now().Unix()
|
||||
|
||||
sess, err := db.GetSession(userID, deviceKey)
|
||||
if err == nil {
|
||||
if sess.Status == model.SessionInactive {
|
||||
max := setting.GetInt(conf.MaxDevices, 0)
|
||||
if max > 0 {
|
||||
count, err := db.CountActiveSessionsByUser(userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if count >= int64(max) {
|
||||
policy := setting.GetStr(conf.DeviceEvictPolicy, "deny")
|
||||
if policy == "evict_oldest" {
|
||||
if oldest, gerr := db.GetOldestActiveSession(userID); gerr == nil {
|
||||
if err := db.MarkInactive(oldest.DeviceKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return errors.WithStack(errs.TooManyDevices)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sess.Status = model.SessionActive
|
||||
sess.LastActive = now
|
||||
sess.UserAgent = ua
|
||||
sess.IP = ip
|
||||
return db.UpsertSession(sess)
|
||||
}
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return err
|
||||
}
|
||||
|
||||
max := setting.GetInt(conf.MaxDevices, 0)
|
||||
if max > 0 {
|
||||
count, err := db.CountActiveSessionsByUser(userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if count >= int64(max) {
|
||||
policy := setting.GetStr(conf.DeviceEvictPolicy, "deny")
|
||||
if policy == "evict_oldest" {
|
||||
if oldest, gerr := db.GetOldestActiveSession(userID); gerr == nil {
|
||||
if err := db.MarkInactive(oldest.DeviceKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return errors.WithStack(errs.TooManyDevices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return db.CreateSession(&model.Session{
|
||||
UserID: userID,
|
||||
DeviceKey: deviceKey,
|
||||
UserAgent: ua,
|
||||
IP: ip,
|
||||
LastActive: now,
|
||||
Status: model.SessionActive,
|
||||
})
|
||||
}
|
||||
|
||||
// Refresh updates last_active for the session. The error is intentionally
// discarded: a missed heartbeat must never fail the request that triggered it.
func Refresh(userID uint, deviceKey string) {
	_ = db.UpdateSessionLastActive(userID, deviceKey, time.Now().Unix())
}
|
||||
8
internal/errs/device.go
Normal file
8
internal/errs/device.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package errs
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
	// TooManyDevices is returned when a user has reached the configured
	// max-devices limit and the eviction policy denies the new session.
	TooManyDevices = errors.New("too many active devices")
	// SessionInactive is returned when a request arrives on a session that
	// has been marked inactive (e.g. evicted by another login).
	SessionInactive = errors.New("session inactive")
)
|
||||
@@ -4,4 +4,5 @@ import "errors"
|
||||
|
||||
var (
|
||||
EmptyToken = errors.New("empty token")
|
||||
LinkIsDir = errors.New("link is dir")
|
||||
)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user