Mirror of https://github.com/AlistGo/alist.git (synced 2025-11-25 19:37:41 +08:00)

Compare commits: v3.42.0...fix/docker (98 commits)
Commits:
dbf26cd222, 4d7c2a09ce, 91cc7529a0, f61d13d433, 00120cba27, 5e15a360b7, 2bdc5bef9e, 13ea1c1405,
fd41186679, 9da56bab4d, 51eeb22465, b1586612ca, 7aeb0ab078, ffa03bfda1, 630cf30af5, bc5117fa4f,
11e7284824, b2b91a9281, f541489d7d, 6d9c554f6f, e532ab31ef, bf0705ec17, 17b42b9fa4, 41bdab49aa,
8f89c55aca, b449312da8, 52d4e8ec47, 28e5b5759e, 477c43971f, 0a9921fa79, 88abb323cb, f0b1aeaf8d,
c8470b9a2a, d0ee90cd11, 544a7ea022, 4f5cabc725, a2f266277c, a4bfbf8a83, ddffacf07b, 3375c26c41,
ab68faef44, 2e21df0661, af18cb138b, 31c55a2adf, 465dd1703d, a6304285b6, affd0cecd1, 37640221c0,
e4bd223d1c, 0cde4e73d6, 7b62dcb88c, c38dc6df7c, 5668e4a4ea, 1335f80362, 704d3854df, 44cc71d354,
9a9aee9ac6, 4fcc3a187e, 10a76c701d, 6e13923225, 32890da29f, 758554a40f, 4563aea47e, 35d6f3b8fc,
b4e6ab12d9, 3499c4db87, d20f41d687, d16ba65f42, c82e632ee1, 04f5525f20, 28b61a93fd, 0126af4de0,
7579d44517, 5dfea714d8, 370a6c15a9, 2570707a06, 4145734c18, 646c7bcd21, cdc41595bc, 79bef0be9e,
c230f24ebe, 30d8c20756, 3b71500f23, 399336b33c, 36b4204623, f25be154c6, ec3fc945a3, 3f9bed3d5f,
b9ad18bd0a, 0219c4e15a, d983a4ebcb, f795807753, 6164e4577b, 39bde328ee, 779c293f04, b9f397d29f,
d53eecc229, f88fd83d4a
.github/FUNDING.yml (vendored, 2 changes)

@@ -10,4 +10,4 @@ liberapay: # Replace with a single Liberapay username
 issuehunt: # Replace with a single IssueHunt username
 otechie: # Replace with a single Otechie username
 lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
-custom: ['https://alist.nn.ci/guide/sponsor.html']
+custom: ['https://alistgo.com/guide/sponsor.html']
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 8 changes)

@@ -16,14 +16,14 @@ body:
       您必须勾选以下所有内容,否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
     options:
       - label: |
-          I have read the [documentation](https://alist.nn.ci).
-          我已经阅读了[文档](https://alist.nn.ci)。
+          I have read the [documentation](https://alistgo.com).
+          我已经阅读了[文档](https://alistgo.com)。
       - label: |
           I'm sure there are no duplicate issues or discussions.
           我确定没有重复的issue或讨论。
       - label: |
-          I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
-          我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
+          I'm sure it's due to `AList` and not something else(such as [Network](https://alistgo.com/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
+          我确定是`AList`的问题,而不是其他原因(例如[网络](https://alistgo.com/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
       - label: |
           I'm sure this issue is not fixed in the latest version.
           我确定这个问题在最新版本中没有被修复。
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 2 changes)

@@ -7,7 +7,7 @@ body:
       label: Please make sure of the following things
       description: You may select more than one, even select all.
       options:
-        - label: I have read the [documentation](https://alist.nn.ci).
+        - label: I have read the [documentation](https://alistgo.com).
        - label: I'm sure there are no duplicate issues or discussions.
        - label: I'm sure this feature is not implemented.
        - label: I'm sure it's a reasonable and popular requirement.
.github/workflows/beta_release.yml (vendored, 5 changes)

@@ -94,7 +94,6 @@ jobs:
          out-dir: build
          x-flags: |
            github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
-            github.com/alist-org/alist/v3/internal/conf.GoVersion=$go_version
            github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
            github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
            github.com/alist-org/alist/v3/internal/conf.Version=$tag
@@ -120,7 +119,7 @@ jobs:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
-          repository: alist-org/desktop-release
+          repository: AlistGo/desktop-release
          ref: main
          persist-credentials: false
          fetch-depth: 0
@@ -136,4 +135,4 @@ jobs:
        with:
          github_token: ${{ secrets.MY_TOKEN }}
          branch: main
-          repository: alist-org/desktop-release
+          repository: AlistGo/desktop-release
.github/workflows/build.yml (vendored, 43 changes)

@@ -15,14 +15,17 @@ jobs:
    strategy:
      matrix:
        platform: [ubuntu-latest]
-        go-version: [ '1.21' ]
+        target:
+          - darwin-amd64
+          - darwin-arm64
+          - windows-amd64
+          - linux-arm64-musl
+          - linux-amd64-musl
+          - windows-arm64
+          - android-arm64
    name: Build
    runs-on: ${{ matrix.platform }}
    steps:
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
      - name: Checkout
        uses: actions/checkout@v4
@@ -30,19 +33,29 @@ jobs:
      - uses: benjlevesque/short-sha@v3.0
        id: short-sha

-      - name: Install dependencies
-        run: |
-          sudo snap install zig --classic --beta
-          docker pull crazymax/xgo:latest
-          go install github.com/crazy-max/xgo@latest
-          sudo apt install upx
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+
+      - name: Setup web
+        run: bash build.sh dev web

      - name: Build
-        run: |
-          bash build.sh dev
+        uses: go-cross/cgo-actions@v1
+        with:
+          targets: ${{ matrix.target }}
+          musl-target-format: $os-$musl-$arch
+          out-dir: build
+          x-flags: |
+            github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
+            github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
+            github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
+            github.com/alist-org/alist/v3/internal/conf.Version=$tag
+            github.com/alist-org/alist/v3/internal/conf.WebVersion=dev

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
-          name: alist_${{ env.SHA }}
-          path: dist
+          name: alist_${{ env.SHA }}_${{ matrix.target }}
+          path: build/*
.github/workflows/release.yml (vendored, 4 changes)

@@ -72,7 +72,7 @@ jobs:
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
-          repository: alist-org/desktop-release
+          repository: AlistGo/desktop-release
          ref: main
          persist-credentials: false
          fetch-depth: 0
@@ -89,4 +89,4 @@ jobs:
        with:
          github_token: ${{ secrets.MY_TOKEN }}
          branch: main
-          repository: alist-org/desktop-release
+          repository: AlistGo/desktop-release
.github/workflows/release_docker.yml (vendored, 13 changes)

@@ -18,6 +18,7 @@ env:
  REGISTRY: 'xhofe/alist'
  REGISTRY_USERNAME: 'xhofe'
  REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
+  GITHUB_CR_REPO: ghcr.io/${{ github.repository }}
  ARTIFACT_NAME: 'binaries_docker_release'
  RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
  IMAGE_PUSH: ${{ github.event_name == 'push' }}
@@ -114,11 +115,21 @@ jobs:
          username: ${{ env.REGISTRY_USERNAME }}
          password: ${{ env.REGISTRY_PASSWORD }}

+      - name: Login to GHCR
+        uses: docker/login-action@v3
+        with:
+          logout: true
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
-          images: ${{ env.REGISTRY }}
+          images: |
+            ${{ env.REGISTRY }}
+            ${{ env.GITHUB_CR_REPO }}
          tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }}
          flavor: |
            ${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }}
Dockerfile

@@ -32,10 +32,9 @@ RUN apk update && \
        /opt/aria2/.aria2/tracker.sh ; \
    rm -rf /var/cache/apk/*

-COPY --from=builder /app/bin/alist ./
-COPY entrypoint.sh /entrypoint.sh
-RUN chmod +x /opt/alist/alist && \
-    chmod +x /entrypoint.sh && /entrypoint.sh version
+COPY --chmod=755 --from=builder /app/bin/alist ./
+COPY --chmod=755 entrypoint.sh /entrypoint.sh
+RUN /entrypoint.sh version

 ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
 VOLUME /opt/alist/data/
Dockerfile.ci

@@ -1,4 +1,4 @@
-FROM alpine:edge
+FROM alpine:3.20.7

 ARG TARGETPLATFORM
 ARG INSTALL_FFMPEG=false
@@ -24,12 +24,11 @@ RUN apk update && \
        /opt/aria2/.aria2/tracker.sh ; \
    rm -rf /var/cache/apk/*

-COPY /build/${TARGETPLATFORM}/alist ./
-COPY entrypoint.sh /entrypoint.sh
-RUN chmod +x /opt/alist/alist && \
-    chmod +x /entrypoint.sh && /entrypoint.sh version
+COPY --chmod=755 /build/${TARGETPLATFORM}/alist ./
+COPY --chmod=755 entrypoint.sh /entrypoint.sh
+RUN /entrypoint.sh version

 ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
 VOLUME /opt/alist/data/
 EXPOSE 5244 5245
 CMD [ "/entrypoint.sh" ]
README.md (13 changes)

@@ -1,5 +1,5 @@
 <div align="center">
-  <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
+  <a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
   <p><em>🗂️A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
   <div>
     <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
     <a href="https://hub.docker.com/r/xhofe/alist">
       <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
     </a>
-    <a href="https://alist.nn.ci/guide/sponsor.html">
+    <a href="https://alistgo.com/guide/sponsor.html">
       <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
     </a>
   </div>
@@ -77,6 +77,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
 - [x] [Dropbox](https://www.dropbox.com/)
 - [x] [FeijiPan](https://www.feijipan.com/)
 - [x] [dogecloud](https://www.dogecloud.com/product/oss)
+- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
 - [x] Easy to deploy and out-of-the-box
 - [x] File preview (PDF, markdown, code, plain text, ...)
 - [x] Image preview in gallery mode
@@ -87,7 +88,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
 - [x] Dark mode
 - [x] I18n
 - [x] Protected routes (password protection and authentication)
-- [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
+- [x] WebDav (see https://alistgo.com/guide/webdav.html for details)
 - [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
 - [x] Cloudflare Workers proxy
 - [x] File/Folder package download
@@ -100,6 +101,10 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing

 <https://alistgo.com/>

+## API Documentation (via Apifox):
+
+<https://alist-public.apifox.cn/>
+
 ## Demo

 <https://al.nn.ci>
@@ -111,7 +116,7 @@ Please go to our [discussion forum](https://github.com/alist-org/alist/discussio
 ## Sponsor

 AList is an open-source software, if you happen to like this project and want me to keep going, please consider sponsoring me or providing a single donation! Thanks for all the love and support:
-https://alist.nn.ci/guide/sponsor.html
+https://alistgo.com/guide/sponsor.html

 ### Special sponsors
README_cn.md (14 changes)

@@ -1,5 +1,5 @@
 <div align="center">
-  <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
+  <a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
   <p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
   <div>
     <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
     <a href="https://hub.docker.com/r/xhofe/alist">
       <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
     </a>
-    <a href="https://alist.nn.ci/zh/guide/sponsor.html">
+    <a href="https://alistgo.com/zh/guide/sponsor.html">
       <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
     </a>
   </div>
@@ -86,7 +86,7 @@
 - [x] 黑暗模式
 - [x] 国际化
 - [x] 受保护的路由(密码保护和身份验证)
-- [x] WebDav (具体见 https://alist.nn.ci/zh/guide/webdav.html)
+- [x] WebDav (具体见 https://alistgo.com/zh/guide/webdav.html)
 - [x] [Docker 部署](https://hub.docker.com/r/xhofe/alist)
 - [x] Cloudflare workers 中转
 - [x] 文件/文件夹打包下载
@@ -97,7 +97,11 @@

 ## 文档

-<https://alist.nn.ci/zh/>
+<https://alistgo.com/zh/>

+## API 文档(通过 Apifox 提供)
+
+<https://alist-public.apifox.cn/>
+
 ## Demo
@@ -109,7 +113,7 @@

 ## 赞助

-AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我继续下去,请考虑赞助我或提供一个单一的捐款!感谢所有的爱和支持:https://alist.nn.ci/zh/guide/sponsor.html
+AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我继续下去,请考虑赞助我或提供一个单一的捐款!感谢所有的爱和支持:https://alistgo.com/zh/guide/sponsor.html

 ### 特别赞助
README_ja.md (14 changes)

@@ -1,5 +1,5 @@
 <div align="center">
-  <a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
+  <a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
   <p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
   <div>
     <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
     <a href="https://hub.docker.com/r/xhofe/alist">
       <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
     </a>
-    <a href="https://alist.nn.ci/guide/sponsor.html">
+    <a href="https://alistgo.com/guide/sponsor.html">
       <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
     </a>
   </div>
@@ -87,7 +87,7 @@
 - [x] ダークモード
 - [x] 国際化
 - [x] 保護されたルート (パスワード保護と認証)
-- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
+- [x] WebDav (詳細は https://alistgo.com/guide/webdav.html を参照)
 - [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
 - [x] Cloudflare ワーカープロキシ
 - [x] ファイル/フォルダパッケージのダウンロード
@@ -98,7 +98,11 @@

 ## ドキュメント

-<https://alist.nn.ci/>
+<https://alistgo.com/>

+## APIドキュメント(Apifox 提供)
+
+<https://alist-public.apifox.cn/>
+
 ## デモ
@@ -111,7 +115,7 @@
 ## スポンサー

 AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討ください!すべての愛とサポートに感謝します:
-https://alist.nn.ci/guide/sponsor.html
+https://alistgo.com/guide/sponsor.html

 ### スペシャルスポンサー
build.sh (6 changes)

@@ -1,6 +1,5 @@
 appName="alist"
 builtAt="$(date +'%F %T %z')"
-goVersion=$(go version | sed 's/go version //')
 gitAuthor="Xhofe <i@nn.ci>"
 gitCommit=$(git log --pretty=format:"%h" -1)

@@ -22,7 +21,6 @@ echo "frontend version: $webVersion"
 ldflags="\
 -w -s \
 -X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \
--X 'github.com/alist-org/alist/v3/internal/conf.GoVersion=$goVersion' \
 -X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \
 -X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \
 -X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \
@@ -95,7 +93,7 @@ BuildDocker() {

 PrepareBuildDockerMusl() {
   mkdir -p build/musl-libs
-  BASE="https://musl.cc/"
+  BASE="https://github.com/go-cross/musl-toolchain-archive/releases/latest/download/"
   FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
   for i in "${FILES[@]}"; do
     url="${BASE}${i}.tgz"
@@ -247,7 +245,7 @@ BuildReleaseFreeBSD() {
     cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
     echo building for freebsd-${os_arch}
     sudo mkdir -p "/opt/freebsd/${os_arch}"
-    wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
+    wget -q https://download.freebsd.org/releases/${os_arch}/14.3-RELEASE/base.txz
     sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
     rm base.txz
     export GOOS=freebsd
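For context on the ldflags list above: each -X flag overwrites a package-level string variable in internal/conf at link time, which is why dropping the GoVersion flag only works together with the version-command change further down. A minimal sketch of the mechanism, using hypothetical main-package variables rather than AList's conf package:

// Build with, for example:
//   go build -ldflags "-X 'main.Version=v3.43.0' -X 'main.GitCommit=f88fd83'" .
package main

import "fmt"

// Package-level string variables are the only targets -X can set;
// these defaults survive only when the flag is absent.
var (
	Version   = "dev"
	GitCommit = "unknown"
)

func main() {
	fmt.Printf("Version: %s\nCommit ID: %s\n", Version, GitCommit)
}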
cmd: func Init()

@@ -1,6 +1,7 @@
 package cmd

 import (
+	"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_46_0"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -16,7 +17,14 @@ func Init() {
 	bootstrap.InitConfig()
 	bootstrap.Log()
 	bootstrap.InitDB()
+
+	if v3_46_0.IsLegacyRoleDetected() {
+		utils.Log.Warnf("Detected legacy role format, executing ConvertLegacyRoles patch early...")
+		v3_46_0.ConvertLegacyRoles()
+	}
+
 	data.InitData()
+	bootstrap.InitStreamLimit()
 	bootstrap.InitIndex()
 	bootstrap.InitUpgradePatch()
 }
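The ordering in this hunk is the point: the legacy-role conversion runs after InitDB (so records are readable) but before data.InitData (so normal init never sees the old shape). A minimal sketch of that detect-then-convert gate, with hypothetical role types standing in for AList's models:

package main

import "log"

// Hypothetical stand-ins for stored role records.
type legacyRole struct{ Name string }
type role struct {
	Name       string
	PermScopes []string
}

var store = []any{legacyRole{Name: "admin"}} // pretend DB contents

// isLegacyDetected reports whether any record still uses the old shape.
func isLegacyDetected() bool {
	for _, r := range store {
		if _, ok := r.(legacyRole); ok {
			return true
		}
	}
	return false
}

// convertLegacyRoles rewrites old records in place before normal init reads them.
func convertLegacyRoles() {
	for i, r := range store {
		if lr, ok := r.(legacyRole); ok {
			store[i] = role{Name: lr.Name, PermScopes: []string{"base"}}
		}
	}
}

func main() {
	// Mirrors the ordering in Init(): detect, convert, then continue startup.
	if isLegacyDetected() {
		log.Println("Detected legacy role format, converting early...")
		convertLegacyRoles()
	}
	log.Printf("init continues with %d role records", len(store))
}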
cmd: LangCmd

@@ -12,6 +12,7 @@ import (
 	"strings"

 	_ "github.com/alist-org/alist/v3/drivers"
+	"github.com/alist-org/alist/v3/internal/bootstrap"
 	"github.com/alist-org/alist/v3/internal/bootstrap/data"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/op"
@@ -137,6 +138,7 @@ var LangCmd = &cobra.Command{
 	Use:   "lang",
 	Short: "Generate language json file",
 	Run: func(cmd *cobra.Command, args []string) {
+		bootstrap.InitConfig()
 		err := os.MkdirAll("lang", 0777)
 		if err != nil {
 			utils.Log.Fatalf("failed create folder: %s", err.Error())
cmd: RootCmd

@@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{
 	Short: "A file list program that supports multiple storage.",
 	Long: `A file list program that supports multiple storage,
 built with love by Xhofe and friends in Go/Solid.js.
-Complete documentation is available at https://alist.nn.ci/`,
+Complete documentation is available at https://alistgo.com/`,
 }

 func Execute() {
cmd: ServerCmd

@@ -4,9 +4,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
-	"github.com/KirCute/sftpd-alist"
-	"github.com/alist-org/alist/v3/internal/fs"
 	"net"
 	"net/http"
 	"os"
@@ -16,14 +13,19 @@ import (
 	"syscall"
 	"time"

+	ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
+	"github.com/KirCute/sftpd-alist"
 	"github.com/alist-org/alist/v3/cmd/flags"
 	"github.com/alist-org/alist/v3/internal/bootstrap"
 	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server"
 	"github.com/gin-gonic/gin"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/h2c"
 )

 // ServerCmd represents the server command
@@ -47,11 +49,15 @@ the address is defined in config file`,
 		r := gin.New()
 		r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
 		server.Init(r)
+		var httpHandler http.Handler = r
+		if conf.Conf.Scheme.EnableH2c {
+			httpHandler = h2c.NewHandler(r, &http2.Server{})
+		}
 		var httpSrv, httpsSrv, unixSrv *http.Server
 		if conf.Conf.Scheme.HttpPort != -1 {
 			httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
 			utils.Log.Infof("start HTTP server @ %s", httpBase)
-			httpSrv = &http.Server{Addr: httpBase, Handler: r}
+			httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
 			go func() {
 				err := httpSrv.ListenAndServe()
 				if err != nil && !errors.Is(err, http.ErrServerClosed) {
@@ -72,7 +78,7 @@ the address is defined in config file`,
 		}
 		if conf.Conf.Scheme.UnixFile != "" {
 			utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
-			unixSrv = &http.Server{Handler: r}
+			unixSrv = &http.Server{Handler: httpHandler}
 			go func() {
 				listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile)
 				if err != nil {
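The new EnableH2c path wraps the gin engine so the same cleartext port can also speak HTTP/2, which helps when a reverse proxy talks h2c to the upstream. A minimal standalone sketch of the same wrapping (the ":5244" address and handler are placeholders, not AList's wiring):

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// r.Proto reports "HTTP/2.0" for h2c clients, "HTTP/1.1" otherwise.
		fmt.Fprintf(w, "proto: %s\n", r.Proto)
	})

	// h2c.NewHandler accepts prior-knowledge HTTP/2 cleartext requests
	// and falls back to plain HTTP/1.x for everything else.
	handler := h2c.NewHandler(mux, &http2.Server{})

	log.Fatal((&http.Server{Addr: ":5244", Handler: handler}).ListenAndServe())
}

A quick check from a shell: curl --http2-prior-knowledge http://localhost:5244/ should report proto HTTP/2.0, while a plain curl stays on HTTP/1.1.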
cmd: VersionCmd

@@ -6,6 +6,7 @@ package cmd
 import (
 	"fmt"
 	"os"
+	"runtime"

 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/spf13/cobra"
@@ -16,14 +17,15 @@ var VersionCmd = &cobra.Command{
 	Use:   "version",
 	Short: "Show current version of AList",
 	Run: func(cmd *cobra.Command, args []string) {
+		goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)
+
 		fmt.Printf(`Built At: %s
 Go Version: %s
 Author: %s
 Commit ID: %s
 Version: %s
 WebVersion: %s
-`,
-			conf.BuiltAt, conf.GoVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
+`, conf.BuiltAt, goVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
 		os.Exit(0)
 	},
 }
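This is the other half of the GoVersion ldflag removal: the toolchain string is now derived at runtime, so nothing needs to be injected at link time and the value can never drift from the compiler that actually built the binary. The equivalent standalone snippet:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Prints e.g. "go1.22.1 linux/amd64": toolchain, OS, and architecture of this build.
	goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)
	fmt.Println("Go Version:", goVersion)
}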
drivers/115: (*Pan115).Put

@@ -215,12 +215,12 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	var uploadResult *UploadResult
 	// 闪传失败,上传
 	if stream.GetSize() <= 10*utils.MB { // 文件大小小于10MB,改用普通模式上传
-		if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil {
+		if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
 			return nil, err
 		}
 	} else {
 		// 分片上传
-		if uploadResult, err = d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil {
+		if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
 			return nil, err
 		}
 	}
drivers/115: upload/download helpers

@@ -2,6 +2,7 @@ package _115

 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"crypto/tls"
 	"encoding/hex"
@@ -13,9 +14,11 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -140,7 +143,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 		return nil, err
 	}

-	bytes, err := crypto.Decode(string(result.EncodedData), key)
+	b, err := crypto.Decode(string(result.EncodedData), key)
 	if err != nil {
 		return nil, err
 	}
@@ -148,7 +151,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 	downloadInfo := struct {
 		Url string `json:"url"`
 	}{}
-	if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil {
+	if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
 		return nil, err
 	}
@@ -271,7 +274,7 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
 }

 // UploadByOSS use aliyun sdk to upload
-func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) {
+func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) {
 	ossToken, err := c.client.GetOSSToken()
 	if err != nil {
 		return nil, err
@@ -286,6 +289,10 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
 	}

 	var bodyBytes []byte
+	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	})
 	if err = bucket.PutObject(params.Object, r, append(
 		driver115.OssOption(params, ossToken),
 		oss.CallbackResult(&bodyBytes),
@@ -301,7 +308,8 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
 }

 // UploadByMultipart upload by mutipart blocks
-func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
+func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
+	dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
 	var (
 		chunks []oss.FileChunk
 		parts  []oss.UploadPart
@@ -313,7 +321,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
 		err error
 	)

-	tmpF, err := stream.CacheFullInTempFile()
+	tmpF, err := s.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
@@ -372,6 +380,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
 		quit <- struct{}{}
 	}()

+	completedNum := atomic.Int32{}
 	// consumers
 	for i := 0; i < options.ThreadsNum; i++ {
 		go func(threadId int) {
@@ -384,24 +393,28 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
 				var part oss.UploadPart // 出现错误就继续尝试,共尝试3次
 				for retry := 0; retry < 3; retry++ {
 					select {
+					case <-ctx.Done():
+						break
 					case <-ticker.C:
 						if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken
 							errCh <- errors.Wrap(err, "刷新token时出现错误")
 						}
 					default:
 					}

 					buf := make([]byte, chunk.Size)
 					if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
 						continue
 					}
-					if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+					if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)),
+						chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
 						break
 					}
 				}
 				if err != nil {
-					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
+				} else {
+					num := completedNum.Add(1)
+					up(float64(num) * 100.0 / float64(len(chunks)))
 				}
 				UploadedPartsCh <- part
 			}
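The thread running through these hunks is that ctx and an UpdateProgress callback now travel all the way to the OSS calls, with readers wrapped so cancellation, rate limiting, and progress reporting apply mid-transfer. ReaderUpdatingProgress and NewLimitedUploadStream belong to AList's driver package; this self-contained toy shows only the wrapping idea:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// progressReader reports cumulative percentage after every Read,
// the role ReaderUpdatingProgress plays in the driver package.
type progressReader struct {
	r      io.Reader
	total  int64
	done   int64
	update func(percent float64)
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.r.Read(b)
	p.done += int64(n)
	if p.total > 0 {
		p.update(float64(p.done) * 100.0 / float64(p.total))
	}
	return n, err
}

func main() {
	payload := bytes.Repeat([]byte("x"), 1<<20) // 1 MiB fake upload body
	pr := &progressReader{
		r:      bytes.NewReader(payload),
		total:  int64(len(payload)),
		update: func(pct float64) { fmt.Printf("\rupload: %6.2f%%", pct) },
	}
	// io.Discard stands in for bucket.PutObject's network writer.
	if _, err := io.Copy(io.Discard, pr); err != nil {
		fmt.Println("\nerror:", err)
		return
	}
	fmt.Println("\ndone")
}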
drivers/115_open/driver.go (new file, 335 lines)

package _115_open

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/cmd/flags"
	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/utils"
	sdk "github.com/xhofe/115-sdk-go"
	"golang.org/x/time/rate"
)

type Open115 struct {
	model.Storage
	Addition
	client  *sdk.Client
	limiter *rate.Limiter
}

func (d *Open115) Config() driver.Config {
	return config
}

func (d *Open115) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Open115) Init(ctx context.Context) error {
	d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken),
		sdk.WithAccessToken(d.Addition.AccessToken),
		sdk.WithOnRefreshToken(func(s1, s2 string) {
			d.Addition.AccessToken = s1
			d.Addition.RefreshToken = s2
			op.MustSaveDriverStorage(d)
		}))
	if flags.Debug || flags.Dev {
		d.client.SetDebug(true)
	}
	_, err := d.client.UserInfo(ctx)
	if err != nil {
		return err
	}
	if d.Addition.LimitRate > 0 {
		d.limiter = rate.NewLimiter(rate.Limit(d.Addition.LimitRate), 1)
	}
	return nil
}

func (d *Open115) WaitLimit(ctx context.Context) error {
	if d.limiter != nil {
		return d.limiter.Wait(ctx)
	}
	return nil
}

func (d *Open115) Drop(ctx context.Context) error {
	return nil
}

func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var res []model.Obj
	pageSize := int64(200)
	offset := int64(0)
	for {
		if err := d.WaitLimit(ctx); err != nil {
			return nil, err
		}
		resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
			CID:    dir.GetID(),
			Limit:  pageSize,
			Offset: offset,
			ASC:    d.Addition.OrderDirection == "asc",
			O:      d.Addition.OrderBy,
			// Cur: 1,
			ShowDir: true,
		})
		if err != nil {
			return nil, err
		}
		res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj {
			obj := Obj(src)
			return &obj
		})...)
		if len(res) >= int(resp.Count) {
			break
		}
		offset += pageSize
	}
	return res, nil
}

func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	var ua string
	if args.Header != nil {
		ua = args.Header.Get("User-Agent")
	}
	if ua == "" {
		ua = base.UserAgent
	}
	obj, ok := file.(*Obj)
	if !ok {
		return nil, fmt.Errorf("can't convert obj")
	}
	pc := obj.Pc
	resp, err := d.client.DownURL(ctx, pc, ua)
	if err != nil {
		return nil, err
	}
	u, ok := resp[obj.GetID()]
	if !ok {
		return nil, fmt.Errorf("can't get link")
	}
	return &model.Link{
		URL: u.URL.URL,
		Header: http.Header{
			"User-Agent": []string{ua},
		},
	}, nil
}

func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName)
	if err != nil {
		return nil, err
	}
	return &Obj{
		Fid:  resp.FileID,
		Pid:  parentDir.GetID(),
		Fn:   dirName,
		Fc:   "0",
		Upt:  time.Now().Unix(),
		Uet:  time.Now().Unix(),
		UpPt: time.Now().Unix(),
	}, nil
}

func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	_, err := d.client.Move(ctx, &sdk.MoveReq{
		FileIDs: srcObj.GetID(),
		ToCid:   dstDir.GetID(),
	})
	if err != nil {
		return nil, err
	}
	return srcObj, nil
}

func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	_, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
		FileID:  srcObj.GetID(),
		FileNma: newName,
	})
	if err != nil {
		return nil, err
	}
	obj, ok := srcObj.(*Obj)
	if ok {
		obj.Fn = newName
	}
	return srcObj, nil
}

func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if err := d.WaitLimit(ctx); err != nil {
		return nil, err
	}
	_, err := d.client.Copy(ctx, &sdk.CopyReq{
		PID:     dstDir.GetID(),
		FileID:  srcObj.GetID(),
		NoDupli: "1",
	})
	if err != nil {
		return nil, err
	}
	return srcObj, nil
}

func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
	if err := d.WaitLimit(ctx); err != nil {
		return err
	}
	_obj, ok := obj.(*Obj)
	if !ok {
		return fmt.Errorf("can't convert obj")
	}
	_, err := d.client.DelFile(ctx, &sdk.DelFileReq{
		FileIDs:  _obj.GetID(),
		ParentID: _obj.Pid,
	})
	if err != nil {
		return err
	}
	return nil
}

func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	if err := d.WaitLimit(ctx); err != nil {
		return err
	}
	tempF, err := file.CacheFullInTempFile()
	if err != nil {
		return err
	}
	// cal full sha1
	sha1, err := utils.HashReader(utils.SHA1, tempF)
	if err != nil {
		return err
	}
	_, err = tempF.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	// pre 128k sha1
	sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024))
	if err != nil {
		return err
	}
	_, err = tempF.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	// 1. Init
	resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
		FileName: file.GetName(),
		FileSize: file.GetSize(),
		Target:   dstDir.GetID(),
		FileID:   strings.ToUpper(sha1),
		PreID:    strings.ToUpper(sha1128k),
	})
	if err != nil {
		return err
	}
	if resp.Status == 2 {
		return nil
	}
	// 2. two way verify
	if utils.SliceContains([]int{6, 7, 8}, resp.Status) {
		signCheck := strings.Split(resp.SignCheck, "-") // "sign_check": "2392148-2392298" 取2392148-2392298之间的内容(包含2392148、2392298)的sha1
		start, err := strconv.ParseInt(signCheck[0], 10, 64)
		if err != nil {
			return err
		}
		end, err := strconv.ParseInt(signCheck[1], 10, 64)
		if err != nil {
			return err
		}
		_, err = tempF.Seek(start, io.SeekStart)
		if err != nil {
			return err
		}
		signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1))
		if err != nil {
			return err
		}
		_, err = tempF.Seek(0, io.SeekStart)
		if err != nil {
			return err
		}
		resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{
			FileName: file.GetName(),
			FileSize: file.GetSize(),
			Target:   dstDir.GetID(),
			FileID:   strings.ToUpper(sha1),
			PreID:    strings.ToUpper(sha1128k),
			SignKey:  resp.SignKey,
			SignVal:  strings.ToUpper(signVal),
		})
		if err != nil {
			return err
		}
		if resp.Status == 2 {
			return nil
		}
	}
	// 3. get upload token
	tokenResp, err := d.client.UploadGetToken(ctx)
	if err != nil {
		return err
	}
	// 4. upload
	err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp)
	if err != nil {
		return err
	}
	return nil
}

// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// 	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// 	return nil, errs.NotImplement
// }

// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// 	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// 	return nil, errs.NotImplement
// }

// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// 	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// 	return nil, errs.NotImplement
// }

// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// 	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// 	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// 	// return errs.NotImplement to use an internal archive tool
// 	return nil, errs.NotImplement
// }

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Open115)(nil)
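The status 6/7/8 branch in Put implements 115's second-pass verification: the server returns a byte range like "2392148-2392298" in sign_check and expects the SHA1 of exactly those bytes, inclusive of both endpoints, back as sign_val. A standalone sketch of that ranged hash over a seekable file (function and file names here are illustrative, not part of the SDK):

package main

import (
	"crypto/sha1"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
)

// rangedSHA1 hashes bytes [start, end] (inclusive) of f, mirroring the
// sign_check handling in Put above.
func rangedSHA1(f *os.File, signCheck string) (string, error) {
	parts := strings.Split(signCheck, "-")
	start, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return "", err
	}
	end, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return "", err
	}
	if _, err := f.Seek(start, io.SeekStart); err != nil {
		return "", err
	}
	h := sha1.New()
	// end-start+1 bytes because both endpoints are included.
	if _, err := io.Copy(h, io.LimitReader(f, end-start+1)); err != nil {
		return "", err
	}
	return strings.ToUpper(fmt.Sprintf("%x", h.Sum(nil))), nil
}

func main() {
	f, _ := os.CreateTemp("", "sign")
	defer os.Remove(f.Name())
	f.WriteString("0123456789abcdef")
	sig, err := rangedSHA1(f, "4-9") // SHA1 of "456789"
	fmt.Println(sig, err)
}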
drivers/115_open/meta.go (new file, 37 lines)

package _115_open

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	driver.RootID
	// define other
	RefreshToken   string  `json:"refresh_token" required:"true"`
	OrderBy        string  `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
	OrderDirection string  `json:"order_direction" type:"select" options:"asc,desc"`
	LimitRate      float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
	AccessToken    string
}

var config = driver.Config{
	Name:              "115 Open",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "0",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Open115{}
	})
}
drivers/115_open/types.go (new file, 59 lines)

package _115_open

import (
	"time"

	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	sdk "github.com/xhofe/115-sdk-go"
)

type Obj sdk.GetFilesResp_File

// Thumb implements model.Thumb.
func (o *Obj) Thumb() string {
	return o.Thumbnail
}

// CreateTime implements model.Obj.
func (o *Obj) CreateTime() time.Time {
	return time.Unix(o.UpPt, 0)
}

// GetHash implements model.Obj.
func (o *Obj) GetHash() utils.HashInfo {
	return utils.NewHashInfo(utils.SHA1, o.Sha1)
}

// GetID implements model.Obj.
func (o *Obj) GetID() string {
	return o.Fid
}

// GetName implements model.Obj.
func (o *Obj) GetName() string {
	return o.Fn
}

// GetPath implements model.Obj.
func (o *Obj) GetPath() string {
	return ""
}

// GetSize implements model.Obj.
func (o *Obj) GetSize() int64 {
	return o.FS
}

// IsDir implements model.Obj.
func (o *Obj) IsDir() bool {
	return o.Fc == "0"
}

// ModTime implements model.Obj.
func (o *Obj) ModTime() time.Time {
	return time.Unix(o.Upt, 0)
}

var _ model.Obj = (*Obj)(nil)
var _ model.Thumb = (*Obj)(nil)
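types.go rests on a Go idiom worth noting: `type Obj sdk.GetFilesResp_File` declares a distinct named type with the SDK struct's exact fields, so the driver can attach model.Obj methods without a wrapper struct or field copying, and List converts with a plain `Obj(src)` expression. A minimal sketch of the same idiom with toy types:

package main

import "fmt"

// Pretend this struct comes from a third-party SDK we cannot modify.
type sdkFile struct {
	Fn string
	FS int64
}

// Interface the rest of the app consumes.
type object interface {
	GetName() string
	GetSize() int64
}

// obj is a defined type over sdkFile: same memory layout, fresh method set.
type obj sdkFile

func (o *obj) GetName() string { return o.Fn }
func (o *obj) GetSize() int64  { return o.FS }

func main() {
	src := sdkFile{Fn: "report.pdf", FS: 2048}
	o := obj(src) // zero-cost conversion between identical underlying types
	var v object = &o
	fmt.Println(v.GetName(), v.GetSize())
}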
140
drivers/115_open/upload.go
Normal file
140
drivers/115_open/upload.go
Normal file
@@ -0,0 +1,140 @@
+package _115_open
+
+import (
+	"context"
+	"encoding/base64"
+	"io"
+	"time"
+
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/avast/retry-go"
+	sdk "github.com/xhofe/115-sdk-go"
+)
+
+func calPartSize(fileSize int64) int64 {
+	var partSize int64 = 20 * utils.MB
+	if fileSize > partSize {
+		if fileSize > 1*utils.TB { // file Size over 1TB
+			partSize = 5 * utils.GB // file part size 5GB
+		} else if fileSize > 768*utils.GB { // over 768GB
+			partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 parts
+		} else if fileSize > 512*utils.GB { // over 512GB
+			partSize = 82463373 // ≈ 78.6432MB
+		} else if fileSize > 384*utils.GB { // over 384GB
+			partSize = 54975582 // ≈ 52.4288MB
+		} else if fileSize > 256*utils.GB { // over 256GB
+			partSize = 41231687 // ≈ 39.3216MB
+		} else if fileSize > 128*utils.GB { // over 128GB
+			partSize = 27487791 // ≈ 26.2144MB
+		}
+	}
+	return partSize
+}
+
+func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
+	ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
+	if err != nil {
+		return err
+	}
+	bucket, err := ossClient.Bucket(initResp.Bucket)
+	if err != nil {
+		return err
+	}
+
+	err = bucket.PutObject(initResp.Object, tempF,
+		oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
+		oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
+	)
+
+	return err
+}
+
+// type CallbackResult struct {
+// 	State   bool   `json:"state"`
+// 	Code    int    `json:"code"`
+// 	Message string `json:"message"`
+// 	Data    struct {
+// 		PickCode string `json:"pick_code"`
+// 		FileName string `json:"file_name"`
+// 		FileSize int64  `json:"file_size"`
+// 		FileID   string `json:"file_id"`
+// 		ThumbURL string `json:"thumb_url"`
+// 		Sha1     string `json:"sha1"`
+// 		Aid      int    `json:"aid"`
+// 		Cid      string `json:"cid"`
+// 	} `json:"data"`
+// }
+
+func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
+	fileSize := stream.GetSize()
+	chunkSize := calPartSize(fileSize)
+
+	ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
+	if err != nil {
+		return err
+	}
+	bucket, err := ossClient.Bucket(initResp.Bucket)
+	if err != nil {
+		return err
+	}
+
+	imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential())
+	if err != nil {
+		return err
+	}
+
+	partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
+	parts := make([]oss.UploadPart, partNum)
+	offset := int64(0)
+	for i := int64(1); i <= partNum; i++ {
+		if utils.IsCanceled(ctx) {
+			return ctx.Err()
+		}
+
+		partSize := chunkSize
+		if i == partNum {
+			partSize = fileSize - (i-1)*chunkSize
+		}
+		rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+		err = retry.Do(func() error {
+			_ = rd.Reset()
+			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+			part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
+			if err != nil {
+				return err
+			}
+			parts[i-1] = part
+			return nil
+		},
+			retry.Attempts(3),
+			retry.DelayType(retry.BackOffDelay),
+			retry.Delay(time.Second))
+		if err != nil {
+			return err
+		}
+
+		if i == partNum {
+			offset = fileSize
+		} else {
+			offset += partSize
+		}
+		up(float64(offset) / float64(fileSize))
+	}
+
+	// callbackRespBytes := make([]byte, 1024)
+	_, err = bucket.CompleteMultipartUpload(
+		imur,
+		parts,
+		oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
+		oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
+		// oss.CallbackResult(&callbackRespBytes),
+	)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
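Note on the magic numbers in calPartSize: each tier keeps the part count at or below OSS's 10,000-part limit for a multipart upload. For instance, 109951163 bytes is ceil(1 TiB / 10,000), so a file just under 1 TiB still splits into at most 10,000 parts. A quick standalone check of that invariant (not part of the driver; utils.MB/GB/TB are 1024-based, mirrored here as local constants):

package main

import "fmt"

const (
	MB int64 = 1024 * 1024
	GB       = 1024 * MB
	TB       = 1024 * GB
)

func calPartSize(fileSize int64) int64 {
	var partSize int64 = 20 * MB
	if fileSize > partSize {
		switch {
		case fileSize > 1*TB:
			partSize = 5 * GB
		case fileSize > 768*GB:
			partSize = 109951163
		case fileSize > 512*GB:
			partSize = 82463373
		case fileSize > 384*GB:
			partSize = 54975582
		case fileSize > 256*GB:
			partSize = 41231687
		case fileSize > 128*GB:
			partSize = 27487791
		}
	}
	return partSize
}

func main() {
	for _, size := range []int64{100 * GB, 200 * GB, 300 * GB, 500 * GB, 700 * GB, 1 * TB} {
		ps := calPartSize(size)
		parts := (size + ps - 1) / ps // every tier stays <= 10000 parts
		fmt.Printf("size=%5dGB partSize=%10d parts=%5d\n", size/GB, ps, parts)
	}
}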
3 drivers/115_open/util.go Normal file
@@ -0,0 +1,3 @@
+package _115_open
+
+// do others that not defined in Driver interface
@@ -2,11 +2,8 @@ package _123
 
 import (
 	"context"
-	"crypto/md5"
 	"encoding/base64"
-	"encoding/hex"
 	"fmt"
-	"io"
 	"net/http"
 	"net/url"
 	"sync"
@@ -18,6 +15,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
@@ -185,32 +183,22 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
 	}
 }
 
-func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	// const DEFAULT int64 = 10485760
-	h := md5.New()
-	// need to calculate md5 of the full content
-	tempFile, err := stream.CacheFullInTempFile()
-	if err != nil {
-		return err
-	}
-	defer func() {
-		_ = tempFile.Close()
-	}()
-	if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
-		return err
-	}
-	_, err = tempFile.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	etag := hex.EncodeToString(h.Sum(nil))
+func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+	etag := file.GetHash().GetHash(utils.MD5)
+	var err error
+	if len(etag) < utils.MD5.Width {
+		_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
+		if err != nil {
+			return err
+		}
+	}
 	data := base.Json{
 		"driveId":      0,
 		"duplicate":    2, // 2->覆盖 1->重命名 0->默认
 		"etag":         etag,
-		"fileName":     stream.GetName(),
+		"fileName":     file.GetName(),
 		"parentFileId": dstDir.GetID(),
-		"size":         stream.GetSize(),
+		"size":         file.GetSize(),
 		"type":         0,
 	}
 	var resp UploadResp
@@ -225,7 +213,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		return nil
 	}
 	if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
-		err = d.newUpload(ctx, &resp, stream, tempFile, up)
+		err = d.newUpload(ctx, &resp, file, up)
 		return err
 	} else {
 		cfg := &aws.Config{
@@ -239,15 +227,21 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			return err
 		}
 		uploader := s3manager.NewUploader(s)
-		if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-			uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+		if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+			uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
 		}
 		input := &s3manager.UploadInput{
 			Bucket: &resp.Data.Bucket,
 			Key:    &resp.Data.Key,
-			Body:   tempFile,
+			Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+				Reader:         file,
+				UpdateProgress: up,
+			}),
 		}
 		_, err = uploader.UploadWithContext(ctx, input)
+		if err != nil {
+			return err
+		}
 	}
 	_, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
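Note: the Put rewrite above drops the cache-then-rehash sequence (cache to a temp file, copy through an md5.Hash, seek back) in favor of stream.CacheFullInTempFileAndHash, which hashes while it caches, so the payload is read once instead of twice. A minimal sketch of the single-pass pattern under the hood — cacheAndHash is a hypothetical stand-in for AList's helper in internal/stream:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// cacheAndHash copies r into a temp file while feeding the same bytes to an
// MD5 hasher via io.MultiWriter, so the content is read exactly once.
func cacheAndHash(r io.Reader) (*os.File, string, error) {
	f, err := os.CreateTemp("", "cache-*")
	if err != nil {
		return nil, "", err
	}
	h := md5.New()
	if _, err := io.Copy(io.MultiWriter(f, h), r); err != nil {
		f.Close()
		return nil, "", err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		f.Close()
		return nil, "", err
	}
	return f, hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	f, sum, err := cacheAndHash(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println(sum) // 5d41402abc4b2a76b9719d911017c592
}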
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"math"
 	"net/http"
 	"strconv"
 
@@ -69,15 +68,25 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 	return err
 }
 
-func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
-	chunkSize := int64(1024 * 1024 * 16)
+func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
+	tmpF, err := file.CacheFullInTempFile()
+	if err != nil {
+		return err
+	}
 	// fetch s3 pre signed urls
-	chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
+	size := file.GetSize()
+	chunkSize := min(size, 16*utils.MB)
+	chunkCount := int(size / chunkSize)
+	lastChunkSize := size % chunkSize
+	if lastChunkSize > 0 {
+		chunkCount++
+	} else {
+		lastChunkSize = chunkSize
+	}
 	// only 1 batch is allowed
-	isMultipart := chunkCount > 1
 	batchSize := 1
 	getS3UploadUrl := d.getS3Auth
-	if isMultipart {
+	if chunkCount > 1 {
 		batchSize = 10
 		getS3UploadUrl = d.getS3PreSignedUrls
 	}
@@ -86,10 +95,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			return ctx.Err()
 		}
 		start := i
-		end := i + batchSize
-		if end > chunkCount+1 {
-			end = chunkCount + 1
-		}
+		end := min(i+batchSize, chunkCount+1)
 		s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
 		if err != nil {
 			return err
@@ -101,9 +107,9 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		}
 		curSize := chunkSize
 		if j == chunkCount {
-			curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
+			curSize = lastChunkSize
 		}
-		err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
+		err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
 		if err != nil {
 			return err
 		}
@@ -114,12 +120,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 	return d.completeS3(ctx, upReq, file, chunkCount > 1)
 }
 
-func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
+func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
 	uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
 	if uploadUrl == "" {
 		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
 	}
-	req, err := http.NewRequest("PUT", uploadUrl, reader)
+	req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
 	if err != nil {
 		return err
 	}
@@ -142,6 +148,7 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
 		}
 		s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
 		// retry
+		reader.Seek(0, io.SeekStart)
 		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
 	}
 	if res.StatusCode != http.StatusOK {
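Note: newUpload now caches the stream once and hands each chunk an io.NewSectionReader over the cached temp file, which is what makes the added reader.Seek(0, io.SeekStart) before a retried PUT possible; the old io.LimitReader over a shared, forward-only reader could not rewind. A standalone sketch of the chunk arithmetic and per-chunk section readers, using dummy in-memory data:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	const chunkSize int64 = 16
	data := strings.Repeat("x", 40) // pretend 40-byte file, 16-byte chunks
	size := int64(len(data))

	// Integer division plus an explicit remainder, as in the new code:
	// 40/16 = 2 full chunks, remainder 8 -> 3 chunks of 16/16/8 bytes.
	chunkCount := size / chunkSize
	lastChunkSize := size % chunkSize
	if lastChunkSize > 0 {
		chunkCount++
	} else {
		lastChunkSize = chunkSize
	}

	src := strings.NewReader(data)
	for j := int64(1); j <= chunkCount; j++ {
		curSize := chunkSize
		if j == chunkCount {
			curSize = lastChunkSize
		}
		// Each chunk gets an independent, seekable view of the cached file;
		// on a retry the caller can simply Seek(0, io.SeekStart).
		sec := io.NewSectionReader(src, chunkSize*(j-1), curSize)
		n, _ := io.Copy(io.Discard, sec)
		fmt.Printf("chunk %d: %d bytes\n", j, n)
	}
}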
@@ -161,12 +161,12 @@ func (d *Pan123) login() error {
 	}
 	res, err := base.RestyClient.R().
 		SetHeaders(map[string]string{
 			"origin":      "https://www.123pan.com",
 			"referer":     "https://www.123pan.com/",
-			"user-agent":  "Dart/2.19(dart:io)-alist",
+			//"user-agent":  "Dart/2.19(dart:io)-alist",
 			"platform":    "web",
 			"app-version": "3",
-			//"user-agent":  base.UserAgent,
+			"user-agent": base.UserAgent,
 		}).
 		SetBody(body).Post(SignIn)
 	if err != nil {
@@ -202,7 +202,7 @@ do:
 			"origin":        "https://www.123pan.com",
 			"referer":       "https://www.123pan.com/",
 			"authorization": "Bearer " + d.AccessToken,
-			"user-agent":    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
+			"user-agent":    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
 			"platform":      "web",
 			"app-version":   "3",
 			//"user-agent":    base.UserAgent,
@@ -2,6 +2,7 @@ package _139
 
 import (
 	"context"
+	"encoding/xml"
 	"fmt"
 	"io"
 	"net/http"
@@ -13,6 +14,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	streamPkg "github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/cron"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/pkg/utils/random"
@@ -22,9 +24,10 @@ import (
 type Yun139 struct {
 	model.Storage
 	Addition
 	cron    *cron.Cron
 	Account string
 	ref     *Yun139
+	PersonalCloudHost string
 }
 
 func (d *Yun139) Config() driver.Config {
@@ -37,13 +40,36 @@ func (d *Yun139) GetAddition() driver.Additional {
 
 func (d *Yun139) Init(ctx context.Context) error {
 	if d.ref == nil {
-		if d.Authorization == "" {
+		if len(d.Authorization) == 0 {
 			return fmt.Errorf("authorization is empty")
 		}
 		err := d.refreshToken()
 		if err != nil {
 			return err
 		}
+
+		// Query Route Policy
+		var resp QueryRoutePolicyResp
+		_, err = d.requestRoute(base.Json{
+			"userInfo": base.Json{
+				"userType":    1,
+				"accountType": 1,
+				"accountName": d.Account},
+			"modAddrType": 1,
+		}, &resp)
+		if err != nil {
+			return err
+		}
+		for _, policyItem := range resp.Data.RoutePolicyList {
+			if policyItem.ModName == "personal" {
+				d.PersonalCloudHost = policyItem.HttpsUrl
+				break
+			}
+		}
+		if len(d.PersonalCloudHost) == 0 {
+			return fmt.Errorf("PersonalCloudHost is empty")
+		}
+
 		d.cron = cron.NewCron(time.Hour * 12)
 		d.cron.Do(func() {
 			err := d.refreshToken()
@@ -69,28 +95,6 @@ func (d *Yun139) Init(ctx context.Context) error {
 	default:
 		return errs.NotImplement
 	}
-	// if d.ref != nil {
-	// 	return nil
-	// }
-	// decode, err := base64.StdEncoding.DecodeString(d.Authorization)
-	// if err != nil {
-	// 	return err
-	// }
-	// decodeStr := string(decode)
-	// splits := strings.Split(decodeStr, ":")
-	// if len(splits) < 2 {
-	// 	return fmt.Errorf("authorization is invalid, splits < 2")
-	// }
-	// d.Account = splits[1]
-	// _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{
-	// 	"qryUserExternInfoReq": base.Json{
-	// 		"commonAccountInfo": base.Json{
-	// 			"account":     d.getAccount(),
-	// 			"accountType": 1,
-	// 		},
-	// 	},
-	// }, nil)
-	// return err
 	return nil
 }
 
@@ -158,7 +162,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 			"type":           "folder",
 			"fileRenameMode": "force_rename",
 		}
-		pathname := "/hcy/file/create"
+		pathname := "/file/create"
 		_, err = d.personalPost(pathname, data, nil)
 	case MetaPersonal:
 		data := base.Json{
@@ -211,7 +215,7 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
 			"fileIds":        []string{srcObj.GetID()},
 			"toParentFileId": dstDir.GetID(),
 		}
-		pathname := "/hcy/file/batchMove"
+		pathname := "/file/batchMove"
 		_, err := d.personalPost(pathname, data, nil)
 		if err != nil {
 			return nil, err
@@ -288,7 +292,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e
 			"name":        newName,
 			"description": "",
 		}
-		pathname := "/hcy/file/update"
+		pathname := "/file/update"
 		_, err = d.personalPost(pathname, data, nil)
 	case MetaPersonal:
 		var data base.Json
@@ -388,7 +392,7 @@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 			"fileIds":        []string{srcObj.GetID()},
 			"toParentFileId": dstDir.GetID(),
 		}
-		pathname := "/hcy/file/batchCopy"
+		pathname := "/file/batchCopy"
 		_, err := d.personalPost(pathname, data, nil)
 		return err
 	case MetaPersonal:
@@ -428,7 +432,7 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
 		data := base.Json{
 			"fileIds": []string{obj.GetID()},
 		}
-		pathname := "/hcy/recyclebin/batchTrash"
+		pathname := "/recyclebin/batchTrash"
 		_, err := d.personalPost(pathname, data, nil)
 		return err
 	case MetaGroup:
@@ -501,23 +505,15 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
 	}
 }
 
-const (
-	_  = iota //ignore first value by assigning to blank identifier
-	KB = 1 << (10 * iota)
-	MB
-	GB
-	TB
-)
-
 func (d *Yun139) getPartSize(size int64) int64 {
 	if d.CustomUploadPartSize != 0 {
 		return d.CustomUploadPartSize
 	}
 	// 网盘对于分片数量存在上限
-	if size/GB > 30 {
-		return 512 * MB
+	if size/utils.GB > 30 {
+		return 512 * utils.MB
 	}
-	return 100 * MB
+	return 100 * utils.MB
 }
 
 func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
@@ -525,29 +521,28 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	case MetaPersonalNew:
 		var err error
 		fullHash := stream.GetHash().GetHash(utils.SHA256)
-		if len(fullHash) <= 0 {
-			tmpF, err := stream.CacheFullInTempFile()
-			if err != nil {
-				return err
-			}
-			fullHash, err = utils.HashFile(utils.SHA256, tmpF)
+		if len(fullHash) != utils.SHA256.Width {
+			_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256)
 			if err != nil {
 				return err
 			}
 		}
 
-		partInfos := []PartInfo{}
-		var partSize = d.getPartSize(stream.GetSize())
-		part := (stream.GetSize() + partSize - 1) / partSize
-		if part == 0 {
+		size := stream.GetSize()
+		var partSize = d.getPartSize(size)
+		part := size / partSize
+		if size%partSize > 0 {
+			part++
+		} else if part == 0 {
 			part = 1
 		}
+		partInfos := make([]PartInfo, 0, part)
 		for i := int64(0); i < part; i++ {
 			if utils.IsCanceled(ctx) {
 				return ctx.Err()
 			}
 			start := i * partSize
-			byteSize := stream.GetSize() - start
+			byteSize := size - start
 			if byteSize > partSize {
 				byteSize = partSize
 			}
@@ -575,13 +570,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			"contentType":    "application/octet-stream",
 			"parallelUpload": false,
 			"partInfos":      firstPartInfos,
-			"size":           stream.GetSize(),
+			"size":           size,
 			"parentFileId":   dstDir.GetID(),
 			"name":           stream.GetName(),
 			"type":           "file",
 			"fileRenameMode": "auto_rename",
 		}
-		pathname := "/hcy/file/create"
+		pathname := "/file/create"
 		var resp PersonalUploadResp
 		_, err = d.personalPost(pathname, data, &resp)
 		if err != nil {
@@ -618,7 +613,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 				"accountType": 1,
 			},
 		}
-		pathname := "/hcy/file/getUploadUrl"
+		pathname := "/file/getUploadUrl"
 		var moreresp PersonalUploadUrlResp
 		_, err = d.personalPost(pathname, moredata, &moreresp)
 		if err != nil {
@@ -628,14 +623,15 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		}
 
 		// Progress
-		p := driver.NewProgress(stream.GetSize(), up)
+		p := driver.NewProgress(size, up)
 
+		rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 		// 上传所有分片
 		for _, uploadPartInfo := range uploadPartInfos {
 			index := uploadPartInfo.PartNumber - 1
 			partSize := partInfos[index].PartSize
 			log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
-			limitReader := io.LimitReader(stream, partSize)
+			limitReader := io.LimitReader(rateLimited, partSize)
 
 			// Update Progress
 			r := io.TeeReader(limitReader, p)
@@ -668,7 +664,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			"fileId":   resp.Data.FileId,
 			"uploadId": resp.Data.UploadId,
 		}
-		_, err = d.personalPost("/hcy/file/complete", data, nil)
+		_, err = d.personalPost("/file/complete", data, nil)
 		if err != nil {
 			return err
 		}
@@ -738,14 +734,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 				break
 			}
 		}
+		var reportSize int64
+		if d.ReportRealSize {
+			reportSize = stream.GetSize()
+		} else {
+			reportSize = 0
+		}
 		data := base.Json{
 			"manualRename": 2,
 			"operation":    0,
 			"fileCount":    1,
-			"totalSize":    0, // 去除上传大小限制
+			"totalSize":    reportSize,
 			"uploadContentList": []base.Json{{
 				"contentName": stream.GetName(),
-				"contentSize": 0, // 去除上传大小限制
+				"contentSize": reportSize,
 				// "digest": "5a3231986ce7a6b46e408612d385bafa"
 			}},
 			"parentCatalogID": dstDir.GetID(),
@@ -763,10 +765,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			"operation": 0,
 			"path":      path.Join(dstDir.GetPath(), dstDir.GetID()),
 			"seqNo":     random.String(32), //序列号不能为空
-			"totalSize": 0,
+			"totalSize": reportSize,
 			"uploadContentList": []base.Json{{
 				"contentName": stream.GetName(),
-				"contentSize": 0,
+				"contentSize": reportSize,
 				// "digest": "5a3231986ce7a6b46e408612d385bafa"
 			}},
 		})
@@ -777,27 +779,30 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		if err != nil {
 			return err
 		}
+		if resp.Data.Result.ResultCode != "0" {
+			return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc)
+		}
+
+		size := stream.GetSize()
 		// Progress
-		p := driver.NewProgress(stream.GetSize(), up)
-
-		var partSize = d.getPartSize(stream.GetSize())
-		part := (stream.GetSize() + partSize - 1) / partSize
-		if part == 0 {
+		p := driver.NewProgress(size, up)
+		var partSize = d.getPartSize(size)
+		part := size / partSize
+		if size%partSize > 0 {
+			part++
+		} else if part == 0 {
 			part = 1
 		}
+		rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 		for i := int64(0); i < part; i++ {
 			if utils.IsCanceled(ctx) {
 				return ctx.Err()
 			}
 
 			start := i * partSize
-			byteSize := stream.GetSize() - start
-			if byteSize > partSize {
-				byteSize = partSize
-			}
+			byteSize := min(size-start, partSize)
 
-			limitReader := io.LimitReader(stream, byteSize)
+			limitReader := io.LimitReader(rateLimited, byteSize)
 			// Update Progress
 			r := io.TeeReader(limitReader, p)
 			req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
@@ -807,7 +812,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 
 			req = req.WithContext(ctx)
 			req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
-			req.Header.Set("contentSize", strconv.FormatInt(stream.GetSize(), 10))
+			req.Header.Set("contentSize", strconv.FormatInt(size, 10))
 			req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))
 			req.Header.Set("uploadtaskID", resp.Data.UploadResult.UploadTaskID)
 			req.Header.Set("rangeType", "0")
@@ -817,13 +822,23 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			if err != nil {
 				return err
 			}
-			_ = res.Body.Close()
-			log.Debugf("%+v", res)
 			if res.StatusCode != http.StatusOK {
+				res.Body.Close()
 				return fmt.Errorf("unexpected status code: %d", res.StatusCode)
 			}
+			bodyBytes, err := io.ReadAll(res.Body)
+			if err != nil {
+				return fmt.Errorf("error reading response body: %v", err)
+			}
+			var result InterLayerUploadResult
+			err = xml.Unmarshal(bodyBytes, &result)
+			if err != nil {
+				return fmt.Errorf("error parsing XML: %v", err)
+			}
+			if result.ResultCode != 0 {
+				return fmt.Errorf("upload failed with result code: %d, message: %s", result.ResultCode, result.Msg)
+			}
 		}
 
 		return nil
 	default:
 		return errs.NotImplement
@@ -841,7 +856,7 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{},
 	}
 	switch args.Method {
 	case "video_preview":
-		uri = "/hcy/videoPreview/getPreviewInfo"
+		uri = "/videoPreview/getPreviewInfo"
 	default:
 		return nil, errs.NotSupport
 	}
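Note: Init now resolves the personal-cloud host at runtime from the route-policy endpoint instead of hardcoding personal-kd-njs.yun.139.com, which is also why the /hcy prefix disappears from every personalPost path — the prefix becomes part of the resolved base URL. A reduced sketch of the selection step (the RoutePolicy struct is trimmed to the fields the loop actually touches; URLs are placeholders):

package main

import "fmt"

type RoutePolicy struct {
	ModName  string
	HttpsUrl string
}

// pickPersonalHost scans the policy list for the "personal" module and keeps
// its HTTPS endpoint, mirroring the loop added to Init.
func pickPersonalHost(policies []RoutePolicy) (string, error) {
	for _, p := range policies {
		if p.ModName == "personal" {
			return p.HttpsUrl, nil
		}
	}
	return "", fmt.Errorf("PersonalCloudHost is empty")
}

func main() {
	host, err := pickPersonalHost([]RoutePolicy{
		{ModName: "family", HttpsUrl: "https://family.example.invalid"},
		{ModName: "personal", HttpsUrl: "https://personal.example.invalid"},
	})
	fmt.Println(host, err)
}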
@@ -12,6 +12,8 @@ type Addition struct {
 	Type                 string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
 	CloudID              string `json:"cloud_id"`
 	CustomUploadPartSize int64  `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
+	ReportRealSize       bool   `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
+	UseLargeThumbnail    bool   `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`
 }
 
 var config = driver.Config{
@@ -143,6 +143,13 @@ type UploadResp struct {
 	} `json:"data"`
 }
 
+type InterLayerUploadResult struct {
+	XMLName    xml.Name `xml:"result"`
+	Text       string   `xml:",chardata"`
+	ResultCode int      `xml:"resultCode"`
+	Msg        string   `xml:"msg"`
+}
+
 type CloudContent struct {
 	ContentID string `json:"contentID"`
 	//Modifier string `json:"modifier"`
@@ -278,11 +285,30 @@ type PersonalUploadUrlResp struct {
 	}
 }
 
-type RefreshTokenResp struct {
-	XMLName     xml.Name `xml:"root"`
-	Return      string   `xml:"return"`
-	Token       string   `xml:"token"`
-	Expiretime  int32    `xml:"expiretime"`
-	AccessToken string   `xml:"accessToken"`
-	Desc        string   `xml:"desc"`
+type QueryRoutePolicyResp struct {
+	Success bool   `json:"success"`
+	Code    string `json:"code"`
+	Message string `json:"message"`
+	Data    struct {
+		RoutePolicyList []struct {
+			SiteID      string `json:"siteID"`
+			SiteCode    string `json:"siteCode"`
+			ModName     string `json:"modName"`
+			HttpUrl     string `json:"httpUrl"`
+			HttpsUrl    string `json:"httpsUrl"`
+			EnvID       string `json:"envID"`
+			ExtInfo     string `json:"extInfo"`
+			HashName    string `json:"hashName"`
+			ModAddrType int    `json:"modAddrType"`
+		} `json:"routePolicyList"`
+	} `json:"data"`
+}
+
+type RefreshTokenResp struct {
+	XMLName     xml.Name `xml:"root"`
+	Return      string   `xml:"return"`
+	Token       string   `xml:"token"`
+	Expiretime  int32    `xml:"expiretime"`
+	AccessToken string   `xml:"accessToken"`
+	Desc        string   `xml:"desc"`
 }
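Note: InterLayerUploadResult exists because the 139 upload server answers the part POST in XML rather than JSON, so the new code in driver.go decodes it with encoding/xml. A self-contained decoding example — the sample payload is made up, but the struct tags mirror the ones added in types.go:

package main

import (
	"encoding/xml"
	"fmt"
)

type InterLayerUploadResult struct {
	XMLName    xml.Name `xml:"result"`
	Text       string   `xml:",chardata"`
	ResultCode int      `xml:"resultCode"`
	Msg        string   `xml:"msg"`
}

func main() {
	payload := []byte(`<result><resultCode>0</resultCode><msg>success</msg></result>`)
	var r InterLayerUploadResult
	if err := xml.Unmarshal(payload, &r); err != nil {
		panic(err)
	}
	// A non-zero resultCode is what the driver turns into an upload error.
	fmt.Printf("code=%d msg=%s\n", r.ResultCode, r.Msg)
}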
@@ -67,6 +67,7 @@ func (d *Yun139) refreshToken() error {
 	if len(splits) < 3 {
 		return fmt.Errorf("authorization is invalid, splits < 3")
 	}
+	d.Account = splits[1]
 	strs := strings.Split(splits[2], "|")
 	if len(strs) < 4 {
 		return fmt.Errorf("authorization is invalid, strs < 4")
@@ -156,6 +157,64 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba
 	}
 	return res.Body(), nil
 }
+
+func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error) {
+	url := "https://user-njs.yun.139.com/user/route/qryRoutePolicy"
+	req := base.RestyClient.R()
+	randStr := random.String(16)
+	ts := time.Now().Format("2006-01-02 15:04:05")
+	callback := func(req *resty.Request) {
+		req.SetBody(data)
+	}
+	if callback != nil {
+		callback(req)
+	}
+	body, err := utils.Json.Marshal(req.Body)
+	if err != nil {
+		return nil, err
+	}
+	sign := calSign(string(body), ts, randStr)
+	svcType := "1"
+	if d.isFamily() {
+		svcType = "2"
+	}
+	req.SetHeaders(map[string]string{
+		"Accept":         "application/json, text/plain, */*",
+		"CMS-DEVICE":     "default",
+		"Authorization":  "Basic " + d.getAuthorization(),
+		"mcloud-channel": "1000101",
+		"mcloud-client":  "10701",
+		//"mcloud-route": "001",
+		"mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
+		//"mcloud-skey":"",
+		"mcloud-version":         "7.14.0",
+		"Origin":                 "https://yun.139.com",
+		"Referer":                "https://yun.139.com/w/",
+		"x-DeviceInfo":           "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
+		"x-huawei-channelSrc":    "10000034",
+		"x-inner-ntwk":           "2",
+		"x-m4c-caller":           "PC",
+		"x-m4c-src":              "10002",
+		"x-SvcType":              svcType,
+		"Inner-Hcy-Router-Https": "1",
+	})
+
+	var e BaseResp
+	req.SetResult(&e)
+	res, err := req.Execute(http.MethodPost, url)
+	log.Debugln(res.String())
+	if !e.Success {
+		return nil, errors.New(e.Message)
+	}
+	if resp != nil {
+		err = utils.Json.Unmarshal(res.Body(), resp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res.Body(), nil
+}
 
 func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
 	return d.request(pathname, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(data)
@@ -390,7 +449,7 @@ func unicode(str string) string {
 }
 
 func (d *Yun139) personalRequest(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
-	url := "https://personal-kd-njs.yun.139.com" + pathname
+	url := d.getPersonalCloudHost() + pathname
 	req := base.RestyClient.R()
 	randStr := random.String(16)
 	ts := time.Now().Format("2006-01-02 15:04:05")
@@ -416,8 +475,6 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
 		"Mcloud-Route":   "001",
 		"Mcloud-Sign":    fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
 		"Mcloud-Version": "7.14.0",
-		"Origin":         "https://yun.139.com",
-		"Referer":        "https://yun.139.com/w/",
 		"x-DeviceInfo":        "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
 		"x-huawei-channelSrc": "10000034",
 		"x-inner-ntwk":        "2",
@@ -479,7 +536,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
 		"parentFileId": fileId,
 	}
 	var resp PersonalListResp
-	_, err := d.personalPost("/hcy/file/list", data, &resp)
+	_, err := d.personalPost("/file/list", data, &resp)
 	if err != nil {
 		return nil, err
 	}
@@ -499,7 +556,15 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
 		} else {
 			var Thumbnails = item.Thumbnails
 			var ThumbnailUrl string
-			if len(Thumbnails) > 0 {
+			if d.UseLargeThumbnail {
+				for _, thumb := range Thumbnails {
+					if strings.Contains(thumb.Style, "Large") {
+						ThumbnailUrl = thumb.Url
+						break
+					}
+				}
+			}
+			if ThumbnailUrl == "" && len(Thumbnails) > 0 {
 				ThumbnailUrl = Thumbnails[len(Thumbnails)-1].Url
 			}
 			f = &model.ObjThumb{
@@ -527,7 +592,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
 	data := base.Json{
 		"fileId": fileId,
 	}
-	res, err := d.personalPost("/hcy/file/getDownloadUrl",
+	res, err := d.personalPost("/file/getDownloadUrl",
 		data, nil)
 	if err != nil {
 		return "", err
@@ -552,3 +617,9 @@ func (d *Yun139) getAccount() string {
 	}
 	return d.Account
 }
+
+func (d *Yun139) getPersonalCloudHost() string {
+	if d.ref != nil {
+		return d.ref.getPersonalCloudHost()
+	}
+	return d.PersonalCloudHost
+}
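Note: getPersonalCloudHost follows the ref chain, so a storage that references another instance reuses the host that instance resolved in Init instead of querying the route policy again. A toy version of the delegation idiom (Drive and its fields are illustrative, not AList's types):

package main

import "fmt"

type Drive struct {
	host string
	ref  *Drive
}

func (d *Drive) Host() string {
	if d.ref != nil {
		return d.ref.Host() // follow the reference chain to the owner
	}
	return d.host
}

func main() {
	primary := &Drive{host: "https://personal.example.invalid"}
	secondary := &Drive{ref: primary}
	fmt.Println(secondary.Host()) // resolved once, shared by both
}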
@@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 		log.Debugf("uploadData: %+v", uploadData)
 		requestURL := uploadData.RequestURL
 		uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
-		req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData))
+		req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
 		if err != nil {
 			return err
 		}
@@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 			req.Header.Set(v[0:i], v[i+1:])
 		}
 		r, err := base.HttpClient.Do(req)
-		log.Debugf("%+v %+v", r, r.Request.Header)
-		r.Body.Close()
 		if err != nil {
 			return err
 		}
+		log.Debugf("%+v %+v", r, r.Request.Header)
+		_ = r.Body.Close()
 		up(float64(i) * 100 / float64(count))
 	}
 	fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
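Note: besides adding the rate-limited upload stream, this hunk fixes a nil-pointer hazard: http.Client.Do returns a nil *Response whenever err is non-nil, so the old code's r.Body.Close() before the error check could panic. The corrected ordering in isolation (the target host is deliberately unreachable so the error path fires):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://unreachable.example.invalid/")
	if err != nil {
		// Correct order: bail out before dereferencing resp, which is nil here.
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close() // only safe once err is known to be nil
	fmt.Println("status:", resp.Status)
}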
@@ -1,8 +1,8 @@
 package _189pc
 
 import (
-	"container/ring"
 	"context"
+	"fmt"
 	"net/http"
 	"strconv"
 	"strings"
@@ -14,6 +14,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
+	"github.com/google/uuid"
 )
 
 type Cloud189PC struct {
@@ -29,7 +30,7 @@ type Cloud189PC struct {
 
 	uploadThread int
 
-	familyTransferFolder    *ring.Ring
+	familyTransferFolder    *Cloud189Folder
 	cleanFamilyTransferFile func()
 
 	storageConfig driver.Config
@@ -48,9 +49,18 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
 }
 
 func (y *Cloud189PC) Init(ctx context.Context) (err error) {
-	// 兼容旧上传接口
-	y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")
+	y.storageConfig = config
+	if y.isFamily() {
+		// 兼容旧上传接口
+		if y.Addition.RapidUpload || y.Addition.UploadMethod == "old" {
+			y.storageConfig.NoOverwriteUpload = true
+		}
+	} else {
+		// 家庭云转存,不支持覆盖上传
+		if y.Addition.FamilyTransfer {
+			y.storageConfig.NoOverwriteUpload = true
+		}
+	}
 	// 处理个人云和家庭云参数
 	if y.isFamily() && y.RootFolderID == "-11" {
 		y.RootFolderID = ""
@@ -91,13 +101,14 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
 		}
 	}
 
-	// 创建中转文件夹,防止重名文件
+	// 创建中转文件夹
	if y.FamilyTransfer {
-		if y.familyTransferFolder, err = y.createFamilyTransferFolder(32); err != nil {
+		if err := y.createFamilyTransferFolder(); err != nil {
 			return err
 		}
 	}
 
+	// 清理转存文件节流
 	y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() {
 		if err := y.cleanFamilyTransfer(context.TODO()); err != nil {
 			utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
@@ -327,35 +338,49 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	if !isFamily && y.FamilyTransfer {
 		// 修改上传目标为家庭云文件夹
 		transferDstDir := dstDir
-		dstDir = (y.familyTransferFolder.Value).(*Cloud189Folder)
-		y.familyTransferFolder = y.familyTransferFolder.Next()
+		dstDir = y.familyTransferFolder
 
+		// 使用临时文件名
+		srcName := stream.GetName()
+		stream = &WrapFileStreamer{
+			FileStreamer: stream,
+			Name:         fmt.Sprintf("0%s.transfer", uuid.NewString()),
+		}
+
+		// 使用家庭云上传
 		isFamily = true
 		overwrite = false
 
 		defer func() {
 			if newObj != nil {
-				// 批量任务有概率删不掉
-				y.cleanFamilyTransferFile()
-
 				// 转存家庭云文件到个人云
 				err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true)
+				// 删除家庭云源文件
+				go y.Delete(context.TODO(), y.FamilyID, newObj)
+				// 批量任务有概率删不掉
+				go y.cleanFamilyTransferFile()
+				// 转存失败返回错误
+				if err != nil {
+					return
+				}
 
-				task := BatchTaskInfo{
-					FileId:   newObj.GetID(),
-					FileName: newObj.GetName(),
-					IsFolder: BoolToNumber(newObj.IsDir()),
-				}
-
-				// 删除源文件
-				if resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, task); err == nil {
-					y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
-					// 永久删除
-					if resp, err := y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, task); err == nil {
-						y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
-					}
-				}
-				newObj = nil
+				// 查找转存文件
+				var file *Cloud189File
+				file, err = y.findFileByName(context.TODO(), newObj.GetName(), transferDstDir.GetID(), false)
+				if err != nil {
+					if err == errs.ObjectNotFound {
+						err = fmt.Errorf("unknown error: No transfer file obtained %s", newObj.GetName())
+					}
+					return
 				}
 
+				// 重命名转存文件
+				newObj, err = y.Rename(context.TODO(), file, srcName)
+				if err != nil {
+					// 重命名失败删除源文件
+					_ = y.Delete(context.TODO(), "", file)
+				}
+				return
 			}
 		}()
 	}
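Note: Init now copies the package-level config into y.storageConfig before toggling NoOverwriteUpload (the comment 家庭云转存,不支持覆盖上传 means family-cloud transfer does not support overwrite upload), so per-mount flags are flipped on a value copy rather than on shared state. A sketch of that copy-then-toggle step — Config and Driver below are stand-ins for driver.Config and the driver type, not the real API:

package main

import "fmt"

type Config struct {
	Name              string
	NoOverwriteUpload bool
}

var config = Config{Name: "189CloudPC"}

type Driver struct {
	family        bool
	rapidUpload   bool
	storageConfig Config
}

func (d *Driver) Init() {
	d.storageConfig = config // value copy, not a pointer to the shared default
	if d.family && d.rapidUpload {
		d.storageConfig.NoOverwriteUpload = true
	}
}

func main() {
	a := &Driver{family: true, rapidUpload: true}
	b := &Driver{}
	a.Init()
	b.Init()
	// a's flag is set, b's is not, and the shared default stays untouched.
	fmt.Println(a.storageConfig.NoOverwriteUpload, b.storageConfig.NoOverwriteUpload, config.NoOverwriteUpload)
}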
@@ -18,6 +18,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils/random"
 )
 
@@ -208,3 +209,12 @@ func IF[V any](o bool, t V, f V) V {
 	}
 	return f
 }
+
+type WrapFileStreamer struct {
+	model.FileStreamer
+	Name string
+}
+
+func (w *WrapFileStreamer) GetName() string {
+	return w.Name
+}
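Note: WrapFileStreamer overrides only GetName by embedding model.FileStreamer; every other method is still promoted from the wrapped stream, which is what lets Put upload under a temporary "0<uuid>.transfer" name without touching the data path. A minimal demonstration of the idiom (Streamer and baseStream are toy stand-ins):

package main

import "fmt"

type Streamer interface {
	GetName() string
	GetSize() int64
}

type baseStream struct{}

func (baseStream) GetName() string { return "original.bin" }
func (baseStream) GetSize() int64  { return 42 }

type wrapStreamer struct {
	Streamer // promoted methods satisfy the interface
	Name     string
}

// GetName shadows only the embedded method; GetSize is still promoted.
func (w wrapStreamer) GetName() string { return w.Name }

func main() {
	var s Streamer = wrapStreamer{Streamer: baseStream{}, Name: "0deadbeef.transfer"}
	fmt.Println(s.GetName(), s.GetSize()) // 0deadbeef.transfer 42
}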
@@ -2,30 +2,32 @@ package _189pc
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"container/ring"
|
|
||||||
"context"
|
"context"
|
||||||
"crypto/md5"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/cookiejar"
|
"net/http/cookiejar"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"os"
|
||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sync/semaphore"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
"github.com/alist-org/alist/v3/internal/op"
|
||||||
"github.com/alist-org/alist/v3/internal/setting"
|
"github.com/alist-org/alist/v3/internal/setting"
|
||||||
|
"github.com/alist-org/alist/v3/internal/stream"
|
||||||
"github.com/alist-org/alist/v3/pkg/errgroup"
|
"github.com/alist-org/alist/v3/pkg/errgroup"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
|
|
||||||
@@ -174,8 +176,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
|
|||||||
}
|
}
|
||||||
|
|
||||||
var erron RespErr
|
var erron RespErr
|
||||||
jsoniter.Unmarshal(body, &erron)
|
_ = jsoniter.Unmarshal(body, &erron)
|
||||||
xml.Unmarshal(body, &erron)
|
_ = xml.Unmarshal(body, &erron)
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
return nil, &erron
|
return nil, &erron
|
||||||
}
|
}
|
||||||
@@ -185,39 +187,9 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
     return body, nil
 }
 func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
-    fullUrl := API_URL
-    if isFamily {
-        fullUrl += "/family/file"
-    }
-    fullUrl += "/listFiles.action"
-
-    res := make([]model.Obj, 0, 130)
+    res := make([]model.Obj, 0, 100)
     for pageNum := 1; ; pageNum++ {
-        var resp Cloud189FilesResp
-        _, err := y.get(fullUrl, func(r *resty.Request) {
-            r.SetContext(ctx)
-            r.SetQueryParams(map[string]string{
-                "folderId":   fileId,
-                "fileType":   "0",
-                "mediaAttr":  "0",
-                "iconOption": "5",
-                "pageNum":    fmt.Sprint(pageNum),
-                "pageSize":   "130",
-            })
-            if isFamily {
-                r.SetQueryParams(map[string]string{
-                    "familyId":   y.FamilyID,
-                    "orderBy":    toFamilyOrderBy(y.OrderBy),
-                    "descending": toDesc(y.OrderDirection),
-                })
-            } else {
-                r.SetQueryParams(map[string]string{
-                    "recursive":  "0",
-                    "orderBy":    y.OrderBy,
-                    "descending": toDesc(y.OrderDirection),
-                })
-            }
-        }, &resp, isFamily)
+        resp, err := y.getFilesWithPage(ctx, fileId, isFamily, pageNum, 1000, y.OrderBy, y.OrderDirection)
         if err != nil {
             return nil, err
         }
@@ -236,6 +208,63 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool)
     return res, nil
 }
 
+func (y *Cloud189PC) getFilesWithPage(ctx context.Context, fileId string, isFamily bool, pageNum int, pageSize int, orderBy string, orderDirection string) (*Cloud189FilesResp, error) {
+    fullUrl := API_URL
+    if isFamily {
+        fullUrl += "/family/file"
+    }
+    fullUrl += "/listFiles.action"
+
+    var resp Cloud189FilesResp
+    _, err := y.get(fullUrl, func(r *resty.Request) {
+        r.SetContext(ctx)
+        r.SetQueryParams(map[string]string{
+            "folderId":   fileId,
+            "fileType":   "0",
+            "mediaAttr":  "0",
+            "iconOption": "5",
+            "pageNum":    fmt.Sprint(pageNum),
+            "pageSize":   fmt.Sprint(pageSize),
+        })
+        if isFamily {
+            r.SetQueryParams(map[string]string{
+                "familyId":   y.FamilyID,
+                "orderBy":    toFamilyOrderBy(orderBy),
+                "descending": toDesc(orderDirection),
+            })
+        } else {
+            r.SetQueryParams(map[string]string{
+                "recursive":  "0",
+                "orderBy":    orderBy,
+                "descending": toDesc(orderDirection),
+            })
+        }
+    }, &resp, isFamily)
+    if err != nil {
+        return nil, err
+    }
+    return &resp, nil
+}
+
+func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, folderId string, isFamily bool) (*Cloud189File, error) {
+    for pageNum := 1; ; pageNum++ {
+        resp, err := y.getFilesWithPage(ctx, folderId, isFamily, pageNum, 10, "filename", "asc")
+        if err != nil {
+            return nil, err
+        }
+        // nothing more to fetch; stop
+        if resp.FileListAO.Count == 0 {
+            return nil, errs.ObjectNotFound
+        }
+        for i := 0; i < len(resp.FileListAO.FileList); i++ {
+            file := resp.FileListAO.FileList[i]
+            if file.Name == searchName {
+                return &file, nil
+            }
+        }
+    }
+}
 
 func (y *Cloud189PC) login() (err error) {
     // initialize the parameters required for login
     if y.loginParam == nil {
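The refactor centralizes single-page fetching in getFilesWithPage, so each caller picks its own page size and ordering: 1000 per page for full listings, 10 for name lookups, 100 for the transfer-folder cleanup below. The general shape, as a hedged standalone sketch (not repository code):

```go
// fetchAll drains a page-based listing API into one slice, mirroring
// how getFiles now delegates to getFilesWithPage. Names are illustrative.
func fetchAll[T any](getPage func(pageNum, pageSize int) ([]T, error), pageSize int) ([]T, error) {
	var all []T
	for pageNum := 1; ; pageNum++ {
		items, err := getPage(pageNum, pageSize)
		if err != nil {
			return nil, err
		}
		if len(items) == 0 {
			return all, nil // an empty page means the listing is exhausted
		}
		all = append(all, items...)
	}
}
```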
@@ -295,7 +324,7 @@ func (y *Cloud189PC) login() (err error) {
     _, err = y.client.R().
         SetResult(&tokenInfo).SetError(&erron).
         SetQueryParams(clientSuffix()).
-        SetQueryParam("redirectURL", url.QueryEscape(loginresp.ToUrl)).
+        SetQueryParam("redirectURL", loginresp.ToUrl).
         Post(API_URL + "/getSessionForPC.action")
     if err != nil {
         return
@@ -444,12 +473,8 @@ func (y *Cloud189PC) refreshSession() (err error) {
 // normal upload
 // files of size 0 cannot be uploaded
 func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-    var sliceSize = partSize(file.GetSize())
-    count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
-    lastPartSize := file.GetSize() % sliceSize
-    if file.GetSize() > 0 && lastPartSize == 0 {
-        lastPartSize = sliceSize
-    }
+    size := file.GetSize()
+    sliceSize := partSize(size)
 
     params := Params{
         "parentFolderId": dstDir.GetID(),
@@ -481,24 +506,32 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
         retry.Attempts(3),
         retry.Delay(time.Second),
         retry.DelayType(retry.BackOffDelay))
+    sem := semaphore.NewWeighted(3)
 
-    fileMd5 := md5.New()
-    silceMd5 := md5.New()
+    count := int(size / sliceSize)
+    lastPartSize := size % sliceSize
+    if lastPartSize > 0 {
+        count++
+    } else {
+        lastPartSize = sliceSize
+    }
+    fileMd5 := utils.MD5.NewFunc()
+    silceMd5 := utils.MD5.NewFunc()
     silceMd5Hexs := make([]string, 0, count)
+    teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
+    byteSize := sliceSize
     for i := 1; i <= count; i++ {
         if utils.IsCanceled(upCtx) {
             break
         }
 
-        byteData := make([]byte, sliceSize)
         if i == count {
-            byteData = byteData[:lastPartSize]
+            byteSize = lastPartSize
         }
+        byteData := make([]byte, byteSize)
         // read one chunk
         silceMd5.Reset()
-        if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
+        if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
+            sem.Release(1)
             return nil, err
         }
 
@@ -508,6 +541,10 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
         partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
 
         threadG.Go(func(ctx context.Context) error {
+            if err = sem.Acquire(ctx, 1); err != nil {
+                return err
+            }
+            defer sem.Release(1)
             uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
             if err != nil {
                 return err
@@ -515,7 +552,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 
             // step.4 upload the slice
             uploadUrl := uploadUrls[0]
-            _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily)
+            _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
+                driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
             if err != nil {
                 return err
             }
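The rewritten chunk math drops the math.Ceil float round-trip for plain integer arithmetic: count starts at size/sliceSize and is bumped when a remainder exists, and lastPartSize falls back to a full slice when the size divides evenly. A tiny worked sketch of exactly that arithmetic:

```go
// sliceLayout reproduces the integer chunking the diff switches to:
// no floating point, and the trailing-slice size handled explicitly.
func sliceLayout(size, sliceSize int64) (count int, lastPartSize int64) {
	count = int(size / sliceSize)
	lastPartSize = size % sliceSize
	if lastPartSize > 0 {
		count++ // a short trailing slice
	} else {
		lastPartSize = sliceSize // size divides evenly; the last slice is full
	}
	return count, lastPartSize
}

// e.g. sliceLayout(25, 10) == (3, 5) and sliceLayout(30, 10) == (3, 10);
// for size 0 the count is 0, matching "files of size 0 cannot be uploaded".
```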
@@ -572,24 +610,43 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
 
 // rapid upload
 func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-    tempFile, err := file.CacheFullInTempFile()
-    if err != nil {
-        return nil, err
+    var (
+        cache = file.GetFile()
+        tmpF  *os.File
+        err   error
+    )
+    size := file.GetSize()
+    if _, ok := cache.(io.ReaderAt); !ok && size > 0 {
+        tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
+        if err != nil {
+            return nil, err
+        }
+        defer func() {
+            _ = tmpF.Close()
+            _ = os.Remove(tmpF.Name())
+        }()
+        cache = tmpF
     }
-
-    var sliceSize = partSize(file.GetSize())
-    count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
-    lastSliceSize := file.GetSize() % sliceSize
-    if file.GetSize() > 0 && lastSliceSize == 0 {
+    sliceSize := partSize(size)
+    count := int(size / sliceSize)
+    lastSliceSize := size % sliceSize
+    if lastSliceSize > 0 {
+        count++
+    } else {
         lastSliceSize = sliceSize
     }
 
     //step.1 compute the required information first
     byteSize := sliceSize
-    fileMd5 := md5.New()
-    silceMd5 := md5.New()
-    silceMd5Hexs := make([]string, 0, count)
+    fileMd5 := utils.MD5.NewFunc()
+    sliceMd5 := utils.MD5.NewFunc()
+    sliceMd5Hexs := make([]string, 0, count)
     partInfos := make([]string, 0, count)
+    writers := []io.Writer{fileMd5, sliceMd5}
+    if tmpF != nil {
+        writers = append(writers, tmpF)
+    }
+    written := int64(0)
     for i := 1; i <= count; i++ {
         if utils.IsCanceled(ctx) {
             return nil, ctx.Err()
@@ -599,19 +656,31 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
             byteSize = lastSliceSize
         }
 
-        silceMd5.Reset()
-        if _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
+        n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), file, byteSize)
+        written += n
+        if err != nil && err != io.EOF {
             return nil, err
         }
-        md5Byte := silceMd5.Sum(nil)
-        silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
+        md5Byte := sliceMd5.Sum(nil)
+        sliceMd5Hexs = append(sliceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
         partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
+        sliceMd5.Reset()
+    }
+
+    if tmpF != nil {
+        if size > 0 && written != size {
+            return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, size)
+        }
+        _, err = tmpF.Seek(0, io.SeekStart)
+        if err != nil {
+            return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
+        }
     }
 
     fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
     sliceMd5Hex := fileMd5Hex
-    if file.GetSize() > sliceSize {
-        sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
+    if size > sliceSize {
+        sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(sliceMd5Hexs, "\n")))
     }
 
     fullUrl := UPLOAD_URL
@@ -677,7 +746,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
             }
 
             // step.4 upload the slice
-            _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize), isFamily)
+            _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
             if err != nil {
                 return err
             }
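The rewrite computes the whole-file MD5, the per-slice MD5s, and (only when the incoming stream is not already an io.ReaderAt) a temp-file copy in a single pass, by fanning every read out through io.MultiWriter. A standalone sketch of that one-pass fan-out:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	src := strings.NewReader("example payload")

	fileMd5 := md5.New()
	tmpF, err := os.CreateTemp("", "file-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmpF.Name())

	// One read of the source feeds the hash and the temp-file cache
	// simultaneously; nothing is read twice.
	if _, err := io.Copy(io.MultiWriter(fileMd5, tmpF), src); err != nil {
		panic(err)
	}
	fmt.Println(strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil))))
}
```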
@@ -759,14 +828,11 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
 
 // legacy upload; the family cloud does not support overwrite
 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
-    tempFile, err := file.CacheFullInTempFile()
-    if err != nil {
-        return nil, err
-    }
-    fileMd5, err := utils.HashFile(utils.MD5, tempFile)
+    tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
     if err != nil {
         return nil, err
     }
+    rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
 
     // create an upload session
     uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
@@ -793,7 +859,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
         header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
     }
 
-    _, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
+    _, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily)
     if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
         return nil, err
     }
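driver.NewLimitedUploadStream wraps the cached reader so the PUT respects a shared upload rate limit. The underlying idea is a reader that blocks against a limiter after each read; a sketch under that assumption (this is not the repository's implementation):

```go
package sketch

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// limitedReader throttles reads against a shared limiter, the idea
// behind driver.NewLimitedUploadStream.
type limitedReader struct {
	ctx context.Context
	r   io.Reader
	lim *rate.Limiter
}

func (l *limitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		// WaitN requires n <= burst, so size the limiter's burst to at
		// least the largest read the HTTP client will issue.
		if werr := l.lim.WaitN(l.ctx, n); werr != nil {
			return n, werr
		}
	}
	return n, err
}
```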
@@ -902,8 +968,7 @@ func (y *Cloud189PC) isLogin() bool {
 }
 
 // create the family cloud transfer folder
-func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
-    folders := ring.New(count)
+func (y *Cloud189PC) createFamilyTransferFolder() error {
     var rootFolder Cloud189Folder
     _, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
         req.SetQueryParams(map[string]string{
@@ -912,81 +977,61 @@ func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
         })
     }, &rootFolder, true)
     if err != nil {
-        return nil, err
+        return err
     }
-    folderCount := 0
-
-    // fetch existing folders
-    files, err := y.getFiles(context.TODO(), rootFolder.GetID(), true)
-    if err != nil {
-        return nil, err
-    }
-    for _, file := range files {
-        if folder, ok := file.(*Cloud189Folder); ok {
-            folders.Value = folder
-            folders = folders.Next()
-            folderCount++
-        }
-    }
-
-    // create new folders
-    for folderCount < count {
-        var newFolder Cloud189Folder
-        _, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
-            req.SetQueryParams(map[string]string{
-                "folderName": uuid.NewString(),
-                "familyId":   y.FamilyID,
-                "parentId":   rootFolder.GetID(),
-            })
-        }, &newFolder, true)
-        if err != nil {
-            return nil, err
-        }
-        folders.Value = &newFolder
-        folders = folders.Next()
-        folderCount++
-    }
-    return folders, nil
+    y.familyTransferFolder = &rootFolder
+    return nil
 }
 
 // clean up the transfer folder
 func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error {
-    var tasks []BatchTaskInfo
-    r := y.familyTransferFolder
-    for p := r.Next(); p != r; p = p.Next() {
-        folder := p.Value.(*Cloud189Folder)
-
-        files, err := y.getFiles(ctx, folder.GetID(), true)
+    transferFolderId := y.familyTransferFolder.GetID()
+    for pageNum := 1; ; pageNum++ {
+        resp, err := y.getFilesWithPage(ctx, transferFolderId, true, pageNum, 100, "lastOpTime", "asc")
         if err != nil {
             return err
         }
-        for _, file := range files {
+        // nothing more to fetch; stop
+        if resp.FileListAO.Count == 0 {
+            break
+        }
+
+        var tasks []BatchTaskInfo
+        for i := 0; i < len(resp.FileListAO.FolderList); i++ {
+            folder := resp.FileListAO.FolderList[i]
+            tasks = append(tasks, BatchTaskInfo{
+                FileId:   folder.GetID(),
+                FileName: folder.GetName(),
+                IsFolder: BoolToNumber(folder.IsDir()),
+            })
+        }
+        for i := 0; i < len(resp.FileListAO.FileList); i++ {
+            file := resp.FileListAO.FileList[i]
             tasks = append(tasks, BatchTaskInfo{
                 FileId:   file.GetID(),
                 FileName: file.GetName(),
                 IsFolder: BoolToNumber(file.IsDir()),
             })
         }
-    }
 
         if len(tasks) > 0 {
             // delete
             resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
             if err != nil {
+                return err
+            }
+            err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
+            if err != nil {
+                return err
+            }
+            // permanently delete
+            resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
+            if err != nil {
+                return err
+            }
+            err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
             return err
         }
-        err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
-        if err != nil {
-            return err
-        }
-        // permanently delete
-        resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
-        if err != nil {
-            return err
-        }
-        err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
-        return err
     }
     return nil
 }
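Both the cleanup loop above and the new Delete method below follow the same two-phase sequence: a DELETE batch task moves objects to the recycle bin, then CLEAR_RECYCLE purges them, each polled to completion with WaitBatchTask. A condensed sketch of that sequencing, built on the driver's own CreateBatchTask/WaitBatchTask (the free-function form is for illustration only):

```go
// purge runs DELETE and then CLEAR_RECYCLE over the same task list,
// blocking on each batch task before starting the next.
func purge(y *Cloud189PC, familyID string, tasks ...BatchTaskInfo) error {
	for _, action := range []string{"DELETE", "CLEAR_RECYCLE"} {
		resp, err := y.CreateBatchTask(action, familyID, "", nil, tasks...)
		if err != nil {
			return err
		}
		if err := y.WaitBatchTask(action, resp.TaskID, time.Second); err != nil {
			return err
		}
	}
	return nil
}
```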
@@ -1063,6 +1108,34 @@ func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId s
     }
 }
+
+// permanently delete a file
+func (y *Cloud189PC) Delete(ctx context.Context, familyId string, srcObj model.Obj) error {
+    task := BatchTaskInfo{
+        FileId:   srcObj.GetID(),
+        FileName: srcObj.GetName(),
+        IsFolder: BoolToNumber(srcObj.IsDir()),
+    }
+    // delete the source file
+    resp, err := y.CreateBatchTask("DELETE", familyId, "", nil, task)
+    if err != nil {
+        return err
+    }
+    err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
+    if err != nil {
+        return err
+    }
+    // clear the recycle bin
+    resp, err = y.CreateBatchTask("CLEAR_RECYCLE", familyId, "", nil, task)
+    if err != nil {
+        return err
+    }
+    err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
+    if err != nil {
+        return err
+    }
+    return nil
+}
 
 func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) {
     var resp CreateBatchTaskResp
     _, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
drivers/alias/driver.go
@@ -3,6 +3,7 @@ package alias
 import (
     "context"
     "errors"
+    stdpath "path"
     "strings"
 
     "github.com/alist-org/alist/v3/internal/driver"
@@ -126,8 +127,46 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
     return nil, errs.ObjectNotFound
 }
 
+func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, parentDir, true)
+    if err == nil {
+        return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName))
+    }
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot make sub-dir")
+    }
+    return err
+}
+
+func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    srcPath, err := d.getReqPath(ctx, srcObj, false)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot be moved")
+    }
+    if err != nil {
+        return err
+    }
+    dstPath, err := d.getReqPath(ctx, dstDir, true)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be moved to")
+    }
+    if err != nil {
+        return err
+    }
+    return fs.Move(ctx, *srcPath, *dstPath)
+}
+
 func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
-    reqPath, err := d.getReqPath(ctx, srcObj)
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, srcObj, false)
     if err == nil {
         return fs.Rename(ctx, *reqPath, newName)
     }
@@ -137,8 +176,33 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
     return err
 }
 
+func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    srcPath, err := d.getReqPath(ctx, srcObj, false)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot be copied")
+    }
+    if err != nil {
+        return err
+    }
+    dstPath, err := d.getReqPath(ctx, dstDir, true)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be copied to")
+    }
+    if err != nil {
+        return err
+    }
+    _, err = fs.Copy(ctx, *srcPath, *dstPath)
+    return err
+}
+
 func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
-    reqPath, err := d.getReqPath(ctx, obj)
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, obj, false)
     if err == nil {
         return fs.Remove(ctx, *reqPath)
     }
@@ -148,4 +212,110 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
     return err
 }
 
+func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, dstDir, true)
+    if err == nil {
+        return fs.PutDirectly(ctx, *reqPath, s)
+    }
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be Put")
+    }
+    return err
+}
+
+func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, dstDir, true)
+    if err == nil {
+        return fs.PutURL(ctx, *reqPath, name, url)
+    }
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot offline download")
+    }
+    return err
+}
+
+func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    root, sub := d.getRootAndPath(obj.GetPath())
+    dsts, ok := d.pathMap[root]
+    if !ok {
+        return nil, errs.ObjectNotFound
+    }
+    for _, dst := range dsts {
+        meta, err := d.getArchiveMeta(ctx, dst, sub, args)
+        if err == nil {
+            return meta, nil
+        }
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    root, sub := d.getRootAndPath(obj.GetPath())
+    dsts, ok := d.pathMap[root]
+    if !ok {
+        return nil, errs.ObjectNotFound
+    }
+    for _, dst := range dsts {
+        l, err := d.listArchive(ctx, dst, sub, args)
+        if err == nil {
+            return l, nil
+        }
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
+    // an alias may sit over two drivers, one that supports driver-level extraction and one that does not; how do we stay compatible?
+    // for an archive on a driver without driver extraction, GetArchiveMeta returns errs.NotImplement, the extraction URL gets the /ae prefix, and Extract is never called
+    // for an archive on a driver with driver extraction, GetArchiveMeta returns a valid value, the extraction URL gets the /ad prefix, and Extract is called
+    root, sub := d.getRootAndPath(obj.GetPath())
+    dsts, ok := d.pathMap[root]
+    if !ok {
+        return nil, errs.ObjectNotFound
+    }
+    for _, dst := range dsts {
+        link, err := d.extract(ctx, dst, sub, args)
+        if err == nil {
+            if !args.Redirect && len(link.URL) > 0 {
+                if d.DownloadConcurrency > 0 {
+                    link.Concurrency = d.DownloadConcurrency
+                }
+                if d.DownloadPartSize > 0 {
+                    link.PartSize = d.DownloadPartSize * utils.KB
+                }
+            }
+            return link, nil
+        }
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    srcPath, err := d.getReqPath(ctx, srcObj, false)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot be decompressed")
+    }
+    if err != nil {
+        return err
+    }
+    dstPath, err := d.getReqPath(ctx, dstDir, true)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be decompressed to")
+    }
+    if err != nil {
+        return err
+    }
+    _, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args)
+    return err
+}
+
 var _ driver.Driver = (*Alias)(nil)
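Every mutating method now opens with the same guard, so a read-only alias rejects writes before any path resolution happens. The shape, reduced to a hedged stand-alone sketch (local stand-ins for d.Writable and errs.PermissionDenied, not repository code):

```go
package sketch

import "errors"

var errPermissionDenied = errors.New("permission denied")

// guardWrite captures the check each mutating Alias method begins
// with: refuse before resolving any path.
func guardWrite(writable bool, op func() error) error {
	if !writable {
		return errPermissionDenied
	}
	return op()
}
```

Keeping the check inline in every method, as the diff does, keeps each method self-describing at the cost of a little repetition.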
drivers/alias/meta.go
@@ -13,13 +13,14 @@ type Addition struct {
     ProtectSameName     bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"`
     DownloadConcurrency int  `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
     DownloadPartSize    int  `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
+    Writable            bool `json:"writable" type:"bool" default:"false"`
 }
 
 var config = driver.Config{
     Name:             "Alias",
     LocalSort:        true,
     NoCache:          true,
-    NoUpload:         true,
+    NoUpload:         false,
     DefaultRoot:      "/",
     ProxyRangeOption: true,
 }
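Two knobs move together here: the driver config flips NoUpload to false so writes become possible in principle, while the new Writable addition defaults to false so each alias stays read-only until a user opts in per storage. A condensed sketch of the relationship (not the full structs):

```go
package sketch

import "github.com/alist-org/alist/v3/internal/driver"

// Addition carries the per-storage opt-in.
type Addition struct {
	Writable bool `json:"writable" type:"bool" default:"false"`
}

// The driver-wide config no longer blocks uploads outright...
var config = driver.Config{
	Name:     "Alias",
	NoUpload: false,
}

// ...but every mutating method still checks the storage's Writable flag first.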
drivers/alias/util.go
@@ -3,9 +3,11 @@ package alias
 import (
     "context"
     "fmt"
+    "net/url"
     stdpath "path"
     "strings"
 
+    "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/fs"
     "github.com/alist-org/alist/v3/internal/model"
@@ -63,6 +65,7 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob
         Size:     obj.GetSize(),
         Modified: obj.ModTime(),
         IsFolder: obj.IsDir(),
+        HashInfo: obj.GetHash(),
     }, nil
 }
 
@@ -124,9 +127,9 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs)
     return link, err
 }
 
-func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) {
+func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) {
     root, sub := d.getRootAndPath(obj.GetPath())
-    if sub == "" {
+    if sub == "" && !isParent {
         return nil, errs.NotSupport
     }
     dsts, ok := d.pathMap[root]
@@ -155,3 +158,68 @@ func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error)
     }
     return reqPath, nil
 }
+
+func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    reqPath := stdpath.Join(dst, sub)
+    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
+    if err != nil {
+        return nil, err
+    }
+    if _, ok := storage.(driver.ArchiveReader); ok {
+        return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{
+            ArchiveArgs: args,
+            Refresh:     true,
+        })
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    reqPath := stdpath.Join(dst, sub)
+    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
+    if err != nil {
+        return nil, err
+    }
+    if _, ok := storage.(driver.ArchiveReader); ok {
+        return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{
+            ArchiveInnerArgs: args,
+            Refresh:          true,
+        })
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
+    reqPath := stdpath.Join(dst, sub)
+    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
+    if err != nil {
+        return nil, err
+    }
+    if _, ok := storage.(driver.ArchiveReader); ok {
+        if _, ok := storage.(*Alias); !ok && !args.Redirect {
+            link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
+            return link, err
+        }
+        _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
+        if err != nil {
+            return nil, err
+        }
+        if common.ShouldProxy(storage, stdpath.Base(sub)) {
+            link := &model.Link{
+                URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
+                    common.GetApiUrl(args.HttpReq),
+                    utils.EncodePath(reqPath, true),
+                    utils.EncodePath(args.InnerPath, true),
+                    url.QueryEscape(args.Password),
+                    sign.SignArchive(reqPath)),
+            }
+            if args.HttpReq != nil && d.ProxyRange {
+                link.RangeReadCloser = common.NoProxyRange
+            }
+            return link, nil
+        }
+        link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
+        return link, err
+    }
+    return nil, errs.NotImplement
+}
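These helpers decide per destination whether the underlying storage can handle archives by asserting an optional interface, `storage.(driver.ArchiveReader)`. That is the standard Go idiom for optional capabilities; a generic, self-contained sketch:

```go
package sketch

import "fmt"

// Optional capabilities in Go are usually modeled as extra interfaces
// probed with a type assertion, exactly what the alias helpers do with
// driver.ArchiveReader. Types here are illustrative.
type Storage interface{ Name() string }

type ArchiveReader interface {
	ListArchive(path string) ([]string, error)
}

func listIfSupported(s Storage, path string) ([]string, error) {
	if ar, ok := s.(ArchiveReader); ok {
		return ar.ListArchive(path)
	}
	return nil, fmt.Errorf("%s: archives not supported", s.Name())
}
```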
drivers/alist_v3/driver.go
@@ -5,12 +5,14 @@ import (
     "fmt"
     "io"
     "net/http"
+    "net/url"
     "path"
     "strings"
 
     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/alist-org/alist/v3/server/common"
@@ -34,7 +36,7 @@ func (d *AListV3) GetAddition() driver.Additional {
 func (d *AListV3) Init(ctx context.Context) error {
     d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
     var resp common.Resp[MeResp]
-    _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
+    _, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
         req.SetResult(&resp)
     })
     if err != nil {
@@ -48,15 +50,15 @@ func (d *AListV3) Init(ctx context.Context) error {
         }
     }
     // re-get the user info
-    _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
+    _, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
         req.SetResult(&resp)
     })
     if err != nil {
         return err
     }
-    if resp.Data.Role == model.GUEST {
-        url := d.Address + "/api/public/settings"
-        res, err := base.RestyClient.R().Get(url)
+    if utils.SliceContains(resp.Data.Role, model.GUEST) {
+        u := d.Address + "/api/public/settings"
+        res, err := base.RestyClient.R().Get(u)
         if err != nil {
             return err
         }
@@ -74,7 +76,7 @@ func (d *AListV3) Drop(ctx context.Context) error {
 
 func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
     var resp common.Resp[FsListResp]
-    _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
         req.SetResult(&resp).SetBody(ListReq{
             PageReq: model.PageReq{
                 Page: 1,
@@ -116,7 +118,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
             userAgent = base.UserAgent
         }
     }
-    _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
         req.SetResult(&resp).SetBody(FsGetReq{
             Path:     file.GetPath(),
             Password: d.MetaPassword,
@@ -131,7 +133,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }
 
 func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-    _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
         req.SetBody(MkdirOrLinkReq{
             Path: path.Join(parentDir.GetPath(), dirName),
         })
@@ -140,7 +142,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
 }
 
 func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
-    _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
         req.SetBody(MoveCopyReq{
             SrcDir: path.Dir(srcObj.GetPath()),
             DstDir: dstDir.GetPath(),
@@ -151,7 +153,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 }
 
 func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
-    _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
         req.SetBody(RenameReq{
             Path: srcObj.GetPath(),
             Name: newName,
@@ -161,7 +163,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string)
 }
 
 func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
-    _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
         req.SetBody(MoveCopyReq{
             SrcDir: path.Dir(srcObj.GetPath()),
             DstDir: dstDir.GetPath(),
@@ -172,7 +174,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 }
 
 func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
-    _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
         req.SetBody(RemoveReq{
             Dir:   path.Dir(obj.GetPath()),
             Names: []string{obj.GetName()},
@@ -181,25 +183,29 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
     return err
 }
 
-func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-    req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", stream)
+func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
+    reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+        Reader:         s,
+        UpdateProgress: up,
+    })
+    req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
     if err != nil {
         return err
     }
     req.Header.Set("Authorization", d.Token)
-    req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName()))
+    req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName()))
     req.Header.Set("Password", d.MetaPassword)
-    if md5 := stream.GetHash().GetHash(utils.MD5); len(md5) > 0 {
+    if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
         req.Header.Set("X-File-Md5", md5)
     }
-    if sha1 := stream.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
+    if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
         req.Header.Set("X-File-Sha1", sha1)
     }
-    if sha256 := stream.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
+    if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
         req.Header.Set("X-File-Sha256", sha256)
     }
 
-    req.ContentLength = stream.GetSize()
+    req.ContentLength = s.GetSize()
     // client := base.NewHttpClient()
     // client.Timeout = time.Hour * 6
     res, err := base.HttpClient.Do(req)
@@ -228,6 +234,127 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
     return nil
 }
 
+func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    if !d.ForwardArchiveReq {
+        return nil, errs.NotImplement
+    }
+    var resp common.Resp[ArchiveMetaResp]
+    _, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
+        req.SetResult(&resp).SetBody(ArchiveMetaReq{
+            ArchivePass: args.Password,
+            Password:    d.MetaPassword,
+            Path:        obj.GetPath(),
+            Refresh:     false,
+        })
+    })
+    if code == 202 {
+        return nil, errs.WrongArchivePassword
+    }
+    if err != nil {
+        return nil, err
+    }
+    var tree []model.ObjTree
+    if resp.Data.Content != nil {
+        tree = make([]model.ObjTree, 0, len(resp.Data.Content))
+        for _, content := range resp.Data.Content {
+            tree = append(tree, &content)
+        }
+    }
+    return &model.ArchiveMetaInfo{
+        Comment:   resp.Data.Comment,
+        Encrypted: resp.Data.Encrypted,
+        Tree:      tree,
+    }, nil
+}
+
+func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    if !d.ForwardArchiveReq {
+        return nil, errs.NotImplement
+    }
+    var resp common.Resp[ArchiveListResp]
+    _, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
+        req.SetResult(&resp).SetBody(ArchiveListReq{
+            ArchiveMetaReq: ArchiveMetaReq{
+                ArchivePass: args.Password,
+                Password:    d.MetaPassword,
+                Path:        obj.GetPath(),
+                Refresh:     false,
+            },
+            PageReq: model.PageReq{
+                Page:    1,
+                PerPage: 0,
+            },
+            InnerPath: args.InnerPath,
+        })
+    })
+    if code == 202 {
+        return nil, errs.WrongArchivePassword
+    }
+    if err != nil {
+        return nil, err
+    }
+    var files []model.Obj
+    for _, f := range resp.Data.Content {
+        file := model.ObjThumb{
+            Object: model.Object{
+                Name:     f.Name,
+                Modified: f.Modified,
+                Ctime:    f.Created,
+                Size:     f.Size,
+                IsFolder: f.IsDir,
+                HashInfo: utils.FromString(f.HashInfo),
+            },
+            Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
+        }
+        files = append(files, &file)
+    }
+    return files, nil
+}
+
+func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
+    if !d.ForwardArchiveReq {
+        return nil, errs.NotSupport
+    }
+    var resp common.Resp[ArchiveMetaResp]
+    _, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
+        req.SetResult(&resp).SetBody(ArchiveMetaReq{
+            ArchivePass: args.Password,
+            Password:    d.MetaPassword,
+            Path:        obj.GetPath(),
+            Refresh:     false,
+        })
+    })
+    if err != nil {
+        return nil, err
+    }
+    return &model.Link{
+        URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s",
+            resp.Data.RawURL,
+            utils.EncodePath(args.InnerPath, true),
+            url.QueryEscape(args.Password),
+            resp.Data.Sign),
+    }, nil
+}
+
+func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
+    if !d.ForwardArchiveReq {
+        return errs.NotImplement
+    }
+    dir, name := path.Split(srcObj.GetPath())
+    _, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) {
+        req.SetBody(DecompressReq{
+            ArchivePass:   args.Password,
+            CacheFull:     args.CacheFull,
+            DstDir:        dstDir.GetPath(),
+            InnerPath:     args.InnerPath,
+            Name:          []string{name},
+            PutIntoNewDir: args.PutIntoNewDir,
+            SrcDir:        dir,
+        })
+    })
+    return err
+}
+
 //func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
 //    return nil, errs.NotSupport
 //}
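The archive methods work because `request` now also returns the HTTP status, letting callers map the upstream's 202 response to errs.WrongArchivePassword even though 202 is not a transport error. A self-contained sketch of that three-value shape and the 202 branch (names and endpoint are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
)

var errWrongArchivePassword = errors.New("wrong archive password")

// request mirrors the driver's new signature: body, HTTP status, error,
// so callers can branch on status even when the call itself failed.
func request(url string) ([]byte, int, error) {
	res, err := http.Get(url)
	if err != nil {
		code := 0
		if res != nil {
			code = res.StatusCode
		}
		return nil, code, err
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	return body, res.StatusCode, err
}

func main() {
	_, code, err := request("http://127.0.0.1:5244/api/fs/archive/meta")
	if code == http.StatusAccepted { // upstream signals a wrong archive password with 202
		fmt.Println(errWrongArchivePassword)
		return
	}
	if err != nil {
		fmt.Println(err)
	}
}
```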
drivers/alist_v3/meta.go
@@ -7,12 +7,13 @@ import (
 
 type Addition struct {
     driver.RootPath
     Address           string `json:"url" required:"true"`
     MetaPassword      string `json:"meta_password"`
     Username          string `json:"username"`
     Password          string `json:"password"`
     Token             string `json:"token"`
     PassUAToUpsteam   bool   `json:"pass_ua_to_upsteam" default:"true"`
+    ForwardArchiveReq bool   `json:"forward_archive_requests" default:"true"`
 }
 
 var config = driver.Config{
drivers/alist_v3/types.go
@@ -4,6 +4,7 @@ import (
     "time"
 
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
 )
 
 type ListReq struct {
@@ -75,9 +76,95 @@ type MeResp struct {
     Username   string `json:"username"`
     Password   string `json:"password"`
     BasePath   string `json:"base_path"`
-    Role       int    `json:"role"`
+    Role       []int  `json:"role"`
     Disabled   bool   `json:"disabled"`
     Permission int    `json:"permission"`
     SsoId      string `json:"sso_id"`
     Otp        bool   `json:"otp"`
 }
 
+type ArchiveMetaReq struct {
+    ArchivePass string `json:"archive_pass"`
+    Password    string `json:"password"`
+    Path        string `json:"path"`
+    Refresh     bool   `json:"refresh"`
+}
+
+type TreeResp struct {
+    ObjResp
+    Children  []TreeResp `json:"children"`
+    hashCache *utils.HashInfo
+}
+
+func (t *TreeResp) GetSize() int64 {
+    return t.Size
+}
+
+func (t *TreeResp) GetName() string {
+    return t.Name
+}
+
+func (t *TreeResp) ModTime() time.Time {
+    return t.Modified
+}
+
+func (t *TreeResp) CreateTime() time.Time {
+    return t.Created
+}
+
+func (t *TreeResp) IsDir() bool {
+    return t.ObjResp.IsDir
+}
+
+func (t *TreeResp) GetHash() utils.HashInfo {
+    return utils.FromString(t.HashInfo)
+}
+
+func (t *TreeResp) GetID() string {
+    return ""
+}
+
+func (t *TreeResp) GetPath() string {
+    return ""
+}
+
+func (t *TreeResp) GetChildren() []model.ObjTree {
+    ret := make([]model.ObjTree, 0, len(t.Children))
+    for _, child := range t.Children {
+        ret = append(ret, &child)
+    }
+    return ret
+}
+
+func (t *TreeResp) Thumb() string {
+    return t.ObjResp.Thumb
+}
+
+type ArchiveMetaResp struct {
+    Comment   string     `json:"comment"`
+    Encrypted bool       `json:"encrypted"`
+    Content   []TreeResp `json:"content"`
+    RawURL    string     `json:"raw_url"`
+    Sign      string     `json:"sign"`
+}
+
+type ArchiveListReq struct {
+    model.PageReq
+    ArchiveMetaReq
+    InnerPath string `json:"inner_path"`
+}
+
+type ArchiveListResp struct {
+    Content []ObjResp `json:"content"`
+    Total   int64     `json:"total"`
+}
+
+type DecompressReq struct {
+    ArchivePass   string   `json:"archive_pass"`
+    CacheFull     bool     `json:"cache_full"`
+    DstDir        string   `json:"dst_dir"`
+    InnerPath     string   `json:"inner_path"`
+    Name          []string `json:"name"`
+    PutIntoNewDir bool     `json:"put_into_new_dir"`
+    SrcDir        string   `json:"src_dir"`
+}
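TreeResp now carries the full method set of model.ObjTree so archive trees returned by a remote alist can be handed straight to the core. A compile-time assertion makes such contracts explicit; this is a suggestion in the pattern the diff itself already uses for drivers (`var _ driver.Driver = (*Alias)(nil)`), not a line from the commit:

```go
// Compile-time check that *TreeResp keeps satisfying model.ObjTree.
var _ model.ObjTree = (*TreeResp)(nil)
```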
drivers/alist_v3/util.go
@@ -17,7 +17,7 @@ func (d *AListV3) login() error {
         return nil
     }
     var resp common.Resp[LoginResp]
-    _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
         req.SetResult(&resp).SetBody(base.Json{
             "username": d.Username,
             "password": d.Password,
@@ -31,7 +31,7 @@ func (d *AListV3) login() error {
     return nil
 }
 
-func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
+func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, error) {
     url := d.Address + "/api" + api
     req := base.RestyClient.R()
     req.SetHeader("Authorization", d.Token)
@@ -40,22 +40,26 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry .
     }
     res, err := req.Execute(method, url)
     if err != nil {
-        return nil, err
+        code := 0
+        if res != nil {
+            code = res.StatusCode()
+        }
+        return nil, code, err
     }
     log.Debugf("[alist_v3] response body: %s", res.String())
     if res.StatusCode() >= 400 {
-        return nil, fmt.Errorf("request failed, status: %s", res.Status())
+        return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
     }
     code := utils.Json.Get(res.Body(), "code").ToInt()
     if code != 200 {
         if (code == 401 || code == 403) && !utils.IsBool(retry...) {
             err = d.login()
             if err != nil {
-                return nil, err
+                return nil, code, err
            }
             return d.request(api, method, callback, true)
         }
-        return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
+        return nil, code, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
     }
-    return res.Body(), nil
+    return res.Body(), 200, nil
 }
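Worth noting in the wrapper above: on 401/403 it re-logs-in and retries exactly once, with the variadic `retry` flag marking the second attempt so an auth failure cannot recurse forever. A hedged sketch of that retry-once shape with hypothetical helpers:

```go
// doWithRelogin mirrors the shape of d.request's auth retry: a variadic
// bool marks the retried call so it only happens once. Sketch only.
func doWithRelogin(do func() (int, error), relogin func() error, retried ...bool) error {
	code, err := do()
	if (code == 401 || code == 403) && !(len(retried) > 0 && retried[0]) {
		if lerr := relogin(); lerr != nil {
			return lerr
		}
		return doWithRelogin(do, relogin, true)
	}
	return err
}
```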
drivers/aliyundrive/driver.go
@@ -14,13 +14,12 @@ import (
     "os"
     "time"
 
-    "github.com/alist-org/alist/v3/internal/stream"
-
     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/stream"
     "github.com/alist-org/alist/v3/pkg/cron"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/go-resty/resty/v2"
@@ -56,7 +55,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
     if err != nil {
         return err
     }
-    d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
+    d.DriveId = d.Addition.DeviceID
     d.UserID = utils.Json.Get(res, "user_id").ToString()
     d.cron = cron.NewCron(time.Hour * 2)
     d.cron.Do(func() {
@@ -194,7 +193,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
     }
     if d.RapidUpload {
         buf := bytes.NewBuffer(make([]byte, 0, 1024))
-        utils.CopyWithBufferN(buf, file, 1024)
+        _, err := utils.CopyWithBufferN(buf, file, 1024)
+        if err != nil {
+            return err
+        }
         reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
         if localFile != nil {
             if _, err := localFile.Seek(0, io.SeekStart); err != nil {
@@ -286,6 +288,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
         file.Reader = localFile
     }
 
+    rateLimited := driver.NewLimitedUploadStream(ctx, file)
     for i, partInfo := range resp.PartInfoList {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
@@ -294,7 +297,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
         if d.InternalUpload {
             url = partInfo.InternalUploadUrl
         }
-        req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
+        req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
         if err != nil {
             return err
         }
@@ -303,7 +306,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
         if err != nil {
             return err
         }
-        res.Body.Close()
+        _ = res.Body.Close()
         if count > 0 {
             up(float64(i) * 100 / float64(count))
         }
|
|||||||
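The two Put hunks above route every part upload through driver.NewLimitedUploadStream, a context-aware, rate-limited wrapper around the file reader, and then read each part out of it with io.LimitReader. A minimal standalone sketch of the context-aware half of that idea, with illustrative names only (the real wrapper also applies a server-wide upload rate limit):

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// ctxReader stops serving reads once its context is cancelled, so an upload
// loop that reads from it aborts promptly instead of pushing more bytes.
type ctxReader struct {
	ctx context.Context
	r   io.Reader
}

func (c *ctxReader) Read(p []byte) (int, error) {
	if err := c.ctx.Err(); err != nil {
		return 0, err
	}
	return c.r.Read(p)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	r := &ctxReader{ctx: ctx, r: strings.NewReader("chunked upload body")}
	buf := make([]byte, 7)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // "chunked"
	cancel()
	_, err := r.Read(buf)
	fmt.Println(err) // context canceled
}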
@@ -7,8 +7,8 @@ import (
 
 type Addition struct {
 	driver.RootID
 	RefreshToken string `json:"refresh_token" required:"true"`
-	//DeviceID string `json:"device_id" required:"true"`
+	DeviceID string `json:"device_id" required:"true"`
 	OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
 	OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
 	RapidUpload bool `json:"rapid_upload"`
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"path/filepath"
 	"time"
 
 	"github.com/Xhofe/rateg"
@@ -14,6 +15,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
 )
 
 type AliyundriveOpen struct {
@@ -72,6 +74,18 @@ func (d *AliyundriveOpen) Drop(ctx context.Context) error {
 	return nil
 }
 
+// GetRoot implements the driver.GetRooter interface to properly set up the root object
+func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
+	return &model.Object{
+		ID:       d.RootFolderID,
+		Path:     "/",
+		Name:     "root",
+		Size:     0,
+		Modified: d.Modified,
+		IsFolder: true,
+	}, nil
+}
+
 func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
 	if d.limitList == nil {
 		return nil, fmt.Errorf("driver not init")
@@ -80,9 +94,17 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
 	if err != nil {
 		return nil, err
 	}
-	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
-		return fileToObj(src), nil
+	objs, err := utils.SliceConvert(files, func(src File) (model.Obj, error) {
+		obj := fileToObj(src)
+		// Set the correct path for the object
+		if dir.GetPath() != "" {
+			obj.Path = filepath.Join(dir.GetPath(), obj.GetName())
+		}
+		return obj, nil
 	})
+
+	return objs, err
 }
 
 func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
@@ -132,7 +154,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
 	if err != nil {
 		return nil, err
 	}
-	return fileToObj(newDir), nil
+	obj := fileToObj(newDir)
+
+	// Set the correct Path for the returned directory object
+	if parentDir.GetPath() != "" {
+		obj.Path = filepath.Join(parentDir.GetPath(), dirName)
+	} else {
+		obj.Path = "/" + dirName
+	}
+
+	return obj, nil
 }
 
 func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
@@ -142,20 +173,24 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
 			"drive_id": d.DriveId,
 			"file_id": srcObj.GetID(),
 			"to_parent_file_id": dstDir.GetID(),
-			"check_name_mode": "refuse", // optional:ignore,auto_rename,refuse
+			"check_name_mode": "ignore", // optional:ignore,auto_rename,refuse
 			//"new_name": "newName", // The new name to use when a file of the same name exists
 		}).SetResult(&resp)
 	})
 	if err != nil {
 		return nil, err
 	}
-	if resp.Exist {
-		return nil, errors.New("existence of files with the same name")
-	}
 
 	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
 		srcObj.ID = resp.FileID
 		srcObj.Modified = time.Now()
+		srcObj.Path = filepath.Join(dstDir.GetPath(), srcObj.GetName())
+
+		// Check for duplicate files in the destination directory
+		if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), srcObj.GetID()); err != nil {
+			// Only log a warning instead of returning an error since the move operation has already completed successfully
+			log.Warnf("Failed to remove duplicate files after move: %v", err)
+		}
 		return srcObj, nil
 	}
 	return nil, nil
@@ -173,19 +208,47 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
 	if err != nil {
 		return nil, err
 	}
-	return fileToObj(newFile), nil
+
+	// Check for duplicate files in the parent directory
+	parentPath := filepath.Dir(srcObj.GetPath())
+	if err := d.removeDuplicateFiles(ctx, parentPath, newName, newFile.FileId); err != nil {
+		// Only log a warning instead of returning an error since the rename operation has already completed successfully
+		log.Warnf("Failed to remove duplicate files after rename: %v", err)
+	}
+
+	obj := fileToObj(newFile)
+
+	// Set the correct Path for the renamed object
+	if parentPath != "" && parentPath != "." {
+		obj.Path = filepath.Join(parentPath, newName)
+	} else {
+		obj.Path = "/" + newName
+	}
+
+	return obj, nil
 }
 
 func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	var resp MoveOrCopyResp
 	_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
 			"file_id": srcObj.GetID(),
 			"to_parent_file_id": dstDir.GetID(),
-			"auto_rename": true,
-		})
+			"auto_rename": false,
+		}).SetResult(&resp)
 	})
-	return err
+	if err != nil {
+		return err
+	}
+
+	// Check for duplicate files in the destination directory
+	if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), resp.FileID); err != nil {
+		// Only log a warning instead of returning an error since the copy operation has already completed successfully
+		log.Warnf("Failed to remove duplicate files after copy: %v", err)
+	}
+
+	return nil
 }
 
 func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
@@ -203,7 +266,18 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	return d.upload(ctx, dstDir, stream, up)
+	obj, err := d.upload(ctx, dstDir, stream, up)
+
+	// Set the correct Path for the returned file object
+	if obj != nil && obj.GetPath() == "" {
+		if dstDir.GetPath() != "" {
+			if objWithPath, ok := obj.(model.SetPath); ok {
+				objWithPath.SetPath(filepath.Join(dstDir.GetPath(), obj.GetName()))
+			}
+		}
+	}
+
+	return obj, err
 }
 
 func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@@ -235,3 +309,4 @@ var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
 var _ driver.MoveResult = (*AliyundriveOpen)(nil)
 var _ driver.RenameResult = (*AliyundriveOpen)(nil)
 var _ driver.PutResult = (*AliyundriveOpen)(nil)
+var _ driver.GetRooter = (*AliyundriveOpen)(nil)
@@ -1,7 +1,6 @@
 package aliyundrive_open
 
 import (
-	"bytes"
 	"context"
 	"encoding/base64"
 	"fmt"
@@ -15,6 +14,7 @@ import (
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	streamPkg "github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/avast/retry-go"
@@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
 	if err != nil {
 		return err
 	}
-	res.Body.Close()
+	_ = res.Body.Close()
 	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
 		return fmt.Errorf("upload status: %d", res.StatusCode)
 	}
@@ -131,16 +131,19 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
 		return "", err
 	}
 	length := proofRange.End - proofRange.Start
-	buf := bytes.NewBuffer(make([]byte, 0, length))
 	reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
 	if err != nil {
 		return "", err
 	}
-	_, err = utils.CopyWithBufferN(buf, reader, length)
+	buf := make([]byte, length)
+	n, err := io.ReadFull(reader, buf)
+	if err == io.ErrUnexpectedEOF {
+		return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
+	}
 	if err != nil {
 		return "", err
 	}
-	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
+	return base64.StdEncoding.EncodeToString(buf), nil
 }
 
 func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
@@ -183,25 +186,18 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 	_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(createData).SetResult(&createResp)
 	})
-	var tmpF model.File
 	if err != nil {
 		if e.Code != "PreHashMatched" || !rapidUpload {
 			return nil, err
 		}
 		log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
 
-		hi := stream.GetHash()
-		hash := hi.GetHash(utils.SHA1)
-		if len(hash) <= 0 {
-			tmpF, err = stream.CacheFullInTempFile()
+		hash := stream.GetHash().GetHash(utils.SHA1)
+		if len(hash) != utils.SHA1.Width {
+			_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
 			if err != nil {
 				return nil, err
 			}
-			hash, err = utils.HashFile(utils.SHA1, tmpF)
-			if err != nil {
-				return nil, err
-			}
-
 		}
 
 		delete(createData, "pre_hash")
@@ -251,8 +247,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 			rd = utils.NewMultiReadable(srd)
 		}
 		err = retry.Do(func() error {
-			rd.Reset()
-			return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
+			_ = rd.Reset()
+			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+			return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
 		},
 			retry.Attempts(3),
 			retry.DelayType(retry.BackOffDelay),
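The calProofCode hunk above replaces a growable buffer with a fixed-size slice filled by io.ReadFull, failing loudly when the range comes up short. A self-contained sketch of that exact-read pattern; the function name is illustrative:

package main

import (
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

// readExactly reads exactly length bytes from r and returns them base64-encoded,
// mirroring the io.ReadFull + io.ErrUnexpectedEOF handling in calProofCode.
func readExactly(r io.Reader, length int64) (string, error) {
	buf := make([]byte, length)
	n, err := io.ReadFull(r, buf)
	if err == io.ErrUnexpectedEOF {
		return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
	}
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
	s, err := readExactly(strings.NewReader("0123456789"), 8)
	fmt.Println(s, err) // MDEyMzQ1Njc= <nil>
}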
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
@@ -186,3 +187,36 @@ func (d *AliyundriveOpen) getAccessToken() string {
 	}
 	return d.AccessToken
 }
+
+// Remove duplicate files with the same name in the given directory path,
+// preserving the file with the given skipID if provided
+func (d *AliyundriveOpen) removeDuplicateFiles(ctx context.Context, parentPath string, fileName string, skipID string) error {
+	// Handle empty path (root directory) case
+	if parentPath == "" {
+		parentPath = "/"
+	}
+
+	// List all files in the parent directory
+	files, err := op.List(ctx, d, parentPath, model.ListArgs{})
+	if err != nil {
+		return err
+	}
+
+	// Find all files with the same name
+	var duplicates []model.Obj
+	for _, file := range files {
+		if file.GetName() == fileName && file.GetID() != skipID {
+			duplicates = append(duplicates, file)
+		}
+	}
+
+	// Remove all duplicates files, except the file with the given ID
+	for _, file := range duplicates {
+		err := d.Remove(ctx, file)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
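A standalone sketch of the dedup strategy the new removeDuplicateFiles helper applies: list a directory, keep the entry whose ID is protected, and delete every other entry with the same name. The entry type and the remove callback are illustrative stand-ins for model.Obj and d.Remove:

package main

import "fmt"

type entry struct {
	ID, Name string
}

// removeDuplicates deletes every entry sharing name whose ID differs from keepID.
func removeDuplicates(listing []entry, name, keepID string, remove func(entry) error) error {
	for _, e := range listing {
		if e.Name == name && e.ID != keepID {
			if err := remove(e); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	listing := []entry{{"1", "a.txt"}, {"2", "a.txt"}, {"3", "b.txt"}}
	_ = removeDuplicates(listing, "a.txt", "2", func(e entry) error {
		fmt.Println("removing", e.ID, e.Name) // only ID "1" is removed
		return nil
	})
}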
@@ -2,6 +2,7 @@ package drivers
 
 import (
 	_ "github.com/alist-org/alist/v3/drivers/115"
+	_ "github.com/alist-org/alist/v3/drivers/115_open"
 	_ "github.com/alist-org/alist/v3/drivers/115_share"
 	_ "github.com/alist-org/alist/v3/drivers/123"
 	_ "github.com/alist-org/alist/v3/drivers/123_link"
@@ -15,12 +16,16 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/aliyundrive"
 	_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
 	_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
+	_ "github.com/alist-org/alist/v3/drivers/azure_blob"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_share"
 	_ "github.com/alist-org/alist/v3/drivers/chaoxing"
 	_ "github.com/alist-org/alist/v3/drivers/cloudreve"
+	_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
 	_ "github.com/alist-org/alist/v3/drivers/crypt"
+	_ "github.com/alist-org/alist/v3/drivers/doubao"
+	_ "github.com/alist-org/alist/v3/drivers/doubao_share"
 	_ "github.com/alist-org/alist/v3/drivers/dropbox"
 	_ "github.com/alist-org/alist/v3/drivers/febbox"
 	_ "github.com/alist-org/alist/v3/drivers/ftp"
drivers/azure_blob/driver.go (new file, 313 lines)
@@ -0,0 +1,313 @@
+package azure_blob
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"path"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/model"
+)
+// Azure Blob Storage based on the blob APIs
+// Link: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
+type AzureBlob struct {
+	model.Storage
+	Addition
+	client *azblob.Client
+	containerClient *container.Client
+	config driver.Config
+}
+
+// Config returns the driver configuration.
+func (d *AzureBlob) Config() driver.Config {
+	return d.config
+}
+
+// GetAddition returns additional settings specific to Azure Blob Storage.
+func (d *AzureBlob) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+// Init initializes the Azure Blob Storage client using shared key authentication.
+func (d *AzureBlob) Init(ctx context.Context) error {
+	// Validate the endpoint URL
+	accountName := extractAccountName(d.Addition.Endpoint)
+	if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) {
+		return fmt.Errorf("invalid storage account name: must be chars of lowercase letters or numbers only")
+	}
+
+	credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey)
+	if err != nil {
+		return fmt.Errorf("failed to create credential: %w", err)
+	}
+
+	// Check if Endpoint is just account name
+	endpoint := d.Addition.Endpoint
+	if accountName == endpoint {
+		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
+	}
+	// Initialize Azure Blob client with retry policy
+	client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential,
+		&azblob.ClientOptions{ClientOptions: azcore.ClientOptions{
+			Retry: policy.RetryOptions{
+				MaxRetries: MaxRetries,
+				RetryDelay: RetryDelay,
+			},
+		}})
+	if err != nil {
+		return fmt.Errorf("failed to create client: %w", err)
+	}
+	d.client = client
+
+	// Ensure container exists or create it
+	containerName := strings.Trim(d.Addition.ContainerName, "/ \\")
+	if containerName == "" {
+		return fmt.Errorf("container name cannot be empty")
+	}
+	return d.createContainerIfNotExists(ctx, containerName)
+}
+
+// Drop releases resources associated with the Azure Blob client.
+func (d *AzureBlob) Drop(ctx context.Context) error {
+	d.client = nil
+	return nil
+}
+
+// List retrieves blobs and directories under the specified path.
+func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	prefix := ensureTrailingSlash(dir.GetPath())
+
+	pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
+		Prefix: &prefix,
+	})
+
+	var objs []model.Obj
+	for pager.More() {
+		page, err := pager.NextPage(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to list blobs: %w", err)
+		}
+
+		// Process directories
+		for _, blobPrefix := range page.Segment.BlobPrefixes {
+			objs = append(objs, &model.Object{
+				Name: path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")),
+				Path: *blobPrefix.Name,
+				Modified: *blobPrefix.Properties.LastModified,
+				Ctime: *blobPrefix.Properties.CreationTime,
+				IsFolder: true,
+			})
+		}
+
+		// Process files
+		for _, blob := range page.Segment.BlobItems {
+			if strings.HasSuffix(*blob.Name, "/") {
+				continue
+			}
+			objs = append(objs, &model.Object{
+				Name: path.Base(*blob.Name),
+				Path: *blob.Name,
+				Size: *blob.Properties.ContentLength,
+				Modified: *blob.Properties.LastModified,
+				Ctime: *blob.Properties.CreationTime,
+				IsFolder: false,
+			})
+		}
+	}
+	return objs, nil
+}
+
+// Link generates a temporary SAS URL for accessing a blob.
+func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	blobClient := d.containerClient.NewBlobClient(file.GetPath())
+	expireDuration := time.Hour * time.Duration(d.SignURLExpire)
+
+	sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate SAS URL: %w", err)
+	}
+	return &model.Link{URL: sasURL}, nil
+}
+
+// MakeDir creates a virtual directory by uploading an empty blob as a marker.
+func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	dirPath := path.Join(parentDir.GetPath(), dirName)
+	if err := d.mkDir(ctx, dirPath); err != nil {
+		return nil, fmt.Errorf("failed to create directory marker: %w", err)
+	}
+
+	return &model.Object{
+		Path: dirPath,
+		Name: dirName,
+		IsFolder: true,
+	}, nil
+}
+
+// Move relocates an object (file or directory) to a new directory.
+func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	srcPath := srcObj.GetPath()
+	dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
+
+	if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
+		return nil, fmt.Errorf("move operation failed: %w", err)
+	}
+
+	return &model.Object{
+		Path: dstPath,
+		Name: srcObj.GetName(),
+		Modified: time.Now(),
+		IsFolder: srcObj.IsDir(),
+		Size: srcObj.GetSize(),
+	}, nil
+}
+
+// Rename changes the name of an existing object.
+func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+	srcPath := srcObj.GetPath()
+	dstPath := path.Join(path.Dir(srcPath), newName)
+
+	if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
+		return nil, fmt.Errorf("rename operation failed: %w", err)
+	}
+
+	return &model.Object{
+		Path: dstPath,
+		Name: newName,
+		Modified: time.Now(),
+		IsFolder: srcObj.IsDir(),
+		Size: srcObj.GetSize(),
+	}, nil
+}
+
+// Copy duplicates an object (file or directory) to a specified destination directory.
+func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
+
+	// Handle directory copying using flat listing
+	if srcObj.IsDir() {
+		srcPrefix := srcObj.GetPath()
+		srcPrefix = ensureTrailingSlash(srcPrefix)
+
+		// Get all blobs under the source directory
+		blobs, err := d.flattenListBlobs(ctx, srcPrefix)
+		if err != nil {
+			return nil, fmt.Errorf("failed to list source directory contents: %w", err)
+		}
+
+		// Process each blob - copy to destination
+		for _, blob := range blobs {
+			// Skip the directory marker itself
+			if *blob.Name == srcPrefix {
+				continue
+			}
+
+			// Calculate relative path from source
+			relPath := strings.TrimPrefix(*blob.Name, srcPrefix)
+			itemDstPath := path.Join(dstPath, relPath)
+
+			if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") {
+				// Create directory marker at destination
+				err := d.mkDir(ctx, itemDstPath)
+				if err != nil {
+					return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
+				}
+			} else {
+				// Copy the blob
+				if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil {
+					return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err)
+				}
+			}
+
+		}
+
+		// Create directory marker at destination if needed
+		if len(blobs) == 0 {
+			err := d.mkDir(ctx, dstPath)
+			if err != nil {
+				return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
+			}
+		}
+
+		return &model.Object{
+			Path: dstPath,
+			Name: srcObj.GetName(),
+			Modified: time.Now(),
+			IsFolder: true,
+		}, nil
+	}
+
+	// Copy a single file
+	if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil {
+		return nil, fmt.Errorf("failed to copy blob: %w", err)
+	}
+	return &model.Object{
+		Path: dstPath,
+		Name: srcObj.GetName(),
+		Size: srcObj.GetSize(),
+		Modified: time.Now(),
+		IsFolder: false,
+	}, nil
+}
+
+// Remove deletes a specified blob or recursively deletes a directory and its contents.
+func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error {
+	path := obj.GetPath()
+
+	// Handle recursive directory deletion
+	if obj.IsDir() {
+		return d.deleteFolder(ctx, path)
+	}
+
+	// Delete single file
+	return d.deleteFile(ctx, path, false)
+}
+
+// Put uploads a file stream to Azure Blob Storage with progress tracking.
+func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	blobPath := path.Join(dstDir.GetPath(), stream.GetName())
+	blobClient := d.containerClient.NewBlockBlobClient(blobPath)
+
+	// Determine optimal upload options based on file size
+	options := optimizedUploadOptions(stream.GetSize())
+
+	// Track upload progress
+	progressTracker := &progressTracker{
+		total: stream.GetSize(),
+		updateProgress: up,
+	}
+
+	// Wrap stream to handle context cancellation and progress tracking
+	limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker))
+
+	// Upload the stream to Azure Blob Storage
+	_, err := blobClient.UploadStream(ctx, limitedStream, options)
+	if err != nil {
+		return nil, fmt.Errorf("failed to upload file: %w", err)
+	}
+
+	return &model.Object{
+		Path: blobPath,
+		Name: stream.GetName(),
+		Size: stream.GetSize(),
+		Modified: time.Now(),
+		IsFolder: false,
+	}, nil
+}
+
+// The following methods related to archive handling are not implemented yet.
+// func (d *AzureBlob) GetArchiveMeta(...) {...}
+// func (d *AzureBlob) ListArchive(...) {...}
+// func (d *AzureBlob) Extract(...) {...}
+// func (d *AzureBlob) ArchiveDecompress(...) {...}
+
+// Ensure AzureBlob implements the driver.Driver interface.
+var _ driver.Driver = (*AzureBlob)(nil)
drivers/azure_blob/meta.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+package azure_blob
+
+import (
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/op"
+)
+
+type Addition struct {
+	Endpoint string `json:"endpoint" required:"true" default:"https://<accountname>.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3 ~ 24 numbers and lowercase letters only)."`
+	AccessKey string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"`
+	ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"`
+	SignURLExpire int `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."`
+}
+
+// implement GetRootId interface
+func (r Addition) GetRootId() string {
+	return r.ContainerName
+}
+
+var config = driver.Config{
+	Name: "Azure Blob Storage",
+	LocalSort: true,
+	CheckStatus: true,
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &AzureBlob{
+			config: config,
+		}
+	})
+}
drivers/azure_blob/types.go (new file, 20 lines)
@@ -0,0 +1,20 @@
+package azure_blob
+
+import "github.com/alist-org/alist/v3/internal/driver"
+
+// progressTracker is used to track upload progress
+type progressTracker struct {
+	total int64
+	current int64
+	updateProgress driver.UpdateProgress
+}
+
+// Write implements io.Writer to track progress
+func (pt *progressTracker) Write(p []byte) (n int, err error) {
+	n = len(p)
+	pt.current += int64(n)
+	if pt.updateProgress != nil && pt.total > 0 {
+		pt.updateProgress(float64(pt.current) * 100 / float64(pt.total))
+	}
+	return n, nil
+}
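driver.go's Put consumes progressTracker through io.TeeReader: every read of the upload stream also writes the same bytes into the tracker, which turns the running byte count into a percentage. A self-contained sketch of that writer-as-progress-probe pattern (names are illustrative):

package main

import (
	"fmt"
	"io"
	"strings"
)

// tracker is an io.Writer that only counts; io.TeeReader feeds it a copy of
// everything the uploader reads from the source stream.
type tracker struct {
	total, current int64
	report         func(percent float64)
}

func (t *tracker) Write(p []byte) (int, error) {
	t.current += int64(len(p))
	if t.report != nil && t.total > 0 {
		t.report(float64(t.current) * 100 / float64(t.total))
	}
	return len(p), nil
}

func main() {
	src := strings.NewReader("hello azure blob storage")
	t := &tracker{total: src.Size(), report: func(p float64) { fmt.Printf("%.0f%%\n", p) }}
	// Every read from r lands in t as a side effect, updating progress.
	r := io.TeeReader(src, t)
	_, _ = io.Copy(io.Discard, r)
}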
drivers/azure_blob/util.go (new file, 401 lines)
@@ -0,0 +1,401 @@
+package azure_blob
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+	log "github.com/sirupsen/logrus"
+)
+
+const (
+	// MaxRetries defines the maximum number of retry attempts for Azure operations
+	MaxRetries = 3
+	// RetryDelay defines the base delay between retries
+	RetryDelay = 3 * time.Second
+	// MaxBatchSize defines the maximum number of operations in a single batch request
+	MaxBatchSize = 128
+)
+
+// extractAccountName extracts the account name from an Azure storage Endpoint
+func extractAccountName(endpoint string) string {
+	// Strip the protocol prefix
+	endpoint = strings.TrimPrefix(endpoint, "https://")
+	endpoint = strings.TrimPrefix(endpoint, "http://")
+
+	// Take the part before the first dot (the account name)
+	parts := strings.Split(endpoint, ".")
+	if len(parts) > 0 {
+		// to lower case
+		return strings.ToLower(parts[0])
+	}
+	return ""
+}
+
+// isNotFoundError checks if the error is a "not found" type error
+func isNotFoundError(err error) bool {
+	var storageErr *azcore.ResponseError
+	if errors.As(err, &storageErr) {
+		return storageErr.StatusCode == 404
+	}
+	// Fallback to string matching for backwards compatibility
+	return err != nil && strings.Contains(err.Error(), "BlobNotFound")
+}
+
+// flattenListBlobs - Optimize blob listing to handle pagination better
+func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) {
+	// Standardize prefix format
+	prefix = ensureTrailingSlash(prefix)
+
+	var blobItems []container.BlobItem
+	pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
+		Prefix: &prefix,
+		Include: container.ListBlobsInclude{
+			Metadata: true,
+		},
+	})
+
+	for pager.More() {
+		page, err := pager.NextPage(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to list blobs: %w", err)
+		}
+
+		for _, blob := range page.Segment.BlobItems {
+			blobItems = append(blobItems, *blob)
+		}
+	}
+
+	return blobItems, nil
+}
+
+// batchDeleteBlobs - Simplify batch deletion logic
+func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error {
+	if len(blobPaths) == 0 {
+		return nil
+	}
+
+	// Process in batches of MaxBatchSize
+	for i := 0; i < len(blobPaths); i += MaxBatchSize {
+		end := min(i+MaxBatchSize, len(blobPaths))
+		currentBatch := blobPaths[i:end]
+
+		// Create batch builder
+		batchBuilder, err := d.containerClient.NewBatchBuilder()
+		if err != nil {
+			return fmt.Errorf("failed to create batch builder: %w", err)
+		}
+
+		// Add delete operations
+		for _, blobPath := range currentBatch {
+			if err := batchBuilder.Delete(blobPath, nil); err != nil {
+				return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err)
+			}
+		}
+
+		// Submit batch
+		responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil)
+		if err != nil {
+			return fmt.Errorf("batch delete request failed: %w", err)
+		}
+
+		// Check responses
+		for _, resp := range responses.Responses {
+			if resp.Error != nil && !isNotFoundError(resp.Error) {
+				// Use the blob name for a more helpful error message
+				blobName := "unknown"
+				if resp.BlobName != nil {
+					blobName = *resp.BlobName
+				}
+				return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error)
+			}
+		}
+	}
+
+	return nil
+}
+
+// deleteFolder recursively deletes a directory and all its contents
+func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error {
+	// Ensure directory path ends with slash
+	prefix = ensureTrailingSlash(prefix)
+
+	// Get all blobs under the directory using flattenListBlobs
+	globs, err := d.flattenListBlobs(ctx, prefix)
+	if err != nil {
+		return fmt.Errorf("failed to list blobs for deletion: %w", err)
+	}
+
+	// If there are blobs in the directory, delete them
+	if len(globs) > 0 {
+		// Separate files from directory markers
+		var filePaths []string
+		var dirPaths []string
+
+		for _, blob := range globs {
+			blobName := *blob.Name
+			if isDirectory(blob) {
+				// remove trailing slash for directory names
+				dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/"))
+			} else {
+				filePaths = append(filePaths, blobName)
+			}
+		}
+
+		// Delete files first, then directories
+		if len(filePaths) > 0 {
+			if err := d.batchDeleteBlobs(ctx, filePaths); err != nil {
+				return err
+			}
+		}
+		if len(dirPaths) > 0 {
+			// Group by path depth
+			depthMap := make(map[int][]string)
+			for _, dir := range dirPaths {
+				depth := strings.Count(dir, "/") // compute directory depth
+				depthMap[depth] = append(depthMap[depth], dir)
+			}
+
+			// Sort depths from deepest to shallowest
+			var depths []int
+			for depth := range depthMap {
+				depths = append(depths, depth)
+			}
+			sort.Sort(sort.Reverse(sort.IntSlice(depths)))
+
+			// Batch-delete level by level, deepest first
+			for _, depth := range depths {
+				batch := depthMap[depth]
+				if err := d.batchDeleteBlobs(ctx, batch); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	// Finally delete the directory marker itself
+	return d.deleteEmptyDirectory(ctx, prefix)
+}
+
+// deleteFile deletes a single file or blob with better error handling
+func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error {
+	blobClient := d.containerClient.NewBlobClient(path)
+	_, err := blobClient.Delete(ctx, nil)
+	if err != nil && !(isDir && isNotFoundError(err)) {
+		return err
+	}
+	return nil
+}
+
+// copyFile copies a single blob from source path to destination path
+func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error {
+	srcBlob := d.containerClient.NewBlobClient(srcPath)
+	dstBlob := d.containerClient.NewBlobClient(dstPath)
+
+	// Use configured expiration time for SAS URL
+	expireDuration := time.Hour * time.Duration(d.SignURLExpire)
+	srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
+	if err != nil {
+		return fmt.Errorf("failed to generate source SAS URL: %w", err)
+	}
+
+	_, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil)
+	return err
+
+}
+
+// createContainerIfNotExists - Create container if not exists
+// Clean up commented code
+func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error {
+	serviceClient := d.client.ServiceClient()
+	containerClient := serviceClient.NewContainerClient(containerName)
+
+	var options = service.CreateContainerOptions{}
+	_, err := containerClient.Create(ctx, &options)
+	if err != nil {
+		var responseErr *azcore.ResponseError
+		if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" {
+			return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err)
+		}
+	}
+
+	d.containerClient = containerClient
+	return nil
+}
+
+// mkDir creates a virtual directory marker by uploading an empty blob with metadata.
+func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error {
+	dirPath := ensureTrailingSlash(fullDirName)
+	blobClient := d.containerClient.NewBlockBlobClient(dirPath)
+
+	// Upload an empty blob with metadata indicating it's a directory
+	_, err := blobClient.Upload(ctx, struct {
+		*bytes.Reader
+		io.Closer
+	}{
+		Reader: bytes.NewReader([]byte{}),
+		Closer: io.NopCloser(nil),
+	}, &blockblob.UploadOptions{
+		Metadata: map[string]*string{
+			"hdi_isfolder": to.Ptr("true"),
+		},
+	})
+	return err
+}
+
+// ensureTrailingSlash ensures the provided path ends with a trailing slash.
+func ensureTrailingSlash(path string) string {
+	if !strings.HasSuffix(path, "/") {
+		return path + "/"
+	}
+	return path
+}
+
+// moveOrRename moves or renames blobs or directories from source to destination.
+func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error {
+	if isDir {
+		// Normalize paths for directory operations
+		srcPath = ensureTrailingSlash(srcPath)
+		dstPath = ensureTrailingSlash(dstPath)
+
+		// List all blobs under the source directory
+		blobs, err := d.flattenListBlobs(ctx, srcPath)
+		if err != nil {
+			return fmt.Errorf("failed to list blobs: %w", err)
+		}
+
+		// Iterate and copy each blob to the destination
+		for _, item := range blobs {
+			srcBlobName := *item.Name
+			relPath := strings.TrimPrefix(srcBlobName, srcPath)
+			itemDstPath := path.Join(dstPath, relPath)
+
+			if isDirectory(item) {
+				// Create directory marker at destination
+				if err := d.mkDir(ctx, itemDstPath); err != nil {
+					return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
+				}
+			} else {
+				// Copy file blob to destination
+				if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil {
+					return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err)
+				}
+			}
+		}
+
+		// Handle empty directories by creating a marker at destination
+		if len(blobs) == 0 {
+			if err := d.mkDir(ctx, dstPath); err != nil {
+				return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
+			}
+		}
+
+		// Delete source directory and its contents
+		if err := d.deleteFolder(ctx, srcPath); err != nil {
+			log.Warnf("failed to delete source directory [%s]: %v\n, and try again", srcPath, err)
+			// Retry deletion once more and ignore the result
+			if err := d.deleteFolder(ctx, srcPath); err != nil {
+				log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err)
+			}
+		}
+
+		return nil
+	}
+
+	// Single file move or rename operation
+	if err := d.copyFile(ctx, srcPath, dstPath); err != nil {
+		return fmt.Errorf("failed to copy file: %w", err)
+	}
+
+	// Delete source file after successful copy
+	if err := d.deleteFile(ctx, srcPath, false); err != nil {
+		log.Errorf("Error deleting source file [%s]: %v", srcPath, err)
+	}
+	return nil
+}
+
+// optimizedUploadOptions returns the optimal upload options based on file size
+func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions {
+	options := &azblob.UploadStreamOptions{
+		BlockSize: 4 * 1024 * 1024, // 4MB block size
+		Concurrency: 4, // Default concurrency
+	}
+
+	// For large files, increase block size and concurrency
+	if fileSize > 256*1024*1024 { // For files larger than 256MB
+		options.BlockSize = 8 * 1024 * 1024 // 8MB blocks
+		options.Concurrency = 8 // More concurrent uploads
+	}
+
+	// For very large files (>1GB)
+	if fileSize > 1024*1024*1024 {
+		options.BlockSize = 16 * 1024 * 1024 // 16MB blocks
+		options.Concurrency = 16 // Higher concurrency
+	}
+
+	return options
+}
+
+// isDirectory determines if a blob represents a directory
+// Checks multiple indicators: path suffix, metadata, and content type
+func isDirectory(blob container.BlobItem) bool {
+	// Check path suffix
+	if strings.HasSuffix(*blob.Name, "/") {
+		return true
+	}
+
+	// Check metadata for directory marker
+	if blob.Metadata != nil {
+		if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" {
+			return true
+		}
+		// Azure Storage Explorer and other tools may use different metadata keys
+		if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" {
+			return true
+		}
+	}
+
+	// Check content type (some tools mark directories with specific content types)
+	if blob.Properties != nil && blob.Properties.ContentType != nil {
+		contentType := strings.ToLower(*blob.Properties.ContentType)
+		if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") {
+			return true
+		}
+	}
+
+	return false
+}
+
+// deleteEmptyDirectory deletes a directory only if it's empty
+func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error {
+	// Directory is empty, delete the directory marker
+	blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/"))
+	_, err := blobClient.Delete(ctx, nil)
+
+	// Also try deleting with trailing slash (for different directory marker formats)
+	if err != nil && isNotFoundError(err) {
+		blobClient = d.containerClient.NewBlobClient(dirPath)
+		_, err = blobClient.Delete(ctx, nil)
+	}
+
+	// Ignore not found errors
+	if err != nil && isNotFoundError(err) {
+		log.Infof("Directory [%s] not found during deletion: %v", dirPath, err)
+		return nil
+	}
+
+	return err
+}
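deleteFolder above removes directory markers deepest-first so children disappear before their parents. A standalone sketch of that depth-bucketing order; del stands in for batchDeleteBlobs:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// deleteDeepestFirst buckets paths by slash count and deletes the deepest
// bucket first, guaranteeing no directory is removed before its contents.
func deleteDeepestFirst(dirs []string, del func(batch []string) error) error {
	byDepth := make(map[int][]string)
	for _, dir := range dirs {
		depth := strings.Count(dir, "/")
		byDepth[depth] = append(byDepth[depth], dir)
	}
	depths := make([]int, 0, len(byDepth))
	for depth := range byDepth {
		depths = append(depths, depth)
	}
	sort.Sort(sort.Reverse(sort.IntSlice(depths)))
	for _, depth := range depths {
		if err := del(byDepth[depth]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	dirs := []string{"a", "a/b", "a/b/c"}
	_ = deleteDeepestFirst(dirs, func(batch []string) error {
		fmt.Println("deleting", batch) // a/b/c, then a/b, then a
		return nil
	})
}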
@@ -6,13 +6,16 @@ import (
|
|||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"os"
|
||||||
stdpath "path"
|
stdpath "path"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sync/semaphore"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
@@ -76,6 +79,8 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA
|
|||||||
func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
 func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	if d.DownloadAPI == "crack" {
 		return d.linkCrack(file, args)
+	} else if d.DownloadAPI == "crack_video" {
+		return d.linkCrackVideo(file, args)
 	}
 	return d.linkOfficial(file, args)
 }
@@ -181,21 +186,35 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 		return newObj, nil
 	}

-	tempFile, err := stream.CacheFullInTempFile()
-	if err != nil {
-		return nil, err
+	var (
+		cache = stream.GetFile()
+		tmpF  *os.File
+		err   error
+	)
+	if _, ok := cache.(io.ReaderAt); !ok {
+		tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
+		if err != nil {
+			return nil, err
+		}
+		defer func() {
+			_ = tmpF.Close()
+			_ = os.Remove(tmpF.Name())
+		}()
+		cache = tmpF
 	}

 	streamSize := stream.GetSize()
-	sliceSize := d.getSliceSize()
-	count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
+	sliceSize := d.getSliceSize(streamSize)
+	count := int(streamSize / sliceSize)
 	lastBlockSize := streamSize % sliceSize
-	if streamSize > 0 && lastBlockSize == 0 {
+	if lastBlockSize > 0 {
+		count++
+	} else {
 		lastBlockSize = sliceSize
 	}

 	//cal md5 for first 256k data
-	const SliceSize int64 = 256 * 1024
+	const SliceSize int64 = 256 * utils.KB
 	// cal md5
 	blockList := make([]string, 0, count)
 	byteSize := sliceSize
@@ -203,6 +222,11 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 	sliceMd5H := md5.New()
 	sliceMd5H2 := md5.New()
 	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
+	writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
+	if tmpF != nil {
+		writers = append(writers, tmpF)
+	}
+	written := int64(0)

 	for i := 1; i <= count; i++ {
 		if utils.IsCanceled(ctx) {
@@ -211,13 +235,23 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 		if i == count {
 			byteSize = lastBlockSize
 		}
-		_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
+		n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize)
+		written += n
 		if err != nil && err != io.EOF {
 			return nil, err
 		}
 		blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
 		sliceMd5H.Reset()
 	}
+	if tmpF != nil {
+		if written != streamSize {
+			return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
+		}
+		_, err = tmpF.Seek(0, io.SeekStart)
+		if err != nil {
+			return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
+		}
+	}
 	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
 	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
 	blockListStr, _ := utils.Json.MarshalToString(blockList)
@@ -260,9 +294,10 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 	}
 	// step.2: upload the slices
 	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
-		retry.Attempts(3),
+		retry.Attempts(1),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
@@ -273,6 +308,10 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 			byteSize = lastBlockSize
 		}
 		threadG.Go(func(ctx context.Context) error {
+			if err = sem.Acquire(ctx, 1); err != nil {
+				return err
+			}
+			defer sem.Release(1)
 			params := map[string]string{
 				"method":       "upload",
 				"access_token": d.AccessToken,
@@ -281,7 +320,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 				"uploadid": precreateResp.Uploadid,
 				"partseq":  strconv.Itoa(partseq),
 			}
-			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+			err := d.uploadSlice(ctx, params, stream.GetName(),
+				driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
			if err != nil {
 				return err
 			}
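
The Put hunks above replace the old cache-everything-first upload with a single streaming pass (teeing into a temp file only when the stream is not already seekable), drop per-slice retry.Attempts from 3 to 1, and add a weighted semaphore so at most three slices upload concurrently. A minimal, self-contained sketch of the errgroup-plus-semaphore pattern, using the stock golang.org/x/sync packages rather than AList's internal errgroup wrapper (uploadPart is a placeholder, not an AList API):

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/sync/errgroup"
        "golang.org/x/sync/semaphore"
    )

    // uploadPart stands in for the real slice upload (hypothetical helper).
    func uploadPart(_ context.Context, part int) error {
        time.Sleep(100 * time.Millisecond) // stand-in for network I/O
        fmt.Println("uploaded part", part)
        return nil
    }

    func main() {
        g, ctx := errgroup.WithContext(context.Background())
        sem := semaphore.NewWeighted(3) // hard cap: at most 3 parts in flight

        for part := 0; part < 10; part++ {
            part := part
            g.Go(func() error {
                // Block until one of the 3 slots frees up, or the group's
                // context is canceled by another worker's error.
                if err := sem.Acquire(ctx, 1); err != nil {
                    return err
                }
                defer sem.Release(1)
                return uploadPart(ctx, part)
            })
        }
        if err := g.Wait(); err != nil {
            fmt.Println("upload failed:", err)
        }
    }

Acquire blocks until a slot frees or the group context is canceled, so one failed part unblocks the waiting workers and stops the upload promptly.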

@@ -8,16 +8,18 @@ import (
 type Addition struct {
 	RefreshToken string `json:"refresh_token" required:"true"`
 	driver.RootPath
 	OrderBy        string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-	DownloadAPI    string `json:"download_api" type:"select" options:"official,crack" default:"official"`
+	DownloadAPI    string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
 	ClientID       string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
 	ClientSecret   string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
 	CustomCrackUA  string `json:"custom_crack_ua" required:"true" default:"netdisk"`
 	AccessToken    string
 	UploadThread   string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
 	UploadAPI      string `json:"upload_api" default:"https://d.pcs.baidu.com"`
 	CustomUploadPartSize  int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
+	LowBandwithUploadMode bool  `json:"low_bandwith_upload_mode" default:"false"`
+	OnlyListVideoFile     bool  `json:"only_list_video_file" default:"false"`
 }

 var config = driver.Config{

@@ -17,7 +17,7 @@ type TokenErrResp struct {
 type File struct {
 	//TkbindId     int    `json:"tkbind_id"`
 	//OwnerType    int    `json:"owner_type"`
-	//Category     int    `json:"category"`
+	Category     int    `json:"category"`
 	//RealCategory string `json:"real_category"`
 	FsId         int64  `json:"fs_id"`
 	//OperId       int    `json:"oper_id"`

@@ -79,6 +79,12 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
 				return retry.Unrecoverable(err2)
 			}
 		}
+
+		if 31023 == errno && d.DownloadAPI == "crack_video" {
+			result = res.Body()
+			return nil
+		}
+
 		return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
 	}
 	result = res.Body()
@@ -131,12 +137,21 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
 		if len(resp.List) == 0 {
 			break
 		}
-		res = append(res, resp.List...)
+
+		if d.OnlyListVideoFile {
+			for _, file := range resp.List {
+				if file.Isdir == 1 || file.Category == 1 {
+					res = append(res, file)
+				}
+			}
+		} else {
+			res = append(res, resp.List...)
+		}
 	}
 	return res, nil
 }

-func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link, error) {
+func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
 	var resp DownloadResp
 	params := map[string]string{
 		"method": "filemetas",
@@ -164,7 +179,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
 	}, nil
 }

-func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Link, error) {
+func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
 	var resp DownloadResp2
 	param := map[string]string{
 		"target": fmt.Sprintf("[\"%s\"]", file.GetPath()),
@@ -187,6 +202,34 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
 	}, nil
 }

+func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
+	param := map[string]string{
+		"type":       "VideoURL",
+		"path":       fmt.Sprintf("%s", file.GetPath()),
+		"fs_id":      file.GetID(),
+		"devuid":     "0%1",
+		"clienttype": "1",
+		"channel":    "android_15_25010PN30C_bd-netdisk_1523a",
+		"nom3u8":     "1",
+		"dlink":      "1",
+		"media":      "1",
+		"origin":     "dlna",
+	}
+	resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) {
+		req.SetQueryParams(param)
+	}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return &model.Link{
+		URL: utils.Json.Get(resp, "info", "dlink").ToString(),
+		Header: http.Header{
+			"User-Agent": []string{d.CustomCrackUA},
+		},
+	}, nil
+}
+
 func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
 	params := map[string]string{
 		"method": "filemanager",
@@ -230,22 +273,72 @@ func joinTime(form map[string]string, ctime, mtime int64) {

 const (
 	DefaultSliceSize int64 = 4 * utils.MB
-	VipSliceSize           = 16 * utils.MB
-	SVipSliceSize          = 32 * utils.MB
+	VipSliceSize     int64 = 16 * utils.MB
+	SVipSliceSize    int64 = 32 * utils.MB
+
+	MaxSliceNum       = 2048 // the docs say 1024 (or say nothing), but testing shows 2048
+	SliceStep   int64 = 1 * utils.MB
 )

-func (d *BaiduNetdisk) getSliceSize() int64 {
-	if d.CustomUploadPartSize != 0 {
-		return d.CustomUploadPartSize
-	}
-	switch d.vipType {
-	case 1:
-		return VipSliceSize
-	case 2:
-		return SVipSliceSize
-	default:
+func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
+	// non-vip accounts are fixed at 4MB
+	if d.vipType == 0 {
+		if d.CustomUploadPartSize != 0 {
+			log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
+		}
+		if filesize > MaxSliceNum*DefaultSliceSize {
+			log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
+		}
+
 		return DefaultSliceSize
 	}
+
+	if d.CustomUploadPartSize != 0 {
+		if d.CustomUploadPartSize < DefaultSliceSize {
+			log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
+			return DefaultSliceSize
+		}
+
+		if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
+			log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
+			return VipSliceSize
+		}
+
+		if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
+			log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
+			return SVipSliceSize
+		}
+
+		return d.CustomUploadPartSize
+	}
+
+	maxSliceSize := DefaultSliceSize
+
+	switch d.vipType {
+	case 1:
+		maxSliceSize = VipSliceSize
+	case 2:
+		maxSliceSize = SVipSliceSize
+	}
+
+	// upload on low bandwidth
+	if d.LowBandwithUploadMode {
+		size := DefaultSliceSize
+
+		for size <= maxSliceSize {
+			if filesize <= MaxSliceNum*size {
+				return size
+			}
+
+			size += SliceStep
+		}
+	}
+
+	if filesize > MaxSliceNum*maxSliceSize {
+		log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
+	}
+
+	return maxSliceSize
 }

 // func encodeURIComponent(str string) string {
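
In low-bandwidth mode the rewritten getSliceSize grows the part size in 1 MB steps from the 4 MB default until MaxSliceNum parts cover the file, instead of jumping straight to the vip maximum. A standalone sketch of that search with the constants from the diff (vip clamping omitted for brevity):

    package main

    import "fmt"

    const (
        MB               = int64(1024 * 1024)
        DefaultSliceSize = 4 * MB
        VipSliceSize     = 16 * MB
        MaxSliceNum      = 2048 // measured limit, per the comment in the diff
        SliceStep        = 1 * MB
    )

    // smallestSlice returns the smallest part size (stepping up from 4 MB)
    // that fits the file into at most MaxSliceNum parts, capped at maxSlice.
    func smallestSlice(filesize, maxSlice int64) int64 {
        for size := DefaultSliceSize; size <= maxSlice; size += SliceStep {
            if filesize <= MaxSliceNum*size {
                return size
            }
        }
        return maxSlice
    }

    func main() {
        // 10 GiB file on a vip account: 4 MB parts would need 2560 > 2048
        // parts, so the search settles on 5 MB (2048 parts cover 10 GiB).
        fmt.Println(smallestSlice(10<<30, VipSliceSize) / MB) // prints 5
    }

The payoff is smaller in-flight chunks on slow links while still respecting the server-side cap on part count.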

@@ -7,13 +7,16 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math"
+	"os"
 	"regexp"
 	"strconv"
 	"strings"
 	"time"

+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -239,11 +242,21 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil

 	// TODO:
 	// no rapid-upload (instant upload) method found yet
-	// the full-file md5 is required, so io.Seek must be supported
-	tempFile, err := stream.CacheFullInTempFile()
-	if err != nil {
-		return nil, err
+	var (
+		cache = stream.GetFile()
+		tmpF  *os.File
+		err   error
+	)
+	if _, ok := cache.(io.ReaderAt); !ok {
+		tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
+		if err != nil {
+			return nil, err
+		}
+		defer func() {
+			_ = tmpF.Close()
+			_ = os.Remove(tmpF.Name())
+		}()
+		cache = tmpF
 	}

 	const DEFAULT int64 = 1 << 22
@@ -251,9 +264,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil

 	// calculate the required data
 	streamSize := stream.GetSize()
-	count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
+	count := int(streamSize / DEFAULT)
 	lastBlockSize := streamSize % DEFAULT
-	if lastBlockSize == 0 {
+	if lastBlockSize > 0 {
+		count++
+	} else {
 		lastBlockSize = DEFAULT
 	}

@@ -264,6 +279,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	sliceMd5H := md5.New()
 	sliceMd5H2 := md5.New()
 	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
+	writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
+	if tmpF != nil {
+		writers = append(writers, tmpF)
+	}
+	written := int64(0)
 	for i := 1; i <= count; i++ {
 		if utils.IsCanceled(ctx) {
 			return nil, ctx.Err()
@@ -271,13 +291,23 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		if i == count {
 			byteSize = lastBlockSize
 		}
-		_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
+		n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize)
+		written += n
 		if err != nil && err != io.EOF {
 			return nil, err
 		}
 		sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
 		sliceMd5H.Reset()
 	}
+	if tmpF != nil {
+		if written != streamSize {
+			return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
+		}
+		_, err = tmpF.Seek(0, io.SeekStart)
+		if err != nil {
+			return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
+		}
+	}
 	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
 	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
 	blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)
@@ -289,7 +319,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		"rtype":       "1",
 		"ctype":       "11",
 		"path":        fmt.Sprintf("/%s", stream.GetName()),
-		"size":        fmt.Sprint(stream.GetSize()),
+		"size":        fmt.Sprint(streamSize),
 		"slice-md5":   sliceMd5,
 		"content-md5": contentMd5,
 		"block_list":  blockListStr,
@@ -314,6 +344,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
@@ -325,6 +356,10 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		}

 		threadG.Go(func(ctx context.Context) error {
+			if err = sem.Acquire(ctx, 1); err != nil {
+				return err
+			}
+			defer sem.Release(1)
 			uploadParams := map[string]string{
 				"method": "upload",
 				"path":   params["path"],
@@ -335,7 +370,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 			_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
 				r.SetContext(ctx)
 				r.SetQueryParams(uploadParams)
-				r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+				r.SetFileReader("file", stream.GetName(),
+					driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
 			}, nil)
 			if err != nil {
 				return err
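
BaiduPhoto's Put now follows the same pattern as BaiduNetdisk: hash the incoming stream in one pass, tee it into a temp file only when the source is not already an io.ReaderAt, then seek back so io.NewSectionReader can serve the per-slice uploads. The tee-and-rewind idea in isolation (a reduced sketch, not the driver code):

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
        "io"
        "os"
        "strings"
    )

    func main() {
        var src io.Reader = strings.NewReader("example payload") // incoming stream
        fileMd5 := md5.New()

        // Tee every byte into the hash and into a seekable temp file.
        tmp, err := os.CreateTemp("", "file-*")
        if err != nil {
            panic(err)
        }
        defer os.Remove(tmp.Name())
        defer tmp.Close()

        if _, err := io.Copy(io.MultiWriter(fileMd5, tmp), src); err != nil {
            panic(err)
        }
        // Rewind so sections of the cached file can be re-read for upload.
        if _, err := tmp.Seek(0, io.SeekStart); err != nil {
            panic(err)
        }
        fmt.Println("md5:", hex.EncodeToString(fileMd5.Sum(nil)))
    }

Doing the hashing and the caching in one io.Copy avoids reading the stream twice, which is the whole point of replacing CacheFullInTempFile here.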

@@ -6,6 +6,7 @@ import (
 	"time"

 	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/go-resty/resty/v2"
 )

@@ -26,7 +27,7 @@ func InitClient() {
 	NoRedirectClient.SetHeader("user-agent", UserAgent)

 	RestyClient = NewRestyClient()
-	HttpClient = NewHttpClient()
+	HttpClient = net.NewHttpClient()
 }

 func NewRestyClient() *resty.Client {
@@ -38,13 +39,3 @@ func NewRestyClient() *resty.Client {
 		SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
 	return client
 }
-
-func NewHttpClient() *http.Client {
-	return &http.Client{
-		Timeout: time.Hour * 48,
-		Transport: &http.Transport{
-			Proxy:           http.ProxyFromEnvironment,
-			TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
-		},
-	}
-}
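
The local NewHttpClient helper moves into the shared internal/net package. Judging from the deleted body, the shared constructor is presumably equivalent to the sketch below; the 48-hour timeout, environment proxy, and TLS-skip flag all mirror the removed code, but the actual net.NewHttpClient may differ (the function name and flag parameter here are illustrative):

    package main

    import (
        "crypto/tls"
        "net/http"
        "time"
    )

    // newHTTPClient mirrors the helper removed from drivers/base: a long
    // timeout for large transfers, environment proxy support, and an
    // optional skip of TLS verification. Assumption: this approximates
    // what internal/net.NewHttpClient does; it is not copied from it.
    func newHTTPClient(insecureSkipVerify bool) *http.Client {
        return &http.Client{
            Timeout: 48 * time.Hour,
            Transport: &http.Transport{
                Proxy:           http.ProxyFromEnvironment,
                TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify},
            },
        }
    }

    func main() {
        client := newHTTPClient(false)
        _ = client // use as a drop-in for http.DefaultClient
    }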

@@ -215,7 +215,7 @@ func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error {
 	return nil
 }

-func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
 	var resp UploadDataRsp
 	_, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) {
 	}, &resp)
@@ -227,11 +227,11 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	}
 	body := &bytes.Buffer{}
 	writer := multipart.NewWriter(body)
-	filePart, err := writer.CreateFormFile("file", stream.GetName())
+	filePart, err := writer.CreateFormFile("file", file.GetName())
 	if err != nil {
 		return err
 	}
-	_, err = utils.CopyWithBuffer(filePart, stream)
+	_, err = utils.CopyWithBuffer(filePart, file)
 	if err != nil {
 		return err
 	}
@@ -248,7 +248,14 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	if err != nil {
 		return err
 	}
-	req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body)
+	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
+			Reader: body,
+			Size:   int64(body.Len()),
+		},
+		UpdateProgress: up,
+	})
+	req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
 	if err != nil {
 		return err
 	}
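
The ChaoXing upload now wraps the buffered multipart body in driver.ReaderUpdatingProgress inside driver.NewLimitedUploadStream, so progress is reported as the request body drains. A generic stand-in for that wrapper (the real types live in AList's internal/driver package; this sketch is not them):

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // progressReader reports cumulative progress as a percentage each time
    // the wrapped reader is drained a little further.
    type progressReader struct {
        r      io.Reader
        size   int64
        read   int64
        update func(percent float64)
    }

    func (p *progressReader) Read(b []byte) (int, error) {
        n, err := p.r.Read(b)
        p.read += int64(n)
        if p.size > 0 {
            p.update(float64(p.read) * 100 / float64(p.size))
        }
        return n, err
    }

    func main() {
        body := "some multipart payload"
        pr := &progressReader{
            r:      strings.NewReader(body),
            size:   int64(len(body)),
            update: func(pct float64) { fmt.Printf("progress: %.0f%%\n", pct) },
        }
        _, _ = io.ReadAll(pr) // stands in for http.NewRequestWithContext(..., pr)
    }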

@@ -5,7 +5,6 @@ import (
 	"io"
 	"net/http"
 	"path"
-	"strconv"
 	"strings"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -19,6 +18,7 @@ import (
 type Cloudreve struct {
 	model.Storage
 	Addition
+	ref *Cloudreve
 }

 func (d *Cloudreve) Config() driver.Config {
@@ -38,8 +38,18 @@ func (d *Cloudreve) Init(ctx context.Context) error {
 	return d.login()
 }

+func (d *Cloudreve) InitReference(storage driver.Driver) error {
+	refStorage, ok := storage.(*Cloudreve)
+	if ok {
+		d.ref = refStorage
+		return nil
+	}
+	return errs.NotSupport
+}
+
 func (d *Cloudreve) Drop(ctx context.Context) error {
 	d.Cookie = ""
+	d.ref = nil
 	return nil
 }

@@ -147,7 +157,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
 		"size":          stream.GetSize(),
 		"name":          stream.GetName(),
 		"policy_id":     r.Policy.Id,
-		"last_modified": stream.ModTime().Unix(),
+		"last_modified": stream.ModTime().UnixMilli(),
 	}

 	// fetch the upload session info
@@ -163,42 +173,18 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
 	switch r.Policy.Type {
 	case "onedrive":
 		err = d.upOneDrive(ctx, stream, u, up)
+	case "s3":
+		err = d.upS3(ctx, stream, u, up)
 	case "remote": // slave storage
 		err = d.upRemote(ctx, stream, u, up)
 	case "local": // local storage
-		var chunkSize = u.ChunkSize
-		var buf []byte
-		var chunk int
-		for {
-			var n int
-			buf = make([]byte, chunkSize)
-			n, err = io.ReadAtLeast(stream, buf, chunkSize)
-			if err != nil && err != io.ErrUnexpectedEOF {
-				if err == io.EOF {
-					return nil
-				}
-				return err
-			}
-			if n == 0 {
-				break
-			}
-			buf = buf[:n]
-			err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
-				req.SetHeader("Content-Type", "application/octet-stream")
-				req.SetHeader("Content-Length", strconv.Itoa(n))
-				req.SetBody(buf)
-			}, nil)
-			if err != nil {
-				break
-			}
-			chunk++
-		}
+		err = d.upLocal(ctx, stream, u, up)
 	default:
 		err = errs.NotImplement
 	}
 	if err != nil {
 		// delete the failed session
-		err = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
+		_ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
 		return err
 	}
 	return nil
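
InitReference lets a second Cloudreve mount reuse an existing instance's session: once ref is set, request forwards every call to the referenced driver, and Drop clears the reference. The delegation pattern reduced to its core (illustrative types, not the driver's):

    package main

    import (
        "errors"
        "fmt"
    )

    type Client struct {
        name string
        ref  *Client // when set, all calls are forwarded
    }

    // InitReference accepts only a peer of the same concrete type,
    // mirroring the type assertion in the Cloudreve driver.
    func (c *Client) InitReference(other any) error {
        peer, ok := other.(*Client)
        if !ok {
            return errors.New("not supported")
        }
        c.ref = peer
        return nil
    }

    func (c *Client) Request(path string) string {
        if c.ref != nil {
            return c.ref.Request(path) // reuse the referenced session
        }
        return c.name + " handles " + path
    }

    func main() {
        a, b := &Client{name: "primary"}, &Client{name: "secondary"}
        _ = b.InitReference(a)
        fmt.Println(b.Request("/file")) // "primary handles /file"
    }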

@@ -21,11 +21,12 @@ type Policy struct {
 }

 type UploadInfo struct {
 	SessionID  string   `json:"sessionID"`
 	ChunkSize  int      `json:"chunkSize"`
 	Expires    int      `json:"expires"`
 	UploadURLs []string `json:"uploadURLs"`
-	Credential string   `json:"credential,omitempty"`
+	Credential  string  `json:"credential,omitempty"`  // local
+	CompleteURL string  `json:"completeURL,omitempty"` // s3
 }

 type DirectoryResp struct {

@@ -4,12 +4,14 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"net/http"
 	"strconv"
 	"strings"
+	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
@@ -19,7 +21,6 @@ import (
 	"github.com/alist-org/alist/v3/pkg/cookie"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
-	json "github.com/json-iterator/go"
 	jsoniter "github.com/json-iterator/go"
 )

@@ -27,17 +28,23 @@

 const loginPath = "/user/session"

-func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
-	u := d.Address + "/api/v3" + path
-	ua := d.CustomUA
-	if ua == "" {
-		ua = base.UserAgent
+func (d *Cloudreve) getUA() string {
+	if d.CustomUA != "" {
+		return d.CustomUA
 	}
+	return base.UserAgent
+}
+
+func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
+	if d.ref != nil {
+		return d.ref.request(method, path, callback, out)
+	}
+	u := d.Address + "/api/v3" + path
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"Cookie":     "cloudreve-session=" + d.Cookie,
 		"Accept":     "application/json, text/plain, */*",
-		"User-Agent": ua,
+		"User-Agent": d.getUA(),
 	})

 	var r Resp
@@ -76,11 +83,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac
 	}
 	if out != nil && r.Data != nil {
 		var marshal []byte
-		marshal, err = json.Marshal(r.Data)
+		marshal, err = jsoniter.Marshal(r.Data)
 		if err != nil {
 			return err
 		}
-		err = json.Unmarshal(marshal, out)
+		err = jsoniter.Unmarshal(marshal, out)
 		if err != nil {
 			return err
 		}
@@ -100,7 +107,7 @@ func (d *Cloudreve) login() error {
 		if err == nil {
 			break
 		}
-		if err != nil && err.Error() != "CAPTCHA not match." {
+		if err.Error() != "CAPTCHA not match." {
 			break
 		}
 	}
@@ -161,15 +168,11 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
 	if !d.Addition.EnableThumbAndFolderSize {
 		return model.Thumbnail{}, nil
 	}
-	ua := d.CustomUA
-	if ua == "" {
-		ua = base.UserAgent
-	}
 	req := base.NoRedirectClient.R()
 	req.SetHeaders(map[string]string{
 		"Cookie":     "cloudreve-session=" + d.Cookie,
 		"Accept":     "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
-		"User-Agent": ua,
+		"User-Agent": d.getUA(),
 	})
 	resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
 	if err != nil {
@@ -180,9 +183,7 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
 	}, nil
 }

-func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
-	uploadUrl := u.UploadURLs[0]
-	credential := u.Credential
+func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
 	var finish int64 = 0
 	var chunk int = 0
 	DEFAULT := int64(u.ChunkSize)
@@ -190,33 +191,117 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
-		var byteSize = DEFAULT
 		left := stream.GetSize() - finish
-		if left < DEFAULT {
-			byteSize = left
-		}
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
 		byteData := make([]byte, byteSize)
 		n, err := io.ReadFull(stream, byteData)
 		utils.Log.Debug(err, n)
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData))
+		err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
+			req.SetHeader("Content-Type", "application/octet-stream")
+			req.SetContentLength(true)
+			req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
+			req.SetHeader("User-Agent", d.getUA())
+			req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
+			req.AddRetryCondition(func(r *resty.Response, err error) bool {
+				if err != nil {
+					return true
+				}
+				if r.IsError() {
+					return true
+				}
+				var retryResp Resp
+				jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
+				if jErr != nil {
+					return true
+				}
+				if retryResp.Code != 0 {
+					return true
+				}
+				return false
+			})
+		}, nil)
+		if err != nil {
+			return err
+		}
+		finish += byteSize
+		up(float64(finish) * 100 / float64(stream.GetSize()))
+		chunk++
+	}
+	return nil
+}
+
+func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
+	uploadUrl := u.UploadURLs[0]
+	credential := u.Credential
+	var finish int64 = 0
+	var chunk int = 0
+	DEFAULT := int64(u.ChunkSize)
+	retryCount := 0
+	maxRetries := 3
+	for finish < stream.GetSize() {
+		if utils.IsCanceled(ctx) {
+			return ctx.Err()
+		}
+		left := stream.GetSize() - finish
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
+		byteData := make([]byte, byteSize)
+		n, err := io.ReadFull(stream, byteData)
+		utils.Log.Debug(err, n)
+		if err != nil {
+			return err
+		}
+		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
+			driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
 		if err != nil {
 			return err
 		}
 		req = req.WithContext(ctx)
-		req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
+		req.ContentLength = byteSize
+		// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
 		req.Header.Set("Authorization", fmt.Sprint(credential))
-		finish += byteSize
-		res, err := base.HttpClient.Do(req)
-		if err != nil {
-			return err
+		req.Header.Set("User-Agent", d.getUA())
+		err = func() error {
+			res, err := base.HttpClient.Do(req)
+			if err != nil {
+				return err
+			}
+			defer res.Body.Close()
+			if res.StatusCode != 200 {
+				return errors.New(res.Status)
+			}
+			body, err := io.ReadAll(res.Body)
+			if err != nil {
+				return err
+			}
+			var up Resp
+			err = json.Unmarshal(body, &up)
+			if err != nil {
+				return err
+			}
+			if up.Code != 0 {
+				return errors.New(up.Msg)
+			}
+			return nil
+		}()
+		if err == nil {
+			retryCount = 0
+			finish += byteSize
+			up(float64(finish) * 100 / float64(stream.GetSize()))
+			chunk++
+		} else {
+			retryCount++
+			if retryCount > maxRetries {
+				return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
+			}
+			backoff := time.Duration(1<<retryCount) * time.Second
+			utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
+			time.Sleep(backoff)
 		}
-		res.Body.Close()
-		up(float64(finish) * 100 / float64(stream.GetSize()))
-		chunk++
 	}
 	return nil
 }
@@ -225,47 +310,151 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 	uploadUrl := u.UploadURLs[0]
 	var finish int64 = 0
 	DEFAULT := int64(u.ChunkSize)
+	retryCount := 0
+	maxRetries := 3
 	for finish < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
-		var byteSize = DEFAULT
 		left := stream.GetSize() - finish
-		if left < DEFAULT {
-			byteSize = left
-		}
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
 		byteData := make([]byte, byteSize)
 		n, err := io.ReadFull(stream, byteData)
 		utils.Log.Debug(err, n)
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
+		req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
 		if err != nil {
 			return err
 		}
 		req = req.WithContext(ctx)
-		req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
+		req.ContentLength = byteSize
+		// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
 		req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
+		req.Header.Set("User-Agent", d.getUA())
 		finish += byteSize
 		res, err := base.HttpClient.Do(req)
 		if err != nil {
 			return err
 		}
 		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
-		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
+		switch {
+		case res.StatusCode >= 500 && res.StatusCode <= 504:
+			retryCount++
+			if retryCount > maxRetries {
+				res.Body.Close()
+				return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
+			}
+			backoff := time.Duration(1<<retryCount) * time.Second
+			utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
+			time.Sleep(backoff)
+		case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
 			data, _ := io.ReadAll(res.Body)
 			res.Body.Close()
 			return errors.New(string(data))
+		default:
+			res.Body.Close()
+			retryCount = 0
+			finish += byteSize
+			up(float64(finish) * 100 / float64(stream.GetSize()))
 		}
-		res.Body.Close()
-		up(float64(finish) * 100 / float64(stream.GetSize()))
 	}
 	// send the callback request after a successful upload
-	err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
+	return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
 		req.SetBody("{}")
 	}, nil)
+}
+
+func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
+	var finish int64 = 0
+	var chunk int = 0
+	var etags []string
+	DEFAULT := int64(u.ChunkSize)
+	retryCount := 0
+	maxRetries := 3
+	for finish < stream.GetSize() {
+		if utils.IsCanceled(ctx) {
+			return ctx.Err()
+		}
+		left := stream.GetSize() - finish
+		byteSize := min(left, DEFAULT)
+		utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
+		byteData := make([]byte, byteSize)
+		n, err := io.ReadFull(stream, byteData)
+		utils.Log.Debug(err, n)
+		if err != nil {
+			return err
+		}
+		req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
+			driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
+		if err != nil {
+			return err
+		}
+		req = req.WithContext(ctx)
+		req.ContentLength = byteSize
+		finish += byteSize
+		res, err := base.HttpClient.Do(req)
+		if err != nil {
+			return err
+		}
+		etag := res.Header.Get("ETag")
+		res.Body.Close()
+		switch {
+		case res.StatusCode != 200:
+			retryCount++
+			if retryCount > maxRetries {
+				return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
+			}
+			backoff := time.Duration(1<<retryCount) * time.Second
+			utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
+			time.Sleep(backoff)
+		case etag == "":
+			return errors.New("faild to get ETag from header")
+		default:
+			retryCount = 0
+			etags = append(etags, etag)
+			finish += byteSize
+			up(float64(finish) * 100 / float64(stream.GetSize()))
+			chunk++
+		}
+	}
+
+	// s3LikeFinishUpload
+	// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
+	bodyBuilder := &strings.Builder{}
+	bodyBuilder.WriteString("<CompleteMultipartUpload>")
+	for i, etag := range etags {
+		bodyBuilder.WriteString(fmt.Sprintf(
+			`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
+			i+1, // PartNumber starts at 1
+			etag,
+		))
+	}
+	bodyBuilder.WriteString("</CompleteMultipartUpload>")
+	req, err := http.NewRequest(
+		"POST",
+		u.CompleteURL,
+		strings.NewReader(bodyBuilder.String()),
+	)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/xml")
+	req.Header.Set("User-Agent", d.getUA())
+	res, err := base.HttpClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != http.StatusOK {
+		body, _ := io.ReadAll(res.Body)
+		return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
+	}

+	// send the callback request after a successful upload
+	err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil)
 	if err != nil {
 		return err
 	}
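
All three rewritten chunk uploaders retry transient server errors with exponential backoff: sleep 1<<retryCount seconds, give up after three retries, and reset the counter after any success. The bare pattern, separated from the HTTP plumbing:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // withBackoff retries op with exponentially growing sleeps (2s, 4s, 8s)
    // and fails once maxRetries consecutive attempts have errored.
    func withBackoff(maxRetries int, op func() error) error {
        retryCount := 0
        for {
            err := op()
            if err == nil {
                return nil
            }
            retryCount++
            if retryCount > maxRetries {
                return fmt.Errorf("failed after %d retries: %w", maxRetries, err)
            }
            backoff := time.Duration(1<<retryCount) * time.Second
            fmt.Printf("retrying after %v...\n", backoff)
            time.Sleep(backoff)
        }
    }

    func main() {
        attempts := 0
        err := withBackoff(3, func() error {
            attempts++
            if attempts < 3 {
                return errors.New("HTTP 502") // simulated transient failure
            }
            return nil
        })
        fmt.Println("result:", err, "attempts:", attempts)
    }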

drivers/cloudreve_v4/driver.go (new file, 305 lines)
@@ -0,0 +1,305 @@
+package cloudreve_v4
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
+)
+
+type CloudreveV4 struct {
+	model.Storage
+	Addition
+	ref *CloudreveV4
+}
+
+func (d *CloudreveV4) Config() driver.Config {
+	if d.ref != nil {
+		return d.ref.Config()
+	}
+	if d.EnableVersionUpload {
+		config.NoOverwriteUpload = false
+	}
+	return config
+}
+
+func (d *CloudreveV4) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *CloudreveV4) Init(ctx context.Context) error {
+	// removing trailing slash
+	d.Address = strings.TrimSuffix(d.Address, "/")
+	op.MustSaveDriverStorage(d)
+	if d.ref != nil {
+		return nil
+	}
+	if d.AccessToken == "" && d.RefreshToken != "" {
+		return d.refreshToken()
+	}
+	if d.Username != "" {
+		return d.login()
+	}
+	return nil
+}
+
+func (d *CloudreveV4) InitReference(storage driver.Driver) error {
+	refStorage, ok := storage.(*CloudreveV4)
+	if ok {
+		d.ref = refStorage
+		return nil
+	}
+	return errs.NotSupport
+}
+
+func (d *CloudreveV4) Drop(ctx context.Context) error {
+	d.ref = nil
+	return nil
+}
+
+func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	const pageSize int = 100
+	var f []File
+	var r FileResp
+	params := map[string]string{
+		"page_size":       strconv.Itoa(pageSize),
+		"uri":             dir.GetPath(),
+		"order_by":        d.OrderBy,
+		"order_direction": d.OrderDirection,
+		"page":            "0",
+	}
+
+	for {
+		err := d.request(http.MethodGet, "/file", func(req *resty.Request) {
+			req.SetQueryParams(params)
+		}, &r)
+		if err != nil {
+			return nil, err
+		}
+		f = append(f, r.Files...)
+		if r.Pagination.NextToken == "" || len(r.Files) < pageSize {
+			break
+		}
+		params["next_page_token"] = r.Pagination.NextToken
+	}
+
+	return utils.SliceConvert(f, func(src File) (model.Obj, error) {
+		if d.EnableFolderSize && src.Type == 1 {
+			var ds FolderSummaryResp
+			err := d.request(http.MethodGet, "/file/info", func(req *resty.Request) {
+				req.SetQueryParam("uri", src.Path)
+				req.SetQueryParam("folder_summary", "true")
+			}, &ds)
+			if err == nil && ds.FolderSummary.Size > 0 {
+				src.Size = ds.FolderSummary.Size
+			}
+		}
+		var thumb model.Thumbnail
+		if d.EnableThumb && src.Type == 0 {
+			var t FileThumbResp
+			err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) {
+				req.SetQueryParam("uri", src.Path)
+			}, &t)
+			if err == nil && t.URL != "" {
+				thumb = model.Thumbnail{
+					Thumbnail: t.URL,
+				}
+			}
+		}
+		return &model.ObjThumb{
+			Object: model.Object{
+				ID:       src.ID,
+				Path:     src.Path,
+				Name:     src.Name,
+				Size:     src.Size,
+				Modified: src.UpdatedAt,
+				Ctime:    src.CreatedAt,
+				IsFolder: src.Type == 1,
+			},
+			Thumbnail: thumb,
+		}, nil
+	})
+}
+
+func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	var url FileUrlResp
+	err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) {
+		req.SetBody(base.Json{
+			"uris":     []string{file.GetPath()},
+			"download": true,
+		})
+	}, &url)
+	if err != nil {
+		return nil, err
+	}
+	if len(url.Urls) == 0 {
+		return nil, errors.New("server returns no url")
+	}
+	exp := time.Until(url.Expires)
+	return &model.Link{
+		URL:        url.Urls[0].URL,
+		Expiration: &exp,
+	}, nil
+}
+
+func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
+		req.SetBody(base.Json{
+			"type":              "folder",
+			"uri":               parentDir.GetPath() + "/" + dirName,
+			"error_on_conflict": true,
+		})
+	}, nil)
+}
+
+func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
+		req.SetBody(base.Json{
+			"uris": []string{srcObj.GetPath()},
+			"dst":  dstDir.GetPath(),
+			"copy": false,
+		})
+	}, nil)
+}
+
+func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
+		req.SetBody(base.Json{
+			"new_name": newName,
+			"uri":      srcObj.GetPath(),
+		})
+	}, nil)
+
+}
+
+func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
+		req.SetBody(base.Json{
+			"uris": []string{srcObj.GetPath()},
+			"dst":  dstDir.GetPath(),
+			"copy": true,
+		})
+	}, nil)
+}
+
+func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error {
+	return d.request(http.MethodDelete, "/file", func(req *resty.Request) {
+		req.SetBody(base.Json{
+			"uris":             []string{obj.GetPath()},
+			"unlink":           false,
+			"skip_soft_delete": true,
+		})
+	}, nil)
+}
+
+func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+	if file.GetSize() == 0 {
+		// create empty files via the create-file API to avoid a stuck upload lock
+		return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
+			req.SetBody(base.Json{
+				"type":              "file",
+				"uri":               dstDir.GetPath() + "/" + file.GetName(),
+				"error_on_conflict": true,
+			})
+		}, nil)
+	}
+	var p StoragePolicy
+	var r FileResp
+	var u FileUploadResp
+	var err error
+	params := map[string]string{
+		"page_size":       "10",
+		"uri":             dstDir.GetPath(),
+		"order_by":        "created_at",
+		"order_direction": "asc",
+		"page":            "0",
+	}
+	err = d.request(http.MethodGet, "/file", func(req *resty.Request) {
+		req.SetQueryParams(params)
+	}, &r)
+	if err != nil {
+		return err
+	}
+	p = r.StoragePolicy
+	body := base.Json{
+		"uri":           dstDir.GetPath() + "/" + file.GetName(),
+		"size":          file.GetSize(),
+		"policy_id":     p.ID,
+		"last_modified": file.ModTime().UnixMilli(),
+		"mime_type":     "",
+	}
+	if d.EnableVersionUpload {
+		body["entity_type"] = "version"
+	}
+	err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
+		req.SetBody(body)
+	}, &u)
+	if err != nil {
+		return err
+	}
+	if u.StoragePolicy.Relay {
+		err = d.upLocal(ctx, file, u, up)
+	} else {
+		switch u.StoragePolicy.Type {
+		case "local":
+			err = d.upLocal(ctx, file, u, up)
+		case "remote":
+			err = d.upRemote(ctx, file, u, up)
+		case "onedrive":
+			err = d.upOneDrive(ctx, file, u, up)
+		case "s3":
+			err = d.upS3(ctx, file, u, up)
+		default:
+			return errs.NotImplement
+		}
+	}
+	if err != nil {
+		// delete the failed session
+		_ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) {
+			req.SetBody(base.Json{
+				"id":  u.SessionID,
+				"uri": u.URI,
+			})
+		}, nil)
+		return err
+	}
+	return nil
+}
+
+func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
+	return nil, errs.NotImplement
+}
+
+func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
+	return nil, errs.NotImplement
+}
+
+func (d *CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
+	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
+	return nil, errs.NotImplement
+}
+
+func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
+	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
+	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
+	// return errs.NotImplement to use an internal archive tool
+	return nil, errs.NotImplement
+}
+
+//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
+//	return nil, errs.NotSupport
+//}
+
+var _ driver.Driver = (*CloudreveV4)(nil)
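
CloudreveV4's List pages through GET /file by echoing back next_page_token until the server omits it or returns a short page. The same cursor loop in isolation (the page type and fetch stub are illustrative, not the AList or Cloudreve API):

    package main

    import "fmt"

    type page struct {
        items []string
        next  string // empty when this is the last page
    }

    // fetch simulates one GET /file call; a real client would send the
    // token as the next_page_token query parameter.
    func fetch(token string) page {
        if token == "" {
            return page{items: []string{"a", "b"}, next: "t1"}
        }
        return page{items: []string{"c"}, next: ""}
    }

    func main() {
        var all []string
        token := ""
        for {
            p := fetch(token)
            all = append(all, p.items...)
            if p.next == "" || len(p.items) == 0 {
                break
            }
            token = p.next
        }
        fmt.Println(all) // [a b c]
    }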
44
drivers/cloudreve_v4/meta.go
Normal file
44
drivers/cloudreve_v4/meta.go
Normal file
@@ -0,0 +1,44 @@
package cloudreve_v4

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	driver.RootPath
	// driver.RootID
	// define other
	Address             string `json:"address" required:"true"`
	Username            string `json:"username"`
	Password            string `json:"password"`
	AccessToken         string `json:"access_token"`
	RefreshToken        string `json:"refresh_token"`
	CustomUA            string `json:"custom_ua"`
	EnableFolderSize    bool   `json:"enable_folder_size"`
	EnableThumb         bool   `json:"enable_thumb"`
	EnableVersionUpload bool   `json:"enable_version_upload"`
	OrderBy             string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"`
	OrderDirection      string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"`
}

var config = driver.Config{
	Name:              "Cloudreve V4",
	LocalSort:         false,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "cloudreve://my",
	CheckStatus:       true,
	Alert:             "",
	NoOverwriteUpload: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &CloudreveV4{}
	})
}
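
The extra `type`, `options` and `default` struct tags are how alist drivers typically describe their storage form for the admin UI; a small sketch of how such tags can be read back with the standard reflect package (the field and its tag values are copied from Addition above):

package main

import (
	"fmt"
	"reflect"
)

type Addition struct {
	OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name"`
}

func main() {
	f, _ := reflect.TypeOf(Addition{}).FieldByName("OrderBy")
	fmt.Println(f.Tag.Get("type"))    // select
	fmt.Println(f.Tag.Get("options")) // name,size,updated_at,created_at
	fmt.Println(f.Tag.Get("default")) // name
}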
164  drivers/cloudreve_v4/types.go  Normal file
@@ -0,0 +1,164 @@
package cloudreve_v4

import (
	"time"

	"github.com/alist-org/alist/v3/internal/model"
)

type Object struct {
	model.Object
	StoragePolicy StoragePolicy
}

type Resp struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
	Data any    `json:"data"`
}

type BasicConfigResp struct {
	InstanceID string `json:"instance_id"`
	// Title string `json:"title"`
	// Themes string `json:"themes"`
	// DefaultTheme string `json:"default_theme"`
	User struct {
		ID string `json:"id"`
		// Nickname string `json:"nickname"`
		// CreatedAt time.Time `json:"created_at"`
		// Anonymous bool `json:"anonymous"`
		Group struct {
			ID         string `json:"id"`
			Name       string `json:"name"`
			Permission string `json:"permission"`
		} `json:"group"`
	} `json:"user"`
	// Logo string `json:"logo"`
	// LogoLight string `json:"logo_light"`
	// CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"`
	CaptchaType string `json:"captcha_type"` // support 'normal' only
	// AppPromotion bool `json:"app_promotion"`
}

type SiteLoginConfigResp struct {
	LoginCaptcha bool `json:"login_captcha"`
	Authn        bool `json:"authn"`
}

type PrepareLoginResp struct {
	WebauthnEnabled bool `json:"webauthn_enabled"`
	PasswordEnabled bool `json:"password_enabled"`
}

type CaptchaResp struct {
	Image  string `json:"image"`
	Ticket string `json:"ticket"`
}

type Token struct {
	AccessToken    string    `json:"access_token"`
	RefreshToken   string    `json:"refresh_token"`
	AccessExpires  time.Time `json:"access_expires"`
	RefreshExpires time.Time `json:"refresh_expires"`
}

type TokenResponse struct {
	User struct {
		ID string `json:"id"`
		// Email string `json:"email"`
		// Nickname string `json:"nickname"`
		Status string `json:"status"`
		// CreatedAt time.Time `json:"created_at"`
		Group struct {
			ID         string `json:"id"`
			Name       string `json:"name"`
			Permission string `json:"permission"`
			// DirectLinkBatchSize int `json:"direct_link_batch_size"`
			// TrashRetention int `json:"trash_retention"`
		} `json:"group"`
		// Language string `json:"language"`
	} `json:"user"`
	Token Token `json:"token"`
}

type File struct {
	Type          int         `json:"type"` // 0: file, 1: folder
	ID            string      `json:"id"`
	Name          string      `json:"name"`
	CreatedAt     time.Time   `json:"created_at"`
	UpdatedAt     time.Time   `json:"updated_at"`
	Size          int64       `json:"size"`
	Metadata      interface{} `json:"metadata"`
	Path          string      `json:"path"`
	Capability    string      `json:"capability"`
	Owned         bool        `json:"owned"`
	PrimaryEntity string      `json:"primary_entity"`
}

type StoragePolicy struct {
	ID      string `json:"id"`
	Name    string `json:"name"`
	Type    string `json:"type"`
	MaxSize int64  `json:"max_size"`
	Relay   bool   `json:"relay,omitempty"`
}

type Pagination struct {
	Page      int    `json:"page"`
	PageSize  int    `json:"page_size"`
	IsCursor  bool   `json:"is_cursor"`
	NextToken string `json:"next_token,omitempty"`
}

type Props struct {
	Capability            string   `json:"capability"`
	MaxPageSize           int      `json:"max_page_size"`
	OrderByOptions        []string `json:"order_by_options"`
	OrderDirectionOptions []string `json:"order_direction_options"`
}

type FileResp struct {
	Files         []File        `json:"files"`
	Parent        File          `json:"parent"`
	Pagination    Pagination    `json:"pagination"`
	Props         Props         `json:"props"`
	ContextHint   string        `json:"context_hint"`
	MixedType     bool          `json:"mixed_type"`
	StoragePolicy StoragePolicy `json:"storage_policy"`
}

type FileUrlResp struct {
	Urls []struct {
		URL string `json:"url"`
	} `json:"urls"`
	Expires time.Time `json:"expires"`
}

type FileUploadResp struct {
	// UploadID string `json:"upload_id"`
	SessionID      string        `json:"session_id"`
	ChunkSize      int64         `json:"chunk_size"`
	Expires        int64         `json:"expires"`
	StoragePolicy  StoragePolicy `json:"storage_policy"`
	URI            string        `json:"uri"`
	CompleteURL    string        `json:"completeURL,omitempty"`     // for S3-like
	CallbackSecret string        `json:"callback_secret,omitempty"` // for S3-like, OneDrive
	UploadUrls     []string      `json:"upload_urls,omitempty"`     // for not-local
	Credential     string        `json:"credential,omitempty"`      // for local
}

type FileThumbResp struct {
	URL     string    `json:"url"`
	Expires time.Time `json:"expires"`
}

type FolderSummaryResp struct {
	File
	FolderSummary struct {
		Size         int64     `json:"size"`
		Files        int64     `json:"files"`
		Folders      int64     `json:"folders"`
		Completed    bool      `json:"completed"`
		CalculatedAt time.Time `json:"calculated_at"`
	} `json:"folder_summary"`
}
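
The Resp envelope types Data as `any`; the driver's request helper in util.go below decodes it in two hops, re-marshaling the field into the caller's concrete struct. A minimal self-contained sketch of that pattern, with a hypothetical payload:

package main

import (
	"encoding/json"
	"fmt"
)

type Resp struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
	Data any    `json:"data"`
}

func main() {
	raw := []byte(`{"code":0,"msg":"","data":{"session_id":"abc","chunk_size":5242880}}`)
	var r Resp
	_ = json.Unmarshal(raw, &r)
	// second hop: marshal the interface{} field back and into the concrete type
	var out struct {
		SessionID string `json:"session_id"`
		ChunkSize int64  `json:"chunk_size"`
	}
	b, _ := json.Marshal(r.Data)
	_ = json.Unmarshal(b, &out)
	fmt.Println(out.SessionID, out.ChunkSize) // abc 5242880
}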
476  drivers/cloudreve_v4/util.go  Normal file
@@ -0,0 +1,476 @@
package cloudreve_v4

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/setting"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	jsoniter "github.com/json-iterator/go"
)

// do others that are not defined in the Driver interface

func (d *CloudreveV4) getUA() string {
	if d.CustomUA != "" {
		return d.CustomUA
	}
	return base.UserAgent
}

func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
	if d.ref != nil {
		return d.ref.request(method, path, callback, out)
	}
	u := d.Address + "/api/v4" + path
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"Accept":     "application/json, text/plain, */*",
		"User-Agent": d.getUA(),
	})
	if d.AccessToken != "" {
		req.SetHeader("Authorization", "Bearer "+d.AccessToken)
	}

	var r Resp
	req.SetResult(&r)

	if callback != nil {
		callback(req)
	}

	resp, err := req.Execute(method, u)
	if err != nil {
		return err
	}
	if !resp.IsSuccess() {
		return errors.New(resp.String())
	}

	if r.Code != 0 {
		if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
			// try to refresh the token
			err = d.refreshToken()
			if err != nil {
				return err
			}
			return d.request(method, path, callback, out)
		}
		return errors.New(r.Msg)
	}

	if out != nil && r.Data != nil {
		var marshal []byte
		marshal, err = json.Marshal(r.Data)
		if err != nil {
			return err
		}
		err = json.Unmarshal(marshal, out)
		if err != nil {
			return err
		}
	}

	return nil
}

func (d *CloudreveV4) login() error {
	var siteConfig SiteLoginConfigResp
	err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
	if err != nil {
		return err
	}
	if !siteConfig.Authn {
		return errors.New("authn not supported")
	}
	var prepareLogin PrepareLoginResp
	err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
	if err != nil {
		return err
	}
	if !prepareLogin.PasswordEnabled {
		return errors.New("password not enabled")
	}
	if prepareLogin.WebauthnEnabled {
		return errors.New("webauthn not supported")
	}
	for range 5 {
		err = d.doLogin(siteConfig.LoginCaptcha)
		if err == nil {
			break
		}
		if err.Error() != "CAPTCHA not match." {
			break
		}
	}
	return err
}

func (d *CloudreveV4) doLogin(needCaptcha bool) error {
	var err error
	loginBody := base.Json{
		"email":    d.Username,
		"password": d.Password,
	}
	if needCaptcha {
		var config BasicConfigResp
		err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
		if err != nil {
			return err
		}
		if config.CaptchaType != "normal" {
			return fmt.Errorf("captcha type %s not supported", config.CaptchaType)
		}
		var captcha CaptchaResp
		err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
		if err != nil {
			return err
		}
		if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") {
			return errors.New("cannot get captcha")
		}
		loginBody["ticket"] = captcha.Ticket
		i := strings.Index(captcha.Image, ",")
		dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:]))
		vRes, err := base.RestyClient.R().SetMultipartField(
			"image", "validateCode.png", "image/png", dec).
			Post(setting.GetStr(conf.OcrApi))
		if err != nil {
			return err
		}
		if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 {
			return errors.New("ocr error: " + jsoniter.Get(vRes.Body(), "msg").ToString())
		}
		captchaCode := jsoniter.Get(vRes.Body(), "result").ToString()
		if captchaCode == "" {
			return errors.New("ocr error: empty result")
		}
		loginBody["captcha"] = captchaCode
	}
	var token TokenResponse
	err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
		req.SetBody(loginBody)
	}, &token)
	if err != nil {
		return err
	}
	d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
	op.MustSaveDriverStorage(d)
	return nil
}

func (d *CloudreveV4) refreshToken() error {
	var token Token
	// no stored refresh token yet: fall back to password login
	if d.RefreshToken == "" {
		if d.Username != "" {
			err := d.login()
			if err != nil {
				return fmt.Errorf("cannot login to get refresh token, error: %s", err)
			}
		}
		return nil
	}
	err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
		req.SetBody(base.Json{
			"refresh_token": d.RefreshToken,
		})
	}, &token)
	if err != nil {
		return err
	}
	d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
	op.MustSaveDriverStorage(d)
	return nil
}

func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
	var finish int64 = 0
	var chunk int = 0
	DEFAULT := int64(u.ChunkSize)
	if DEFAULT == 0 {
		// support relay
		DEFAULT = file.GetSize()
	}
	for finish < file.GetSize() {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		left := file.GetSize() - finish
		byteSize := min(left, DEFAULT)
		utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
		byteData := make([]byte, byteSize)
		n, err := io.ReadFull(file, byteData)
		utils.Log.Debug(err, n)
		if err != nil {
			return err
		}
		err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
			req.SetHeader("Content-Type", "application/octet-stream")
			req.SetContentLength(true)
			req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
			req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
			req.AddRetryCondition(func(r *resty.Response, err error) bool {
				if err != nil {
					return true
				}
				if r.IsError() {
					return true
				}
				var retryResp Resp
				jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
				if jErr != nil {
					return true
				}
				if retryResp.Code != 0 {
					return true
				}
				return false
			})
		}, nil)
		if err != nil {
			return err
		}
		finish += byteSize
		up(float64(finish) * 100 / float64(file.GetSize()))
		chunk++
	}
	return nil
}

func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
	uploadUrl := u.UploadUrls[0]
	credential := u.Credential
	var finish int64 = 0
	var chunk int = 0
	DEFAULT := int64(u.ChunkSize)
	retryCount := 0
	maxRetries := 3
	for finish < file.GetSize() {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		left := file.GetSize() - finish
		byteSize := min(left, DEFAULT)
		utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
		byteData := make([]byte, byteSize)
		n, err := io.ReadFull(file, byteData)
		utils.Log.Debug(err, n)
		if err != nil {
			return err
		}
		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
			driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
		if err != nil {
			return err
		}
		req = req.WithContext(ctx)
		req.ContentLength = byteSize
		// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
		req.Header.Set("Authorization", fmt.Sprint(credential))
		req.Header.Set("User-Agent", d.getUA())
		err = func() error {
			res, err := base.HttpClient.Do(req)
			if err != nil {
				return err
			}
			defer res.Body.Close()
			if res.StatusCode != 200 {
				return errors.New(res.Status)
			}
			body, err := io.ReadAll(res.Body)
			if err != nil {
				return err
			}
			var chunkResp Resp // named so it does not shadow the progress callback "up"
			err = json.Unmarshal(body, &chunkResp)
			if err != nil {
				return err
			}
			if chunkResp.Code != 0 {
				return errors.New(chunkResp.Msg)
			}
			return nil
		}()
		if err == nil {
			retryCount = 0
			finish += byteSize
			up(float64(finish) * 100 / float64(file.GetSize()))
			chunk++
		} else {
			retryCount++
			if retryCount > maxRetries {
				return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
			}
			backoff := time.Duration(1<<retryCount) * time.Second
			utils.Log.Warnf("[CloudreveV4-Remote] server errors while uploading, retrying after %v...", backoff)
			time.Sleep(backoff)
		}
	}
	return nil
}

func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
	uploadUrl := u.UploadUrls[0]
	var finish int64 = 0
	DEFAULT := int64(u.ChunkSize)
	retryCount := 0
	maxRetries := 3
	for finish < file.GetSize() {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		left := file.GetSize() - finish
		byteSize := min(left, DEFAULT)
		utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
		byteData := make([]byte, byteSize)
		n, err := io.ReadFull(file, byteData)
		utils.Log.Debug(err, n)
		if err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
		if err != nil {
			return err
		}
		req = req.WithContext(ctx)
		req.ContentLength = byteSize
		// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
		req.Header.Set("User-Agent", d.getUA())
		res, err := base.HttpClient.Do(req)
		if err != nil {
			return err
		}
		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
		switch {
		case res.StatusCode >= 500 && res.StatusCode <= 504:
			retryCount++
			if retryCount > maxRetries {
				res.Body.Close()
				return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
			}
			backoff := time.Duration(1<<retryCount) * time.Second
			utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
			time.Sleep(backoff)
		case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
			data, _ := io.ReadAll(res.Body)
			res.Body.Close()
			return errors.New(string(data))
		default:
			res.Body.Close()
			retryCount = 0
			// advance the offset only after a successful chunk
			finish += byteSize
			up(float64(finish) * 100 / float64(file.GetSize()))
		}
	}
	// upload finished: send the callback request
	return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
		req.SetBody("{}")
	}, nil)
}

func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
	var finish int64 = 0
	var chunk int = 0
	var etags []string
	DEFAULT := int64(u.ChunkSize)
	retryCount := 0
	maxRetries := 3
	for finish < file.GetSize() {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		left := file.GetSize() - finish
		byteSize := min(left, DEFAULT)
		utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
		byteData := make([]byte, byteSize)
		n, err := io.ReadFull(file, byteData)
		utils.Log.Debug(err, n)
		if err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
			driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
		if err != nil {
			return err
		}
		req = req.WithContext(ctx)
		req.ContentLength = byteSize
		res, err := base.HttpClient.Do(req)
		if err != nil {
			return err
		}
		etag := res.Header.Get("ETag")
		res.Body.Close()
		switch {
		case res.StatusCode != 200:
			retryCount++
			if retryCount > maxRetries {
				return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
			}
			backoff := time.Duration(1<<retryCount) * time.Second
			utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
			time.Sleep(backoff)
		case etag == "":
			return errors.New("failed to get ETag from header")
		default:
			retryCount = 0
			etags = append(etags, etag)
			finish += byteSize
			up(float64(finish) * 100 / float64(file.GetSize()))
			chunk++
		}
	}

	// s3LikeFinishUpload
	bodyBuilder := &strings.Builder{}
	bodyBuilder.WriteString("<CompleteMultipartUpload>")
	for i, etag := range etags {
		bodyBuilder.WriteString(fmt.Sprintf(
			`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
			i+1, // PartNumber starts from 1
			etag,
		))
	}
	bodyBuilder.WriteString("</CompleteMultipartUpload>")
	req, err := http.NewRequest(
		"POST",
		u.CompleteURL,
		strings.NewReader(bodyBuilder.String()),
	)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/xml")
	req.Header.Set("User-Agent", d.getUA())
	res, err := base.HttpClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(res.Body)
		return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
	}

	// upload finished: send the callback request
	return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
		req.SetBody("{}")
	}, nil)
}
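
All of the remote upload helpers above share one retry scheme: at most maxRetries = 3 attempts, sleeping `time.Duration(1<<retryCount) * time.Second` between them. A tiny runnable check of the resulting schedule:

package main

import (
	"fmt"
	"time"
)

func main() {
	// waits after the 1st, 2nd and 3rd failed attempt: 2s, 4s, 8s
	for retryCount := 1; retryCount <= 3; retryCount++ {
		fmt.Println(time.Duration(1<<retryCount) * time.Second)
	}
}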
drivers/crypt/driver.go
@@ -263,12 +263,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 	}
 	rrc := remoteLink.RangeReadCloser
 	if len(remoteLink.URL) > 0 {
-		rangedRemoteLink := &model.Link{
-			URL:    remoteLink.URL,
-			Header: remoteLink.Header,
-		}
-		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
 		if err != nil {
 			return nil, err
 		}
@@ -287,8 +282,9 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 		if err != nil {
 			return nil, err
 		}
-		// can be returned directly; Close is not called after reading finishes, only when the connection is closed
-		return remoteLink.MFile, nil
+		// keep reusing the same MFile and close it at last
+		remoteClosers.Add(remoteLink.MFile)
+		return io.NopCloser(remoteLink.MFile), nil
 	}

 	return nil, errs.NotSupport
@@ -304,7 +300,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (

 	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
 	resultLink := &model.Link{
-		Header:          remoteLink.Header,
 		RangeReadCloser: resultRangeReadCloser,
 		Expiration:      remoteLink.Expiration,
 	}
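
The second hunk leans on a stdlib guarantee: io.NopCloser returns a ReadCloser whose Close is a no-op, so handing the shared MFile to callers no longer lets them close it; the real Close is deferred to the remoteClosers collection. A small demonstration (hypothetical reader type):

package main

import (
	"fmt"
	"io"
	"strings"
)

type loudCloser struct{ io.Reader }

func (loudCloser) Close() error { fmt.Println("closed!"); return nil }

func main() {
	rc := loudCloser{strings.NewReader("data")}
	wrapped := io.NopCloser(rc) // Close on the wrapper is a no-op
	wrapped.Close()             // prints nothing; rc stays usable
}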
271  drivers/doubao/driver.go  Normal file
@@ -0,0 +1,271 @@
package doubao

import (
	"context"
	"errors"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	"github.com/google/uuid"
)

type Doubao struct {
	model.Storage
	Addition
	*UploadToken
	UserId       string
	uploadThread int
}

func (d *Doubao) Config() driver.Config {
	return config
}

func (d *Doubao) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Doubao) Init(ctx context.Context) error {
	// TODO login / refresh token
	//op.MustSaveDriverStorage(d)
	uploadThread, err := strconv.Atoi(d.UploadThread)
	if err != nil || uploadThread < 1 {
		d.uploadThread, d.UploadThread = 3, "3" // Set default value
	} else {
		d.uploadThread = uploadThread
	}

	if d.UserId == "" {
		userInfo, err := d.getUserInfo()
		if err != nil {
			return err
		}

		d.UserId = strconv.FormatInt(userInfo.UserID, 10)
	}

	if d.UploadToken == nil {
		uploadToken, err := d.initUploadToken()
		if err != nil {
			return err
		}

		d.UploadToken = uploadToken
	}

	return nil
}

func (d *Doubao) Drop(ctx context.Context) error {
	return nil
}

func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var files []model.Obj
	fileList, err := d.getFiles(dir.GetID(), "")
	if err != nil {
		return nil, err
	}

	for _, child := range fileList {
		files = append(files, &Object{
			Object: model.Object{
				ID:       child.ID,
				Path:     child.ParentID,
				Name:     child.Name,
				Size:     child.Size,
				Modified: time.Unix(child.UpdateTime, 0),
				Ctime:    time.Unix(child.CreateTime, 0),
				IsFolder: child.NodeType == 1,
			},
			Key:      child.Key,
			NodeType: child.NodeType,
		})
	}

	return files, nil
}

func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	var downloadUrl string

	if u, ok := file.(*Object); ok {
		switch d.DownloadApi {
		case "get_download_info":
			var r GetDownloadInfoResp
			_, err := d.request("/samantha/aispace/get_download_info", http.MethodPost, func(req *resty.Request) {
				req.SetBody(base.Json{
					"requests": []base.Json{{"node_id": file.GetID()}},
				})
			}, &r)
			if err != nil {
				return nil, err
			}

			downloadUrl = r.Data.DownloadInfos[0].MainURL
		case "get_file_url":
			switch u.NodeType {
			case VideoType, AudioType:
				var r GetVideoFileUrlResp
				_, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) {
					req.SetBody(base.Json{
						"key":     u.Key,
						"node_id": file.GetID(),
					})
				}, &r)
				if err != nil {
					return nil, err
				}

				downloadUrl = r.Data.OriginalMediaInfo.MainURL
			default:
				var r GetFileUrlResp
				_, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) {
					req.SetBody(base.Json{
						"uris": []string{u.Key},
						"type": FileNodeType[u.NodeType],
					})
				}, &r)
				if err != nil {
					return nil, err
				}

				downloadUrl = r.Data.FileUrls[0].MainURL
			}
		default:
			return nil, errs.NotImplement
		}

		// generate a standard Content-Disposition header
		contentDisposition := generateContentDisposition(u.Name)

		return &model.Link{
			URL: downloadUrl,
			Header: http.Header{
				"User-Agent":          []string{UserAgent},
				"Content-Disposition": []string{contentDisposition},
			},
		}, nil
	}

	return nil, errors.New("can't convert obj to URL")
}

func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	var r UploadNodeResp
	_, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"node_list": []base.Json{
				{
					"local_id":  uuid.New().String(),
					"name":      dirName,
					"parent_id": parentDir.GetID(),
					"node_type": 1,
				},
			},
		})
	}, &r)
	return err
}

func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	var r UploadNodeResp
	_, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"node_list": []base.Json{
				{"id": srcObj.GetID()},
			},
			"current_parent_id": srcObj.GetPath(),
			"target_parent_id":  dstDir.GetID(),
		})
	}, &r)
	return err
}

func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	var r BaseResp
	_, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"node_id":   srcObj.GetID(),
			"node_name": newName,
		})
	}, &r)
	return err
}

func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	// TODO copy obj, optional
	return nil, errs.NotImplement
}

func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
	var r BaseResp
	_, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
	}, &r)
	return err
}

func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// determine the data type from the MIME type
	mimetype := file.GetMimetype()
	dataType := FileDataType

	switch {
	case strings.HasPrefix(mimetype, "video/"):
		dataType = VideoDataType
	case strings.HasPrefix(mimetype, "audio/"):
		dataType = VideoDataType // audio is handled the same way as video
	case strings.HasPrefix(mimetype, "image/"):
		dataType = ImgDataType
	}

	// fetch the upload config
	uploadConfig := UploadConfig{}
	if err := d.getUploadConfig(&uploadConfig, dataType, file); err != nil {
		return nil, err
	}

	// choose the upload mode by file size
	if file.GetSize() <= 1*utils.MB { // under 1MB: plain upload
		return d.Upload(&uploadConfig, dstDir, file, up, dataType)
	}
	// larger files: multipart upload
	return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)
}

func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Doubao) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Doubao) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}

//func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Doubao)(nil)
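
generateContentDisposition is defined further down in doubao's util.go, past the portion excerpted below; as a rough sketch of what a helper with this name typically produces (an assumption, not the committed implementation), it escapes the filename for both the plain and the RFC 5987 `filename*` parameters so non-ASCII names survive:

package main

import (
	"fmt"
	"net/url"
)

// hypothetical sketch, not the committed generateContentDisposition
func generateContentDispositionSketch(name string) string {
	escaped := url.PathEscape(name)
	return fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, escaped, escaped)
}

func main() {
	fmt.Println(generateContentDispositionSketch("报告.pdf"))
}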
36  drivers/doubao/meta.go  Normal file
@@ -0,0 +1,36 @@
package doubao

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	// driver.RootPath
	driver.RootID
	// define other
	Cookie       string `json:"cookie" type:"text"`
	UploadThread string `json:"upload_thread" default:"3"`
	DownloadApi  string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
}

var config = driver.Config{
	Name:              "Doubao",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "0",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Doubao{}
	})
}
415  drivers/doubao/types.go  Normal file
@@ -0,0 +1,415 @@
package doubao

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
)

type BaseResp struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

type NodeInfoResp struct {
	BaseResp
	Data struct {
		NodeInfo   File   `json:"node_info"`
		Children   []File `json:"children"`
		NextCursor string `json:"next_cursor"`
		HasMore    bool   `json:"has_more"`
	} `json:"data"`
}

type File struct {
	ID                  string `json:"id"`
	Name                string `json:"name"`
	Key                 string `json:"key"`
	NodeType            int    `json:"node_type"` // 0: file, 1: folder
	Size                int64  `json:"size"`
	Source              int    `json:"source"`
	NameReviewStatus    int    `json:"name_review_status"`
	ContentReviewStatus int    `json:"content_review_status"`
	RiskReviewStatus    int    `json:"risk_review_status"`
	ConversationID      string `json:"conversation_id"`
	ParentID            string `json:"parent_id"`
	CreateTime          int64  `json:"create_time"`
	UpdateTime          int64  `json:"update_time"`
}

type GetDownloadInfoResp struct {
	BaseResp
	Data struct {
		DownloadInfos []struct {
			NodeID    string `json:"node_id"`
			MainURL   string `json:"main_url"`
			BackupURL string `json:"backup_url"`
		} `json:"download_infos"`
	} `json:"data"`
}

type GetFileUrlResp struct {
	BaseResp
	Data struct {
		FileUrls []struct {
			URI     string `json:"uri"`
			MainURL string `json:"main_url"`
			BackURL string `json:"back_url"`
		} `json:"file_urls"`
	} `json:"data"`
}

type GetVideoFileUrlResp struct {
	BaseResp
	Data struct {
		MediaType string `json:"media_type"`
		MediaInfo []struct {
			Meta struct {
				Height     string  `json:"height"`
				Width      string  `json:"width"`
				Format     string  `json:"format"`
				Duration   float64 `json:"duration"`
				CodecType  string  `json:"codec_type"`
				Definition string  `json:"definition"`
			} `json:"meta"`
			MainURL   string `json:"main_url"`
			BackupURL string `json:"backup_url"`
		} `json:"media_info"`
		OriginalMediaInfo struct {
			Meta struct {
				Height     string  `json:"height"`
				Width      string  `json:"width"`
				Format     string  `json:"format"`
				Duration   float64 `json:"duration"`
				CodecType  string  `json:"codec_type"`
				Definition string  `json:"definition"`
			} `json:"meta"`
			MainURL   string `json:"main_url"`
			BackupURL string `json:"backup_url"`
		} `json:"original_media_info"`
		PosterURL      string `json:"poster_url"`
		PlayableStatus int    `json:"playable_status"`
	} `json:"data"`
}

type UploadNodeResp struct {
	BaseResp
	Data struct {
		NodeList []struct {
			LocalID  string `json:"local_id"`
			ID       string `json:"id"`
			ParentID string `json:"parent_id"`
			Name     string `json:"name"`
			Key      string `json:"key"`
			NodeType int    `json:"node_type"` // 0: file, 1: folder
		} `json:"node_list"`
	} `json:"data"`
}

type Object struct {
	model.Object
	Key      string
	NodeType int
}

type UserInfoResp struct {
	Data    UserInfo `json:"data"`
	Message string   `json:"message"`
}

type AppUserInfo struct {
	BuiAuditInfo string `json:"bui_audit_info"`
}

type AuditInfo struct {
}

type Details struct {
}

type BuiAuditInfo struct {
	AuditInfo      AuditInfo `json:"audit_info"`
	IsAuditing     bool      `json:"is_auditing"`
	AuditStatus    int       `json:"audit_status"`
	LastUpdateTime int       `json:"last_update_time"`
	UnpassReason   string    `json:"unpass_reason"`
	Details        Details   `json:"details"`
}

type Connects struct {
	Platform           string `json:"platform"`
	ProfileImageURL    string `json:"profile_image_url"`
	ExpiredTime        int    `json:"expired_time"`
	ExpiresIn          int    `json:"expires_in"`
	PlatformScreenName string `json:"platform_screen_name"`
	UserID             int64  `json:"user_id"`
	PlatformUID        string `json:"platform_uid"`
	SecPlatformUID     string `json:"sec_platform_uid"`
	PlatformAppID      int    `json:"platform_app_id"`
	ModifyTime         int    `json:"modify_time"`
	AccessToken        string `json:"access_token"`
	OpenID             string `json:"open_id"`
}

type OperStaffRelationInfo struct {
	HasPassword               int    `json:"has_password"`
	Mobile                    string `json:"mobile"`
	SecOperStaffUserID        string `json:"sec_oper_staff_user_id"`
	RelationMobileCountryCode int    `json:"relation_mobile_country_code"`
}

type UserInfo struct {
	AppID                 int                   `json:"app_id"`
	AppUserInfo           AppUserInfo           `json:"app_user_info"`
	AvatarURL             string                `json:"avatar_url"`
	BgImgURL              string                `json:"bg_img_url"`
	BuiAuditInfo          BuiAuditInfo          `json:"bui_audit_info"`
	CanBeFoundByPhone     int                   `json:"can_be_found_by_phone"`
	Connects              []Connects            `json:"connects"`
	CountryCode           int                   `json:"country_code"`
	Description           string                `json:"description"`
	DeviceID              int                   `json:"device_id"`
	Email                 string                `json:"email"`
	EmailCollected        bool                  `json:"email_collected"`
	Gender                int                   `json:"gender"`
	HasPassword           int                   `json:"has_password"`
	HmRegion              int                   `json:"hm_region"`
	IsBlocked             int                   `json:"is_blocked"`
	IsBlocking            int                   `json:"is_blocking"`
	IsRecommendAllowed    int                   `json:"is_recommend_allowed"`
	IsVisitorAccount      bool                  `json:"is_visitor_account"`
	Mobile                string                `json:"mobile"`
	Name                  string                `json:"name"`
	NeedCheckBindStatus   bool                  `json:"need_check_bind_status"`
	OdinUserType          int                   `json:"odin_user_type"`
	OperStaffRelationInfo OperStaffRelationInfo `json:"oper_staff_relation_info"`
	PhoneCollected        bool                  `json:"phone_collected"`
	RecommendHintMessage  string                `json:"recommend_hint_message"`
	ScreenName            string                `json:"screen_name"`
	SecUserID             string                `json:"sec_user_id"`
	SessionKey            string                `json:"session_key"`
	UseHmRegion           bool                  `json:"use_hm_region"`
	UserCreateTime        int                   `json:"user_create_time"`
	UserID                int64                 `json:"user_id"`
	UserIDStr             string                `json:"user_id_str"`
	UserVerified          bool                  `json:"user_verified"`
	VerifiedContent       string                `json:"verified_content"`
}

// UploadToken holds the upload token configuration
type UploadToken struct {
	Alice    map[string]UploadAuthToken
	Samantha MediaUploadAuthToken
}

// UploadAuthToken is the upload config shared by several types: image/file
type UploadAuthToken struct {
	ServiceID        string `json:"service_id"`
	UploadPathPrefix string `json:"upload_path_prefix"`
	Auth             struct {
		AccessKeyID     string    `json:"access_key_id"`
		SecretAccessKey string    `json:"secret_access_key"`
		SessionToken    string    `json:"session_token"`
		ExpiredTime     time.Time `json:"expired_time"`
		CurrentTime     time.Time `json:"current_time"`
	} `json:"auth"`
	UploadHost string `json:"upload_host"`
}

// MediaUploadAuthToken is the media upload config
type MediaUploadAuthToken struct {
	StsToken struct {
		AccessKeyID     string    `json:"access_key_id"`
		SecretAccessKey string    `json:"secret_access_key"`
		SessionToken    string    `json:"session_token"`
		ExpiredTime     time.Time `json:"expired_time"`
		CurrentTime     time.Time `json:"current_time"`
	} `json:"sts_token"`
	UploadInfo struct {
		VideoHost string `json:"video_host"`
		SpaceName string `json:"space_name"`
	} `json:"upload_info"`
}

type UploadAuthTokenResp struct {
	BaseResp
	Data UploadAuthToken `json:"data"`
}

type MediaUploadAuthTokenResp struct {
	BaseResp
	Data MediaUploadAuthToken `json:"data"`
}

type ResponseMetadata struct {
	RequestID string `json:"RequestId"`
	Action    string `json:"Action"`
	Version   string `json:"Version"`
	Service   string `json:"Service"`
	Region    string `json:"Region"`
	Error     struct {
		CodeN   int    `json:"CodeN,omitempty"`
		Code    string `json:"Code,omitempty"`
		Message string `json:"Message,omitempty"`
	} `json:"Error,omitempty"`
}

type UploadConfig struct {
	UploadAddress         UploadAddress         `json:"UploadAddress"`
	FallbackUploadAddress FallbackUploadAddress `json:"FallbackUploadAddress"`
	InnerUploadAddress    InnerUploadAddress    `json:"InnerUploadAddress"`
	RequestID             string                `json:"RequestId"`
	SDKParam              interface{}           `json:"SDKParam"`
}

type UploadConfigResp struct {
	ResponseMetadata `json:"ResponseMetadata"`
	Result           UploadConfig `json:"Result"`
}

// StoreInfo describes a storage target
type StoreInfo struct {
	StoreURI      string                 `json:"StoreUri"`
	Auth          string                 `json:"Auth"`
	UploadID      string                 `json:"UploadID"`
	UploadHeader  map[string]interface{} `json:"UploadHeader,omitempty"`
	StorageHeader map[string]interface{} `json:"StorageHeader,omitempty"`
}

// UploadAddress is the upload address info
type UploadAddress struct {
	StoreInfos   []StoreInfo            `json:"StoreInfos"`
	UploadHosts  []string               `json:"UploadHosts"`
	UploadHeader map[string]interface{} `json:"UploadHeader"`
	SessionKey   string                 `json:"SessionKey"`
	Cloud        string                 `json:"Cloud"`
}

// FallbackUploadAddress is the fallback upload address
type FallbackUploadAddress struct {
	StoreInfos   []StoreInfo            `json:"StoreInfos"`
	UploadHosts  []string               `json:"UploadHosts"`
	UploadHeader map[string]interface{} `json:"UploadHeader"`
	SessionKey   string                 `json:"SessionKey"`
	Cloud        string                 `json:"Cloud"`
}

// UploadNode is the upload node info
type UploadNode struct {
	Vid          string                 `json:"Vid"`
	Vids         []string               `json:"Vids"`
	StoreInfos   []StoreInfo            `json:"StoreInfos"`
	UploadHost   string                 `json:"UploadHost"`
	UploadHeader map[string]interface{} `json:"UploadHeader"`
	Type         string                 `json:"Type"`
	Protocol     string                 `json:"Protocol"`
	SessionKey   string                 `json:"SessionKey"`
	NodeConfig   struct {
		UploadMode string `json:"UploadMode"`
	} `json:"NodeConfig"`
	Cluster string `json:"Cluster"`
}

// AdvanceOption holds advanced options
type AdvanceOption struct {
	Parallel      int    `json:"Parallel"`
	Stream        int    `json:"Stream"`
	SliceSize     int    `json:"SliceSize"`
	EncryptionKey string `json:"EncryptionKey"`
}

// InnerUploadAddress is the internal upload address
type InnerUploadAddress struct {
	UploadNodes   []UploadNode  `json:"UploadNodes"`
	AdvanceOption AdvanceOption `json:"AdvanceOption"`
}

// UploadPart describes an uploaded part
type UploadPart struct {
	UploadId   string `json:"uploadid,omitempty"`
	PartNumber string `json:"part_number,omitempty"`
	Crc32      string `json:"crc32,omitempty"`
	Etag       string `json:"etag,omitempty"`
	Mode       string `json:"mode,omitempty"`
}

// UploadResp is the upload response body
type UploadResp struct {
	Code       int        `json:"code"`
	ApiVersion string     `json:"apiversion"`
	Message    string     `json:"message"`
	Data       UploadPart `json:"data"`
}

type VideoCommitUpload struct {
	Vid       string `json:"Vid"`
	VideoMeta struct {
		URI          string  `json:"Uri"`
		Height       int     `json:"Height"`
		Width        int     `json:"Width"`
		OriginHeight int     `json:"OriginHeight"`
		OriginWidth  int     `json:"OriginWidth"`
		Duration     float64 `json:"Duration"`
		Bitrate      int     `json:"Bitrate"`
		Md5          string  `json:"Md5"`
		Format       string  `json:"Format"`
		Size         int     `json:"Size"`
		FileType     string  `json:"FileType"`
		Codec        string  `json:"Codec"`
	} `json:"VideoMeta"`
	WorkflowInput struct {
		TemplateID string `json:"TemplateId"`
	} `json:"WorkflowInput"`
	GetPosterMode string `json:"GetPosterMode"`
}

type VideoCommitUploadResp struct {
	ResponseMetadata ResponseMetadata `json:"ResponseMetadata"`
	Result           struct {
		RequestID string              `json:"RequestId"`
		Results   []VideoCommitUpload `json:"Results"`
	} `json:"Result"`
}

type CommonResp struct {
	Code    int             `json:"code"`
	Msg     string          `json:"msg,omitempty"`
	Message string          `json:"message,omitempty"` // message on error
	Data    json.RawMessage `json:"data,omitempty"`    // raw data, parsed later
	Error   *struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
		Locale  string `json:"locale"`
	} `json:"error,omitempty"`
}

// IsSuccess reports whether the response succeeded
func (r *CommonResp) IsSuccess() bool {
	return r.Code == 0
}

// GetError returns the error carried by the response
func (r *CommonResp) GetError() error {
	if r.IsSuccess() {
		return nil
	}
	// prefer the message field
	errMsg := r.Message
	if errMsg == "" {
		errMsg = r.Msg
	}
	// if the error object exists and carries a detailed message, use that instead
	if r.Error != nil && r.Error.Message != "" {
		errMsg = r.Error.Message
	}

	return fmt.Errorf("[doubao] API error (code: %d): %s", r.Code, errMsg)
}

// UnmarshalData parses the data field into the given type
func (r *CommonResp) UnmarshalData(v interface{}) error {
	if !r.IsSuccess() {
		return r.GetError()
	}

	if len(r.Data) == 0 {
		return nil
	}

	return json.Unmarshal(r.Data, v)
}
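
Keeping Data as json.RawMessage lets callers validate the envelope before paying for a payload decode; a minimal runnable use of the same deferred-decode pattern, with a hypothetical payload shape:

package main

import (
	"encoding/json"
	"fmt"
)

// trimmed copy of the CommonResp pattern above
type CommonResp struct {
	Code int             `json:"code"`
	Data json.RawMessage `json:"data,omitempty"`
}

func main() {
	raw := []byte(`{"code":0,"data":{"user_id":42}}`)
	var env CommonResp
	if err := json.Unmarshal(raw, &env); err != nil || env.Code != 0 {
		return // envelope-level failure: the payload is never decoded
	}
	var payload struct {
		UserID int64 `json:"user_id"`
	}
	_ = json.Unmarshal(env.Data, &payload)
	fmt.Println(payload.UserID) // 42
}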
970  drivers/doubao/util.go  Normal file
@@ -0,0 +1,970 @@
package doubao

import (
    "context"
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "errors"
    "fmt"
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
    "github.com/google/uuid"
    log "github.com/sirupsen/logrus"
    "hash/crc32"
    "io"
    "math"
    "math/rand"
    "net/http"
    "net/url"
    "path/filepath"
    "sort"
    "strconv"
    "strings"
    "sync"
    "time"
)

const (
    DirectoryType      = 1
    FileType           = 2
    LinkType           = 3
    ImageType          = 4
    PagesType          = 5
    VideoType          = 6
    AudioType          = 7
    MeetingMinutesType = 8
)

var FileNodeType = map[int]string{
    1: "directory",
    2: "file",
    3: "link",
    4: "image",
    5: "pages",
    6: "video",
    7: "audio",
    8: "meeting_minutes",
}

const (
    BaseURL          = "https://www.doubao.com"
    FileDataType     = "file"
    ImgDataType      = "image"
    VideoDataType    = "video"
    DefaultChunkSize = int64(5 * 1024 * 1024) // 5MB
    MaxRetryAttempts = 3                      // maximum number of retries
    UserAgent        = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
    Region           = "cn-north-1"
    UploadTimeout    = 3 * time.Minute
)

// helpers that are not part of the Driver interface
func (d *Doubao) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
    reqUrl := BaseURL + path
    req := base.RestyClient.R()
    req.SetHeader("Cookie", d.Cookie)
    if callback != nil {
        callback(req)
    }

    var commonResp CommonResp

    res, err := req.Execute(method, reqUrl)
    log.Debugln(res.String())
    if err != nil {
        return nil, err
    }

    body := res.Body()
    // parse into the common response envelope first
    if err = json.Unmarshal(body, &commonResp); err != nil {
        return nil, err
    }
    // check whether the response succeeded
    if !commonResp.IsSuccess() {
        return body, commonResp.GetError()
    }

    if resp != nil {
        if err = json.Unmarshal(body, resp); err != nil {
            return body, err
        }
    }

    return body, nil
}

func (d *Doubao) getFiles(dirId, cursor string) (resp []File, err error) {
    var r NodeInfoResp

    var body = base.Json{
        "node_id": dirId,
    }
    // if a cursor is given, set the cursor and page size
    if cursor != "" {
        body["cursor"] = cursor
        body["size"] = 50
    } else {
        body["need_full_path"] = false
    }

    _, err = d.request("/samantha/aispace/node_info", http.MethodPost, func(req *resty.Request) {
        req.SetBody(body)
    }, &r)
    if err != nil {
        return nil, err
    }

    if r.Data.Children != nil {
        resp = r.Data.Children
    }

    if r.Data.NextCursor != "-1" {
        // recursively fetch the next page
        nextFiles, err := d.getFiles(dirId, r.Data.NextCursor)
        if err != nil {
            return nil, err
        }

        resp = append(r.Data.Children, nextFiles...)
    }

    return resp, err
}

func (d *Doubao) getUserInfo() (UserInfo, error) {
    var r UserInfoResp

    _, err := d.request("/passport/account/info/v2/", http.MethodGet, nil, &r)
    if err != nil {
        return UserInfo{}, err
    }

    return r.Data, err
}

// sign the request
func (d *Doubao) signRequest(req *resty.Request, method, tokenType, uploadUrl string) error {
    parsedUrl, err := url.Parse(uploadUrl)
    if err != nil {
        return fmt.Errorf("invalid URL format: %w", err)
    }

    var accessKeyId, secretAccessKey, sessionToken string
    var serviceName string

    if tokenType == VideoDataType {
        accessKeyId = d.UploadToken.Samantha.StsToken.AccessKeyID
        secretAccessKey = d.UploadToken.Samantha.StsToken.SecretAccessKey
        sessionToken = d.UploadToken.Samantha.StsToken.SessionToken
        serviceName = "vod"
    } else {
        accessKeyId = d.UploadToken.Alice[tokenType].Auth.AccessKeyID
        secretAccessKey = d.UploadToken.Alice[tokenType].Auth.SecretAccessKey
        sessionToken = d.UploadToken.Alice[tokenType].Auth.SessionToken
        serviceName = "imagex"
    }

    // current time in ISO 8601 format
    now := time.Now().UTC()
    amzDate := now.Format("20060102T150405Z")
    dateStamp := now.Format("20060102")

    req.SetHeader("X-Amz-Date", amzDate)

    if sessionToken != "" {
        req.SetHeader("X-Amz-Security-Token", sessionToken)
    }

    // compute the SHA256 hash of the request body
    var bodyHash string
    if req.Body != nil {
        bodyBytes, ok := req.Body.([]byte)
        if !ok {
            return fmt.Errorf("request body must be []byte")
        }

        bodyHash = hashSHA256(string(bodyBytes))
        req.SetHeader("X-Amz-Content-Sha256", bodyHash)
    } else {
        bodyHash = hashSHA256("")
    }

    // build the canonical request
    canonicalURI := parsedUrl.Path
    if canonicalURI == "" {
        canonicalURI = "/"
    }

    // query parameters sorted alphabetically
    canonicalQueryString := getCanonicalQueryString(req.QueryParam)
    // canonical headers
    canonicalHeaders, signedHeaders := getCanonicalHeadersFromMap(req.Header)
    canonicalRequest := method + "\n" +
        canonicalURI + "\n" +
        canonicalQueryString + "\n" +
        canonicalHeaders + "\n" +
        signedHeaders + "\n" +
        bodyHash

    algorithm := "AWS4-HMAC-SHA256"
    credentialScope := fmt.Sprintf("%s/%s/%s/aws4_request", dateStamp, Region, serviceName)

    stringToSign := algorithm + "\n" +
        amzDate + "\n" +
        credentialScope + "\n" +
        hashSHA256(canonicalRequest)
    // derive the signing key
    signingKey := getSigningKey(secretAccessKey, dateStamp, Region, serviceName)
    // compute the signature
    signature := hmacSHA256Hex(signingKey, stringToSign)
    // build the Authorization header
    authorizationHeader := fmt.Sprintf(
        "%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
        algorithm,
        accessKeyId,
        credentialScope,
        signedHeaders,
        signature,
    )

    req.SetHeader("Authorization", authorizationHeader)

    return nil
}

func (d *Doubao) requestApi(url, method, tokenType string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
    req := base.RestyClient.R()
    req.SetHeaders(map[string]string{
        "user-agent": UserAgent,
    })

    if method == http.MethodPost {
        req.SetHeader("Content-Type", "text/plain;charset=UTF-8")
    }

    if callback != nil {
        callback(req)
    }

    if resp != nil {
        req.SetResult(resp)
    }

    // sign with the custom AWS SigV4 implementation
    err := d.signRequest(req, method, tokenType, url)
    if err != nil {
        return nil, err
    }

    res, err := req.Execute(method, url)
    if err != nil {
        return nil, err
    }

    return res.Body(), nil
}

func (d *Doubao) initUploadToken() (*UploadToken, error) {
    uploadToken := &UploadToken{
        Alice:    make(map[string]UploadAuthToken),
        Samantha: MediaUploadAuthToken{},
    }

    fileAuthToken, err := d.getUploadAuthToken(FileDataType)
    if err != nil {
        return nil, err
    }

    imgAuthToken, err := d.getUploadAuthToken(ImgDataType)
    if err != nil {
        return nil, err
    }

    mediaAuthToken, err := d.getSamantaUploadAuthToken()
    if err != nil {
        return nil, err
    }

    uploadToken.Alice[FileDataType] = fileAuthToken
    uploadToken.Alice[ImgDataType] = imgAuthToken
    uploadToken.Samantha = mediaAuthToken

    return uploadToken, nil
}

func (d *Doubao) getUploadAuthToken(dataType string) (ut UploadAuthToken, err error) {
    var r UploadAuthTokenResp
    _, err = d.request("/alice/upload/auth_token", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "scene":     "bot_chat",
            "data_type": dataType,
        })
    }, &r)

    return r.Data, err
}

func (d *Doubao) getSamantaUploadAuthToken() (mt MediaUploadAuthToken, err error) {
    var r MediaUploadAuthTokenResp
    _, err = d.request("/samantha/media/get_upload_token", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{})
    }, &r)

    return r.Data, err
}

// getUploadConfig fetches the upload configuration
func (d *Doubao) getUploadConfig(upConfig *UploadConfig, dataType string, file model.FileStreamer) error {
    tokenType := dataType
    // helper that builds the upload URL and request parameters
    configureParams := func() (string, map[string]string) {
        var uploadUrl string
        var params map[string]string
        // pick the upload parameters by data type
        switch dataType {
        case VideoDataType:
            // audio/video: use the uploadToken.Samantha configuration
            uploadUrl = d.UploadToken.Samantha.UploadInfo.VideoHost
            params = map[string]string{
                "Action":       "ApplyUploadInner",
                "Version":      "2020-11-19",
                "SpaceName":    d.UploadToken.Samantha.UploadInfo.SpaceName,
                "FileType":     "video",
                "IsInner":      "1",
                "NeedFallback": "true",
                "FileSize":     strconv.FormatInt(file.GetSize(), 10),
                "s":            randomString(),
            }
        case ImgDataType, FileDataType:
            // images and other files: use the matching uploadToken.Alice configuration
            uploadUrl = "https://" + d.UploadToken.Alice[dataType].UploadHost
            params = map[string]string{
                "Action":        "ApplyImageUpload",
                "Version":       "2018-08-01",
                "ServiceId":     d.UploadToken.Alice[dataType].ServiceID,
                "NeedFallback":  "true",
                "FileSize":      strconv.FormatInt(file.GetSize(), 10),
                "FileExtension": filepath.Ext(file.GetName()),
                "s":             randomString(),
            }
        }
        return uploadUrl, params
    }

    // get the initial parameters
    uploadUrl, params := configureParams()

    tokenRefreshed := false
    var configResp UploadConfigResp

    err := d._retryOperation("get upload_config", func() error {
        configResp = UploadConfigResp{}

        _, err := d.requestApi(uploadUrl, http.MethodGet, tokenType, func(req *resty.Request) {
            req.SetQueryParams(params)
        }, &configResp)
        if err != nil {
            return err
        }

        if configResp.ResponseMetadata.Error.Code == "" {
            *upConfig = configResp.Result
            return nil
        }

        // 100028: credentials expired
        if configResp.ResponseMetadata.Error.CodeN == 100028 && !tokenRefreshed {
            log.Debugln("[doubao] Upload token expired, re-fetching...")
            newToken, err := d.initUploadToken()
            if err != nil {
                return fmt.Errorf("failed to refresh token: %w", err)
            }

            d.UploadToken = newToken
            tokenRefreshed = true
            uploadUrl, params = configureParams()

            return retry.Error{errors.New("token refreshed, retry needed")}
        }

        return fmt.Errorf("get upload_config failed: %s", configResp.ResponseMetadata.Error.Message)
    })

    return err
}

// uploadNode registers the uploaded file's node information
func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file model.FileStreamer, dataType string) (UploadNodeResp, error) {
    reqUuid := uuid.New().String()
    var key string
    var nodeType int

    mimetype := file.GetMimetype()
    switch dataType {
    case VideoDataType:
        key = uploadConfig.InnerUploadAddress.UploadNodes[0].Vid
        if strings.HasPrefix(mimetype, "audio/") {
            nodeType = AudioType // audio
        } else {
            nodeType = VideoType // video
        }
    case ImgDataType:
        key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI
        nodeType = ImageType // image
    default: // FileDataType
        key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI
        nodeType = FileType // generic file
    }

    var r UploadNodeResp
    _, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "node_list": []base.Json{
                {
                    "local_id":     reqUuid,
                    "parent_id":    dir.GetID(),
                    "name":         file.GetName(),
                    "key":          key,
                    "node_content": base.Json{},
                    "node_type":    nodeType,
                    "size":         file.GetSize(),
                },
            },
            "request_id": reqUuid,
        })
    }, &r)

    return r, err
}

// Upload implements the simple single-request upload
func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
    data, err := io.ReadAll(file)
    if err != nil {
        return nil, err
    }

    // compute the CRC32
    crc32Hash := crc32.NewIEEE()
    crc32Hash.Write(data)
    crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))

    // build the request path
    uploadNode := config.InnerUploadAddress.UploadNodes[0]
    storeInfo := uploadNode.StoreInfos[0]
    uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)

    uploadResp := UploadResp{}

    if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
        req.SetHeaders(map[string]string{
            "Content-Type":        "application/octet-stream",
            "Content-Crc32":       crc32Value,
            "Content-Length":      fmt.Sprintf("%d", len(data)),
            "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
        })

        req.SetBody(data)
    }, &uploadResp); err != nil {
        return nil, err
    }

    if uploadResp.Code != 2000 {
        return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
    }

    uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
    if err != nil {
        return nil, err
    }

    return &model.Object{
        ID:       uploadNodeResp.Data.NodeList[0].ID,
        Name:     uploadNodeResp.Data.NodeList[0].Name,
        Size:     file.GetSize(),
        IsFolder: false,
    }, nil
}

// UploadByMultipart implements the multipart (chunked) upload
func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fileSize int64, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
    // build the request path
    uploadNode := config.InnerUploadAddress.UploadNodes[0]
    storeInfo := uploadNode.StoreInfos[0]
    uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
    // initialize the multipart upload
    var uploadID string
    err := d._retryOperation("Initialize multipart upload", func() error {
        var err error
        uploadID, err = d.initMultipartUpload(config, uploadUrl, storeInfo)
        return err
    })
    if err != nil {
        return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
    }
    // prepare chunking parameters
    chunkSize := DefaultChunkSize
    if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
        chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
    }
    totalParts := (fileSize + chunkSize - 1) / chunkSize
    // allocate the part records
    parts := make([]UploadPart, totalParts)
    // cache the file locally
    tempFile, err := file.CacheFullInTempFile()
    if err != nil {
        return nil, fmt.Errorf("failed to cache file: %w", err)
    }
    defer tempFile.Close()
    up(10.0) // update progress
    // set up the parallel upload
    threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
        retry.Attempts(1),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))

    var partsMutex sync.Mutex
    // upload all parts in parallel
    for partIndex := int64(0); partIndex < totalParts; partIndex++ {
        if utils.IsCanceled(uploadCtx) {
            break
        }
        partIndex := partIndex
        partNumber := partIndex + 1 // part numbers start at 1

        threadG.Go(func(ctx context.Context) error {
            // compute this part's offset and size
            offset := partIndex * chunkSize
            size := chunkSize
            if partIndex == totalParts-1 {
                size = fileSize - offset
            }

            limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
            // read the part into memory
            data, err := io.ReadAll(limitedReader)
            if err != nil {
                return fmt.Errorf("failed to read part %d: %w", partNumber, err)
            }
            // compute the CRC32
            crc32Value := calculateCRC32(data)
            // upload the part via _retryOperation
            var uploadPart UploadPart
            if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
                var err error
                uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
                return err
            }); err != nil {
                return fmt.Errorf("part %d upload failed: %w", partNumber, err)
            }
            // record the successfully uploaded part
            partsMutex.Lock()
            parts[partIndex] = UploadPart{
                PartNumber: strconv.FormatInt(partNumber, 10),
                Etag:       uploadPart.Etag,
                Crc32:      crc32Value,
            }
            partsMutex.Unlock()
            // update progress
            progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
            up(math.Min(progress, 95.0))

            return nil
        })
    }

    if err = threadG.Wait(); err != nil {
        return nil, err
    }
    // finish the upload: merge the parts
    if err = d._retryOperation("Complete multipart upload", func() error {
        return d.completeMultipartUpload(config, uploadUrl, uploadID, parts)
    }); err != nil {
        return nil, fmt.Errorf("failed to complete multipart upload: %w", err)
    }
    // commit the upload
    if err = d._retryOperation("Commit upload", func() error {
        return d.commitMultipartUpload(config)
    }); err != nil {
        return nil, fmt.Errorf("failed to commit upload: %w", err)
    }

    up(98.0) // bump progress to 98%
    // register the node information
    var uploadNodeResp UploadNodeResp

    if err = d._retryOperation("Upload node", func() error {
        var err error
        uploadNodeResp, err = d.uploadNode(config, dstDir, file, dataType)
        return err
    }); err != nil {
        return nil, fmt.Errorf("failed to upload node: %w", err)
    }

    up(100.0) // upload finished

    return &model.Object{
        ID:       uploadNodeResp.Data.NodeList[0].ID,
        Name:     uploadNodeResp.Data.NodeList[0].Name,
        Size:     file.GetSize(),
        IsFolder: false,
    }, nil
}

// shared helper for upload requests
func (d *Doubao) uploadRequest(uploadUrl string, method string, storeInfo StoreInfo, callback base.ReqCallback, resp interface{}) ([]byte, error) {
    client := resty.New()
    client.SetTransport(&http.Transport{
        DisableKeepAlives: true,  // disable connection reuse
        ForceAttemptHTTP2: false, // force HTTP/1.1
    })
    client.SetTimeout(UploadTimeout)

    req := client.R()
    req.SetHeaders(map[string]string{
        "Host":          strings.Split(uploadUrl, "/")[2],
        "Referer":       BaseURL + "/",
        "Origin":        BaseURL,
        "User-Agent":    UserAgent,
        "X-Storage-U":   d.UserId,
        "Authorization": storeInfo.Auth,
    })

    if method == http.MethodPost {
        req.SetHeader("Content-Type", "text/plain;charset=UTF-8")
    }

    if callback != nil {
        callback(req)
    }

    if resp != nil {
        req.SetResult(resp)
    }

    res, err := req.Execute(method, uploadUrl)
    if err != nil && err != io.EOF {
        return nil, fmt.Errorf("upload request failed: %w", err)
    }

    return res.Body(), nil
}

// initialize a multipart upload
func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, storeInfo StoreInfo) (uploadId string, err error) {
    uploadResp := UploadResp{}

    _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
        req.SetQueryParams(map[string]string{
            "uploadmode": "part",
            "phase":      "init",
        })
    }, &uploadResp)

    if err != nil {
        return uploadId, err
    }

    if uploadResp.Code != 2000 {
        return uploadId, fmt.Errorf("init upload failed: %s", uploadResp.Message)
    }

    return uploadResp.Data.UploadId, nil
}

// upload a single part
func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
    uploadResp := UploadResp{}
    storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]

    _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
        req.SetHeaders(map[string]string{
            "Content-Type":        "application/octet-stream",
            "Content-Crc32":       crc32Value,
            "Content-Length":      fmt.Sprintf("%d", len(data)),
            "Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
        })

        req.SetQueryParams(map[string]string{
            "uploadid":    uploadID,
            "part_number": strconv.FormatInt(partNumber, 10),
            "phase":       "transfer",
        })

        req.SetBody(data)
        req.SetContentLength(true)
    }, &uploadResp)

    if err != nil {
        return resp, err
    }

    if uploadResp.Code != 2000 {
        return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
    } else if uploadResp.Data.Crc32 != crc32Value {
        return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
    }

    return uploadResp.Data, nil
}

// complete the multipart upload
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
    uploadResp := UploadResp{}

    storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]

    body := _convertUploadParts(parts)

    err := utils.Retry(MaxRetryAttempts, time.Second, func() (err error) {
        _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
            req.SetQueryParams(map[string]string{
                "uploadid":   uploadID,
                "phase":      "finish",
                "uploadmode": "part",
            })
            req.SetBody(body)
        }, &uploadResp)

        if err != nil {
            return err
        }
        // check the status code: 2000 means success, 4024 means the parts are still being merged
        if uploadResp.Code != 2000 && uploadResp.Code != 4024 {
            return fmt.Errorf("finish upload failed: %s", uploadResp.Message)
        }

        return err
    })

    if err != nil {
        return fmt.Errorf("failed to complete multipart upload: %w", err)
    }

    return nil
}

func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
    uploadUrl := d.UploadToken.Samantha.UploadInfo.VideoHost
    params := map[string]string{
        "Action":    "CommitUploadInner",
        "Version":   "2020-11-19",
        "SpaceName": d.UploadToken.Samantha.UploadInfo.SpaceName,
    }
    tokenType := VideoDataType

    videoCommitUploadResp := VideoCommitUploadResp{}

    jsonBytes, err := json.Marshal(base.Json{
        "SessionKey": uploadConfig.InnerUploadAddress.UploadNodes[0].SessionKey,
        "Functions":  []base.Json{},
    })
    if err != nil {
        return fmt.Errorf("failed to marshal request data: %w", err)
    }

    _, err = d.requestApi(uploadUrl, http.MethodPost, tokenType, func(req *resty.Request) {
        req.SetHeader("Content-Type", "application/json")
        req.SetQueryParams(params)
        req.SetBody(jsonBytes)

    }, &videoCommitUploadResp)
    if err != nil {
        return err
    }

    return nil
}

// calculateCRC32 computes the CRC32 of data as a hex string
func calculateCRC32(data []byte) string {
    hash := crc32.NewIEEE()
    hash.Write(data)
    return hex.EncodeToString(hash.Sum(nil))
}

// _retryOperation retries an operation with backoff
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
    return retry.Do(
        fn,
        retry.Attempts(MaxRetryAttempts),
        retry.Delay(500*time.Millisecond),
        retry.DelayType(retry.BackOffDelay),
        retry.MaxJitter(200*time.Millisecond),
        retry.OnRetry(func(n uint, err error) {
            log.Debugf("[doubao] %s retry #%d: %v", operation, n+1, err)
        }),
    )
}

// _convertUploadParts serializes the part list into a string
func _convertUploadParts(parts []UploadPart) string {
    if len(parts) == 0 {
        return ""
    }

    var result strings.Builder

    for i, part := range parts {
        if i > 0 {
            result.WriteString(",")
        }
        result.WriteString(fmt.Sprintf("%s:%s", part.PartNumber, part.Crc32))
    }

    return result.String()
}

// build the canonical query string
func getCanonicalQueryString(query url.Values) string {
    if len(query) == 0 {
        return ""
    }

    keys := make([]string, 0, len(query))
    for k := range query {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    parts := make([]string, 0, len(keys))
    for _, k := range keys {
        values := query[k]
        for _, v := range values {
            parts = append(parts, urlEncode(k)+"="+urlEncode(v))
        }
    }

    return strings.Join(parts, "&")
}

func urlEncode(s string) string {
    s = url.QueryEscape(s)
    s = strings.ReplaceAll(s, "+", "%20")
    return s
}

// build the canonical headers and the signed-header list
func getCanonicalHeadersFromMap(headers map[string][]string) (string, string) {
    // headers excluded from signing
    unsignableHeaders := map[string]bool{
        "authorization":     true,
        "content-type":      true,
        "content-length":    true,
        "user-agent":        true,
        "presigned-expires": true,
        "expect":            true,
        "x-amzn-trace-id":   true,
    }
    headerValues := make(map[string]string)
    var signedHeadersList []string

    for k, v := range headers {
        if len(v) == 0 {
            continue
        }

        lowerKey := strings.ToLower(k)
        // check whether the header is signable
        if strings.HasPrefix(lowerKey, "x-amz-") || !unsignableHeaders[lowerKey] {
            value := strings.TrimSpace(v[0])
            value = strings.Join(strings.Fields(value), " ")
            headerValues[lowerKey] = value
            signedHeadersList = append(signedHeadersList, lowerKey)
        }
    }

    sort.Strings(signedHeadersList)

    var canonicalHeadersStr strings.Builder
    for _, key := range signedHeadersList {
        canonicalHeadersStr.WriteString(key)
        canonicalHeadersStr.WriteString(":")
        canonicalHeadersStr.WriteString(headerValues[key])
        canonicalHeadersStr.WriteString("\n")
    }

    signedHeaders := strings.Join(signedHeadersList, ";")

    return canonicalHeadersStr.String(), signedHeaders
}

// hmacSHA256 computes an HMAC-SHA256 digest
func hmacSHA256(key []byte, data string) []byte {
    h := hmac.New(sha256.New, key)
    h.Write([]byte(data))
    return h.Sum(nil)
}

// hmacSHA256Hex computes an HMAC-SHA256 digest and returns it as a hex string
func hmacSHA256Hex(key []byte, data string) string {
    return hex.EncodeToString(hmacSHA256(key, data))
}

// hashSHA256 computes a SHA256 hash and returns it as a hex string
func hashSHA256(data string) string {
    h := sha256.New()
    h.Write([]byte(data))
    return hex.EncodeToString(h.Sum(nil))
}

// getSigningKey derives the SigV4 signing key
func getSigningKey(secretKey, dateStamp, region, service string) []byte {
    kDate := hmacSHA256([]byte("AWS4"+secretKey), dateStamp)
    kRegion := hmacSHA256(kDate, region)
    kService := hmacSHA256(kRegion, service)
    kSigning := hmacSHA256(kService, "aws4_request")
    return kSigning
}

// generateContentDisposition builds an RFC 5987-compliant Content-Disposition header
func generateContentDisposition(filename string) string {
    // RFC 2047-style encoding for the filename part
    encodedName := urlEncode(filename)

    // RFC 5987 encoding for the filename* part
    encodedNameRFC5987 := encodeRFC5987(filename)

    return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s",
        encodedName, encodedNameRFC5987)
}

// encodeRFC5987 encodes a string per RFC 5987, for non-ASCII characters in HTTP header parameters
func encodeRFC5987(s string) string {
    var buf strings.Builder
    for _, r := range []byte(s) {
        // per RFC 5987, only letters, digits, and a few special characters stay unencoded
        if (r >= 'a' && r <= 'z') ||
            (r >= 'A' && r <= 'Z') ||
            (r >= '0' && r <= '9') ||
            r == '-' || r == '.' || r == '_' || r == '~' {
            buf.WriteByte(r)
        } else {
            // everything else is percent-encoded
            fmt.Fprintf(&buf, "%%%02X", r)
        }
    }
    return buf.String()
}

func randomString() string {
    const charset = "0123456789abcdefghijklmnopqrstuvwxyz"
    const length = 11 // 11-character random string

    var sb strings.Builder
    sb.Grow(length)

    for i := 0; i < length; i++ {
        sb.WriteByte(charset[rand.Intn(len(charset))])
    }

    return sb.String()
}
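Note: signRequest above implements AWS Signature Version 4 by hand (canonical request, string-to-sign, then a chained HMAC key derivation). A minimal self-contained sketch of the same derivation chain, with placeholder credentials rather than real Doubao STS values:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// hmacSHA256 mirrors the helper in util.go: HMAC-SHA256 of data under key.
func hmacSHA256(key []byte, data string) []byte {
    h := hmac.New(sha256.New, key)
    h.Write([]byte(data))
    return h.Sum(nil)
}

func main() {
    // Placeholder inputs; the driver feeds in the STS secret key, the
    // UTC date stamp, Region ("cn-north-1"), and "vod" or "imagex".
    secretKey, dateStamp, region, service := "EXAMPLE_SECRET", "20250101", "cn-north-1", "imagex"

    // SigV4 key derivation: each step keys the next HMAC with the
    // previous digest, scoping the key to date/region/service.
    kDate := hmacSHA256([]byte("AWS4"+secretKey), dateStamp)
    kRegion := hmacSHA256(kDate, region)
    kService := hmacSHA256(kRegion, service)
    kSigning := hmacSHA256(kService, "aws4_request")

    // The final signature is HMAC(kSigning, stringToSign), hex-encoded;
    // the string-to-sign is illustrative here.
    stringToSign := "AWS4-HMAC-SHA256\n20250101T000000Z\n20250101/cn-north-1/imagex/aws4_request\n<hash-of-canonical-request>"
    fmt.Println(hex.EncodeToString(hmacSHA256(kSigning, stringToSign)))
}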
drivers/doubao_share/driver.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package doubao_share

import (
    "context"
    "errors"
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/go-resty/resty/v2"
    "net/http"
)

type DoubaoShare struct {
    model.Storage
    Addition
    RootFiles []RootFileList
}

func (d *DoubaoShare) Config() driver.Config {
    return config
}

func (d *DoubaoShare) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *DoubaoShare) Init(ctx context.Context) error {
    // initialize the virtual share list
    if err := d.initShareList(); err != nil {
        return err
    }

    return nil
}

func (d *DoubaoShare) Drop(ctx context.Context) error {
    return nil
}

func (d *DoubaoShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    // check whether this is the root directory
    if dir.GetID() == "" && dir.GetPath() == "/" {
        return d.listRootDirectory(ctx)
    }

    // not the root directory: handle the different cases
    if fo, ok := dir.(*FileObject); ok {
        if fo.ShareID == "" {
            // virtual directory: list its children
            return d.listVirtualDirectoryContent(dir)
        } else {
            // directory with a share ID: fetch the files under that share
            shareId, relativePath, err := d._findShareAndPath(dir)
            if err != nil {
                return nil, err
            }
            return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath)
        }
    }

    // fall back to the generic lookup
    shareId, relativePath, err := d._findShareAndPath(dir)
    if err != nil {
        return nil, err
    }

    // fetch the files under the given path
    return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath)
}

func (d *DoubaoShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    var downloadUrl string

    if u, ok := file.(*FileObject); ok {
        switch u.NodeType {
        case VideoType, AudioType:
            var r GetVideoFileUrlResp
            _, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) {
                req.SetBody(base.Json{
                    "key":      u.Key,
                    "share_id": u.ShareID,
                    "node_id":  file.GetID(),
                })
            }, &r)
            if err != nil {
                return nil, err
            }

            downloadUrl = r.Data.OriginalMediaInfo.MainURL
        default:
            var r GetFileUrlResp
            _, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) {
                req.SetBody(base.Json{
                    "uris": []string{u.Key},
                    "type": FileNodeType[u.NodeType],
                })
            }, &r)
            if err != nil {
                return nil, err
            }

            downloadUrl = r.Data.FileUrls[0].MainURL
        }

        // generate a standards-compliant Content-Disposition header
        contentDisposition := generateContentDisposition(u.Name)

        return &model.Link{
            URL: downloadUrl,
            Header: http.Header{
                "User-Agent":          []string{UserAgent},
                "Content-Disposition": []string{contentDisposition},
            },
        }, nil
    }

    return nil, errors.New("can't convert obj to URL")
}

func (d *DoubaoShare) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    // TODO create folder, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    // TODO move obj, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    // TODO rename obj, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    // TODO copy obj, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) Remove(ctx context.Context, obj model.Obj) error {
    // TODO remove obj, optional
    return errs.NotImplement
}

func (d *DoubaoShare) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // TODO upload file, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
    // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
    // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
    // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
    return nil, errs.NotImplement
}

func (d *DoubaoShare) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
    // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
    // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
    // return errs.NotImplement to use an internal archive tool
    return nil, errs.NotImplement
}

//func (d *DoubaoShare) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//    return nil, errs.NotSupport
//}

var _ driver.Driver = (*DoubaoShare)(nil)
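Note: Link above builds the download header with generateContentDisposition (defined in the doubao driver's util.go earlier in this diff). A standalone sketch of the dual-field RFC 6266/RFC 5987 output for a non-ASCII filename, re-implementing the two small helpers outside the package:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// urlEncode mirrors the driver helper: query-escape, but keep spaces as %20.
func urlEncode(s string) string {
    return strings.ReplaceAll(url.QueryEscape(s), "+", "%20")
}

// encodeRFC5987 mirrors the driver helper: percent-encode every byte
// outside the RFC 5987 attr-char set (ALPHA / DIGIT / "-" "." "_" "~").
func encodeRFC5987(s string) string {
    var buf strings.Builder
    for _, b := range []byte(s) {
        if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') ||
            (b >= '0' && b <= '9') || b == '-' || b == '.' || b == '_' || b == '~' {
            buf.WriteByte(b)
        } else {
            fmt.Fprintf(&buf, "%%%02X", b)
        }
    }
    return buf.String()
}

func main() {
    name := "报告.pdf"
    // Legacy clients read filename; RFC 5987-aware clients prefer filename*.
    fmt.Printf("attachment; filename=\"%s\"; filename*=utf-8''%s\n",
        urlEncode(name), encodeRFC5987(name))
    // Prints: attachment; filename="%E6%8A%A5%E5%91%8A.pdf"; filename*=utf-8''%E6%8A%A5%E5%91%8A.pdf
}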
drivers/doubao_share/meta.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package doubao_share

import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
    driver.RootPath
    Cookie   string `json:"cookie" type:"text"`
    ShareIds string `json:"share_ids" type:"text" required:"true"`
}

var config = driver.Config{
    Name:              "DoubaoShare",
    LocalSort:         true,
    OnlyLocal:         false,
    OnlyProxy:         false,
    NoCache:           false,
    NoUpload:          true,
    NeedMs:            false,
    DefaultRoot:       "/",
    CheckStatus:       false,
    Alert:             "",
    NoOverwriteUpload: false,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &DoubaoShare{}
    })
}
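Note on the ShareIds field: judging from _parseShareConfigs and _extractShareId in util.go further down, each line of the setting appears to be either a share link (or bare share ID), optionally followed by a virtual mount path after a "|" separator. A small sketch of that parsing, with hypothetical share IDs and paths:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

func main() {
    // Hypothetical share_ids value: one entry per line, "id-or-URL|mount/path".
    shareIds := `
https://www.doubao.com/drive/s/abc123XYZ
def456UVW|docs/reports
`
    re := regexp.MustCompile(`/drive/s/([a-zA-Z0-9]+)`) // same pattern as _extractShareId
    for _, line := range strings.Split(strings.TrimSpace(shareIds), "\n") {
        parts := strings.Split(strings.TrimSpace(line), "|")
        id := parts[0]
        if m := re.FindStringSubmatch(id); len(m) > 1 {
            id = m[1] // URL form: take the captured ID
        }
        path := ""
        if len(parts) > 1 {
            path = strings.Trim(parts[1], "/")
        }
        fmt.Printf("share %q mounted at %q\n", id, path)
    }
}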
drivers/doubao_share/types.go (new file, 207 lines)
@@ -0,0 +1,207 @@
package doubao_share

import (
    "encoding/json"
    "fmt"
    "github.com/alist-org/alist/v3/internal/model"
)

type BaseResp struct {
    Code int    `json:"code"`
    Msg  string `json:"msg"`
}

type NodeInfoData struct {
    Share      ShareInfo   `json:"share,omitempty"`
    Creator    CreatorInfo `json:"creator,omitempty"`
    NodeList   []File      `json:"node_list,omitempty"`
    NodeInfo   File        `json:"node_info,omitempty"`
    Children   []File      `json:"children,omitempty"`
    Path       FilePath    `json:"path,omitempty"`
    NextCursor string      `json:"next_cursor,omitempty"`
    HasMore    bool        `json:"has_more,omitempty"`
}

type NodeInfoResp struct {
    BaseResp
    NodeInfoData `json:"data"`
}

type RootFileList struct {
    ShareID     string
    VirtualPath string
    NodeInfo    NodeInfoData
    Child       *[]RootFileList
}

type File struct {
    ID                  string `json:"id"`
    Name                string `json:"name"`
    Key                 string `json:"key"`
    NodeType            int    `json:"node_type"`
    Size                int64  `json:"size"`
    Source              int    `json:"source"`
    NameReviewStatus    int    `json:"name_review_status"`
    ContentReviewStatus int    `json:"content_review_status"`
    RiskReviewStatus    int    `json:"risk_review_status"`
    ConversationID      string `json:"conversation_id"`
    ParentID            string `json:"parent_id"`
    CreateTime          int64  `json:"create_time"`
    UpdateTime          int64  `json:"update_time"`
}

type FileObject struct {
    model.Object
    ShareID  string
    Key      string
    NodeID   string
    NodeType int
}

type ShareInfo struct {
    ShareID   string `json:"share_id"`
    FirstNode struct {
        ID       string `json:"id"`
        Name     string `json:"name"`
        Key      string `json:"key"`
        NodeType int    `json:"node_type"`
        Size     int    `json:"size"`
        Source   int    `json:"source"`
        Content  struct {
            LinkFileType  string `json:"link_file_type"`
            ImageWidth    int    `json:"image_width"`
            ImageHeight   int    `json:"image_height"`
            AiSkillStatus int    `json:"ai_skill_status"`
        } `json:"content"`
        NameReviewStatus    int    `json:"name_review_status"`
        ContentReviewStatus int    `json:"content_review_status"`
        RiskReviewStatus    int    `json:"risk_review_status"`
        ConversationID      string `json:"conversation_id"`
        ParentID            string `json:"parent_id"`
        CreateTime          int    `json:"create_time"`
        UpdateTime          int    `json:"update_time"`
    } `json:"first_node"`
    NodeCount      int    `json:"node_count"`
    CreateTime     int    `json:"create_time"`
    Channel        string `json:"channel"`
    InfluencerType int    `json:"influencer_type"`
}

type CreatorInfo struct {
    EntityID string `json:"entity_id"`
    UserName string `json:"user_name"`
    NickName string `json:"nick_name"`
    Avatar   struct {
        OriginURL string `json:"origin_url"`
        TinyURL   string `json:"tiny_url"`
        URI       string `json:"uri"`
    } `json:"avatar"`
}

type FilePath []struct {
    ID                  string `json:"id"`
    Name                string `json:"name"`
    Key                 string `json:"key"`
    NodeType            int    `json:"node_type"`
    Size                int    `json:"size"`
    Source              int    `json:"source"`
    NameReviewStatus    int    `json:"name_review_status"`
    ContentReviewStatus int    `json:"content_review_status"`
    RiskReviewStatus    int    `json:"risk_review_status"`
    ConversationID      string `json:"conversation_id"`
    ParentID            string `json:"parent_id"`
    CreateTime          int    `json:"create_time"`
    UpdateTime          int    `json:"update_time"`
}

type GetFileUrlResp struct {
    BaseResp
    Data struct {
        FileUrls []struct {
            URI     string `json:"uri"`
            MainURL string `json:"main_url"`
            BackURL string `json:"back_url"`
        } `json:"file_urls"`
    } `json:"data"`
}

type GetVideoFileUrlResp struct {
    BaseResp
    Data struct {
        MediaType string `json:"media_type"`
        MediaInfo []struct {
            Meta struct {
                Height     string  `json:"height"`
                Width      string  `json:"width"`
                Format     string  `json:"format"`
                Duration   float64 `json:"duration"`
                CodecType  string  `json:"codec_type"`
                Definition string  `json:"definition"`
            } `json:"meta"`
            MainURL   string `json:"main_url"`
            BackupURL string `json:"backup_url"`
        } `json:"media_info"`
        OriginalMediaInfo struct {
            Meta struct {
                Height     string  `json:"height"`
                Width      string  `json:"width"`
                Format     string  `json:"format"`
                Duration   float64 `json:"duration"`
                CodecType  string  `json:"codec_type"`
                Definition string  `json:"definition"`
            } `json:"meta"`
            MainURL   string `json:"main_url"`
            BackupURL string `json:"backup_url"`
        } `json:"original_media_info"`
        PosterURL      string `json:"poster_url"`
        PlayableStatus int    `json:"playable_status"`
    } `json:"data"`
}

type CommonResp struct {
    Code    int             `json:"code"`
    Msg     string          `json:"msg,omitempty"`
    Message string          `json:"message,omitempty"` // message returned on error
    Data    json.RawMessage `json:"data,omitempty"`    // raw payload, parsed later
    Error   *struct {
        Code    int    `json:"code"`
        Message string `json:"message"`
        Locale  string `json:"locale"`
    } `json:"error,omitempty"`
}

// IsSuccess reports whether the response succeeded
func (r *CommonResp) IsSuccess() bool {
    return r.Code == 0
}

// GetError returns the error carried by the response
func (r *CommonResp) GetError() error {
    if r.IsSuccess() {
        return nil
    }
    // prefer the message field
    errMsg := r.Message
    if errMsg == "" {
        errMsg = r.Msg
    }
    // if the error object exists and carries a detailed message, use it
    if r.Error != nil && r.Error.Message != "" {
        errMsg = r.Error.Message
    }

    return fmt.Errorf("[doubao] API error (code: %d): %s", r.Code, errMsg)
}

// UnmarshalData unmarshals the data field into the given type
func (r *CommonResp) UnmarshalData(v interface{}) error {
    if !r.IsSuccess() {
        return r.GetError()
    }

    if len(r.Data) == 0 {
        return nil
    }

    return json.Unmarshal(r.Data, v)
}
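Note: CommonResp keeps Data as json.RawMessage so the envelope's status can be checked before committing to a payload shape; UnmarshalData then does the second decoding pass. A minimal sketch of that two-stage pattern, with a made-up payload type:

package main

import (
    "encoding/json"
    "fmt"
)

// Envelope mirrors the CommonResp idea: decode the status first,
// defer the payload via json.RawMessage.
type Envelope struct {
    Code int             `json:"code"`
    Msg  string          `json:"msg,omitempty"`
    Data json.RawMessage `json:"data,omitempty"`
}

// nodeInfo is a hypothetical payload for illustration only.
type nodeInfo struct {
    NextCursor string `json:"next_cursor"`
}

func main() {
    body := []byte(`{"code":0,"data":{"next_cursor":"-1"}}`)

    var env Envelope
    if err := json.Unmarshal(body, &env); err != nil || env.Code != 0 {
        fmt.Println("request failed:", env.Code, env.Msg, err)
        return
    }

    // Second pass: only now commit to a concrete payload shape.
    var info nodeInfo
    if err := json.Unmarshal(env.Data, &info); err != nil {
        fmt.Println("bad payload:", err)
        return
    }
    fmt.Println("next cursor:", info.NextCursor) // next cursor: -1
}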
drivers/doubao_share/util.go (new file, 744 lines)
@@ -0,0 +1,744 @@
|
|||||||
|
package doubao_share
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
|
"github.com/go-resty/resty/v2"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
DirectoryType = 1
|
||||||
|
FileType = 2
|
||||||
|
LinkType = 3
|
||||||
|
ImageType = 4
|
||||||
|
PagesType = 5
|
||||||
|
VideoType = 6
|
||||||
|
AudioType = 7
|
||||||
|
MeetingMinutesType = 8
|
||||||
|
)
|
||||||
|
|
||||||
|
var FileNodeType = map[int]string{
|
||||||
|
1: "directory",
|
||||||
|
2: "file",
|
||||||
|
3: "link",
|
||||||
|
4: "image",
|
||||||
|
5: "pages",
|
||||||
|
6: "video",
|
||||||
|
7: "audio",
|
||||||
|
8: "meeting_minutes",
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
BaseURL = "https://www.doubao.com"
|
||||||
|
FileDataType = "file"
|
||||||
|
ImgDataType = "image"
|
||||||
|
VideoDataType = "video"
|
||||||
|
UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (d *DoubaoShare) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
|
reqUrl := BaseURL + path
|
||||||
|
req := base.RestyClient.R()
|
||||||
|
|
||||||
|
req.SetHeaders(map[string]string{
|
||||||
|
"Cookie": d.Cookie,
|
||||||
|
"User-Agent": UserAgent,
|
||||||
|
})
|
||||||
|
|
||||||
|
req.SetQueryParams(map[string]string{
|
||||||
|
"version_code": "20800",
|
||||||
|
"device_platform": "web",
|
||||||
|
})
|
||||||
|
|
||||||
|
if callback != nil {
|
||||||
|
callback(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
var commonResp CommonResp
|
||||||
|
|
||||||
|
res, err := req.Execute(method, reqUrl)
|
||||||
|
log.Debugln(res.String())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
body := res.Body()
|
||||||
|
// 先解析为通用响应
|
||||||
|
if err = json.Unmarshal(body, &commonResp); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// 检查响应是否成功
|
||||||
|
if !commonResp.IsSuccess() {
|
||||||
|
return body, commonResp.GetError()
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp != nil {
|
||||||
|
if err = json.Unmarshal(body, resp); err != nil {
|
||||||
|
return body, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DoubaoShare) getFiles(dirId, nodeId, cursor string) (resp []File, err error) {
|
||||||
|
var r NodeInfoResp
|
||||||
|
|
||||||
|
var body = base.Json{
|
||||||
|
"share_id": dirId,
|
||||||
|
"node_id": nodeId,
|
||||||
|
}
|
||||||
|
// 如果有游标,则设置游标和大小
|
||||||
|
if cursor != "" {
|
||||||
|
body["cursor"] = cursor
|
||||||
|
body["size"] = 50
|
||||||
|
} else {
|
||||||
|
body["need_full_path"] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = d.request("/samantha/aispace/share/node_info", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(body)
|
||||||
|
}, &r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.NodeInfoData.Children != nil {
|
||||||
|
resp = r.NodeInfoData.Children
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.NodeInfoData.NextCursor != "-1" {
|
||||||
|
// 递归获取下一页
|
||||||
|
nextFiles, err := d.getFiles(dirId, nodeId, r.NodeInfoData.NextCursor)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp = append(r.NodeInfoData.Children, nextFiles...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DoubaoShare) getShareOverview(shareId, cursor string) (resp []File, err error) {
|
||||||
|
return d.getShareOverviewWithHistory(shareId, cursor, make(map[string]bool))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DoubaoShare) getShareOverviewWithHistory(shareId, cursor string, cursorHistory map[string]bool) (resp []File, err error) {
|
||||||
|
var r NodeInfoResp
|
||||||
|
|
||||||
|
var body = base.Json{
|
||||||
|
"share_id": shareId,
|
||||||
|
}
|
||||||
|
// 如果有游标,则设置游标和大小
|
||||||
|
if cursor != "" {
|
||||||
|
body["cursor"] = cursor
|
||||||
|
body["size"] = 50
|
||||||
|
} else {
|
||||||
|
body["need_full_path"] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = d.request("/samantha/aispace/share/overview", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(body)
|
||||||
|
}, &r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.NodeInfoData.NodeList != nil {
|
||||||
|
resp = r.NodeInfoData.NodeList
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.NodeInfoData.NextCursor != "-1" {
|
||||||
|
// 检查游标是否重复出现,防止无限循环
|
||||||
|
if cursorHistory[r.NodeInfoData.NextCursor] {
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// 记录当前游标
|
||||||
|
cursorHistory[r.NodeInfoData.NextCursor] = true
|
||||||
|
|
||||||
|
// 递归获取下一页
|
||||||
|
nextFiles, err := d.getShareOverviewWithHistory(shareId, r.NodeInfoData.NextCursor, cursorHistory)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp = append(resp, nextFiles...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DoubaoShare) initShareList() error {
|
||||||
|
if d.Addition.ShareIds == "" {
|
||||||
|
return fmt.Errorf("share_ids is empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 解析分享配置
|
||||||
|
shareConfigs, rootShares, err := d._parseShareConfigs()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 检查路径冲突
|
||||||
|
if err := d._detectPathConflicts(shareConfigs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 构建树形结构
|
||||||
|
rootMap := d._buildTreeStructure(shareConfigs, rootShares)
|
||||||
|
|
||||||
|
// 提取顶级节点
|
||||||
|
topLevelNodes := d._extractTopLevelNodes(rootMap, rootShares)
|
||||||
|
if len(topLevelNodes) == 0 {
|
||||||
|
return fmt.Errorf("no valid share_ids found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 存储结果
|
||||||
|
d.RootFiles = topLevelNodes
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// 从配置中解析分享ID和路径
|
||||||
|
func (d *DoubaoShare) _parseShareConfigs() (map[string]string, []string, error) {
|
||||||
|
shareConfigs := make(map[string]string) // 路径 -> 分享ID
|
||||||
|
rootShares := make([]string, 0) // 根目录显示的分享ID
|
||||||
|
|
||||||
|
lines := strings.Split(strings.TrimSpace(d.Addition.ShareIds), "\n")
|
||||||
|
if len(lines) == 0 {
|
||||||
|
return nil, nil, fmt.Errorf("no share_ids found")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
line = strings.TrimSpace(line)
|
||||||
|
if line == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// 解析分享ID和路径
|
||||||
|
parts := strings.Split(line, "|")
|
||||||
|
var shareId, sharePath string
|
||||||
|
|
||||||
|
if len(parts) == 1 {
|
||||||
|
// 无路径分享,直接在根目录显示
|
||||||
|
shareId = _extractShareId(parts[0])
|
||||||
|
if shareId != "" {
|
||||||
|
rootShares = append(rootShares, shareId)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else if len(parts) >= 2 {
|
||||||
|
shareId = _extractShareId(parts[0])
|
||||||
|
sharePath = strings.Trim(parts[1], "/")
|
||||||
|
}
|
||||||
|
|
||||||
|
if shareId == "" {
|
||||||
|
log.Warnf("[doubao_share] Invalid Share_id Format: %s", line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// 空路径也加入根目录显示
|
||||||
|
if sharePath == "" {
|
||||||
|
rootShares = append(rootShares, shareId)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// 添加到路径映射
|
||||||
|
shareConfigs[sharePath] = shareId
|
||||||
|
}
|
||||||
|
|
||||||
|
return shareConfigs, rootShares, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// 检测路径冲突
|
||||||
|
func (d *DoubaoShare) _detectPathConflicts(shareConfigs map[string]string) error {
|
||||||
|
// 检查直接路径冲突
|
||||||
|
pathToShareIds := make(map[string][]string)
|
||||||
|
for sharePath, id := range shareConfigs {
|
||||||
|
pathToShareIds[sharePath] = append(pathToShareIds[sharePath], id)
|
||||||
|
}
|
||||||
|
|
||||||
|
for sharePath, ids := range pathToShareIds {
|
||||||
|
if len(ids) > 1 {
|
||||||
|
return fmt.Errorf("路径冲突: 路径 '%s' 被多个不同的分享ID使用: %s",
|
||||||
|
sharePath, strings.Join(ids, ", "))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 检查层次冲突
|
||||||
|
for path1, id1 := range shareConfigs {
|
||||||
|
for path2, id2 := range shareConfigs {
|
||||||
|
if path1 == path2 || id1 == id2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// 检查前缀冲突
|
||||||
|
if strings.HasPrefix(path2, path1+"/") || strings.HasPrefix(path1, path2+"/") {
|
||||||
|
return fmt.Errorf("路径冲突: 路径 '%s' (ID: %s) 与路径 '%s' (ID: %s) 存在层次冲突",
|
||||||
|
path1, id1, path2, id2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Build the tree structure
func (d *DoubaoShare) _buildTreeStructure(shareConfigs map[string]string, rootShares []string) map[string]*RootFileList {
	rootMap := make(map[string]*RootFileList)

	// Add all share nodes
	for sharePath, shareId := range shareConfigs {
		children := make([]RootFileList, 0)
		rootMap[sharePath] = &RootFileList{
			ShareID:     shareId,
			VirtualPath: sharePath,
			NodeInfo:    NodeInfoData{},
			Child:       &children,
		}
	}

	// Build parent-child relationships
	for sharePath, node := range rootMap {
		if sharePath == "" {
			continue
		}

		pathParts := strings.Split(sharePath, "/")
		if len(pathParts) > 1 {
			parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")

			// Make sure every parent path has been created
			_ensurePathExists(rootMap, parentPath)

			// Attach the current node to its parent
			if parent, exists := rootMap[parentPath]; exists {
				*parent.Child = append(*parent.Child, *node)
			}
		}
	}

	return rootMap
}

// Extract the top-level nodes
func (d *DoubaoShare) _extractTopLevelNodes(rootMap map[string]*RootFileList, rootShares []string) []RootFileList {
	var topLevelNodes []RootFileList

	// Add root-directory shares
	for _, shareId := range rootShares {
		children := make([]RootFileList, 0)
		topLevelNodes = append(topLevelNodes, RootFileList{
			ShareID:     shareId,
			VirtualPath: "",
			NodeInfo:    NodeInfoData{},
			Child:       &children,
		})
	}

	// Add top-level directories
	for rootPath, node := range rootMap {
		if rootPath == "" {
			continue
		}

		isTopLevel := true
		pathParts := strings.Split(rootPath, "/")

		if len(pathParts) > 1 {
			parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")
			if _, exists := rootMap[parentPath]; exists {
				isTopLevel = false
			}
		}

		if isTopLevel {
			topLevelNodes = append(topLevelNodes, *node)
		}
	}

	return topLevelNodes
}
// Ensure the path exists, creating all necessary intermediate nodes
func _ensurePathExists(rootMap map[string]*RootFileList, path string) {
	if path == "" {
		return
	}

	// If the path already exists, there is nothing more to do
	if _, exists := rootMap[path]; exists {
		return
	}

	// Create the node for the current path
	children := make([]RootFileList, 0)
	rootMap[path] = &RootFileList{
		ShareID:     "",
		VirtualPath: path,
		NodeInfo:    NodeInfoData{},
		Child:       &children,
	}

	// Handle the parent path
	pathParts := strings.Split(path, "/")
	if len(pathParts) > 1 {
		parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")

		// Make sure the parent path exists
		_ensurePathExists(rootMap, parentPath)

		// Add the current node as a child of its parent
		if parent, exists := rootMap[parentPath]; exists {
			*parent.Child = append(*parent.Child, *rootMap[path])
		}
	}
}
// _extractShareId extracts the share ID from a URL or a bare ID
func _extractShareId(input string) string {
	input = strings.TrimSpace(input)
	if strings.HasPrefix(input, "http") {
		regex := regexp.MustCompile(`/drive/s/([a-zA-Z0-9]+)`)
		if matches := regex.FindStringSubmatch(input); len(matches) > 1 {
			return matches[1]
		}
		return ""
	}
	return input // return the ID as-is
}
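A few illustrative inputs for _extractShareId; the full URL shape is an assumption, since only the /drive/s/<id> segment matters to the regex:

	_extractShareId("https://www.doubao.com/drive/s/abc123XYZ") // "abc123XYZ" (assumed share-link shape)
	_extractShareId("abc123XYZ")                                // "abc123XYZ" (bare IDs pass through)
	_extractShareId("https://example.com/no/share/segment")     // "" (no /drive/s/ match)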
// _findRootFileByShareID finds the config entry with the given ShareID
func _findRootFileByShareID(rootFiles []RootFileList, shareID string) *RootFileList {
	for i, rf := range rootFiles {
		if rf.ShareID == shareID {
			return &rootFiles[i]
		}
		if rf.Child != nil && len(*rf.Child) > 0 {
			if found := _findRootFileByShareID(*rf.Child, shareID); found != nil {
				return found
			}
		}
	}
	return nil
}

// _findNodeByPath finds the node at the given path
func _findNodeByPath(rootFiles []RootFileList, path string) *RootFileList {
	for i, rf := range rootFiles {
		if rf.VirtualPath == path {
			return &rootFiles[i]
		}
		if rf.Child != nil && len(*rf.Child) > 0 {
			if found := _findNodeByPath(*rf.Child, path); found != nil {
				return found
			}
		}
	}
	return nil
}
// _findShareByPath looks up the share and relative path for a given path
func _findShareByPath(rootFiles []RootFileList, path string) (*RootFileList, string) {
	// Exact match or sub-path match
	for i, rf := range rootFiles {
		if rf.VirtualPath == path {
			return &rootFiles[i], ""
		}

		if rf.VirtualPath != "" && strings.HasPrefix(path, rf.VirtualPath+"/") {
			relPath := strings.TrimPrefix(path, rf.VirtualPath+"/")

			// Check the children first
			if rf.Child != nil && len(*rf.Child) > 0 {
				if child, childPath := _findShareByPath(*rf.Child, path); child != nil {
					return child, childPath
				}
			}

			return &rootFiles[i], relPath
		}

		// Recursively check the children
		if rf.Child != nil && len(*rf.Child) > 0 {
			if child, childPath := _findShareByPath(*rf.Child, path); child != nil {
				return child, childPath
			}
		}
	}

	// Check root-directory shares
	for i, rf := range rootFiles {
		if rf.VirtualPath == "" && rf.ShareID != "" {
			parts := strings.SplitN(path, "/", 2)
			if len(parts) > 0 && parts[0] == rf.ShareID {
				if len(parts) > 1 {
					return &rootFiles[i], parts[1]
				}
				return &rootFiles[i], ""
			}
		}
	}

	return nil, ""
}
// _findShareAndPath resolves the ShareID and relative path for a given directory
func (d *DoubaoShare) _findShareAndPath(dir model.Obj) (string, string, error) {
	dirPath := dir.GetPath()

	// The root directory: return empty values, meaning all shares should be listed
	if dirPath == "/" || dirPath == "" {
		return "", "", nil
	}

	// If this is a FileObject, use the ShareID it carries
	if fo, ok := dir.(*FileObject); ok && fo.ShareID != "" {
		// Use the ShareID stored on the object directly
		// Compute the relative path (strip the leading slash)
		relativePath := strings.TrimPrefix(dirPath, "/")

		// Recursively look up the matching RootFile
		found := _findRootFileByShareID(d.RootFiles, fo.ShareID)
		if found != nil {
			if found.VirtualPath != "" {
				// If this share is configured with a path prefix, account for it
				if strings.HasPrefix(relativePath, found.VirtualPath) {
					return fo.ShareID, strings.TrimPrefix(relativePath, found.VirtualPath+"/"), nil
				}
			}
			return fo.ShareID, relativePath, nil
		}

		// No matching RootFile config was found; still use the object's ShareID
		return fo.ShareID, relativePath, nil
	}

	// Strip the leading slash
	cleanPath := strings.TrimPrefix(dirPath, "/")

	// First check for a directly matching root-directory share
	for _, rootFile := range d.RootFiles {
		if rootFile.VirtualPath == "" && rootFile.ShareID != "" {
			// Check whether the first segment of the path matches
			parts := strings.SplitN(cleanPath, "/", 2)
			if len(parts) > 0 && parts[0] == rootFile.ShareID {
				if len(parts) > 1 {
					return rootFile.ShareID, parts[1], nil
				}
				return rootFile.ShareID, "", nil
			}
		}
	}

	// Look for a share or virtual directory matching this path
	share, relPath := _findShareByPath(d.RootFiles, cleanPath)
	if share != nil {
		return share.ShareID, relPath, nil
	}

	log.Warnf("[doubao_share] No matching share path found: %s", dirPath)
	return "", "", fmt.Errorf("no matching share path found: %s", dirPath)
}
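A hedged trace of the resolution order above, for a share assumed to be mounted at virtual path "media/movies":

	// dir.GetPath() == "/media/movies/2024"        (hypothetical input)
	// cleanPath     == "media/movies/2024"
	// No root-directory share matches "media", so _findShareByPath walks the
	// tree, matches VirtualPath "media/movies", and returns relPath "2024";
	// the caller then lists that share's nodes under "2024".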
// convertToFileObject converts a File into a FileObject
func (d *DoubaoShare) convertToFileObject(file File, shareId string, relativePath string) *FileObject {
	// Build the file object
	obj := &FileObject{
		Object: model.Object{
			ID:       file.ID,
			Name:     file.Name,
			Size:     file.Size,
			Modified: time.Unix(file.UpdateTime, 0),
			Ctime:    time.Unix(file.CreateTime, 0),
			IsFolder: file.NodeType == DirectoryType,
			Path:     path.Join(relativePath, file.Name),
		},
		ShareID:  shareId,
		Key:      file.Key,
		NodeID:   file.ID,
		NodeType: file.NodeType,
	}

	return obj
}
// getFilesInPath fetches the files under the given share and path
func (d *DoubaoShare) getFilesInPath(ctx context.Context, shareId, nodeId, relativePath string) ([]model.Obj, error) {
	var (
		files []File
		err   error
	)

	// Without a nodeId, call the overview endpoint for the share-link info
	if nodeId == "" {
		files, err = d.getShareOverview(shareId, "")
		if err != nil {
			return nil, fmt.Errorf("failed to get share link information: %w", err)
		}

		result := make([]model.Obj, 0, len(files))
		for _, file := range files {
			result = append(result, d.convertToFileObject(file, shareId, "/"))
		}

		return result, nil

	} else {
		files, err = d.getFiles(shareId, nodeId, "")
		if err != nil {
			return nil, fmt.Errorf("failed to get share file: %w", err)
		}

		result := make([]model.Obj, 0, len(files))
		for _, file := range files {
			result = append(result, d.convertToFileObject(file, shareId, path.Join("/", relativePath)))
		}

		return result, nil
	}
}
// listRootDirectory renders the contents of the root directory
func (d *DoubaoShare) listRootDirectory(ctx context.Context) ([]model.Obj, error) {
	objects := make([]model.Obj, 0)

	// Split into two groups: shares shown directly vs. virtual directories
	var directShareIDs []string
	addedDirs := make(map[string]bool)

	// Process every root node
	for _, rootFile := range d.RootFiles {
		if rootFile.VirtualPath == "" && rootFile.ShareID != "" {
			// Share without a path: record the ShareID to fetch its contents later
			directShareIDs = append(directShareIDs, rootFile.ShareID)
		} else {
			// Share with a path: show its first-level directory
			parts := strings.SplitN(rootFile.VirtualPath, "/", 2)
			firstLevel := parts[0]

			// Avoid adding a directory with the same name twice
			if _, exists := addedDirs[firstLevel]; exists {
				continue
			}

			// Create the virtual directory object
			obj := &FileObject{
				Object: model.Object{
					ID:       "",
					Name:     firstLevel,
					Modified: time.Now(),
					Ctime:    time.Now(),
					IsFolder: true,
					Path:     path.Join("/", firstLevel),
				},
				ShareID:  rootFile.ShareID,
				Key:      "",
				NodeID:   "",
				NodeType: DirectoryType,
			}
			objects = append(objects, obj)
			addedDirs[firstLevel] = true
		}
	}

	// Fetch the contents of the directly shown shares
	for _, shareID := range directShareIDs {
		shareFiles, err := d.getFilesInPath(ctx, shareID, "", "")
		if err != nil {
			log.Warnf("[doubao_share] Failed to get list of files in share %s: %s", shareID, err)
			continue
		}
		objects = append(objects, shareFiles...)
	}

	return objects, nil
}
// listVirtualDirectoryContent lists the contents of a virtual directory
func (d *DoubaoShare) listVirtualDirectoryContent(dir model.Obj) ([]model.Obj, error) {
	dirPath := strings.TrimPrefix(dir.GetPath(), "/")
	objects := make([]model.Obj, 0)

	// Recursively find the node for this path
	node := _findNodeByPath(d.RootFiles, dirPath)

	if node != nil && node.Child != nil {
		// Show every child of this node
		for _, child := range *node.Child {
			// Compute the display name (the last segment of the path)
			displayName := child.VirtualPath
			if child.VirtualPath != "" {
				parts := strings.Split(child.VirtualPath, "/")
				displayName = parts[len(parts)-1]
			} else if child.ShareID != "" {
				displayName = child.ShareID
			}

			obj := &FileObject{
				Object: model.Object{
					ID:       "",
					Name:     displayName,
					Modified: time.Now(),
					Ctime:    time.Now(),
					IsFolder: true,
					Path:     path.Join("/", child.VirtualPath),
				},
				ShareID:  child.ShareID,
				Key:      "",
				NodeID:   "",
				NodeType: DirectoryType,
			}
			objects = append(objects, obj)
		}
	}

	return objects, nil
}
// generateContentDisposition builds an RFC 5987 compliant Content-Disposition header
func generateContentDisposition(filename string) string {
	// Encode per RFC 2047 for the filename parameter
	encodedName := urlEncode(filename)

	// Encode per RFC 5987 for the filename* parameter
	encodedNameRFC5987 := encodeRFC5987(filename)

	return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s",
		encodedName, encodedNameRFC5987)
}

// encodeRFC5987 encodes a string per RFC 5987, for non-ASCII characters in HTTP header parameters
func encodeRFC5987(s string) string {
	var buf strings.Builder
	for _, r := range []byte(s) {
		// Per RFC 5987, only letters, digits and a few special characters may stay unencoded
		if (r >= 'a' && r <= 'z') ||
			(r >= 'A' && r <= 'Z') ||
			(r >= '0' && r <= '9') ||
			r == '-' || r == '.' || r == '_' || r == '~' {
			buf.WriteByte(r)
		} else {
			// Everything else is percent-encoded
			fmt.Fprintf(&buf, "%%%02X", r)
		}
	}
	return buf.String()
}

func urlEncode(s string) string {
	s = url.QueryEscape(s)
	s = strings.ReplaceAll(s, "+", "%20")
	return s
}
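For a non-ASCII name the two encodings above produce the same percent-escaped UTF-8 bytes, so the generated header looks like this (illustrative call):

	generateContentDisposition("报告.pdf")
	// => attachment; filename="%E6%8A%A5%E5%91%8A.pdf"; filename*=utf-8''%E6%8A%A5%E5%91%8A.pdf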
@@ -191,7 +191,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	}

 	url := d.contentBase + "/2/files/upload_session/append_v2"
-	reader := io.LimitReader(stream, PartSize)
+	reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
 	req, err := http.NewRequest(http.MethodPost, url, reader)
 	if err != nil {
 		log.Errorf("failed to update file when append to upload session, err: %+v", err)

@@ -219,13 +219,8 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 			return err
 		}
 		_ = res.Body.Close()
-		if count > 0 {
-			up(float64(i+1) * 100 / float64(count))
-		}
-
+		up(float64(i+1) * 100 / float64(count))

 		offset += byteSize
 	}
 	// 3.finish
 	toPath := dstDir.GetPath() + "/" + stream.GetName()
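The recurring change in this and the following hunks wraps upload bodies in driver.NewLimitedUploadStream(ctx, ...). Its implementation is not part of this diff; a minimal sketch of the core idea — an assumed shape, not alist's actual code — is a reader that aborts once the request context is canceled:

	package sketch

	import (
		"context"
		"io"
	)

	// ctxReader stops an in-flight upload when its context is done; a real
	// limited upload stream would additionally apply server-wide rate limits.
	type ctxReader struct {
		ctx context.Context
		r   io.Reader
	}

	func (c *ctxReader) Read(p []byte) (int, error) {
		if err := c.ctx.Err(); err != nil {
			return 0, err
		}
		return c.r.Read(p)
	}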
@@ -3,6 +3,7 @@ package febbox
 import (
 	"encoding/json"
 	"errors"
+	"fmt"
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/go-resty/resty/v2"

@@ -135,6 +136,9 @@ func (d *FebBox) getDownloadLink(id string, ip string) (string, error) {
 	if err = json.Unmarshal(res, &fileDownloadResp); err != nil {
 		return "", err
 	}
+	if len(fileDownloadResp.Data) == 0 {
+		return "", fmt.Errorf("can not get download link, code:%d, msg:%s", fileDownloadResp.Code, fileDownloadResp.Msg)
+	}

 	return fileDownloadResp.Data[0].DownloadURL, nil
 }
@@ -114,13 +114,15 @@ func (d *FTP) Remove(ctx context.Context, obj model.Obj) error {
 	}
 }

-func (d *FTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	if err := d.login(); err != nil {
 		return err
 	}
-	// TODO: support cancel
-	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
-	return d.conn.Stor(encode(path, d.Encoding), stream)
+	path := stdpath.Join(dstDir.GetPath(), s.GetName())
+	return d.conn.Stor(encode(path, d.Encoding), driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}))
 }

 var _ driver.Driver = (*FTP)(nil)
@@ -3,7 +3,6 @@ package github
 import (
 	"context"
 	"encoding/base64"
-	"errors"
 	"fmt"
 	"io"
 	"net/http"

@@ -12,12 +11,14 @@ import (
 	"sync"
 	"text/template"

+	"github.com/ProtonMail/go-crypto/openpgp"
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
+	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 )

@@ -33,6 +34,7 @@ type Github struct {
 	moveMsgTmpl *template.Template
 	isOnBranch  bool
 	commitMutex sync.Mutex
+	pgpEntity   *openpgp.Entity
 }

 func (d *Github) Config() driver.Config {

@@ -84,10 +86,13 @@ func (d *Github) Init(ctx context.Context) error {
 	}
 	d.client = base.NewRestyClient().
 		SetHeader("Accept", "application/vnd.github.object+json").
-		SetHeader("Authorization", "Bearer "+d.Token).
 		SetHeader("X-GitHub-Api-Version", "2022-11-28").
 		SetLogger(log.StandardLogger()).
 		SetDebug(false)
+	token := strings.TrimSpace(d.Token)
+	if token != "" {
+		d.client = d.client.SetHeader("Authorization", "Bearer "+token)
+	}
 	if d.Ref == "" {
 		repo, err := d.getRepo()
 		if err != nil {

@@ -99,6 +104,26 @@ func (d *Github) Init(ctx context.Context) error {
 		_, err = d.getBranchHead()
 		d.isOnBranch = err == nil
 	}
+	if d.GPGPrivateKey != "" {
+		if d.CommitterName == "" || d.AuthorName == "" {
+			user, e := d.getAuthenticatedUser()
+			if e != nil {
+				return e
+			}
+			if d.CommitterName == "" {
+				d.CommitterName = user.Name
+				d.CommitterEmail = user.Email
+			}
+			if d.AuthorName == "" {
+				d.AuthorName = user.Name
+				d.AuthorEmail = user.Email
+			}
+		}
+		d.pgpEntity, err = loadPrivateKey(d.GPGPrivateKey, d.GPGKeyPassphrase)
+		if err != nil {
+			return err
+		}
+	}
 	return nil
 }

@@ -148,8 +173,13 @@ func (d *Github) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 	if obj.Type == "submodule" {
 		return nil, errors.New("cannot download a submodule")
 	}
+	url := obj.DownloadURL
+	ghProxy := strings.TrimSpace(d.Addition.GitHubProxy)
+	if ghProxy != "" {
+		url = strings.Replace(url, "https://raw.githubusercontent.com", ghProxy, 1)
+	}
 	return &model.Link{
-		URL: obj.DownloadURL,
+		URL: url,
 	}, nil
 }

@@ -166,10 +196,39 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 	if parent.Entries == nil {
 		return errs.NotFolder
 	}
-	// if parent folder contains .gitkeep only, mark it and delete .gitkeep later
-	gitKeepSha := ""
+	subDirSha, err := d.newTree("", []interface{}{
+		map[string]string{
+			"path":    ".gitkeep",
+			"mode":    "100644",
+			"type":    "blob",
+			"content": "",
+		},
+	})
+	if err != nil {
+		return err
+	}
+	newTree := make([]interface{}, 0, 2)
+	newTree = append(newTree, TreeObjReq{
+		Path: dirName,
+		Mode: "040000",
+		Type: "tree",
+		Sha:  subDirSha,
+	})
 	if len(parent.Entries) == 1 && parent.Entries[0].Name == ".gitkeep" {
-		gitKeepSha = parent.Entries[0].Sha
+		newTree = append(newTree, TreeObjReq{
+			Path: ".gitkeep",
+			Mode: "100644",
+			Type: "blob",
+			Sha:  nil,
+		})
+	}
+	newSha, err := d.newTree(parent.Sha, newTree)
+	if err != nil {
+		return err
+	}
+	rootSha, err := d.renewParentTrees(parentDir.GetPath(), parent.Sha, newSha, "/")
+	if err != nil {
+		return err
 	}

 	commitMessage, err := getMessage(d.mkdirMsgTmpl, &MessageTemplateVars{

@@ -182,13 +241,7 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 	if err != nil {
 		return err
 	}
-	if err = d.createGitKeep(stdpath.Join(parentDir.GetPath(), dirName), commitMessage); err != nil {
-		return err
-	}
-	if gitKeepSha != "" {
-		err = d.delete(stdpath.Join(parentDir.GetPath(), ".gitkeep"), gitKeepSha, commitMessage)
-	}
-	return err
+	return d.commit(commitMessage, rootSha)
 }

 func (d *Github) Move(ctx context.Context, srcObj, dstDir model.Obj) error {

@@ -631,33 +684,15 @@ func (d *Github) get(path string) (*Object, error) {
 	return &resp, err
 }

-func (d *Github) createGitKeep(path, message string) error {
-	body := map[string]interface{}{
-		"message": message,
-		"content": "",
-		"branch":  d.Ref,
-	}
-	d.addCommitterAndAuthor(&body)
-
-	res, err := d.client.R().SetBody(body).Put(d.getContentApiUrl(stdpath.Join(path, ".gitkeep")))
-	if err != nil {
-		return err
-	}
-	if res.StatusCode() != 200 && res.StatusCode() != 201 {
-		return toErr(res)
-	}
-	return nil
-}
-
-func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) (string, error) {
+func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) (string, error) {
 	beforeContent := "{\"encoding\":\"base64\",\"content\":\""
 	afterContent := "\"}"
-	length := int64(len(beforeContent)) + calculateBase64Length(stream.GetSize()) + int64(len(afterContent))
+	length := int64(len(beforeContent)) + calculateBase64Length(s.GetSize()) + int64(len(afterContent))
 	beforeContentReader := strings.NewReader(beforeContent)
 	contentReader, contentWriter := io.Pipe()
 	go func() {
 		encoder := base64.NewEncoder(base64.StdEncoding, contentWriter)
-		if _, err := utils.CopyWithBuffer(encoder, stream); err != nil {
+		if _, err := utils.CopyWithBuffer(encoder, s); err != nil {
 			_ = contentWriter.CloseWithError(err)
 			return
 		}

@@ -667,23 +702,29 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv
 	afterContentReader := strings.NewReader(afterContent)
 	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
 		fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo),
-		&ReaderWithProgress{
-			Reader:   io.MultiReader(beforeContentReader, contentReader, afterContentReader),
-			Length:   length,
-			Progress: up,
-		})
+		driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader: &driver.SimpleReaderWithSize{
+				Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader),
+				Size:   length,
+			},
+			UpdateProgress: up,
+		}))
 	if err != nil {
 		return "", err
 	}
 	req.Header.Set("Accept", "application/vnd.github+json")
-	req.Header.Set("Authorization", "Bearer "+d.Token)
 	req.Header.Set("X-GitHub-Api-Version", "2022-11-28")
+	token := strings.TrimSpace(d.Token)
+	if token != "" {
+		req.Header.Set("Authorization", "Bearer "+token)
+	}
 	req.ContentLength = length

 	res, err := base.HttpClient.Do(req)
 	if err != nil {
 		return "", err
 	}
+	defer res.Body.Close()
 	resBody, err := io.ReadAll(res.Body)
 	if err != nil {
 		return "", err

@@ -703,23 +744,6 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv
 	return resp.Sha, nil
 }

-func (d *Github) delete(path, sha, message string) error {
-	body := map[string]interface{}{
-		"message": message,
-		"sha":     sha,
-		"branch":  d.Ref,
-	}
-	d.addCommitterAndAuthor(&body)
-	res, err := d.client.R().SetBody(body).Delete(d.getContentApiUrl(path))
-	if err != nil {
-		return err
-	}
-	if res.StatusCode() != 200 {
-		return toErr(res)
-	}
-	return nil
-}
-
 func (d *Github) renewParentTrees(path, prevSha, curSha, until string) (string, error) {
 	for path != until {
 		path = stdpath.Dir(path)

@@ -781,11 +805,11 @@ func (d *Github) getTreeDirectly(path string) (*TreeResp, string, error) {
 }

 func (d *Github) newTree(baseSha string, tree []interface{}) (string, error) {
-	res, err := d.client.R().
-		SetBody(&TreeReq{
-			BaseTree: baseSha,
-			Trees:    tree,
-		}).
+	body := &TreeReq{Trees: tree}
+	if baseSha != "" {
+		body.BaseTree = baseSha
+	}
+	res, err := d.client.R().SetBody(body).
 		Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees", d.Owner, d.Repo))
 	if err != nil {
 		return "", err

@@ -808,6 +832,13 @@ func (d *Github) commit(message, treeSha string) error {
 		"parents": []string{oldCommit},
 	}
 	d.addCommitterAndAuthor(&body)
+	if d.pgpEntity != nil {
+		signature, e := signCommit(&body, d.pgpEntity)
+		if e != nil {
+			return e
+		}
+		body["signature"] = signature
+	}
 	res, err := d.client.R().SetBody(body).Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/commits", d.Owner, d.Repo))
 	if err != nil {
 		return err

@@ -911,6 +942,21 @@ func (d *Github) getRepo() (*RepoResp, error) {
 	return &resp, nil
 }

+func (d *Github) getAuthenticatedUser() (*UserResp, error) {
+	res, err := d.client.R().Get("https://api.github.com/user")
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode() != 200 {
+		return nil, toErr(res)
+	}
+	resp := &UserResp{}
+	if err = utils.Json.Unmarshal(res.Body(), resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
 func (d *Github) addCommitterAndAuthor(m *map[string]interface{}) {
 	if d.CommitterName != "" {
 		committer := map[string]string{
@@ -7,20 +7,23 @@ import (

 type Addition struct {
 	driver.RootPath
 	Token            string `json:"token" type:"string" required:"true"`
 	Owner            string `json:"owner" type:"string" required:"true"`
 	Repo             string `json:"repo" type:"string" required:"true"`
 	Ref              string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."`
+	GitHubProxy      string `json:"gh_proxy" type:"string" help:"GitHub proxy, e.g. https://ghproxy.net/raw.githubusercontent.com or https://gh-proxy.com/raw.githubusercontent.com"`
+	GPGPrivateKey    string `json:"gpg_private_key" type:"text"`
+	GPGKeyPassphrase string `json:"gpg_key_passphrase" type:"string"`
 	CommitterName    string `json:"committer_name" type:"string"`
 	CommitterEmail   string `json:"committer_email" type:"string"`
 	AuthorName       string `json:"author_name" type:"string"`
 	AuthorEmail      string `json:"author_email" type:"string"`
 	MkdirCommitMsg   string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"`
 	DeleteCommitMsg  string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"`
 	PutCommitMsg     string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"`
 	RenameCommitMsg  string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"`
 	CopyCommitMsg    string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"`
 	MoveCommitMsg    string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"`
 }

 var config = driver.Config{
@@ -79,7 +79,7 @@ type TreeResp struct {
 }

 type TreeReq struct {
-	BaseTree string        `json:"base_tree"`
+	BaseTree interface{}   `json:"base_tree,omitempty"`
 	Trees    []interface{} `json:"tree"`
 }

@@ -100,3 +100,8 @@ type UpdateRefReq struct {
 type RepoResp struct {
 	DefaultBranch string `json:"default_branch"`
 }
+
+type UserResp struct {
+	Name  string `json:"name"`
+	Email string `json:"email"`
+}
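Why BaseTree changed to interface{} with omitempty: a plain string field always serializes, so the new root-tree request added in MakeDir (the newTree("", ...) call) would otherwise carry "base_tree":"". A nil interface is dropped entirely. A standalone demonstration of that encoding/json behavior (not driver code):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type oldTreeReq struct {
		BaseTree string `json:"base_tree"`
	}

	type newTreeReq struct {
		BaseTree interface{} `json:"base_tree,omitempty"`
	}

	func main() {
		a, _ := json.Marshal(oldTreeReq{})
		b, _ := json.Marshal(newTreeReq{})
		fmt.Println(string(a)) // {"base_tree":""} - always present
		fmt.Println(string(b)) // {}               - omitted when unset
	}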
@@ -1,32 +1,21 @@
 package github

 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
+	"strings"
+	"text/template"
+	"time"
+
+	"github.com/ProtonMail/go-crypto/openpgp"
+	"github.com/ProtonMail/go-crypto/openpgp/armor"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
-	"io"
-	"math"
-	"strings"
-	"text/template"
 )

-type ReaderWithProgress struct {
-	Reader   io.Reader
-	Length   int64
-	Progress func(percentage float64)
-	offset   int64
-}
-
-func (r *ReaderWithProgress) Read(p []byte) (int, error) {
-	n, err := r.Reader.Read(p)
-	r.offset += int64(n)
-	r.Progress(math.Min(100.0, float64(r.offset)/float64(r.Length)*100.0))
-	return n, err
-}
-
 type MessageTemplateVars struct {
 	UserName string
 	ObjName  string

@@ -113,3 +102,65 @@ func getUsername(ctx context.Context) string {
 	}
 	return user.Username
 }
+
+func loadPrivateKey(key, passphrase string) (*openpgp.Entity, error) {
+	entityList, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key))
+	if err != nil {
+		return nil, err
+	}
+	if len(entityList) < 1 {
+		return nil, fmt.Errorf("no keys found in key ring")
+	}
+	entity := entityList[0]
+
+	pass := []byte(passphrase)
+	if entity.PrivateKey != nil && entity.PrivateKey.Encrypted {
+		if err = entity.PrivateKey.Decrypt(pass); err != nil {
+			return nil, fmt.Errorf("password incorrect: %+v", err)
+		}
+	}
+	for _, subKey := range entity.Subkeys {
+		if subKey.PrivateKey != nil && subKey.PrivateKey.Encrypted {
+			if err = subKey.PrivateKey.Decrypt(pass); err != nil {
+				return nil, fmt.Errorf("password incorrect: %+v", err)
+			}
+		}
+	}
+	return entity, nil
+}
+
+func signCommit(m *map[string]interface{}, entity *openpgp.Entity) (string, error) {
+	var commit strings.Builder
+	commit.WriteString(fmt.Sprintf("tree %s\n", (*m)["tree"].(string)))
+	parents := (*m)["parents"].([]string)
+	for _, p := range parents {
+		commit.WriteString(fmt.Sprintf("parent %s\n", p))
+	}
+	now := time.Now()
+	_, offset := now.Zone()
+	hour := offset / 3600
+	author := (*m)["author"].(map[string]string)
+	commit.WriteString(fmt.Sprintf("author %s <%s> %d %+03d00\n", author["name"], author["email"], now.Unix(), hour))
+	author["date"] = now.Format(time.RFC3339)
+	committer := (*m)["committer"].(map[string]string)
+	commit.WriteString(fmt.Sprintf("committer %s <%s> %d %+03d00\n", committer["name"], committer["email"], now.Unix(), hour))
+	committer["date"] = now.Format(time.RFC3339)
+	commit.WriteString(fmt.Sprintf("\n%s", (*m)["message"].(string)))
+	data := commit.String()
+
+	var sigBuffer bytes.Buffer
+	err := openpgp.DetachSign(&sigBuffer, entity, strings.NewReader(data), nil)
+	if err != nil {
+		return "", fmt.Errorf("signing failed: %v", err)
+	}
+	var armoredSig bytes.Buffer
+	armorWriter, err := armor.Encode(&armoredSig, "PGP SIGNATURE", nil)
+	if err != nil {
+		return "", err
+	}
+	if _, err = utils.CopyWithBuffer(armorWriter, &sigBuffer); err != nil {
+		return "", err
+	}
+	_ = armorWriter.Close()
+	return armoredSig.String(), nil
+}
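How the two new helpers fit together, as a hedged in-package sketch; the key, passphrase and SHAs below are placeholders, and in the driver addCommitterAndAuthor runs first so the author/committer maps exist before signCommit type-asserts them:

	// Hypothetical wiring, not driver code; armoredKey and passphrase are placeholders.
	entity, err := loadPrivateKey(armoredKey, passphrase)
	if err != nil {
		// bad key ring or wrong passphrase
	}
	body := map[string]interface{}{
		"message":   "upload file.txt",
		"tree":      "<tree sha>",
		"parents":   []string{"<parent sha>"},
		"author":    map[string]string{"name": "alice", "email": "alice@example.com"},
		"committer": map[string]string{"name": "alice", "email": "alice@example.com"},
	}
	sig, _ := signCommit(&body, entity)
	body["signature"] = sig // armored detached signature over the raw commit object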
@@ -4,9 +4,8 @@ import (
 	"context"
 	"fmt"
 	"net/http"
-	"time"
-
 	"strings"
+	"sync"

 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"

@@ -18,7 +17,7 @@ type GithubReleases struct {
 	model.Storage
 	Addition

-	releases []Release
+	points []MountPoint
 }

 func (d *GithubReleases) Config() driver.Config {
@@ -30,86 +29,138 @@ func (d *GithubReleases) GetAddition() driver.Additional {
 }

 func (d *GithubReleases) Init(ctx context.Context) error {
-	SetHeader(d.Addition.Token)
-	repos, err := ParseRepos(d.Addition.RepoStructure, d.Addition.ShowAllVersion)
-	if err != nil {
-		return err
-	}
-	d.releases = repos
+	d.ParseRepos(d.Addition.RepoStructure)
 	return nil
 }

 func (d *GithubReleases) Drop(ctx context.Context) error {
-	ClearCache()
 	return nil
 }

+// processPoint handles the file listing for a single mount point
+func (d *GithubReleases) processPoint(point *MountPoint, path string, args model.ListArgs) []File {
+	var pointFiles []File
+
+	if !d.Addition.ShowAllVersion { // latest
+		point.RequestLatestRelease(d.GetRequest, args.Refresh)
+		pointFiles = d.processLatestVersion(point, path)
+	} else { // all versions
+		point.RequestReleases(d.GetRequest, args.Refresh)
+		pointFiles = d.processAllVersions(point, path)
+	}
+
+	return pointFiles
+}
+
+// processLatestVersion handles the latest-version case
+func (d *GithubReleases) processLatestVersion(point *MountPoint, path string) []File {
+	var pointFiles []File
+
+	if point.Point == path { // same as the repo path
+		pointFiles = append(pointFiles, point.GetLatestRelease()...)
+		if d.Addition.ShowReadme {
+			files := point.GetOtherFile(d.GetRequest, false)
+			pointFiles = append(pointFiles, files...)
+		}
+	} else if strings.HasPrefix(point.Point, path) { // a parent of the repo directory
+		nextDir := GetNextDir(point.Point, path)
+		if nextDir != "" {
+			dirFile := File{
+				Path:     path + "/" + nextDir,
+				FileName: nextDir,
+				Size:     point.GetLatestSize(),
+				UpdateAt: point.Release.PublishedAt,
+				CreateAt: point.Release.CreatedAt,
+				Type:     "dir",
+				Url:      "",
+			}
+			pointFiles = append(pointFiles, dirFile)
+		}
+	}
+
+	return pointFiles
+}
+
+// processAllVersions handles the all-versions case
+func (d *GithubReleases) processAllVersions(point *MountPoint, path string) []File {
+	var pointFiles []File
+
+	if point.Point == path { // same as the repo path
+		pointFiles = append(pointFiles, point.GetAllVersion()...)
+		if d.Addition.ShowReadme {
+			files := point.GetOtherFile(d.GetRequest, false)
+			pointFiles = append(pointFiles, files...)
+		}
+	} else if strings.HasPrefix(point.Point, path) { // a parent of the repo directory
+		nextDir := GetNextDir(point.Point, path)
+		if nextDir != "" {
+			dirFile := File{
+				FileName: nextDir,
+				Path:     path + "/" + nextDir,
+				Size:     point.GetAllVersionSize(),
+				UpdateAt: (*point.Releases)[0].PublishedAt,
+				CreateAt: (*point.Releases)[0].CreatedAt,
+				Type:     "dir",
+				Url:      "",
+			}
+			pointFiles = append(pointFiles, dirFile)
+		}
+	} else if strings.HasPrefix(path, point.Point) { // a child of the repo directory
+		tagName := GetNextDir(path, point.Point)
+		if tagName != "" {
+			pointFiles = append(pointFiles, point.GetReleaseByTagName(tagName)...)
+		}
+	}
+
+	return pointFiles
+}
+
+// mergeFiles merges file lists, de-duplicating directories
+func (d *GithubReleases) mergeFiles(files *[]File, newFiles []File) {
+	for _, newFile := range newFiles {
+		if newFile.Type == "dir" {
+			hasSameDir := false
+			for index := range *files {
+				if (*files)[index].GetName() == newFile.GetName() && (*files)[index].Type == "dir" {
+					hasSameDir = true
+					(*files)[index].Size += newFile.Size
+					break
+				}
+			}
+			if !hasSameDir {
+				*files = append(*files, newFile)
+			}
+		} else {
+			*files = append(*files, newFile)
+		}
+	}
+}
+
 func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
 	files := make([]File, 0)
 	path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))

-	for _, repo := range d.releases {
-		if repo.Path == path { // same as the repo path
-			resp, err := GetRepoReleaseInfo(repo.RepoName, repo.ID, path, d.Storage.CacheExpiration)
-			if err != nil {
-				return nil, err
-			}
-			files = append(files, resp.Files...)
-
-			if d.Addition.ShowReadme {
-				resp, err := GetGithubOtherFile(repo.RepoName, path, d.Storage.CacheExpiration)
-				if err != nil {
-					return nil, err
-				}
-				files = append(files, *resp...)
-			}
-
-		} else if strings.HasPrefix(repo.Path, path) { // the repo path is below this directory
-			nextDir := GetNextDir(repo.Path, path)
-			if nextDir == "" {
-				continue
-			}
-			if d.Addition.ShowAllVersion {
-				files = append(files, File{
-					FileName: nextDir,
-					Size:     0,
-					CreateAt: time.Time{},
-					UpdateAt: time.Time{},
-					Url:      "",
-					Type:     "dir",
-					Path:     fmt.Sprintf("%s/%s", path, nextDir),
-				})
-				continue
-			}
-
-			repo, _ := GetRepoReleaseInfo(repo.RepoName, repo.Version, path, d.Storage.CacheExpiration)
-
-			hasSameDir := false
-			for index, file := range files {
-				if file.FileName == nextDir {
-					hasSameDir = true
-					files[index].Size += repo.Size
-					files[index].UpdateAt = func(a time.Time, b time.Time) time.Time {
-						if a.After(b) {
-							return a
-						}
-						return b
-					}(files[index].UpdateAt, repo.UpdateAt)
-					break
-				}
-			}
-
-			if !hasSameDir {
-				files = append(files, File{
-					FileName: nextDir,
-					Size:     repo.Size,
-					CreateAt: repo.CreateAt,
-					UpdateAt: repo.UpdateAt,
-					Url:      repo.Url,
-					Type:     "dir",
-					Path:     fmt.Sprintf("%s/%s", path, nextDir),
-				})
-			}
+	if d.Addition.ConcurrentRequests && d.Addition.Token != "" { // concurrent
+		var mu sync.Mutex
+		var wg sync.WaitGroup
+		for i := range d.points {
+			wg.Add(1)
+			go func(point *MountPoint) {
+				defer wg.Done()
+				pointFiles := d.processPoint(point, path, args)
+				mu.Lock()
+				d.mergeFiles(&files, pointFiles)
+				mu.Unlock()
+			}(&d.points[i])
+		}
+		wg.Wait()
+	} else { // serial
+		for i := range d.points {
+			point := &d.points[i]
+			pointFiles := d.processPoint(point, path, args)
+			d.mergeFiles(&files, pointFiles)
 		}
 	}
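The rewritten List fans one goroutine out per mount point and serializes the merge behind a mutex; the same shape, distilled into a generic standalone sketch (not driver code):

	package sketch

	import "sync"

	// fanOut mirrors the WaitGroup+Mutex pattern used by List above: workers
	// run concurrently, while appends to the shared slice stay serialized.
	func fanOut(items []int, work func(int) []string) []string {
		var (
			mu  sync.Mutex
			wg  sync.WaitGroup
			out []string
		)
		for i := range items {
			wg.Add(1)
			go func(v int) {
				defer wg.Done()
				res := work(v)
				mu.Lock()
				out = append(out, res...)
				mu.Unlock()
			}(items[i])
		}
		wg.Wait()
		return out
	}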
@@ -119,35 +170,41 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
 }

 func (d *GithubReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	url := file.GetID()
+	gh_proxy := strings.TrimSpace(d.Addition.GitHubProxy)
+
+	if gh_proxy != "" {
+		url = strings.Replace(url, "https://github.com", gh_proxy, 1)
+	}
+
 	link := model.Link{
-		URL:    file.GetID(),
+		URL:    url,
 		Header: http.Header{},
 	}
 	return &link, nil
 }

 func (d *GithubReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	// TODO create folder, optional
 	return nil, errs.NotImplement
 }

 func (d *GithubReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	// TODO move obj, optional
 	return nil, errs.NotImplement
 }

 func (d *GithubReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+	// TODO rename obj, optional
 	return nil, errs.NotImplement
 }

 func (d *GithubReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	// TODO copy obj, optional
 	return nil, errs.NotImplement
 }

 func (d *GithubReleases) Remove(ctx context.Context, obj model.Obj) error {
+	// TODO remove obj, optional
 	return errs.NotImplement
 }
-
-func (d *GithubReleases) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	return nil, errs.NotImplement
-}
-
-var _ driver.Driver = (*GithubReleases)(nil)
@@ -7,10 +7,12 @@ import (

 type Addition struct {
 	driver.RootID
-	RepoStructure  string `json:"repo_structure" type:"text" required:"true" default:"/path/to/alist-gh:alistGo/alist\n/path/to2/alist-web-gh:AlistGo/alist-web" help:"structure:[path:]org/repo"`
+	RepoStructure  string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
 	ShowReadme     bool   `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
 	Token          string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
 	ShowAllVersion bool   `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
+	ConcurrentRequests bool   `json:"concurrent_requests" type:"bool" default:"false" help:"To concurrently request the GitHub API, you must enter a GitHub token"`
+	GitHubProxy        string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com"`
 }

 var config = driver.Config{
drivers/github_releases/models.go (new file, 86 lines)

@@ -0,0 +1,86 @@
package github_releases

type Release struct {
	Url             string    `json:"url"`
	AssetsUrl       string    `json:"assets_url"`
	UploadUrl       string    `json:"upload_url"`
	HtmlUrl         string    `json:"html_url"`
	Id              int       `json:"id"`
	Author          User      `json:"author"`
	NodeId          string    `json:"node_id"`
	TagName         string    `json:"tag_name"`
	TargetCommitish string    `json:"target_commitish"`
	Name            string    `json:"name"`
	Draft           bool      `json:"draft"`
	Prerelease      bool      `json:"prerelease"`
	CreatedAt       string    `json:"created_at"`
	PublishedAt     string    `json:"published_at"`
	Assets          []Asset   `json:"assets"`
	TarballUrl      string    `json:"tarball_url"`
	ZipballUrl      string    `json:"zipball_url"`
	Body            string    `json:"body"`
	Reactions       Reactions `json:"reactions"`
}

type User struct {
	Login             string `json:"login"`
	Id                int    `json:"id"`
	NodeId            string `json:"node_id"`
	AvatarUrl         string `json:"avatar_url"`
	GravatarId        string `json:"gravatar_id"`
	Url               string `json:"url"`
	HtmlUrl           string `json:"html_url"`
	FollowersUrl      string `json:"followers_url"`
	FollowingUrl      string `json:"following_url"`
	GistsUrl          string `json:"gists_url"`
	StarredUrl        string `json:"starred_url"`
	SubscriptionsUrl  string `json:"subscriptions_url"`
	OrganizationsUrl  string `json:"organizations_url"`
	ReposUrl          string `json:"repos_url"`
	EventsUrl         string `json:"events_url"`
	ReceivedEventsUrl string `json:"received_events_url"`
	Type              string `json:"type"`
	UserViewType      string `json:"user_view_type"`
	SiteAdmin         bool   `json:"site_admin"`
}

type Asset struct {
	Url                string `json:"url"`
	Id                 int    `json:"id"`
	NodeId             string `json:"node_id"`
	Name               string `json:"name"`
	Label              string `json:"label"`
	Uploader           User   `json:"uploader"`
	ContentType        string `json:"content_type"`
	State              string `json:"state"`
	Size               int64  `json:"size"`
	DownloadCount      int    `json:"download_count"`
	CreatedAt          string `json:"created_at"`
	UpdatedAt          string `json:"updated_at"`
	BrowserDownloadUrl string `json:"browser_download_url"`
}

type Reactions struct {
	Url        string `json:"url"`
	TotalCount int    `json:"total_count"`
	PlusOne    int    `json:"+1"`
	MinusOne   int    `json:"-1"`
	Laugh      int    `json:"laugh"`
	Hooray     int    `json:"hooray"`
	Confused   int    `json:"confused"`
	Heart      int    `json:"heart"`
	Rocket     int    `json:"rocket"`
	Eyes       int    `json:"eyes"`
}

type FileInfo struct {
	Name        string `json:"name"`
	Path        string `json:"path"`
	Sha         string `json:"sha"`
	Size        int64  `json:"size"`
	Url         string `json:"url"`
	HtmlUrl     string `json:"html_url"`
	GitUrl      string `json:"git_url"`
	DownloadUrl string `json:"download_url"`
	Type        string `json:"type"`
}
@@ -1,19 +1,181 @@
 package github_releases
 
 import (
+	"encoding/json"
+	"strings"
 	"time"
 
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
 )
 
+type MountPoint struct {
+	Point     string      // mount point
+	Repo      string      // repository, owner/repo
+	Release   *Release    // pointer to the latest release
+	Releases  *[]Release  // pointer to all releases
+	OtherFile *[]FileInfo // other files in the repository root
+}
+
+// Fetch the latest release
+func (m *MountPoint) RequestLatestRelease(get func(url string) (*resty.Response, error), refresh bool) {
+	if m.Repo == "" {
+		return
+	}
+
+	if m.Release == nil || refresh {
+		resp, _ := get("https://api.github.com/repos/" + m.Repo + "/releases/latest")
+		m.Release = new(Release)
+		json.Unmarshal(resp.Body(), m.Release)
+	}
+}
+
+// Fetch all releases
+func (m *MountPoint) RequestReleases(get func(url string) (*resty.Response, error), refresh bool) {
+	if m.Repo == "" {
+		return
+	}
+
+	if m.Releases == nil || refresh {
+		resp, _ := get("https://api.github.com/repos/" + m.Repo + "/releases")
+		m.Releases = new([]Release)
+		json.Unmarshal(resp.Body(), m.Releases)
+	}
+}
+
+// List the latest release
+func (m *MountPoint) GetLatestRelease() []File {
+	files := make([]File, 0)
+	for _, asset := range m.Release.Assets {
+		files = append(files, File{
+			Path:     m.Point + "/" + asset.Name,
+			FileName: asset.Name,
+			Size:     asset.Size,
+			Type:     "file",
+			UpdateAt: asset.UpdatedAt,
+			CreateAt: asset.CreatedAt,
+			Url:      asset.BrowserDownloadUrl,
+		})
+	}
+	return files
+}
+
+// Total size of the latest release
+func (m *MountPoint) GetLatestSize() int64 {
+	size := int64(0)
+	for _, asset := range m.Release.Assets {
+		size += asset.Size
+	}
+	return size
+}
+
+// List all versions
+func (m *MountPoint) GetAllVersion() []File {
+	files := make([]File, 0)
+	for _, release := range *m.Releases {
+		file := File{
+			Path:     m.Point + "/" + release.TagName,
+			FileName: release.TagName,
+			Size:     m.GetSizeByTagName(release.TagName),
+			Type:     "dir",
+			UpdateAt: release.PublishedAt,
+			CreateAt: release.CreatedAt,
+			Url:      release.HtmlUrl,
+		}
+		for _, asset := range release.Assets {
+			file.Size += asset.Size
+		}
+		files = append(files, file)
+	}
+	return files
+}
+
+// Get a release by tag name
+func (m *MountPoint) GetReleaseByTagName(tagName string) []File {
+	for _, item := range *m.Releases {
+		if item.TagName == tagName {
+			files := make([]File, 0)
+			for _, asset := range item.Assets {
+				files = append(files, File{
+					Path:     m.Point + "/" + tagName + "/" + asset.Name,
+					FileName: asset.Name,
+					Size:     asset.Size,
+					Type:     "file",
+					UpdateAt: asset.UpdatedAt,
+					CreateAt: asset.CreatedAt,
+					Url:      asset.BrowserDownloadUrl,
+				})
+			}
+			return files
+		}
+	}
+	return nil
+}
+
+// Get a release's size by tag name
+func (m *MountPoint) GetSizeByTagName(tagName string) int64 {
+	if m.Releases == nil {
+		return 0
+	}
+	for _, item := range *m.Releases {
+		if item.TagName == tagName {
+			size := int64(0)
+			for _, asset := range item.Assets {
+				size += asset.Size
+			}
+			return size
+		}
+	}
+	return 0
+}
+
+// Total size of all versions
+func (m *MountPoint) GetAllVersionSize() int64 {
+	if m.Releases == nil {
+		return 0
+	}
+	size := int64(0)
+	for _, release := range *m.Releases {
+		for _, asset := range release.Assets {
+			size += asset.Size
+		}
+	}
+	return size
+}
+
+func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
+	if m.OtherFile == nil || refresh {
+		resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")
+		m.OtherFile = new([]FileInfo)
+		json.Unmarshal(resp.Body(), m.OtherFile)
+	}
+
+	files := make([]File, 0)
+	defaultTime := "1970-01-01T00:00:00Z"
+	for _, file := range *m.OtherFile {
+		if strings.HasSuffix(file.Name, ".md") || strings.HasPrefix(file.Name, "LICENSE") {
+			files = append(files, File{
+				Path:     m.Point + "/" + file.Name,
+				FileName: file.Name,
+				Size:     file.Size,
+				Type:     "file",
+				UpdateAt: defaultTime,
+				CreateAt: defaultTime,
+				Url:      file.DownloadUrl,
+			})
+		}
+	}
+	return files
+}
+
 type File struct {
-	FileName string    `json:"name"`
-	Size     int64     `json:"size"`
-	CreateAt time.Time `json:"time"`
-	UpdateAt time.Time `json:"chtime"`
-	Url      string    `json:"url"`
-	Type     string    `json:"type"`
-	Path     string    `json:"path"`
+	Path     string // file path
+	FileName string // file name
+	Size     int64  // file size
+	Type     string // file type
+	UpdateAt string // update time, e.g. "2025-01-27T16:10:16Z"
+	CreateAt string // creation time
+	Url      string // download URL
 }
 
 func (f File) GetHash() utils.HashInfo {
@@ -33,11 +195,13 @@ func (f File) GetName() string {
 }
 
 func (f File) ModTime() time.Time {
-	return f.UpdateAt
+	t, _ := time.Parse(time.RFC3339, f.CreateAt)
+	return t
 }
 
 func (f File) CreateTime() time.Time {
-	return f.CreateAt
+	t, _ := time.Parse(time.RFC3339, f.CreateAt)
+	return t
 }
 
 func (f File) IsDir() bool {
@@ -47,22 +211,3 @@ func (f File) IsDir() bool {
 func (f File) GetID() string {
 	return f.Url
 }
-
-func (f File) Thumb() string {
-	return ""
-}
-
-type ReleasesData struct {
-	Files    []File    `json:"files"`
-	Size     int64     `json:"size"`
-	UpdateAt time.Time `json:"chtime"`
-	CreateAt time.Time `json:"time"`
-	Url      string    `json:"url"`
-}
-
-type Release struct {
-	Path     string // mount path
-	RepoName string // repository name
-	Version  string // version number (tag)
-	ID       string // release ID
-}
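The new MountPoint type fetches lazily: the Request* methods hit the API only when the cached pointer is nil (or refresh is set), and the Get* helpers read purely from that cache. A sketch of the intended call order, with d.GetRequest standing in for the authenticated getter defined in util.go below:

	mp := MountPoint{Point: "/alist", Repo: "AlistGo/alist"} // one "path:owner/repo" config line
	mp.RequestLatestRelease(d.GetRequest, false)             // first call populates mp.Release
	files := mp.GetLatestRelease()                           // one File per release asset, cache only
	total := mp.GetLatestSize()                              // sum of asset sizes, cache only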
@@ -2,28 +2,36 @@ package github_releases
 
 import (
 	"fmt"
-	"regexp"
+	"path/filepath"
 	"strings"
-	"sync"
-	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
-	jsoniter "github.com/json-iterator/go"
-	log "github.com/sirupsen/logrus"
 )
 
-var (
-	cache   = make(map[string]*resty.Response)
-	created = make(map[string]time.Time)
-	mu      sync.Mutex
-	req     *resty.Request
-)
+// Send a GET request
+func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) {
+	req := base.RestyClient.R()
+	req.SetHeader("Accept", "application/vnd.github+json")
+	req.SetHeader("X-GitHub-Api-Version", "2022-11-28")
+	if d.Addition.Token != "" {
+		req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", d.Addition.Token))
+	}
+	res, err := req.Get(url)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode() != 200 {
+		utils.Log.Warnf("failed to get request: %s %d %s", url, res.StatusCode(), res.String())
+	}
+	return res, nil
+}
 
-// Parse the repository list
-func ParseRepos(text string, allVersion bool) ([]Release, error) {
+// Parse the mount structure
+func (d *GithubReleases) ParseRepos(text string) ([]MountPoint, error) {
 	lines := strings.Split(text, "\n")
-	var repos []Release
+	points := make([]MountPoint, 0)
 	for _, line := range lines {
 		line = strings.TrimSpace(line)
 		if line == "" {
@@ -41,177 +49,37 @@ func ParseRepos(text string, allVersion bool) ([]Release, error) {
 			return nil, fmt.Errorf("invalid format: %s", line)
 		}
 
-		if allVersion {
-			releases, _ := GetAllVersion(repo, path)
-			repos = append(repos, *releases...)
-		} else {
-			repos = append(repos, Release{
-				Path:     path,
-				RepoName: repo,
-				Version:  "latest",
-				ID:       "latest",
-			})
-		}
+		points = append(points, MountPoint{
+			Point:    path,
+			Repo:     repo,
+			Release:  nil,
+			Releases: nil,
+		})
 	}
-	return repos, nil
+	d.points = points
+	return points, nil
 }
 
 // Get the next directory level
 func GetNextDir(wholePath string, basePath string) string {
-	if !strings.HasSuffix(basePath, "/") {
-		basePath += "/"
-	}
+	basePath = fmt.Sprintf("%s/", strings.TrimRight(basePath, "/"))
 	if !strings.HasPrefix(wholePath, basePath) {
 		return ""
 	}
 	remainingPath := strings.TrimLeft(strings.TrimPrefix(wholePath, basePath), "/")
 	if remainingPath != "" {
 		parts := strings.Split(remainingPath, "/")
-		return parts[0]
+		nextDir := parts[0]
+		if strings.HasPrefix(wholePath, strings.TrimRight(basePath, "/")+"/"+nextDir) {
+			return nextDir
+		}
 	}
 	return ""
 }
 
-// Send a GET request
-func GetRequest(url string, cacheExpiration int) (*resty.Response, error) {
-	mu.Lock()
-	if res, ok := cache[url]; ok && time.Now().Before(created[url].Add(time.Duration(cacheExpiration)*time.Minute)) {
-		mu.Unlock()
-		return res, nil
-	}
-	mu.Unlock()
-
-	res, err := req.Get(url)
-	if err != nil {
-		return nil, err
-	}
-	if res.StatusCode() != 200 {
-		log.Warn("failed to get request: ", res.StatusCode(), res.String())
-	}
-
-	mu.Lock()
-	cache[url] = res
-	created[url] = time.Now()
-	mu.Unlock()
-
-	return res, nil
-}
-
-// Fetch README, LICENSE and similar files
-func GetGithubOtherFile(repo string, basePath string, cacheExpiration int) (*[]File, error) {
-	url := fmt.Sprintf("https://api.github.com/repos/%s/contents/", strings.Trim(repo, "/"))
-	res, _ := GetRequest(url, cacheExpiration)
-	body := jsoniter.Get(res.Body())
-	var files []File
-	for i := 0; i < body.Size(); i++ {
-		filename := body.Get(i, "name").ToString()
-
-		re := regexp.MustCompile(`(?i)^(.*\.md|LICENSE)$`)
-
-		if !re.MatchString(filename) {
-			continue
-		}
-
-		files = append(files, File{
-			FileName: filename,
-			Size:     body.Get(i, "size").ToInt64(),
-			CreateAt: time.Time{},
-			UpdateAt: time.Now(),
-			Url:      body.Get(i, "download_url").ToString(),
-			Type:     body.Get(i, "type").ToString(),
-			Path:     fmt.Sprintf("%s/%s", basePath, filename),
-		})
-	}
-	return &files, nil
-}
-
-// Fetch GitHub release details
-func GetRepoReleaseInfo(repo string, version string, basePath string, cacheExpiration int) (*ReleasesData, error) {
-	url := fmt.Sprintf("https://api.github.com/repos/%s/releases/%s", strings.Trim(repo, "/"), version)
-	res, _ := GetRequest(url, cacheExpiration)
-	body := res.Body()
-
-	if jsoniter.Get(res.Body(), "status").ToInt64() != 0 {
-		return &ReleasesData{}, fmt.Errorf("%s", res.String())
-	}
-
-	assets := jsoniter.Get(res.Body(), "assets")
-	var files []File
-
-	for i := 0; i < assets.Size(); i++ {
-		filename := assets.Get(i, "name").ToString()
-
-		files = append(files, File{
-			FileName: filename,
-			Size:     assets.Get(i, "size").ToInt64(),
-			Url:      assets.Get(i, "browser_download_url").ToString(),
-			Type:     assets.Get(i, "content_type").ToString(),
-			Path:     fmt.Sprintf("%s/%s", basePath, filename),
-
-			CreateAt: func() time.Time {
-				t, _ := time.Parse(time.RFC3339, assets.Get(i, "created_at").ToString())
-				return t
-			}(),
-			UpdateAt: func() time.Time {
-				t, _ := time.Parse(time.RFC3339, assets.Get(i, "updated_at").ToString())
-				return t
-			}(),
-		})
-	}
-
-	return &ReleasesData{
-		Files: files,
-		Url:   jsoniter.Get(body, "html_url").ToString(),
-
-		Size: func() int64 {
-			size := int64(0)
-			for _, file := range files {
-				size += file.Size
-			}
-			return size
-		}(),
-		UpdateAt: func() time.Time {
-			t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "published_at").ToString())
-			return t
-		}(),
-		CreateAt: func() time.Time {
-			t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "created_at").ToString())
-			return t
-		}(),
-	}, nil
-}
-
-// Fetch all version tags
-func GetAllVersion(repo string, path string) (*[]Release, error) {
-	url := fmt.Sprintf("https://api.github.com/repos/%s/releases", strings.Trim(repo, "/"))
-	res, _ := GetRequest(url, 0)
-	body := jsoniter.Get(res.Body())
-	releases := make([]Release, 0)
-	for i := 0; i < body.Size(); i++ {
-		version := body.Get(i, "tag_name").ToString()
-		releases = append(releases, Release{
-			Path:     fmt.Sprintf("%s/%s", path, version),
-			Version:  version,
-			RepoName: repo,
-			ID:       body.Get(i, "id").ToString(),
-		})
-	}
-	return &releases, nil
-}
-
-func ClearCache() {
-	mu.Lock()
-	cache = make(map[string]*resty.Response)
-	created = make(map[string]time.Time)
-	mu.Unlock()
-}
-
-func SetHeader(token string) {
-	req = base.RestyClient.R()
-	if token != "" {
-		req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", token))
-	}
-	req.SetHeader("Accept", "application/vnd.github+json")
-	req.SetHeader("X-GitHub-Api-Version", "2022-11-28")
-}
+// Report whether parentDir is an ancestor directory of targetDir
+func IsAncestorDir(parentDir string, targetDir string) bool {
+	absTargetDir, _ := filepath.Abs(targetDir)
+	absParentDir, _ := filepath.Abs(parentDir)
+	return strings.HasPrefix(absTargetDir, absParentDir)
 }
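GetNextDir returns the first path segment of wholePath below basePath; the driver uses it to decide which child of a mount point a request refers to. Worked examples against the new implementation (paths are illustrative):

	GetNextDir("/repos/alist/v3.42.0", "/repos/alist") // "v3.42.0"
	GetNextDir("/repos/alist", "/repos/alist")         // "" (nothing below basePath)
	GetNextDir("/other/x", "/repos/alist")             // "" (not under basePath)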
@@ -158,7 +158,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	putUrl := res.Header().Get("location")
 	if stream.GetSize() < d.ChunkSize*1024*1024 {
 		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
+				SetBody(driver.NewLimitedUploadStream(ctx, stream))
 		}, nil)
 	} else {
 		err = d.chunkUpload(ctx, stream, putUrl)
@@ -11,10 +11,10 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/alist-org/alist/v3/pkg/http_range"
-
 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"github.com/golang-jwt/jwt/v4"
@@ -126,8 +126,7 @@ func (d *GoogleDrive) refreshToken() error {
 		}
 		d.AccessToken = resp.AccessToken
 		return nil
-	}
-	if gdsaFileErr != nil && os.IsExist(gdsaFileErr) {
+	} else if os.IsExist(gdsaFileErr) {
 		return gdsaFileErr
 	}
 	url := "https://www.googleapis.com/oauth2/v4/token"
@@ -229,6 +228,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer
 	if err != nil {
 		return err
 	}
+	reader = driver.NewLimitedUploadStream(ctx, reader)
 	_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
 		req.SetHeaders(map[string]string{
 			"Content-Length": strconv.FormatInt(chunkSize, 10),
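driver.NewLimitedUploadStream, threaded through all of these upload paths, is alist's internal wrapper that applies the server-wide upload rate limit and honours context cancellation; its implementation is not part of this diff. Conceptually it behaves like a context-aware io.Reader, roughly along these lines (an illustrative sketch only, not the real implementation):

	// ctxReader aborts a long upload as soon as its context is cancelled.
	type ctxReader struct {
		ctx context.Context
		r   io.Reader
	}

	func (c *ctxReader) Read(p []byte) (int, error) {
		select {
		case <-c.ctx.Done():
			return 0, c.ctx.Err() // surfaces context.Canceled / DeadlineExceeded mid-transfer
		default:
			return c.r.Read(p)
		}
	}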
@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	}
 
 	resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(stream).SetContext(ctx)
+		req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
 	}, nil, postHeaders)
 
 	if err != nil {
@@ -392,10 +392,11 @@ func (d *HalalCloud) put(ctx context.Context, dstDir model.Obj, fileStream model
 	if fileStream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
 		uploader.PartSize = fileStream.GetSize() / (s3manager.MaxUploadParts - 1)
 	}
+	reader := driver.NewLimitedUploadStream(ctx, fileStream)
 	_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
 		Bucket: aws.String(result.Bucket),
 		Key:    aws.String(result.Key),
-		Body:   io.TeeReader(fileStream, driver.NewProgress(fileStream.GetSize(), up)),
+		Body:   io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up)),
 	})
 	return nil, err
 
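The io.TeeReader here reports progress as a side effect of the upload itself: every chunk the S3 uploader pulls from the reader is also written to the progress sink. A self-contained illustration with a byte counter standing in for driver.NewProgress:

	package main

	import (
		"fmt"
		"io"
		"strings"
	)

	// countWriter plays the role of the progress sink: it just counts bytes.
	type countWriter struct{ n int64 }

	func (w *countWriter) Write(p []byte) (int, error) {
		w.n += int64(len(p))
		return len(p), nil
	}

	func main() {
		src := strings.NewReader("payload to upload")
		progress := &countWriter{}
		tee := io.TeeReader(src, progress) // everything read from tee is mirrored into progress
		uploaded, _ := io.ReadAll(tee)     // stands in for uploader.UploadWithContext
		fmt.Println(len(uploaded), progress.n) // 17 17
	}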
@@ -2,7 +2,6 @@ package template
 
 import (
 	"context"
-	"crypto/md5"
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
@@ -17,6 +16,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/foxxorcat/mopan-sdk-go"
 	"github.com/go-resty/resty/v2"
@@ -120,7 +120,7 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 	if err != nil {
 		return nil, err
 	}
-	ts, ts_str, err := getTimestamp(d.conf.secret)
+	ts, ts_str, _ := getTimestamp(d.conf.secret)
 
 	params := []string{
 		"uuid=" + url.QueryEscape(d.UUID),
@@ -149,11 +149,17 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 	u.RawQuery = strings.Join(params, "&")
 	realURL := u.String()
 	// get the url after redirect
-	res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
-		//"Origin":  d.conf.site,
+	req := base.NoRedirectClient.R()
+	req.SetHeaders(map[string]string{
 		"Referer":    d.conf.site + "/",
 		"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
-	}).Get(realURL)
+	})
+	if d.Addition.Ip != "" {
+		req.SetHeader("X-Forwarded-For", d.Addition.Ip)
+	}
+
+	res, err := req.Get(realURL)
 	if err != nil {
 		return nil, err
 	}
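base.NoRedirectClient is a resty client configured not to follow redirects, so the driver can read the real download URL out of the Location header itself. A stand-alone equivalent using resty's public API (the target URL is only an example):

	client := resty.New()
	client.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse // keep the 3xx response instead of following it
	}))
	res, err := client.R().Get("http://github.com/AlistGo/alist") // example: redirects to https
	if err != nil {
		panic(err)
	}
	fmt.Println(res.StatusCode(), res.Header().Get("Location"))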
@@ -266,30 +272,21 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error {
 
 const DefaultPartSize = 1024 * 1024 * 8
 
-func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	h := md5.New()
-	// need to calculate md5 of the full content
-	tempFile, err := stream.CacheFullInTempFile()
-	if err != nil {
-		return nil, err
-	}
-	defer func() {
-		_ = tempFile.Close()
-	}()
-	if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
-		return nil, err
-	}
-	_, err = tempFile.Seek(0, io.SeekStart)
-	if err != nil {
-		return nil, err
-	}
-	etag := hex.EncodeToString(h.Sum(nil))
+func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	etag := s.GetHash().GetHash(utils.MD5)
+	var err error
+	if len(etag) != utils.MD5.Width {
+		_, etag, err = stream.CacheFullInTempFileAndHash(s, utils.MD5)
+		if err != nil {
+			return nil, err
+		}
+	}
 	// get upToken
 	res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"fileId":   "",
-			"fileName": stream.GetName(),
-			"fileSize": stream.GetSize()/1024 + 1,
+			"fileName": s.GetName(),
+			"fileSize": s.GetSize()/1024 + 1,
 			"folderId": dstDir.GetID(),
 			"md5":      etag,
 			"type":     1,
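The old Put buffered the whole stream into a temp file just to hash it; the rewrite reuses an MD5 already carried by the streamer and falls back to stream.CacheFullInTempFileAndHash only when the hash is absent (len(etag) != utils.MD5.Width, i.e. not a full hex digest). The hashing step underneath either way is the standard streaming-MD5 idiom:

	h := md5.New()
	// io.Copy hashes without holding the whole payload in memory.
	if _, err := io.Copy(h, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil))) // 5d41402abc4b2a76b9719d911017c592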
@@ -301,13 +298,20 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	upToken := utils.Json.Get(res, "upToken").ToString()
 	now := time.Now()
 	key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
+			Reader: s,
+			Size:   s.GetSize(),
+		},
+		UpdateProgress: up,
+	})
 	var token string
-	if stream.GetSize() <= DefaultPartSize {
-		res, err := d.upClient.R().SetMultipartFormData(map[string]string{
+	if s.GetSize() <= DefaultPartSize {
+		res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{
 			"token": upToken,
 			"key":   key,
-			"fname": stream.GetName(),
-		}).SetMultipartField("file", stream.GetName(), stream.GetMimetype(), tempFile).
+			"fname": s.GetName(),
+		}).SetMultipartField("file", s.GetName(), s.GetMimetype(), reader).
 			Post("https://upload.qiniup.com/")
 		if err != nil {
 			return nil, err
@@ -321,10 +325,10 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		}
 		uploadId := utils.Json.Get(res.Body(), "uploadId").ToString()
 		parts := make([]Part, 0)
-		partNum := (stream.GetSize() + DefaultPartSize - 1) / DefaultPartSize
+		partNum := (s.GetSize() + DefaultPartSize - 1) / DefaultPartSize
 		for i := 1; i <= int(partNum); i++ {
 			u := fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s/%d", d.conf.bucket, keyBase64, uploadId, i)
-			res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(tempFile, DefaultPartSize)).Put(u)
+			res, err = d.upClient.R().SetContext(ctx).SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(reader, DefaultPartSize)).Put(u)
 			if err != nil {
 				return nil, err
 			}
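partNum is the usual ceiling division, (size + partSize - 1) / partSize, so a trailing partial chunk still gets its own part:

	const partSize = 1024 * 1024 * 8 // DefaultPartSize, 8 MiB

	func partCount(size int64) int64 {
		return (size + partSize - 1) / partSize
	}

	// partCount(partSize)   == 1 (exactly one full part)
	// partCount(partSize+1) == 2 (one full part plus a 1-byte tail)
	// partCount(1)          == 1 (a lone partial part)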
@@ -335,7 +339,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		})
 	}
 	res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(base.Json{
-		"fnmae": stream.GetName(),
+		"fnmae": s.GetName(),
 		"parts": parts,
 	}).Post(fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s", d.conf.bucket, keyBase64, uploadId))
 	if err != nil {
@@ -373,9 +377,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		ID: strconv.FormatInt(file.FileId, 10),
 		//Path: ,
 		Name:     file.FileName,
-		Size:     stream.GetSize(),
-		Modified: stream.ModTime(),
-		Ctime:    stream.CreateTime(),
+		Size:     s.GetSize(),
+		Modified: s.ModTime(),
+		Ctime:    s.CreateTime(),
 		IsFolder: false,
 		HashInfo: utils.NewHashInfo(utils.MD5, etag),
 	}, nil
@@ -9,6 +9,7 @@ type Addition struct {
 	driver.RootID
 	Username string `json:"username" type:"string" required:"true"`
 	Password string `json:"password" type:"string" required:"true"`
+	Ip       string `json:"ip" type:"string"`
 
 	Token string
 	UUID  string
@@ -73,8 +73,13 @@ func (d *ILanZou) request(pathname, method string, callback base.ReqCallback, pr
 		"Referer":         d.conf.site + "/",
 		"User-Agent":      "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
 		"Accept-Encoding": "gzip, deflate, br, zstd",
+		"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5",
 	})
 
+	if d.Addition.Ip != "" {
+		req.SetHeader("X-Forwarded-For", d.Addition.Ip)
+	}
+
 	if callback != nil {
 		callback(req)
 	}
@@ -4,13 +4,12 @@ import (
 	"context"
 	"fmt"
 	"net/url"
-	stdpath "path"
-	"path/filepath"
-	"strings"
+	"path"
+	shell "github.com/ipfs/go-ipfs-api"
 
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
-	shell "github.com/ipfs/go-ipfs-api"
 )
 
 type IPFS struct {
@@ -43,82 +42,143 @@ func (d *IPFS) Drop(ctx context.Context) error {
 }
 
 func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
-	path := dir.GetPath()
-	if path[len(path):] != "/" {
-		path += "/"
+	var ipfsPath string
+	cid := dir.GetID()
+	if cid != "" {
+		ipfsPath = path.Join("/ipfs", cid)
+	} else {
+		// ipns DNS resolution can fail, so the cid may need to be fetched again; other cases should not error
+		ipfsPath = dir.GetPath()
+		switch d.Mode {
+		case "ipfs":
+			ipfsPath = path.Join("/ipfs", ipfsPath)
+		case "ipns":
+			ipfsPath = path.Join("/ipns", ipfsPath)
+		case "mfs":
+			fileStat, err := d.sh.FilesStat(ctx, ipfsPath)
+			if err != nil {
+				return nil, err
+			}
+			ipfsPath = path.Join("/ipfs", fileStat.Hash)
+		default:
+			return nil, fmt.Errorf("mode error")
+		}
 	}
-	path_cid, err := d.sh.FilesStat(ctx, path)
-	if err != nil {
-		return nil, err
-	}
-
-	dirs, err := d.sh.List(path_cid.Hash)
+	dirs, err := d.sh.List(ipfsPath)
 	if err != nil {
 		return nil, err
 	}
 
 	objlist := []model.Obj{}
 	for _, file := range dirs {
-		gateurl := *d.gateURL
-		gateurl.Path = "ipfs/" + file.Hash
-		gateurl.RawQuery = "filename=" + url.PathEscape(file.Name)
-		objlist = append(objlist, &model.ObjectURL{
-			Object: model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
-			Url:    model.Url{Url: gateurl.String()},
-		})
+		objlist = append(objlist, &model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1})
 	}
 
 	return objlist, nil
 }
 
 func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-	link := d.Gateway + "/ipfs/" + file.GetID() + "/?filename=" + url.PathEscape(file.GetName())
-	return &model.Link{URL: link}, nil
+	gateurl := d.gateURL.JoinPath("/ipfs/", file.GetID())
+	gateurl.RawQuery = "filename=" + url.QueryEscape(file.GetName())
+	return &model.Link{URL: gateurl.String()}, nil
 }
 
-func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-	path := parentDir.GetPath()
-	if path[len(path):] != "/" {
-		path += "/"
-	}
-	return d.sh.FilesMkdir(ctx, path+dirName)
+func (d *IPFS) Get(ctx context.Context, rawPath string) (model.Obj, error) {
+	rawPath = path.Join(d.GetRootPath(), rawPath)
+	var ipfsPath string
+	switch d.Mode {
+	case "ipfs":
+		ipfsPath = path.Join("/ipfs", rawPath)
+	case "ipns":
+		ipfsPath = path.Join("/ipns", rawPath)
+	case "mfs":
+		fileStat, err := d.sh.FilesStat(ctx, rawPath)
+		if err != nil {
+			return nil, err
+		}
+		ipfsPath = path.Join("/ipfs", fileStat.Hash)
+	default:
+		return nil, fmt.Errorf("mode error")
+	}
+	file, err := d.sh.FilesStat(ctx, ipfsPath)
+	if err != nil {
+		return nil, err
+	}
+	return &model.Object{ID: file.Hash, Name: path.Base(rawPath), Path: rawPath, Size: int64(file.Size), IsFolder: file.Type == "directory"}, nil
 }
 
-func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
-	return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
+func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	if d.Mode != "mfs" {
+		return nil, fmt.Errorf("only write in mfs mode")
+	}
+	dirPath := parentDir.GetPath()
+	err := d.sh.FilesMkdir(ctx, path.Join(dirPath, dirName), shell.FilesMkdir.Parents(true))
+	if err != nil {
+		return nil, err
+	}
+	file, err := d.sh.FilesStat(ctx, path.Join(dirPath, dirName))
+	if err != nil {
+		return nil, err
+	}
+	return &model.Object{ID: file.Hash, Name: dirName, Path: path.Join(dirPath, dirName), Size: int64(file.Size), IsFolder: true}, nil
 }
 
-func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
-	newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName
-	return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
+func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	if d.Mode != "mfs" {
+		return nil, fmt.Errorf("only write in mfs mode")
+	}
+	dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath()))
+	d.sh.FilesRm(ctx, dstPath, true)
+	return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()},
+		d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
 }
 
-func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
-	// TODO copy obj, optional
-	fmt.Println(srcObj.GetPath())
-	fmt.Println(dstDir.GetPath())
-	newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath())
-	fmt.Println(newFileName)
-	return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
+func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+	if d.Mode != "mfs" {
+		return nil, fmt.Errorf("only write in mfs mode")
+	}
+	dstPath := path.Join(path.Dir(srcObj.GetPath()), newName)
+	d.sh.FilesRm(ctx, dstPath, true)
+	return &model.Object{ID: srcObj.GetID(), Name: newName, Path: dstPath, Size: int64(srcObj.GetSize()),
+		IsFolder: srcObj.IsDir()}, d.sh.FilesMv(ctx, srcObj.GetPath(), dstPath)
+}
+
+func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	if d.Mode != "mfs" {
+		return nil, fmt.Errorf("only write in mfs mode")
+	}
+	dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath()))
+	d.sh.FilesRm(ctx, dstPath, true)
+	return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()},
+		d.sh.FilesCp(ctx, path.Join("/ipfs/", srcObj.GetID()), dstPath, shell.FilesCp.Parents(true))
 }
 
 func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
-	// TODO remove obj, optional
+	if d.Mode != "mfs" {
+		return fmt.Errorf("only write in mfs mode")
+	}
 	return d.sh.FilesRm(ctx, obj.GetPath(), true)
 }
 
-func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	// TODO upload file, optional
-	_, err := d.sh.Add(stream, ToFiles(stdpath.Join(dstDir.GetPath(), stream.GetName())))
-	return err
-}
-
-func ToFiles(dstDir string) shell.AddOpts {
-	return func(rb *shell.RequestBuilder) error {
-		rb.Option("to-files", dstDir)
-		return nil
-	}
+func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	if d.Mode != "mfs" {
+		return nil, fmt.Errorf("only write in mfs mode")
+	}
+	outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}))
+	if err != nil {
+		return nil, err
+	}
+	dstPath := path.Join(dstDir.GetPath(), s.GetName())
+	if s.GetExist() != nil {
+		d.sh.FilesRm(ctx, dstPath, true)
+	}
+	err = d.sh.FilesCp(ctx, path.Join("/ipfs/", outHash), dstPath, shell.FilesCp.Parents(true))
+	gateurl := d.gateURL.JoinPath("/ipfs/", outHash)
+	gateurl.RawQuery = "filename=" + url.QueryEscape(s.GetName())
+	return &model.Object{ID: outHash, Name: s.GetName(), Path: dstPath, Size: int64(s.GetSize()), IsFolder: s.IsDir()}, err
 }
 
 //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
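The IPFS rewrite switches from path/filepath to the POSIX-only path package, which always uses forward slashes (filepath would produce backslashes on Windows, which is what the old strings.ReplaceAll calls were papering over). path.Join also cleans the result:

	path.Join("/ipfs", "QmExampleCID")     // "/ipfs/QmExampleCID" (example CID)
	path.Join("/ipns", "/example.com/sub") // "/ipns/example.com/sub" (duplicate slash collapsed)
	path.Base("/dir/file.txt")             // "file.txt"
	path.Dir("/dir/file.txt")              // "/dir"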
@@ -8,14 +8,16 @@ import (
 type Addition struct {
 	// Usually one of two
 	driver.RootPath
-	Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"`
-	Gateway  string `json:"gateway" default:"https://ipfs.io"`
+	Mode     string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"`
+	Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001" required:"true"`
+	Gateway  string `json:"gateway" default:"http://127.0.0.1:8080" required:"true"`
 }
 
 var config = driver.Config{
 	Name:        "IPFS API",
 	DefaultRoot: "/",
 	LocalSort:   true,
+	OnlyProxy:   false,
 }
 
 func init() {
@@ -3,8 +3,6 @@ package kodbox
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/pkg/utils"
-	"github.com/go-resty/resty/v2"
 	"net/http"
 	"path/filepath"
 	"strings"
@@ -12,6 +10,8 @@ import (
 
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
 )
 
 type KodBox struct {
@@ -225,14 +225,19 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error {
 	return nil
 }
 
-func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	var resp *CommonResp
 	_, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) {
-		req.SetFileReader("file", stream.GetName(), stream).
+		r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		})
+		req.SetFileReader("file", s.GetName(), r).
 			SetResult(&resp).
 			SetFormData(map[string]string{
 				"path": dstDir.GetPath(),
-			})
+			}).
+			SetContext(ctx)
 	})
 	if err != nil {
 		return nil, err
@@ -244,8 +249,8 @@ func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	return &model.ObjThumb{
 		Object: model.Object{
 			Path:     resp.Info.(string),
-			Name:     stream.GetName(),
-			Size:     stream.GetSize(),
+			Name:     s.GetName(),
+			Size:     s.GetSize(),
 			IsFolder: false,
 			Modified: time.Now(),
 			Ctime:    time.Now(),
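resty's SetFileReader streams any io.Reader as a multipart file field, which is how both the KodBox change above and the LanZou change below attach the progress-wrapped reader; combined with SetFormData the request looks like a plain browser upload form. A stand-alone sketch (endpoint and field names are placeholders):

	client := resty.New()
	res, err := client.R().
		SetFileReader("file", "hello.txt", strings.NewReader("hello")). // field, filename, reader
		SetFormData(map[string]string{"path": "/uploads"}).             // extra form fields
		Post("https://example.com/upload")                              // placeholder endpoint
	if err != nil {
		panic(err)
	}
	fmt.Println(res.StatusCode())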
@@ -208,18 +208,22 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
 	return errs.NotSupport
 }
 
-func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	if d.IsCookie() || d.IsAccount() {
 		var resp RespText[[]FileOrFolder]
 		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
+			reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+				Reader:         s,
+				UpdateProgress: up,
+			})
 			req.SetFormData(map[string]string{
 				"task":           "1",
 				"vie":            "2",
 				"ve":             "2",
 				"id":             "WU_FILE_0",
-				"name":           stream.GetName(),
+				"name":           s.GetName(),
 				"folder_id_bb_n": dstDir.GetID(),
-			}).SetFileReader("upload_file", stream.GetName(), stream).SetContext(ctx)
+			}).SetFileReader("upload_file", s.GetName(), reader).SetContext(ctx)
 		}, &resp, true)
 		if err != nil {
 			return nil, err
@@ -78,6 +78,42 @@ func RemoveNotes(html string) string {
 	})
 }
 
+// Strip JS comments
+func RemoveJSComment(data string) string {
+	var result strings.Builder
+	inComment := false
+	inSingleLineComment := false
+
+	for i := 0; i < len(data); i++ {
+		v := data[i]
+
+		if inSingleLineComment && (v == '\n' || v == '\r') {
+			inSingleLineComment = false
+			result.WriteByte(v)
+			continue
+		}
+		if inComment && v == '*' && i+1 < len(data) && data[i+1] == '/' {
+			inComment = false
+			continue
+		}
+		if v == '/' && i+1 < len(data) {
+			nextChar := data[i+1]
+			if nextChar == '*' {
+				inComment = true
+				i++
+				continue
+			} else if nextChar == '/' {
+				inSingleLineComment = true
+				i++
+				continue
+			}
+		}
+		result.WriteByte(v)
+	}
+
+	return result.String()
+}
+
 var findAcwScV2Reg = regexp.MustCompile(`arg1='([0-9A-Z]+)'`)
 
 // When a page is hit too often (and in some other cases), an obfuscated page may be returned first; it computes an acw_sc__v2 value, injects it into the page, and only a repeated request then yields the normal page