mirror of
https://github.com/OpenListTeam/OpenList.git
synced 2025-11-25 19:37:41 +08:00
Compare commits
87 Commits
next
...
9d09ee133d
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9d09ee133d | ||
|
|
d88f0e8f3c | ||
|
|
0857478516 | ||
|
|
66d9809057 | ||
|
|
db8a7e8caf | ||
|
|
8f18e34da0 | ||
|
|
525f26dc23 | ||
|
|
a0fcfa3ed2 | ||
|
|
15f276537c | ||
|
|
623a12050e | ||
|
|
ae2d2d1021 | ||
|
|
a109152a13 | ||
|
|
febbcd6027 | ||
|
|
549e60136b | ||
|
|
14d2b8290a | ||
|
|
cdc069d8e7 | ||
|
|
fb5094f688 | ||
|
|
670e0bdc45 | ||
|
|
89235012af | ||
|
|
2bfbad2874 | ||
|
|
4ba7696032 | ||
|
|
66645516e5 | ||
|
|
eb2ff2d2ca | ||
|
|
4153245f2c | ||
|
|
6fe9af7819 | ||
|
|
2edc446ced | ||
|
|
c3c7983f7b | ||
|
|
22deb4df30 | ||
|
|
da0c734aa3 | ||
|
|
189cebe4c9 | ||
|
|
9d3da44a99 | ||
|
|
8f17d35ed5 | ||
|
|
89759b6e3b | ||
|
|
a2fc38be8d | ||
|
|
e0414e7110 | ||
|
|
b486af0031 | ||
|
|
ea09ce4b8f | ||
|
|
d465da43e3 | ||
|
|
84ed487950 | ||
|
|
3c07144211 | ||
|
|
3936e736e6 | ||
|
|
68433d4f5b | ||
|
|
cc16cb35bf | ||
|
|
d3bc6321f4 | ||
|
|
cbbb5ad231 | ||
|
|
c1d03c5bcc | ||
|
|
61a8ed515f | ||
|
|
bbb7c06504 | ||
|
|
8bbdb272d4 | ||
|
|
c15ae94307 | ||
|
|
f1a5048558 | ||
|
|
1fe26bff9a | ||
|
|
433dcd156b | ||
|
|
e97f0a289e | ||
|
|
89f35170b3 | ||
|
|
8188fb2d7d | ||
|
|
87cf95f50b | ||
|
|
8ab26cb823 | ||
|
|
5880c8e1af | ||
|
|
14bf4ecb4c | ||
|
|
04a5e58781 | ||
|
|
bbd4389345 | ||
|
|
f350ccdf95 | ||
|
|
4f2de9395e | ||
|
|
b0dbbebfb0 | ||
|
|
0c27b4bd47 | ||
|
|
736cd9e5f2 | ||
|
|
c7a603c926 | ||
|
|
a28d6d5693 | ||
|
|
e59d2233e2 | ||
|
|
01914a06ef | ||
|
|
6499374d1c | ||
|
|
b054919d5c | ||
|
|
048ee9c2e5 | ||
|
|
23394548ca | ||
|
|
b04677b806 | ||
|
|
e4c902dd93 | ||
|
|
5d8bd258c0 | ||
|
|
08c5283c8c | ||
|
|
10a14f10cd | ||
|
|
f86ebc52a0 | ||
|
|
016ed90efa | ||
|
|
d76407b201 | ||
|
|
5de6b660f2 | ||
|
|
71ada3b656 | ||
|
|
dc42f0e226 | ||
|
|
74bf9f6467 |
56
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
56
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
<!--
|
||||||
|
Provide a general summary of your changes in the Title above.
|
||||||
|
The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.
|
||||||
|
If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
|
||||||
|
-->
|
||||||
|
<!--
|
||||||
|
在上方标题中提供您更改的总体摘要。
|
||||||
|
PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`。
|
||||||
|
如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。
|
||||||
|
-->
|
||||||
|
|
||||||
|
## Description / 描述
|
||||||
|
|
||||||
|
<!-- Describe your changes in detail -->
|
||||||
|
<!-- 详细描述您的更改 -->
|
||||||
|
|
||||||
|
## Motivation and Context / 背景
|
||||||
|
|
||||||
|
<!-- Why is this change required? What problem does it solve? -->
|
||||||
|
<!-- 为什么需要此更改?它解决了什么问题? -->
|
||||||
|
|
||||||
|
<!-- If it fixes an open issue, please link to the issue here. -->
|
||||||
|
<!-- 如果修复了一个打开的issue,请在此处链接到该issue -->
|
||||||
|
|
||||||
|
Closes #XXXX
|
||||||
|
|
||||||
|
<!-- or -->
|
||||||
|
<!-- 或者 -->
|
||||||
|
|
||||||
|
Relates to #XXXX
|
||||||
|
|
||||||
|
## How Has This Been Tested? / 测试
|
||||||
|
|
||||||
|
<!-- Please describe in detail how you tested your changes. -->
|
||||||
|
<!-- 请详细描述您如何测试更改 -->
|
||||||
|
|
||||||
|
## Checklist / 检查清单
|
||||||
|
|
||||||
|
<!-- Go over all the following points, and put an `x` in all the boxes that apply. -->
|
||||||
|
<!-- 检查以下所有要点,并在所有适用的框中打`x` -->
|
||||||
|
|
||||||
|
<!-- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
|
||||||
|
<!-- 如果您对其中任何一项不确定,请不要犹豫提问。我们会帮助您! -->
|
||||||
|
|
||||||
|
- [ ] I have read the [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) document.
|
||||||
|
我已阅读 [CONTRIBUTING](https://github.com/OpenListTeam/OpenList/blob/main/CONTRIBUTING.md) 文档。
|
||||||
|
- [ ] I have formatted my code with `go fmt` or [prettier](https://prettier.io/).
|
||||||
|
我已使用 `go fmt` 或 [prettier](https://prettier.io/) 格式化提交的代码。
|
||||||
|
- [ ] I have added appropriate labels to this PR (or mentioned needed labels in the description if lacking permissions).
|
||||||
|
我已为此 PR 添加了适当的标签(如无权限或需要的标签不存在,请在描述中说明,管理员将后续处理)。
|
||||||
|
- [ ] I have requested review from relevant code authors using the "Request review" feature when applicable.
|
||||||
|
我已在适当情况下使用"Request review"功能请求相关代码作者进行审查。
|
||||||
|
- [ ] I have updated the repository accordingly (If it’s needed).
|
||||||
|
我已相应更新了相关仓库(若适用)。
|
||||||
|
- [ ] [OpenList-Frontend](https://github.com/OpenListTeam/OpenList-Frontend) #XXXX
|
||||||
|
- [ ] [OpenList-Docs](https://github.com/OpenListTeam/OpenList-Docs) #XXXX
|
||||||
2
.github/workflows/beta_release.yml
vendored
2
.github/workflows/beta_release.yml
vendored
@@ -87,7 +87,7 @@ jobs:
|
|||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: "1.24.5"
|
go-version: "1.25.0"
|
||||||
|
|
||||||
- name: Setup web
|
- name: Setup web
|
||||||
run: bash build.sh dev web
|
run: bash build.sh dev web
|
||||||
|
|||||||
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@@ -33,7 +33,7 @@ jobs:
|
|||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: "1.24.5"
|
go-version: "1.25.0"
|
||||||
|
|
||||||
- name: Setup web
|
- name: Setup web
|
||||||
run: bash build.sh dev web
|
run: bash build.sh dev web
|
||||||
|
|||||||
3
.github/workflows/release.yml
vendored
3
.github/workflows/release.yml
vendored
@@ -46,7 +46,7 @@ jobs:
|
|||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: '1.24'
|
go-version: '1.25.0'
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
@@ -73,4 +73,5 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
files: build/compress/*
|
files: build/compress/*
|
||||||
prerelease: false
|
prerelease: false
|
||||||
|
tag_name: ${{ github.event.release.tag_name }}
|
||||||
|
|
||||||
|
|||||||
4
.github/workflows/release_docker.yml
vendored
4
.github/workflows/release_docker.yml
vendored
@@ -47,7 +47,7 @@ jobs:
|
|||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 'stable'
|
go-version: '1.25.0'
|
||||||
|
|
||||||
- name: Cache Musl
|
- name: Cache Musl
|
||||||
id: cache-musl
|
id: cache-musl
|
||||||
@@ -87,7 +87,7 @@ jobs:
|
|||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 'stable'
|
go-version: '1.25.0'
|
||||||
|
|
||||||
- name: Cache Musl
|
- name: Cache Musl
|
||||||
id: cache-musl
|
id: cache-musl
|
||||||
|
|||||||
38
.github/workflows/sync_repo.yml
vendored
Normal file
38
.github/workflows/sync_repo.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
name: Sync to Gitee
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
sync:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Sync GitHub to Gitee
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup SSH
|
||||||
|
run: |
|
||||||
|
mkdir -p ~/.ssh
|
||||||
|
echo "${{ secrets.GITEE_SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
|
||||||
|
chmod 600 ~/.ssh/id_rsa
|
||||||
|
ssh-keyscan gitee.com >> ~/.ssh/known_hosts
|
||||||
|
|
||||||
|
- name: Create single commit and push
|
||||||
|
run: |
|
||||||
|
git config user.name "GitHub Actions"
|
||||||
|
git config user.email "actions@github.com"
|
||||||
|
|
||||||
|
# Create a new branch
|
||||||
|
git checkout --orphan new-main
|
||||||
|
git add .
|
||||||
|
git commit -m "Sync from GitHub: $(date)"
|
||||||
|
|
||||||
|
# Add Gitee remote and force push
|
||||||
|
git remote add gitee ${{ vars.GITEE_REPO_URL }}
|
||||||
|
git push --force gitee new-main:main
|
||||||
2
.github/workflows/test_docker.yml
vendored
2
.github/workflows/test_docker.yml
vendored
@@ -36,7 +36,7 @@ jobs:
|
|||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 'stable'
|
go-version: '1.25.0'
|
||||||
|
|
||||||
- name: Cache Musl
|
- name: Cache Musl
|
||||||
id: cache-musl
|
id: cache-musl
|
||||||
|
|||||||
110
CONTRIBUTING.md
110
CONTRIBUTING.md
@@ -2,106 +2,76 @@
|
|||||||
|
|
||||||
## Setup your machine
|
## Setup your machine
|
||||||
|
|
||||||
`OpenList` is written in [Go](https://golang.org/) and [React](https://reactjs.org/).
|
`OpenList` is written in [Go](https://golang.org/) and [SolidJS](https://www.solidjs.com/).
|
||||||
|
|
||||||
Prerequisites:
|
Prerequisites:
|
||||||
|
|
||||||
- [git](https://git-scm.com)
|
- [git](https://git-scm.com)
|
||||||
- [Go 1.20+](https://golang.org/doc/install)
|
- [Go 1.24+](https://golang.org/doc/install)
|
||||||
- [gcc](https://gcc.gnu.org/)
|
- [gcc](https://gcc.gnu.org/)
|
||||||
- [nodejs](https://nodejs.org/)
|
- [nodejs](https://nodejs.org/)
|
||||||
|
|
||||||
Clone `OpenList` and `OpenList-Frontend` anywhere:
|
## Cloning a fork
|
||||||
|
|
||||||
|
Fork and clone `OpenList` and `OpenList-Frontend` anywhere:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ git clone https://github.com/OpenListTeam/OpenList.git
|
$ git clone https://github.com/<your-username>/OpenList.git
|
||||||
$ git clone --recurse-submodules https://github.com/OpenListTeam/OpenList-Frontend.git
|
$ git clone --recurse-submodules https://github.com/<your-username>/OpenList-Frontend.git
|
||||||
|
```
|
||||||
|
|
||||||
|
## Creating a branch
|
||||||
|
|
||||||
|
Create a new branch from the `main` branch, with an appropriate name.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ git checkout -b <branch-name>
|
||||||
```
|
```
|
||||||
You should switch to the `main` branch for development.
|
|
||||||
|
|
||||||
## Preview your change
|
## Preview your change
|
||||||
|
|
||||||
### backend
|
### backend
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ go run main.go
|
$ go run main.go
|
||||||
```
|
```
|
||||||
|
|
||||||
### frontend
|
### frontend
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ pnpm dev
|
$ pnpm dev
|
||||||
```
|
```
|
||||||
|
|
||||||
## Add a new driver
|
## Add a new driver
|
||||||
|
|
||||||
Copy `drivers/template` folder and rename it, and follow the comments in it.
|
Copy `drivers/template` folder and rename it, and follow the comments in it.
|
||||||
|
|
||||||
## Create a commit
|
## Create a commit
|
||||||
|
|
||||||
Commit messages should be well formatted, and to make that "standardized".
|
Commit messages should be well formatted, and to make that "standardized".
|
||||||
|
|
||||||
### Commit Message Format
|
Submit your pull request. For PR titles, follow [Conventional Commits](https://www.conventionalcommits.org).
|
||||||
Each commit message consists of a **header**, a **body** and a **footer**. The header has a special
|
|
||||||
format that includes a **type**, a **scope** and a **subject**:
|
|
||||||
|
|
||||||
```
|
https://github.com/OpenListTeam/OpenList/issues/376
|
||||||
<type>(<scope>): <subject>
|
|
||||||
<BLANK LINE>
|
|
||||||
<body>
|
|
||||||
<BLANK LINE>
|
|
||||||
<footer>
|
|
||||||
```
|
|
||||||
|
|
||||||
The **header** is mandatory and the **scope** of the header is optional.
|
It's suggested to sign your commits. See: [How to sign commits](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits)
|
||||||
|
|
||||||
Any line of the commit message cannot be longer than 100 characters! This allows the message to be easier
|
|
||||||
to read on GitHub as well as in various git tools.
|
|
||||||
|
|
||||||
### Revert
|
|
||||||
If the commit reverts a previous commit, it should begin with `revert: `, followed by the header
|
|
||||||
of the reverted commit.
|
|
||||||
In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit
|
|
||||||
being reverted.
|
|
||||||
|
|
||||||
### Type
|
|
||||||
Must be one of the following:
|
|
||||||
|
|
||||||
* **feat**: A new feature
|
|
||||||
* **fix**: A bug fix
|
|
||||||
* **docs**: Documentation only changes
|
|
||||||
* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing
|
|
||||||
semi-colons, etc)
|
|
||||||
* **refactor**: A code change that neither fixes a bug nor adds a feature
|
|
||||||
* **perf**: A code change that improves performance
|
|
||||||
* **test**: Adding missing or correcting existing tests
|
|
||||||
* **build**: Affects project builds or dependency modifications
|
|
||||||
* **revert**: Restore the previous commit
|
|
||||||
* **ci**: Continuous integration of related file modifications
|
|
||||||
* **chore**: Changes to the build process or auxiliary tools and libraries such as documentation
|
|
||||||
generation
|
|
||||||
* **release**: Release a new version
|
|
||||||
|
|
||||||
### Scope
|
|
||||||
The scope could be anything specifying place of the commit change. For example `$location`,
|
|
||||||
`$browser`, `$compile`, `$rootScope`, `ngHref`, `ngClick`, `ngView`, etc...
|
|
||||||
|
|
||||||
You can use `*` when the change affects more than a single scope.
|
|
||||||
|
|
||||||
### Subject
|
|
||||||
The subject contains succinct description of the change:
|
|
||||||
|
|
||||||
* use the imperative, present tense: "change" not "changed" nor "changes"
|
|
||||||
* don't capitalize first letter
|
|
||||||
* no dot (.) at the end
|
|
||||||
|
|
||||||
### Body
|
|
||||||
Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
|
|
||||||
The body should include the motivation for the change and contrast this with previous behavior.
|
|
||||||
|
|
||||||
### Footer
|
|
||||||
The footer should contain any information about **Breaking Changes** and is also the place to
|
|
||||||
[reference GitHub issues that this commit closes](https://help.github.com/articles/closing-issues-via-commit-messages/).
|
|
||||||
|
|
||||||
**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines.
|
|
||||||
The rest of the commit message is then used for this.
|
|
||||||
|
|
||||||
## Submit a pull request
|
## Submit a pull request
|
||||||
|
|
||||||
Push your branch to your `openlist` fork and open a pull request against the
|
Please make sure your code has been formatted with `go fmt` or [prettier](https://prettier.io/) before submitting.
|
||||||
`main` branch.
|
|
||||||
|
Push your branch to your `openlist` fork and open a pull request against the `main` branch.
|
||||||
|
|
||||||
|
## Merge your pull request
|
||||||
|
|
||||||
|
Your pull request will be merged after review. Please wait for the maintainer to merge your pull request after review.
|
||||||
|
|
||||||
|
At least 1 approving review is required by reviewers with write access. You can also request a review from maintainers.
|
||||||
|
|
||||||
|
## Delete your branch
|
||||||
|
|
||||||
|
(Optional) After your pull request is merged, you can delete your branch.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Thank you for your contribution! Let's make OpenList better together!
|
||||||
|
|||||||
11
Dockerfile
11
Dockerfile
@@ -20,11 +20,12 @@ ARG GID=1001
|
|||||||
|
|
||||||
WORKDIR /opt/openlist/
|
WORKDIR /opt/openlist/
|
||||||
|
|
||||||
COPY --chmod=755 --from=builder /app/bin/openlist ./
|
RUN addgroup -g ${GID} ${USER} && \
|
||||||
COPY --chmod=755 entrypoint.sh /entrypoint.sh
|
adduser -D -u ${UID} -G ${USER} ${USER} && \
|
||||||
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
|
mkdir -p /opt/openlist/data
|
||||||
&& chown -R ${UID}:${GID} /opt \
|
|
||||||
&& chown -R ${UID}:${GID} /entrypoint.sh
|
COPY --from=builder --chmod=755 --chown=${UID}:${GID} /app/bin/openlist ./
|
||||||
|
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
|
||||||
|
|
||||||
USER ${USER}
|
USER ${USER}
|
||||||
RUN /entrypoint.sh version
|
RUN /entrypoint.sh version
|
||||||
|
|||||||
@@ -10,12 +10,12 @@ ARG GID=1001
|
|||||||
|
|
||||||
WORKDIR /opt/openlist/
|
WORKDIR /opt/openlist/
|
||||||
|
|
||||||
COPY --chmod=755 /build/${TARGETPLATFORM}/openlist ./
|
RUN addgroup -g ${GID} ${USER} && \
|
||||||
COPY --chmod=755 entrypoint.sh /entrypoint.sh
|
adduser -D -u ${UID} -G ${USER} ${USER} && \
|
||||||
|
mkdir -p /opt/openlist/data
|
||||||
|
|
||||||
RUN adduser -u ${UID} -g ${GID} -h /opt/openlist/data -D -s /bin/sh ${USER} \
|
COPY --chmod=755 --chown=${UID}:${GID} /build/${TARGETPLATFORM}/openlist ./
|
||||||
&& chown -R ${UID}:${GID} /opt \
|
COPY --chmod=755 --chown=${UID}:${GID} entrypoint.sh /entrypoint.sh
|
||||||
&& chown -R ${UID}:${GID} /entrypoint.sh
|
|
||||||
|
|
||||||
USER ${USER}
|
USER ${USER}
|
||||||
RUN /entrypoint.sh version
|
RUN /entrypoint.sh version
|
||||||
|
|||||||
12
README.md
12
README.md
@@ -64,7 +64,9 @@ Thank you for your support and understanding of the OpenList project.
|
|||||||
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
||||||
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
||||||
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
|
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
|
||||||
|
- [x] [MediaFire](https://www.mediafire.com)
|
||||||
- [x] [Mediatrack](https://www.mediatrack.cn)
|
- [x] [Mediatrack](https://www.mediatrack.cn)
|
||||||
|
- [x] [ProtonDrive](https://proton.me/drive)
|
||||||
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
|
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
|
||||||
- [x] [YandexDisk](https://disk.yandex.com)
|
- [x] [YandexDisk](https://disk.yandex.com)
|
||||||
- [x] [BaiduNetdisk](http://pan.baidu.com)
|
- [x] [BaiduNetdisk](http://pan.baidu.com)
|
||||||
@@ -74,7 +76,6 @@ Thank you for your support and understanding of the OpenList project.
|
|||||||
- [x] [Thunder](https://pan.xunlei.com)
|
- [x] [Thunder](https://pan.xunlei.com)
|
||||||
- [x] [Lanzou](https://www.lanzou.com)
|
- [x] [Lanzou](https://www.lanzou.com)
|
||||||
- [x] [ILanzou](https://www.ilanzou.com)
|
- [x] [ILanzou](https://www.ilanzou.com)
|
||||||
- [x] [Aliyundrive share](https://www.alipan.com)
|
|
||||||
- [x] [Google photo](https://photos.google.com)
|
- [x] [Google photo](https://photos.google.com)
|
||||||
- [x] [Mega.nz](https://mega.nz)
|
- [x] [Mega.nz](https://mega.nz)
|
||||||
- [x] [Baidu photo](https://photo.baidu.com)
|
- [x] [Baidu photo](https://photo.baidu.com)
|
||||||
@@ -85,6 +86,15 @@ Thank you for your support and understanding of the OpenList project.
|
|||||||
- [x] [FeijiPan](https://www.feijipan.com)
|
- [x] [FeijiPan](https://www.feijipan.com)
|
||||||
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
||||||
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
||||||
|
- [x] [Chaoxing](https://www.chaoxing.com)
|
||||||
|
- [x] [CNB](https://cnb.cool/)
|
||||||
|
- [x] [Degoo](https://degoo.com)
|
||||||
|
- [x] [Doubao](https://www.doubao.com)
|
||||||
|
- [x] [Febbox](https://www.febbox.com)
|
||||||
|
- [x] [GitHub](https://github.com)
|
||||||
|
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
|
||||||
|
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
|
||||||
|
- [x] [Weiyun](https://www.weiyun.com)
|
||||||
- [x] Easy to deploy and out-of-the-box
|
- [x] Easy to deploy and out-of-the-box
|
||||||
- [x] File preview (PDF, markdown, code, plain text, ...)
|
- [x] File preview (PDF, markdown, code, plain text, ...)
|
||||||
- [x] Image preview in gallery mode
|
- [x] Image preview in gallery mode
|
||||||
|
|||||||
12
README_cn.md
12
README_cn.md
@@ -64,7 +64,9 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
|
|||||||
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
|
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
|
||||||
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
||||||
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
|
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
|
||||||
|
- [x] [MediaFire](https://www.mediafire.com)
|
||||||
- [x] [分秒帧](https://www.mediatrack.cn)
|
- [x] [分秒帧](https://www.mediatrack.cn)
|
||||||
|
- [x] [ProtonDrive](https://proton.me/drive)
|
||||||
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
|
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
|
||||||
- [x] [YandexDisk](https://disk.yandex.com)
|
- [x] [YandexDisk](https://disk.yandex.com)
|
||||||
- [x] [百度网盘](http://pan.baidu.com)
|
- [x] [百度网盘](http://pan.baidu.com)
|
||||||
@@ -74,7 +76,6 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
|
|||||||
- [x] [迅雷网盘](https://pan.xunlei.com)
|
- [x] [迅雷网盘](https://pan.xunlei.com)
|
||||||
- [x] [蓝奏云](https://www.lanzou.com)
|
- [x] [蓝奏云](https://www.lanzou.com)
|
||||||
- [x] [蓝奏云优享版](https://www.ilanzou.com)
|
- [x] [蓝奏云优享版](https://www.ilanzou.com)
|
||||||
- [x] [阿里云盘分享](https://www.alipan.com)
|
|
||||||
- [x] [Google 相册](https://photos.google.com)
|
- [x] [Google 相册](https://photos.google.com)
|
||||||
- [x] [Mega.nz](https://mega.nz)
|
- [x] [Mega.nz](https://mega.nz)
|
||||||
- [x] [百度相册](https://photo.baidu.com)
|
- [x] [百度相册](https://photo.baidu.com)
|
||||||
@@ -85,6 +86,15 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
|
|||||||
- [x] [飞机盘](https://www.feijipan.com)
|
- [x] [飞机盘](https://www.feijipan.com)
|
||||||
- [x] [多吉云](https://www.dogecloud.com/product/oss)
|
- [x] [多吉云](https://www.dogecloud.com/product/oss)
|
||||||
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
||||||
|
- [x] [超星](https://www.chaoxing.com)
|
||||||
|
- [x] [CNB](https://cnb.cool/)
|
||||||
|
- [x] [Degoo](https://degoo.com)
|
||||||
|
- [x] [豆包](https://www.doubao.com)
|
||||||
|
- [x] [Febbox](https://www.febbox.com)
|
||||||
|
- [x] [GitHub](https://github.com)
|
||||||
|
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
|
||||||
|
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
|
||||||
|
- [x] [微云](https://www.weiyun.com)
|
||||||
- [x] 部署方便,开箱即用
|
- [x] 部署方便,开箱即用
|
||||||
- [x] 文件预览(PDF、markdown、代码、纯文本等)
|
- [x] 文件预览(PDF、markdown、代码、纯文本等)
|
||||||
- [x] 画廊模式下的图片预览
|
- [x] 画廊模式下的图片预览
|
||||||
|
|||||||
12
README_ja.md
12
README_ja.md
@@ -65,6 +65,7 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
|
|||||||
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
||||||
- [x] Teambition([中国](https://www.teambition.com), [国際](https://us.teambition.com))
|
- [x] Teambition([中国](https://www.teambition.com), [国際](https://us.teambition.com))
|
||||||
- [x] [Mediatrack](https://www.mediatrack.cn)
|
- [x] [Mediatrack](https://www.mediatrack.cn)
|
||||||
|
- [x] [ProtonDrive](https://proton.me/drive)
|
||||||
- [x] [139yun](https://yun.139.com)(個人、家族、グループ)
|
- [x] [139yun](https://yun.139.com)(個人、家族、グループ)
|
||||||
- [x] [YandexDisk](https://disk.yandex.com)
|
- [x] [YandexDisk](https://disk.yandex.com)
|
||||||
- [x] [BaiduNetdisk](http://pan.baidu.com)
|
- [x] [BaiduNetdisk](http://pan.baidu.com)
|
||||||
@@ -74,7 +75,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
|
|||||||
- [x] [Thunder](https://pan.xunlei.com)
|
- [x] [Thunder](https://pan.xunlei.com)
|
||||||
- [x] [Lanzou](https://www.lanzou.com)
|
- [x] [Lanzou](https://www.lanzou.com)
|
||||||
- [x] [ILanzou](https://www.ilanzou.com)
|
- [x] [ILanzou](https://www.ilanzou.com)
|
||||||
- [x] [Aliyundrive share](https://www.alipan.com)
|
|
||||||
- [x] [Google photo](https://photos.google.com)
|
- [x] [Google photo](https://photos.google.com)
|
||||||
- [x] [Mega.nz](https://mega.nz)
|
- [x] [Mega.nz](https://mega.nz)
|
||||||
- [x] [Baidu photo](https://photo.baidu.com)
|
- [x] [Baidu photo](https://photo.baidu.com)
|
||||||
@@ -85,6 +85,16 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
|
|||||||
- [x] [FeijiPan](https://www.feijipan.com)
|
- [x] [FeijiPan](https://www.feijipan.com)
|
||||||
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
||||||
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
||||||
|
- [x] [Chaoxing](https://www.chaoxing.com)
|
||||||
|
- [x] [CNB](https://cnb.cool/)
|
||||||
|
- [x] [Degoo](https://degoo.com)
|
||||||
|
- [x] [Doubao](https://www.doubao.com)
|
||||||
|
- [x] [Febbox](https://www.febbox.com)
|
||||||
|
- [x] [GitHub](https://github.com)
|
||||||
|
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
|
||||||
|
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
|
||||||
|
- [x] [Weiyun](https://www.weiyun.com)
|
||||||
|
- [x] [MediaFire](https://www.mediafire.com)
|
||||||
- [x] 簡単にデプロイでき、すぐに使える
|
- [x] 簡単にデプロイでき、すぐに使える
|
||||||
- [x] ファイルプレビュー(PDF、markdown、コード、テキストなど)
|
- [x] ファイルプレビュー(PDF、markdown、コード、テキストなど)
|
||||||
- [x] ギャラリーモードでの画像プレビュー
|
- [x] ギャラリーモードでの画像プレビュー
|
||||||
|
|||||||
12
README_nl.md
12
README_nl.md
@@ -64,7 +64,9 @@ Dank u voor uw ondersteuning en begrip
|
|||||||
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
||||||
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
||||||
- [x] Teambition([China](https://www.teambition.com), [Internationaal](https://us.teambition.com))
|
- [x] Teambition([China](https://www.teambition.com), [Internationaal](https://us.teambition.com))
|
||||||
|
- [x] [MediaFire](https://www.mediafire.com)
|
||||||
- [x] [Mediatrack](https://www.mediatrack.cn)
|
- [x] [Mediatrack](https://www.mediatrack.cn)
|
||||||
|
- [x] [ProtonDrive](https://proton.me/drive)
|
||||||
- [x] [139yun](https://yun.139.com) (Persoonlijk, Familie, Groep)
|
- [x] [139yun](https://yun.139.com) (Persoonlijk, Familie, Groep)
|
||||||
- [x] [YandexDisk](https://disk.yandex.com)
|
- [x] [YandexDisk](https://disk.yandex.com)
|
||||||
- [x] [BaiduNetdisk](http://pan.baidu.com)
|
- [x] [BaiduNetdisk](http://pan.baidu.com)
|
||||||
@@ -74,7 +76,6 @@ Dank u voor uw ondersteuning en begrip
|
|||||||
- [x] [Thunder](https://pan.xunlei.com)
|
- [x] [Thunder](https://pan.xunlei.com)
|
||||||
- [x] [Lanzou](https://www.lanzou.com)
|
- [x] [Lanzou](https://www.lanzou.com)
|
||||||
- [x] [ILanzou](https://www.ilanzou.com)
|
- [x] [ILanzou](https://www.ilanzou.com)
|
||||||
- [x] [Aliyundrive share](https://www.alipan.com)
|
|
||||||
- [x] [Google photo](https://photos.google.com)
|
- [x] [Google photo](https://photos.google.com)
|
||||||
- [x] [Mega.nz](https://mega.nz)
|
- [x] [Mega.nz](https://mega.nz)
|
||||||
- [x] [Baidu photo](https://photo.baidu.com)
|
- [x] [Baidu photo](https://photo.baidu.com)
|
||||||
@@ -85,6 +86,15 @@ Dank u voor uw ondersteuning en begrip
|
|||||||
- [x] [FeijiPan](https://www.feijipan.com)
|
- [x] [FeijiPan](https://www.feijipan.com)
|
||||||
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
||||||
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
||||||
|
- [x] [Chaoxing](https://www.chaoxing.com)
|
||||||
|
- [x] [CNB](https://cnb.cool/)
|
||||||
|
- [x] [Degoo](https://degoo.com)
|
||||||
|
- [x] [Doubao](https://www.doubao.com)
|
||||||
|
- [x] [Febbox](https://www.febbox.com)
|
||||||
|
- [x] [GitHub](https://github.com)
|
||||||
|
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
|
||||||
|
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
|
||||||
|
- [x] [Weiyun](https://www.weiyun.com)
|
||||||
- [x] Eenvoudig te implementeren en direct te gebruiken
|
- [x] Eenvoudig te implementeren en direct te gebruiken
|
||||||
- [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...)
|
- [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...)
|
||||||
- [x] Afbeeldingsvoorbeeld in galerijweergave
|
- [x] Afbeeldingsvoorbeeld in galerijweergave
|
||||||
|
|||||||
6
build.sh
6
build.sh
@@ -236,7 +236,7 @@ BuildRelease() {
|
|||||||
BuildLoongGLIBC() {
|
BuildLoongGLIBC() {
|
||||||
local target_abi="$2"
|
local target_abi="$2"
|
||||||
local output_file="$1"
|
local output_file="$1"
|
||||||
local oldWorldGoVersion="1.24.3"
|
local oldWorldGoVersion="1.25.0"
|
||||||
|
|
||||||
if [ "$target_abi" = "abi1.0" ]; then
|
if [ "$target_abi" = "abi1.0" ]; then
|
||||||
echo building for linux-loong64-abi1.0
|
echo building for linux-loong64-abi1.0
|
||||||
@@ -254,13 +254,13 @@ BuildLoongGLIBC() {
|
|||||||
|
|
||||||
# Download and setup patched Go compiler for old-world
|
# Download and setup patched Go compiler for old-world
|
||||||
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
|
if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
|
||||||
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
||||||
-o go-loong64-abi1.0.tar.gz; then
|
-o go-loong64-abi1.0.tar.gz; then
|
||||||
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
|
echo "Error: Failed to download patched Go compiler for old-world ABI1.0"
|
||||||
if [ -n "$GITHUB_TOKEN" ]; then
|
if [ -n "$GITHUB_TOKEN" ]; then
|
||||||
echo "Error output from curl:"
|
echo "Error output from curl:"
|
||||||
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
|
curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \
|
||||||
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
"https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \
|
||||||
-o go-loong64-abi1.0.tar.gz || true
|
-o go-loong64-abi1.0.tar.gz || true
|
||||||
fi
|
fi
|
||||||
return 1
|
return 1
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package flags
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
DataDir string
|
DataDir string
|
||||||
|
ConfigPath string
|
||||||
Debug bool
|
Debug bool
|
||||||
NoPrefix bool
|
NoPrefix bool
|
||||||
Dev bool
|
Dev bool
|
||||||
|
|||||||
@@ -27,7 +27,8 @@ func Execute() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
|
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
|
||||||
|
RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
|
||||||
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
|
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
|
||||||
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
|
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
|
||||||
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
|
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")
|
||||||
|
|||||||
@@ -27,6 +27,8 @@ import (
|
|||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
"golang.org/x/net/http2/h2c"
|
"golang.org/x/net/http2/h2c"
|
||||||
|
|
||||||
|
"github.com/quic-go/quic-go/http3"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ServerCmd represents the server command
|
// ServerCmd represents the server command
|
||||||
@@ -63,6 +65,7 @@ the address is defined in config file`,
|
|||||||
httpHandler = h2c.NewHandler(r, &http2.Server{})
|
httpHandler = h2c.NewHandler(r, &http2.Server{})
|
||||||
}
|
}
|
||||||
var httpSrv, httpsSrv, unixSrv *http.Server
|
var httpSrv, httpsSrv, unixSrv *http.Server
|
||||||
|
var quicSrv *http3.Server
|
||||||
if conf.Conf.Scheme.HttpPort != -1 {
|
if conf.Conf.Scheme.HttpPort != -1 {
|
||||||
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
|
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
|
||||||
fmt.Printf("start HTTP server @ %s\n", httpBase)
|
fmt.Printf("start HTTP server @ %s\n", httpBase)
|
||||||
@@ -86,6 +89,24 @@ the address is defined in config file`,
|
|||||||
utils.Log.Fatalf("failed to start https: %s", err.Error())
|
utils.Log.Fatalf("failed to start https: %s", err.Error())
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
if conf.Conf.Scheme.EnableH3 {
|
||||||
|
fmt.Printf("start HTTP3 (quic) server @ %s\n", httpsBase)
|
||||||
|
utils.Log.Infof("start HTTP3 (quic) server @ %s", httpsBase)
|
||||||
|
r.Use(func(c *gin.Context) {
|
||||||
|
if c.Request.TLS != nil {
|
||||||
|
port := conf.Conf.Scheme.HttpsPort
|
||||||
|
c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", port))
|
||||||
|
}
|
||||||
|
c.Next()
|
||||||
|
})
|
||||||
|
quicSrv = &http3.Server{Addr: httpsBase, Handler: r}
|
||||||
|
go func() {
|
||||||
|
err := quicSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
|
||||||
|
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
utils.Log.Fatalf("failed to start http3 (quic): %s", err.Error())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if conf.Conf.Scheme.UnixFile != "" {
|
if conf.Conf.Scheme.UnixFile != "" {
|
||||||
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
|
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
|
||||||
@@ -203,6 +224,15 @@ the address is defined in config file`,
|
|||||||
utils.Log.Fatal("HTTPS server shutdown err: ", err)
|
utils.Log.Fatal("HTTPS server shutdown err: ", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
if conf.Conf.Scheme.EnableH3 {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := quicSrv.Shutdown(ctx); err != nil {
|
||||||
|
utils.Log.Fatal("HTTP3 (quic) server shutdown err: ", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if conf.Conf.Scheme.UnixFile != "" {
|
if conf.Conf.Scheme.UnixFile != "" {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
|
|||||||
@@ -6,10 +6,9 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- '5244:5244'
|
- '5244:5244'
|
||||||
- '5245:5245'
|
- '5245:5245'
|
||||||
|
user: '0:0'
|
||||||
environment:
|
environment:
|
||||||
- PUID=0
|
|
||||||
- PGID=0
|
|
||||||
- UMASK=022
|
- UMASK=022
|
||||||
- TZ=UTC
|
- TZ=Asia/Shanghai
|
||||||
container_name: openlist
|
container_name: openlist
|
||||||
image: 'openlistteam/openlist:latest'
|
image: 'openlistteam/openlist:latest'
|
||||||
|
|||||||
@@ -1,43 +1,60 @@
|
|||||||
package _115
|
package _115
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
md5Salt = "Qclm8MGWUv59TnrR0XPg"
|
md5Salt = "Qclm8MGWUv59TnrR0XPg"
|
||||||
appVer = "27.0.5.7"
|
appVer = "35.6.0.3"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
|
func (d *Pan115) getAppVersion() (string, error) {
|
||||||
result := driver115.VersionResp{}
|
result := VersionResp{}
|
||||||
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
|
res, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
|
||||||
|
|
||||||
err = driver115.CheckErr(err, &result, resp)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
err = utils.Json.Unmarshal(res.Body(), &result)
|
||||||
return result.Data.GetAppVersions(), nil
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if len(result.Error) > 0 {
|
||||||
|
return "", errors.New(result.Error)
|
||||||
|
}
|
||||||
|
return result.Data.Win.Version, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) getAppVer() string {
|
func (d *Pan115) getAppVer() string {
|
||||||
// todo add some cache?
|
ver, err := d.getAppVersion()
|
||||||
vers, err := d.getAppVersion()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("[115] get app version failed: %v", err)
|
log.Warnf("[115] get app version failed: %v", err)
|
||||||
return appVer
|
return appVer
|
||||||
}
|
}
|
||||||
for _, ver := range vers {
|
if len(ver) > 0 {
|
||||||
if ver.AppName == "win" {
|
return ver
|
||||||
return ver.Version
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return appVer
|
return appVer
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) initAppVer() {
|
func (d *Pan115) initAppVer() {
|
||||||
appVer = d.getAppVer()
|
appVer = d.getAppVer()
|
||||||
|
log.Debugf("use app version: %v", appVer)
|
||||||
|
}
|
||||||
|
|
||||||
|
type VersionResp struct {
|
||||||
|
Error string `json:"error,omitempty"`
|
||||||
|
Data Versions `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Versions struct {
|
||||||
|
Win Version `json:"win"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Version struct {
|
||||||
|
Version string `json:"version_code"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -245,4 +245,17 @@ func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, delete
|
|||||||
return d.client.DeleteOfflineTasks(hashes, deleteFiles)
|
return d.client.DeleteOfflineTasks(hashes, deleteFiles)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Pan115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
info, err := d.client.GetInfo()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: uint64(info.SpaceInfo.AllTotal.Size),
|
||||||
|
FreeSpace: uint64(info.SpaceInfo.AllRemain.Size),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Pan115)(nil)
|
var _ driver.Driver = (*Pan115)(nil)
|
||||||
|
|||||||
@@ -17,8 +17,7 @@ type Addition struct {
|
|||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "115 Cloud",
|
Name: "115 Cloud",
|
||||||
DefaultRoot: "0",
|
DefaultRoot: "0",
|
||||||
// OnlyProxy: true,
|
LinkCacheMode: driver.LinkCacheUA,
|
||||||
// NoOverwriteUpload: true,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
@@ -131,23 +131,6 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
|
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
resp, err := d.client.GetFolderInfoByPath(ctx, path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Obj{
|
|
||||||
Fid: resp.FileID,
|
|
||||||
Fn: resp.FileName,
|
|
||||||
Fc: resp.FileCategory,
|
|
||||||
Sha1: resp.Sha1,
|
|
||||||
Pc: resp.PickCode,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
if err := d.WaitLimit(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -337,6 +320,27 @@ func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, er
|
|||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
userInfo, err := d.client.UserInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: uint64(total),
|
||||||
|
FreeSpace: uint64(free),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
||||||
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
|
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
|
||||||
// return nil, errs.NotImplement
|
// return nil, errs.NotImplement
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ type Addition struct {
|
|||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "115 Open",
|
Name: "115 Open",
|
||||||
DefaultRoot: "0",
|
DefaultRoot: "0",
|
||||||
|
LinkCacheMode: driver.LinkCacheUA,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
@@ -41,7 +41,9 @@ func (d *Pan123) GetAddition() driver.Additional {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) Init(ctx context.Context) error {
|
func (d *Pan123) Init(ctx context.Context) error {
|
||||||
_, err := d.Request(UserInfo, http.MethodGet, nil, nil)
|
_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetHeader("platform", "web")
|
||||||
|
}, nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -74,7 +76,6 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
|||||||
"type": f.Type,
|
"type": f.Type,
|
||||||
}
|
}
|
||||||
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
|
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
|
||||||
|
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, nil)
|
}, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -254,4 +255,15 @@ func (d *Pan123) APIRateLimit(ctx context.Context, api string) error {
|
|||||||
return limiter.Wait(ctx)
|
return limiter.Wait(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Pan123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
userInfo, err := d.getUserInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: driver.DiskUsageFromUsedAndTotal(userInfo.Data.SpaceUsed, total),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Pan123)(nil)
|
var _ driver.Driver = (*Pan123)(nil)
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ type Addition struct {
|
|||||||
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||||
AccessToken string
|
AccessToken string
|
||||||
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
||||||
|
Platform string `json:"platform" type:"string" default:"web" help:"the platform header value, sent with API requests"`
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f File) GetHash() utils.HashInfo {
|
func (f File) GetHash() utils.HashInfo {
|
||||||
return utils.HashInfo{}
|
return utils.NewHashInfo(utils.MD5, f.Etag)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f File) GetPath() string {
|
func (f File) GetPath() string {
|
||||||
@@ -122,3 +122,14 @@ type S3PreSignedURLs struct {
|
|||||||
PreSignedUrls map[string]string `json:"presignedUrls"`
|
PreSignedUrls map[string]string `json:"presignedUrls"`
|
||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type UserInfoResp struct {
|
||||||
|
Data struct {
|
||||||
|
Uid int64 `json:"UID"`
|
||||||
|
Nickname string `json:"Nickname"`
|
||||||
|
SpaceUsed uint64 `json:"SpaceUsed"`
|
||||||
|
SpacePermanent uint64 `json:"SpacePermanent"`
|
||||||
|
SpaceTemp uint64 `json:"SpaceTemp"`
|
||||||
|
FileCount int `json:"FileCount"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -124,7 +124,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
|
|||||||
if cur == chunkCount {
|
if cur == chunkCount {
|
||||||
curSize = lastChunkSize
|
curSize = lastChunkSize
|
||||||
}
|
}
|
||||||
var reader *stream.SectionReader
|
var reader io.ReadSeeker
|
||||||
var rateLimitedRd io.Reader
|
var rateLimitedRd io.Reader
|
||||||
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
||||||
Before: func(ctx context.Context) error {
|
Before: func(ctx context.Context) error {
|
||||||
|
|||||||
@@ -203,7 +203,7 @@ do:
|
|||||||
"referer": "https://www.123pan.com/",
|
"referer": "https://www.123pan.com/",
|
||||||
"authorization": "Bearer " + d.AccessToken,
|
"authorization": "Bearer " + d.AccessToken,
|
||||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) openlist-client",
|
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) openlist-client",
|
||||||
"platform": "web",
|
"platform": d.Platform,
|
||||||
"app-version": "3",
|
"app-version": "3",
|
||||||
//"user-agent": base.UserAgent,
|
//"user-agent": base.UserAgent,
|
||||||
})
|
})
|
||||||
@@ -282,3 +282,14 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
|
|||||||
}
|
}
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Pan123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
|
||||||
|
var resp UserInfoResp
|
||||||
|
_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ import (
|
|||||||
type Open123 struct {
|
type Open123 struct {
|
||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
|
UID uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Open123) Config() driver.Config {
|
func (d *Open123) Config() driver.Config {
|
||||||
@@ -69,13 +70,45 @@ func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs)
|
|||||||
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
|
fileId, _ := strconv.ParseInt(file.GetID(), 10, 64)
|
||||||
|
|
||||||
|
if d.DirectLink {
|
||||||
|
res, err := d.getDirectLink(fileId)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if d.DirectLinkPrivateKey == "" {
|
||||||
|
duration := 365 * 24 * time.Hour // 缓存1年
|
||||||
|
return &model.Link{
|
||||||
|
URL: res.Data.URL,
|
||||||
|
Expiration: &duration,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
uid, err := d.getUID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := time.Duration(d.DirectLinkValidDuration) * time.Minute
|
||||||
|
|
||||||
|
newURL, err := d.SignURL(res.Data.URL, d.DirectLinkPrivateKey,
|
||||||
|
uid, duration)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &model.Link{
|
||||||
|
URL: newURL,
|
||||||
|
Expiration: &duration,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
res, err := d.getDownloadInfo(fileId)
|
res, err := d.getDownloadInfo(fileId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
link := model.Link{URL: res.Data.DownloadUrl}
|
return &model.Link{URL: res.Data.DownloadUrl}, nil
|
||||||
return &link, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
@@ -181,5 +214,30 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
|
|||||||
return nil, fmt.Errorf("upload complete timeout")
|
return nil, fmt.Errorf("upload complete timeout")
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Open123)(nil)
|
func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
var _ driver.PutResult = (*Open123)(nil)
|
userInfo, err := d.getUserInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
|
||||||
|
free := total - userInfo.Data.SpaceUsed
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: total,
|
||||||
|
FreeSpace: free,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Open123) OfflineDownload(ctx context.Context, url string, dir model.Obj, callback string) (int, error) {
|
||||||
|
return d.createOfflineDownloadTask(ctx, url, dir.GetID(), callback)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Open123) OfflineDownloadProcess(ctx context.Context, taskID int) (float64, int, error) {
|
||||||
|
return d.queryOfflineDownloadStatus(ctx, taskID)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ driver.Driver = (*Open123)(nil)
|
||||||
|
_ driver.PutResult = (*Open123)(nil)
|
||||||
|
)
|
||||||
|
|||||||
@@ -23,6 +23,11 @@ type Addition struct {
|
|||||||
// 上传线程数
|
// 上传线程数
|
||||||
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
|
||||||
|
|
||||||
|
// 使用直链
|
||||||
|
DirectLink bool `json:"DirectLink" type:"bool" default:"false" required:"false" help:"use direct link when download file"`
|
||||||
|
DirectLinkPrivateKey string `json:"DirectLinkPrivateKey" required:"false" help:"private key for direct link, if URL authentication is enabled"`
|
||||||
|
DirectLinkValidDuration int64 `json:"DirectLinkValidDuration" type:"number" default:"30" required:"false" help:"minutes, if URL authentication is enabled"`
|
||||||
|
|
||||||
driver.RootID
|
driver.RootID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ func (a *ApiInfo) Require() {
|
|||||||
a.token <- struct{}{}
|
a.token <- struct{}{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *ApiInfo) Release() {
|
func (a *ApiInfo) Release() {
|
||||||
if a.qps > 0 {
|
if a.qps > 0 {
|
||||||
time.AfterFunc(time.Second, func() {
|
time.AfterFunc(time.Second, func() {
|
||||||
@@ -26,13 +27,16 @@ func (a *ApiInfo) Release() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *ApiInfo) SetQPS(qps int) {
|
func (a *ApiInfo) SetQPS(qps int) {
|
||||||
a.qps = qps
|
a.qps = qps
|
||||||
a.token = make(chan struct{}, qps)
|
a.token = make(chan struct{}, qps)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *ApiInfo) NowLen() int {
|
func (a *ApiInfo) NowLen() int {
|
||||||
return len(a.token)
|
return len(a.token)
|
||||||
}
|
}
|
||||||
|
|
||||||
func InitApiInfo(url string, qps int) *ApiInfo {
|
func InitApiInfo(url string, qps int) *ApiInfo {
|
||||||
return &ApiInfo{
|
return &ApiInfo{
|
||||||
url: url,
|
url: url,
|
||||||
@@ -127,19 +131,19 @@ type RefreshTokenResp struct {
|
|||||||
type UserInfoResp struct {
|
type UserInfoResp struct {
|
||||||
BaseResp
|
BaseResp
|
||||||
Data struct {
|
Data struct {
|
||||||
UID int64 `json:"uid"`
|
UID uint64 `json:"uid"`
|
||||||
Username string `json:"username"`
|
// Username string `json:"username"`
|
||||||
DisplayName string `json:"displayName"`
|
// DisplayName string `json:"displayName"`
|
||||||
HeadImage string `json:"headImage"`
|
// HeadImage string `json:"headImage"`
|
||||||
Passport string `json:"passport"`
|
// Passport string `json:"passport"`
|
||||||
Mail string `json:"mail"`
|
// Mail string `json:"mail"`
|
||||||
SpaceUsed int64 `json:"spaceUsed"`
|
SpaceUsed uint64 `json:"spaceUsed"`
|
||||||
SpacePermanent int64 `json:"spacePermanent"`
|
SpacePermanent uint64 `json:"spacePermanent"`
|
||||||
SpaceTemp int64 `json:"spaceTemp"`
|
SpaceTemp uint64 `json:"spaceTemp"`
|
||||||
SpaceTempExpr string `json:"spaceTempExpr"`
|
// SpaceTempExpr int64 `json:"spaceTempExpr"`
|
||||||
Vip bool `json:"vip"`
|
// Vip bool `json:"vip"`
|
||||||
DirectTraffic int64 `json:"directTraffic"`
|
// DirectTraffic int64 `json:"directTraffic"`
|
||||||
IsHideUID bool `json:"isHideUID"`
|
// IsHideUID bool `json:"isHideUID"`
|
||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -158,6 +162,13 @@ type DownloadInfoResp struct {
|
|||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type DirectLinkResp struct {
|
||||||
|
BaseResp
|
||||||
|
Data struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
// 创建文件V2返回
|
// 创建文件V2返回
|
||||||
type UploadCreateResp struct {
|
type UploadCreateResp struct {
|
||||||
BaseResp
|
BaseResp
|
||||||
@@ -178,3 +189,18 @@ type UploadCompleteResp struct {
|
|||||||
FileID int64 `json:"fileID"`
|
FileID int64 `json:"fileID"`
|
||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type OfflineDownloadResp struct {
|
||||||
|
BaseResp
|
||||||
|
Data struct {
|
||||||
|
TaskID int `json:"taskID"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type OfflineDownloadProcessResp struct {
|
||||||
|
BaseResp
|
||||||
|
Data struct {
|
||||||
|
Process float64 `json:"process"`
|
||||||
|
Status int `json:"status"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -67,9 +67,11 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
|||||||
partNumber := partIndex + 1 // 分片号从1开始
|
partNumber := partIndex + 1 // 分片号从1开始
|
||||||
offset := partIndex * chunkSize
|
offset := partIndex * chunkSize
|
||||||
size := min(chunkSize, size-offset)
|
size := min(chunkSize, size-offset)
|
||||||
var reader *stream.SectionReader
|
var reader io.ReadSeeker
|
||||||
var rateLimitedRd io.Reader
|
var rateLimitedRd io.Reader
|
||||||
sliceMD5 := ""
|
sliceMD5 := ""
|
||||||
|
// 表单
|
||||||
|
b := bytes.NewBuffer(make([]byte, 0, 2048))
|
||||||
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
||||||
Before: func(ctx context.Context) error {
|
Before: func(ctx context.Context) error {
|
||||||
if reader == nil {
|
if reader == nil {
|
||||||
@@ -84,7 +86,6 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
@@ -92,9 +93,8 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
|||||||
// 重置分片reader位置,因为HashReader、上一次失败已经读取到分片EOF
|
// 重置分片reader位置,因为HashReader、上一次失败已经读取到分片EOF
|
||||||
reader.Seek(0, io.SeekStart)
|
reader.Seek(0, io.SeekStart)
|
||||||
|
|
||||||
// 创建表单数据
|
b.Reset()
|
||||||
var b bytes.Buffer
|
w := multipart.NewWriter(b)
|
||||||
w := multipart.NewWriter(&b)
|
|
||||||
// 添加表单字段
|
// 添加表单字段
|
||||||
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
|
err = w.WriteField("preuploadID", createResp.Data.PreuploadID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -109,21 +109,20 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// 写入文件内容
|
// 写入文件内容
|
||||||
fw, err := w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
|
_, err = w.CreateFormFile("slice", fmt.Sprintf("%s.part%d", file.GetName(), partNumber))
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = utils.CopyWithBuffer(fw, rateLimitedRd)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
headSize := b.Len()
|
||||||
err = w.Close()
|
err = w.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
head := bytes.NewReader(b.Bytes()[:headSize])
|
||||||
|
tail := bytes.NewReader(b.Bytes()[headSize:])
|
||||||
|
rateLimitedRd = driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
|
||||||
// 创建请求并设置header
|
// 创建请求并设置header
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", &b)
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadDomain+"/upload/v2/file/slice", rateLimitedRd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,15 +1,21 @@
|
|||||||
package _123_open
|
package _123_open
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/md5"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
|
"github.com/google/uuid"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -20,13 +26,17 @@ var ( //不同情况下获取的AccessTokenQPS限制不同 如下模块化易于
|
|||||||
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
|
RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1)
|
||||||
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
|
UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
|
||||||
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
|
FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
|
||||||
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 0)
|
DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
|
||||||
|
DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
|
||||||
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
|
Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
|
||||||
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
|
Move = InitApiInfo(Api+"/api/v1/file/move", 1)
|
||||||
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
|
Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
|
||||||
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
|
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
|
||||||
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
|
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
|
||||||
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
|
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
|
||||||
|
|
||||||
|
OfflineDownload = InitApiInfo(Api+"/api/v1/offline/download", 1)
|
||||||
|
OfflineDownloadProcess = InitApiInfo(Api+"/api/v1/offline/download/process", 5)
|
||||||
)
|
)
|
||||||
|
|
||||||
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
@@ -76,12 +86,27 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
|
|||||||
return nil, errors.New(baseResp.Message)
|
return nil, errors.New(baseResp.Message)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Open123) flushAccessToken() error {
|
func (d *Open123) flushAccessToken() error {
|
||||||
if d.Addition.ClientID != "" {
|
if d.ClientID != "" {
|
||||||
if d.Addition.ClientSecret != "" {
|
if d.RefreshToken != "" {
|
||||||
|
var resp RefreshTokenResp
|
||||||
|
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetQueryParam("client_id", d.ClientID)
|
||||||
|
if d.ClientSecret != "" {
|
||||||
|
req.SetQueryParam("client_secret", d.ClientSecret)
|
||||||
|
}
|
||||||
|
req.SetQueryParam("grant_type", "refresh_token")
|
||||||
|
req.SetQueryParam("refresh_token", d.RefreshToken)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.AccessToken = resp.AccessToken
|
||||||
|
d.RefreshToken = resp.RefreshToken
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
} else if d.ClientSecret != "" {
|
||||||
var resp AccessTokenResp
|
var resp AccessTokenResp
|
||||||
_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
|
_, err := d.Request(AccessToken, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
@@ -94,34 +119,62 @@ func (d *Open123) flushAccessToken() error {
|
|||||||
}
|
}
|
||||||
d.AccessToken = resp.Data.AccessToken
|
d.AccessToken = resp.Data.AccessToken
|
||||||
op.MustSaveDriverStorage(d)
|
op.MustSaveDriverStorage(d)
|
||||||
} else if d.Addition.RefreshToken != "" {
|
|
||||||
var resp RefreshTokenResp
|
|
||||||
_, err := d.Request(RefreshToken, http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetQueryParam("client_id", d.ClientID)
|
|
||||||
req.SetQueryParam("grant_type", "refresh_token")
|
|
||||||
req.SetQueryParam("refresh_token", d.Addition.RefreshToken)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.AccessToken = resp.AccessToken
|
|
||||||
d.RefreshToken = resp.RefreshToken
|
|
||||||
op.MustSaveDriverStorage(d)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Open123) getUserInfo() (*UserInfoResp, error) {
|
func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
|
||||||
|
// 生成Unix时间戳
|
||||||
|
ts := time.Now().Add(validDuration).Unix()
|
||||||
|
|
||||||
|
// 生成随机数(建议使用UUID,不能包含中划线(-))
|
||||||
|
rand := strings.ReplaceAll(uuid.New().String(), "-", "")
|
||||||
|
|
||||||
|
// 解析URL
|
||||||
|
objURL, err := url.Parse(originURL)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 待签名字符串,格式:path-timestamp-rand-uid-privateKey
|
||||||
|
unsignedStr := fmt.Sprintf("%s-%d-%s-%d-%s", objURL.Path, ts, rand, uid, privateKey)
|
||||||
|
md5Hash := md5.Sum([]byte(unsignedStr))
|
||||||
|
// 生成鉴权参数,格式:timestamp-rand-uid-md5hash
|
||||||
|
authKey := fmt.Sprintf("%d-%s-%d-%x", ts, rand, uid, md5Hash)
|
||||||
|
|
||||||
|
// 添加鉴权参数到URL查询参数
|
||||||
|
v := objURL.Query()
|
||||||
|
v.Add("auth_key", authKey)
|
||||||
|
objURL.RawQuery = v.Encode()
|
||||||
|
|
||||||
|
return objURL.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Open123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
|
||||||
var resp UserInfoResp
|
var resp UserInfoResp
|
||||||
|
|
||||||
if _, err := d.Request(UserInfo, http.MethodGet, nil, &resp); err != nil {
|
if _, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return &resp, nil
|
return &resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Open123) getUID(ctx context.Context) (uint64, error) {
|
||||||
|
if d.UID != 0 {
|
||||||
|
return d.UID, nil
|
||||||
|
}
|
||||||
|
resp, err := d.getUserInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
d.UID = resp.Data.UID
|
||||||
|
return resp.Data.UID, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
|
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
|
||||||
var resp FileListResp
|
var resp FileListResp
|
||||||
|
|
||||||
@@ -136,7 +189,6 @@ func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*Fi
|
|||||||
"searchData": "",
|
"searchData": "",
|
||||||
})
|
})
|
||||||
}, &resp)
|
}, &resp)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -159,6 +211,21 @@ func (d *Open123) getDownloadInfo(fileId int64) (*DownloadInfoResp, error) {
|
|||||||
return &resp, nil
|
return &resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Open123) getDirectLink(fileId int64) (*DirectLinkResp, error) {
|
||||||
|
var resp DirectLinkResp
|
||||||
|
|
||||||
|
_, err := d.Request(DirectLink, http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetQueryParams(map[string]string{
|
||||||
|
"fileID": strconv.FormatInt(fileId, 10),
|
||||||
|
})
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Open123) mkdir(parentID int64, name string) error {
|
func (d *Open123) mkdir(parentID int64, name string) error {
|
||||||
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
|
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
@@ -213,3 +280,34 @@ func (d *Open123) trash(fileId int64) error {
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Open123) createOfflineDownloadTask(ctx context.Context, url string, dirID, callback string) (taskID int, err error) {
|
||||||
|
body := base.Json{
|
||||||
|
"url": url,
|
||||||
|
"dirID": dirID,
|
||||||
|
}
|
||||||
|
if len(callback) > 0 {
|
||||||
|
body["callBackUrl"] = callback
|
||||||
|
}
|
||||||
|
var resp OfflineDownloadResp
|
||||||
|
_, err = d.Request(OfflineDownload, http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(body)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return resp.Data.TaskID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Open123) queryOfflineDownloadStatus(ctx context.Context, taskID int) (process float64, status int, err error) {
|
||||||
|
var resp OfflineDownloadProcessResp
|
||||||
|
_, err = d.Request(OfflineDownloadProcess, http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetQueryParams(map[string]string{
|
||||||
|
"taskID": strconv.Itoa(taskID),
|
||||||
|
})
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return .0, 0, err
|
||||||
|
}
|
||||||
|
return resp.Data.Process, resp.Data.Status, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ type File struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f File) GetHash() utils.HashInfo {
|
func (f File) GetHash() utils.HashInfo {
|
||||||
return utils.HashInfo{}
|
return utils.NewHashInfo(utils.MD5, f.Etag)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f File) GetPath() string {
|
func (f File) GetPath() string {
|
||||||
|
|||||||
@@ -54,7 +54,8 @@ func (d *Yun139) Init(ctx context.Context) error {
|
|||||||
"userInfo": base.Json{
|
"userInfo": base.Json{
|
||||||
"userType": 1,
|
"userType": 1,
|
||||||
"accountType": 1,
|
"accountType": 1,
|
||||||
"accountName": d.Account},
|
"accountName": d.Account,
|
||||||
|
},
|
||||||
"modAddrType": 1,
|
"modAddrType": 1,
|
||||||
}, &resp)
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -534,16 +535,15 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
|||||||
if size > partSize {
|
if size > partSize {
|
||||||
part = (size + partSize - 1) / partSize
|
part = (size + partSize - 1) / partSize
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 生成所有 partInfos
|
||||||
partInfos := make([]PartInfo, 0, part)
|
partInfos := make([]PartInfo, 0, part)
|
||||||
for i := int64(0); i < part; i++ {
|
for i := int64(0); i < part; i++ {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
}
|
}
|
||||||
start := i * partSize
|
start := i * partSize
|
||||||
byteSize := size - start
|
byteSize := min(size-start, partSize)
|
||||||
if byteSize > partSize {
|
|
||||||
byteSize = partSize
|
|
||||||
}
|
|
||||||
partNumber := i + 1
|
partNumber := i + 1
|
||||||
partInfo := PartInfo{
|
partInfo := PartInfo{
|
||||||
PartNumber: partNumber,
|
PartNumber: partNumber,
|
||||||
@@ -591,17 +591,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
|||||||
// resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址
|
// resp.Data.RapidUpload: true 支持快传,但此处直接检测是否返回分片的上传地址
|
||||||
// 快传的情况下同样需要手动处理冲突
|
// 快传的情况下同样需要手动处理冲突
|
||||||
if resp.Data.PartInfos != nil {
|
if resp.Data.PartInfos != nil {
|
||||||
// 读取前100个分片的上传地址
|
// Progress
|
||||||
uploadPartInfos := resp.Data.PartInfos
|
p := driver.NewProgress(size, up)
|
||||||
|
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
|
||||||
|
|
||||||
// 获取后续分片的上传地址
|
// 先上传前100个分片
|
||||||
for i := 101; i < len(partInfos); i += 100 {
|
err = d.uploadPersonalParts(ctx, partInfos, resp.Data.PartInfos, rateLimited, p)
|
||||||
end := i + 100
|
if err != nil {
|
||||||
if end > len(partInfos) {
|
return err
|
||||||
end = len(partInfos)
|
|
||||||
}
|
}
|
||||||
batchPartInfos := partInfos[i:end]
|
|
||||||
|
|
||||||
|
// 如果还有剩余分片,分批获取上传地址并上传
|
||||||
|
for i := 100; i < len(partInfos); i += 100 {
|
||||||
|
end := min(i+100, len(partInfos))
|
||||||
|
batchPartInfos := partInfos[i:end]
|
||||||
moredata := base.Json{
|
moredata := base.Json{
|
||||||
"fileId": resp.Data.FileId,
|
"fileId": resp.Data.FileId,
|
||||||
"uploadId": resp.Data.UploadId,
|
"uploadId": resp.Data.UploadId,
|
||||||
@@ -617,44 +620,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
|
err = d.uploadPersonalParts(ctx, partInfos, moreresp.Data.PartInfos, rateLimited, p)
|
||||||
}
|
|
||||||
|
|
||||||
// Progress
|
|
||||||
p := driver.NewProgress(size, up)
|
|
||||||
|
|
||||||
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
|
|
||||||
// 上传所有分片
|
|
||||||
for _, uploadPartInfo := range uploadPartInfos {
|
|
||||||
index := uploadPartInfo.PartNumber - 1
|
|
||||||
partSize := partInfos[index].PartSize
|
|
||||||
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
|
|
||||||
limitReader := io.LimitReader(rateLimited, partSize)
|
|
||||||
|
|
||||||
// Update Progress
|
|
||||||
r := io.TeeReader(limitReader, p)
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
|
||||||
req.Header.Set("Content-Length", fmt.Sprint(partSize))
|
|
||||||
req.Header.Set("Origin", "https://yun.139.com")
|
|
||||||
req.Header.Set("Referer", "https://yun.139.com/")
|
|
||||||
req.ContentLength = partSize
|
|
||||||
|
|
||||||
res, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_ = res.Body.Close()
|
|
||||||
log.Debugf("[139] uploaded: %+v", res)
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 全部分片上传完毕后,complete
|
||||||
data = base.Json{
|
data = base.Json{
|
||||||
"contentHash": fullHash,
|
"contentHash": fullHash,
|
||||||
"contentHashAlgorithm": "SHA256",
|
"contentHashAlgorithm": "SHA256",
|
||||||
@@ -863,4 +835,48 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{},
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Yun139) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
if d.UserDomainID == "" {
|
||||||
|
return nil, errs.NotImplement
|
||||||
|
}
|
||||||
|
var total, free uint64
|
||||||
|
if d.isFamily() {
|
||||||
|
diskInfo, err := d.getFamilyDiskInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
|
||||||
|
}
|
||||||
|
usedMb, err := strconv.ParseUint(diskInfo.Data.UsedSize, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed convert used size into integer: %+v", err)
|
||||||
|
}
|
||||||
|
total = totalMb * 1024 * 1024
|
||||||
|
free = total - (usedMb * 1024 * 1024)
|
||||||
|
} else {
|
||||||
|
diskInfo, err := d.getPersonalDiskInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
|
||||||
|
}
|
||||||
|
freeMb, err := strconv.ParseUint(diskInfo.Data.FreeDiskSize, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed convert free size into integer: %+v", err)
|
||||||
|
}
|
||||||
|
total = totalMb * 1024 * 1024
|
||||||
|
free = freeMb * 1024 * 1024
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: total,
|
||||||
|
FreeSpace: free,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Yun139)(nil)
|
var _ driver.Driver = (*Yun139)(nil)
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ type Addition struct {
|
|||||||
driver.RootID
|
driver.RootID
|
||||||
Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
|
Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
|
||||||
CloudID string `json:"cloud_id"`
|
CloudID string `json:"cloud_id"`
|
||||||
|
UserDomainID string `json:"user_domain_id" help:"ud_id in Cookie, fill in to show disk usage"`
|
||||||
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
|
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
|
||||||
ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
|
ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
|
||||||
UseLargeThumbnail bool `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`
|
UseLargeThumbnail bool `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`
|
||||||
|
|||||||
@@ -312,3 +312,20 @@ type RefreshTokenResp struct {
|
|||||||
AccessToken string `xml:"accessToken"`
|
AccessToken string `xml:"accessToken"`
|
||||||
Desc string `xml:"desc"`
|
Desc string `xml:"desc"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type PersonalDiskInfoResp struct {
|
||||||
|
BaseResp
|
||||||
|
Data struct {
|
||||||
|
FreeDiskSize string `json:"freeDiskSize"`
|
||||||
|
DiskSize string `json:"diskSize"`
|
||||||
|
IsInfinitePicStorage *bool `json:"isInfinitePicStorage"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type FamilyDiskInfoResp struct {
|
||||||
|
BaseResp
|
||||||
|
Data struct {
|
||||||
|
UsedSize string `json:"usedSize"`
|
||||||
|
DiskSize string `json:"diskSize"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
package _139
|
package _139
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
@@ -13,6 +15,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
@@ -104,8 +107,7 @@ func (d *Yun139) refreshToken() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
func (d *Yun139) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
url := "https://yun.139.com" + pathname
|
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
randStr := random.String(16)
|
randStr := random.String(16)
|
||||||
ts := time.Now().Format("2006-01-02 15:04:05")
|
ts := time.Now().Format("2006-01-02 15:04:05")
|
||||||
@@ -216,7 +218,7 @@ func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
|
func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
|
||||||
return d.request(pathname, http.MethodPost, func(req *resty.Request) {
|
return d.request("https://yun.139.com"+pathname, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, resp)
|
}, resp)
|
||||||
}
|
}
|
||||||
@@ -415,6 +417,7 @@ func (d *Yun139) getLink(contentId string) (string, error) {
|
|||||||
}
|
}
|
||||||
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
|
func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
|
||||||
data := d.newJson(base.Json{
|
data := d.newJson(base.Json{
|
||||||
"contentID": contentId,
|
"contentID": contentId,
|
||||||
@@ -507,6 +510,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
|
|||||||
}
|
}
|
||||||
return res.Body(), nil
|
return res.Body(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) personalPost(pathname string, data interface{}, resp interface{}) ([]byte, error) {
|
func (d *Yun139) personalPost(pathname string, data interface{}, resp interface{}) ([]byte, error) {
|
||||||
return d.personalRequest(pathname, http.MethodPost, func(req *resty.Request) {
|
return d.personalRequest(pathname, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
@@ -542,7 +546,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
|
|||||||
}
|
}
|
||||||
nextPageCursor = resp.Data.NextPageCursor
|
nextPageCursor = resp.Data.NextPageCursor
|
||||||
for _, item := range resp.Data.Items {
|
for _, item := range resp.Data.Items {
|
||||||
var isFolder = (item.Type == "folder")
|
isFolder := (item.Type == "folder")
|
||||||
var f model.Obj
|
var f model.Obj
|
||||||
if isFolder {
|
if isFolder {
|
||||||
f = &model.Object{
|
f = &model.Object{
|
||||||
@@ -554,7 +558,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
|
|||||||
IsFolder: isFolder,
|
IsFolder: isFolder,
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
var Thumbnails = item.Thumbnails
|
Thumbnails := item.Thumbnails
|
||||||
var ThumbnailUrl string
|
var ThumbnailUrl string
|
||||||
if d.UseLargeThumbnail {
|
if d.UseLargeThumbnail {
|
||||||
for _, thumb := range Thumbnails {
|
for _, thumb := range Thumbnails {
|
||||||
@@ -597,7 +601,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
var cdnUrl = jsoniter.Get(res, "data", "cdnUrl").ToString()
|
cdnUrl := jsoniter.Get(res, "data", "cdnUrl").ToString()
|
||||||
if cdnUrl != "" {
|
if cdnUrl != "" {
|
||||||
return cdnUrl, nil
|
return cdnUrl, nil
|
||||||
} else {
|
} else {
|
||||||
@@ -611,15 +615,91 @@ func (d *Yun139) getAuthorization() string {
|
|||||||
}
|
}
|
||||||
return d.Authorization
|
return d.Authorization
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) getAccount() string {
|
func (d *Yun139) getAccount() string {
|
||||||
if d.ref != nil {
|
if d.ref != nil {
|
||||||
return d.ref.getAccount()
|
return d.ref.getAccount()
|
||||||
}
|
}
|
||||||
return d.Account
|
return d.Account
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) getPersonalCloudHost() string {
|
func (d *Yun139) getPersonalCloudHost() string {
|
||||||
if d.ref != nil {
|
if d.ref != nil {
|
||||||
return d.ref.getPersonalCloudHost()
|
return d.ref.getPersonalCloudHost()
|
||||||
}
|
}
|
||||||
return d.PersonalCloudHost
|
return d.PersonalCloudHost
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo, uploadPartInfos []PersonalPartInfo, rateLimited *driver.RateLimitReader, p *driver.Progress) error {
|
||||||
|
// 确保数组以 PartNumber 从小到大排序
|
||||||
|
sort.Slice(uploadPartInfos, func(i, j int) bool {
|
||||||
|
return uploadPartInfos[i].PartNumber < uploadPartInfos[j].PartNumber
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, uploadPartInfo := range uploadPartInfos {
|
||||||
|
index := uploadPartInfo.PartNumber - 1
|
||||||
|
if index < 0 || index >= len(partInfos) {
|
||||||
|
return fmt.Errorf("invalid PartNumber %d: index out of bounds (partInfos length: %d)", uploadPartInfo.PartNumber, len(partInfos))
|
||||||
|
}
|
||||||
|
partSize := partInfos[index].PartSize
|
||||||
|
log.Debugf("[139] uploading part %+v/%+v", index, len(partInfos))
|
||||||
|
limitReader := io.LimitReader(rateLimited, partSize)
|
||||||
|
r := io.TeeReader(limitReader, p)
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadPartInfo.UploadUrl, r)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
|
req.Header.Set("Content-Length", fmt.Sprint(partSize))
|
||||||
|
req.Header.Set("Origin", "https://yun.139.com")
|
||||||
|
req.Header.Set("Referer", "https://yun.139.com/")
|
||||||
|
req.ContentLength = partSize
|
||||||
|
err = func() error {
|
||||||
|
res, err := base.HttpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
log.Debugf("[139] uploaded: %+v", res)
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(res.Body)
|
||||||
|
return fmt.Errorf("unexpected status code: %d, body: %s", res.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Yun139) getPersonalDiskInfo(ctx context.Context) (*PersonalDiskInfoResp, error) {
|
||||||
|
data := map[string]interface{}{
|
||||||
|
"userDomainId": d.UserDomainID,
|
||||||
|
}
|
||||||
|
var resp PersonalDiskInfoResp
|
||||||
|
_, err := d.request("https://user-njs.yun.139.com/user/disk/getPersonalDiskInfo", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(data)
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Yun139) getFamilyDiskInfo(ctx context.Context) (*FamilyDiskInfoResp, error) {
|
||||||
|
data := map[string]interface{}{
|
||||||
|
"userDomainId": d.UserDomainID,
|
||||||
|
}
|
||||||
|
var resp FamilyDiskInfoResp
|
||||||
|
_, err := d.request("https://user-njs.yun.139.com/user/disk/getFamilyDiskInfo", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(data)
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -194,4 +194,17 @@ func (d *Cloud189) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
|
|||||||
return d.newUpload(ctx, dstDir, stream, up)
|
return d.newUpload(ctx, dstDir, stream, up)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
capacityInfo, err := d.getCapacityInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: capacityInfo.CloudCapacityInfo.TotalSize,
|
||||||
|
FreeSpace: capacityInfo.CloudCapacityInfo.FreeSize,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Cloud189)(nil)
|
var _ driver.Driver = (*Cloud189)(nil)
|
||||||
|
|||||||
@@ -66,3 +66,21 @@ type DownResp struct {
|
|||||||
ResMessage string `json:"res_message"`
|
ResMessage string `json:"res_message"`
|
||||||
FileDownloadUrl string `json:"downloadUrl"`
|
FileDownloadUrl string `json:"downloadUrl"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CapacityResp struct {
|
||||||
|
ResCode int `json:"res_code"`
|
||||||
|
ResMessage string `json:"res_message"`
|
||||||
|
Account string `json:"account"`
|
||||||
|
CloudCapacityInfo struct {
|
||||||
|
FreeSize uint64 `json:"freeSize"`
|
||||||
|
MailUsedSize uint64 `json:"mail189UsedSize"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
UsedSize uint64 `json:"usedSize"`
|
||||||
|
} `json:"cloudCapacityInfo"`
|
||||||
|
FamilyCapacityInfo struct {
|
||||||
|
FreeSize uint64 `json:"freeSize"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
UsedSize uint64 `json:"usedSize"`
|
||||||
|
} `json:"familyCapacityInfo"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -311,7 +311,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
|
|||||||
}
|
}
|
||||||
d.sessionKey = sessionKey
|
d.sessionKey = sessionKey
|
||||||
const DEFAULT int64 = 10485760
|
const DEFAULT int64 = 10485760
|
||||||
var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
|
count := int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
|
||||||
|
|
||||||
res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
|
res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
|
||||||
"parentFolderId": dstDir.GetID(),
|
"parentFolderId": dstDir.GetID(),
|
||||||
@@ -395,3 +395,14 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
|
|||||||
}, nil)
|
}, nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Cloud189) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
|
||||||
|
var resp CapacityResp
|
||||||
|
_, err := d.request("https://cloud.189.cn/api/portal/getUserSizeInfo.action", http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package _189_tv
|
package _189_tv
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"container/ring"
|
|
||||||
"context"
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -12,6 +11,7 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -21,9 +21,10 @@ type Cloud189TV struct {
|
|||||||
client *resty.Client
|
client *resty.Client
|
||||||
tokenInfo *AppSessionResp
|
tokenInfo *AppSessionResp
|
||||||
uploadThread int
|
uploadThread int
|
||||||
familyTransferFolder *ring.Ring
|
|
||||||
cleanFamilyTransferFile func()
|
|
||||||
storageConfig driver.Config
|
storageConfig driver.Config
|
||||||
|
|
||||||
|
TempUuid string
|
||||||
|
cron *cron.Cron // 新增 cron 字段
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189TV) Config() driver.Config {
|
func (y *Cloud189TV) Config() driver.Config {
|
||||||
@@ -68,7 +69,7 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
|
|||||||
// 避免重复登陆
|
// 避免重复登陆
|
||||||
if !y.isLogin() || y.Addition.AccessToken == "" {
|
if !y.isLogin() || y.Addition.AccessToken == "" {
|
||||||
if err = y.login(); err != nil {
|
if err = y.login(); err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
y.cron = cron.NewCron(time.Minute * 5)
|
||||||
|
y.cron.Do(y.keepAlive)
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189TV) Drop(ctx context.Context) error {
|
func (y *Cloud189TV) Drop(ctx context.Context) error {
|
||||||
|
if y.cron != nil {
|
||||||
|
y.cron.Stop()
|
||||||
|
y.cron = nil
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -236,7 +244,6 @@ func (y *Cloud189TV) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
FileName: srcObj.GetName(),
|
FileName: srcObj.GetName(),
|
||||||
IsFolder: BoolToNumber(srcObj.IsDir()),
|
IsFolder: BoolToNumber(srcObj.IsDir()),
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -270,5 +277,25 @@ func (y *Cloud189TV) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
}
|
}
|
||||||
|
|
||||||
return y.OldUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
return y.OldUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
capacityInfo, err := y.getCapacityInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var total, free uint64
|
||||||
|
if y.isFamily() {
|
||||||
|
total = capacityInfo.FamilyCapacityInfo.TotalSize
|
||||||
|
free = capacityInfo.FamilyCapacityInfo.FreeSize
|
||||||
|
} else {
|
||||||
|
total = capacityInfo.CloudCapacityInfo.TotalSize
|
||||||
|
free = capacityInfo.CloudCapacityInfo.FreeSize
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: total,
|
||||||
|
FreeSpace: free,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ import (
|
|||||||
type Addition struct {
|
type Addition struct {
|
||||||
driver.RootID
|
driver.RootID
|
||||||
AccessToken string `json:"access_token"`
|
AccessToken string `json:"access_token"`
|
||||||
TempUuid string
|
|
||||||
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
|
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
|
||||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||||
Type string `json:"type" type:"select" options:"personal,family" default:"personal"`
|
Type string `json:"type" type:"select" options:"personal,family" default:"personal"`
|
||||||
|
|||||||
@@ -316,3 +316,21 @@ type BatchTaskConflictTaskInfoResp struct {
|
|||||||
TaskInfos []BatchTaskInfo
|
TaskInfos []BatchTaskInfo
|
||||||
TaskType int `json:"taskType"`
|
TaskType int `json:"taskType"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CapacityResp struct {
|
||||||
|
ResCode int `json:"res_code"`
|
||||||
|
ResMessage string `json:"res_message"`
|
||||||
|
Account string `json:"account"`
|
||||||
|
CloudCapacityInfo struct {
|
||||||
|
FreeSize uint64 `json:"freeSize"`
|
||||||
|
MailUsedSize uint64 `json:"mail189UsedSize"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
UsedSize uint64 `json:"usedSize"`
|
||||||
|
} `json:"cloudCapacityInfo"`
|
||||||
|
FamilyCapacityInfo struct {
|
||||||
|
FreeSize uint64 `json:"freeSize"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
UsedSize uint64 `json:"usedSize"`
|
||||||
|
} `json:"familyCapacityInfo"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -66,6 +66,13 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
|
func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) {
|
||||||
|
return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
|
||||||
|
if y.tokenInfo == nil {
|
||||||
|
return nil, fmt.Errorf("login failed")
|
||||||
|
}
|
||||||
req := y.client.R().SetQueryParams(clientSuffix())
|
req := y.client.R().SetQueryParams(clientSuffix())
|
||||||
|
|
||||||
if params != nil {
|
if params != nil {
|
||||||
@@ -91,8 +98,23 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
|
|||||||
|
|
||||||
if strings.Contains(res.String(), "userSessionBO is null") ||
|
if strings.Contains(res.String(), "userSessionBO is null") ||
|
||||||
strings.Contains(res.String(), "InvalidSessionKey") {
|
strings.Contains(res.String(), "InvalidSessionKey") {
|
||||||
|
// 限制重试次数,避免无限递归
|
||||||
|
if retryCount >= 3 {
|
||||||
|
y.Addition.AccessToken = ""
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
return nil, errors.New("session expired after retry")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 尝试刷新会话
|
||||||
|
if err := y.refreshSession(); err != nil {
|
||||||
|
// 如果刷新失败,说明AccessToken也已过期,需要重新登录
|
||||||
|
y.Addition.AccessToken = ""
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
return nil, errors.New("session expired")
|
return nil, errors.New("session expired")
|
||||||
}
|
}
|
||||||
|
// 如果刷新成功,则重试原始请求(增加重试计数)
|
||||||
|
return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...)
|
||||||
|
}
|
||||||
|
|
||||||
// 处理错误
|
// 处理错误
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
@@ -131,6 +153,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 请求完成后http.Client会Close Request.Body
|
||||||
resp, err := base.HttpClient.Do(req)
|
resp, err := base.HttpClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -153,6 +176,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
|
|||||||
}
|
}
|
||||||
return body, nil
|
return body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189TV) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
|
func (y *Cloud189TV) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
|
||||||
fullUrl := ApiUrl
|
fullUrl := ApiUrl
|
||||||
if isFamily {
|
if isFamily {
|
||||||
@@ -210,7 +234,7 @@ func (y *Cloud189TV) login() (err error) {
|
|||||||
var erron RespErr
|
var erron RespErr
|
||||||
var tokenInfo AppSessionResp
|
var tokenInfo AppSessionResp
|
||||||
if y.Addition.AccessToken == "" {
|
if y.Addition.AccessToken == "" {
|
||||||
if y.Addition.TempUuid == "" {
|
if y.TempUuid == "" {
|
||||||
// 获取登录参数
|
// 获取登录参数
|
||||||
var uuidInfo UuidInfoResp
|
var uuidInfo UuidInfoResp
|
||||||
req.SetResult(&uuidInfo).SetError(&erron)
|
req.SetResult(&uuidInfo).SetError(&erron)
|
||||||
@@ -218,9 +242,8 @@ func (y *Cloud189TV) login() (err error) {
|
|||||||
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/getQrCodeUUID.action",
|
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/getQrCodeUUID.action",
|
||||||
http.MethodGet))
|
http.MethodGet))
|
||||||
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/getQrCodeUUID.action")
|
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/getQrCodeUUID.action")
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
return &erron
|
return &erron
|
||||||
@@ -229,7 +252,7 @@ func (y *Cloud189TV) login() (err error) {
|
|||||||
if uuidInfo.Uuid == "" {
|
if uuidInfo.Uuid == "" {
|
||||||
return errors.New("uuidInfo is empty")
|
return errors.New("uuidInfo is empty")
|
||||||
}
|
}
|
||||||
y.Addition.TempUuid = uuidInfo.Uuid
|
y.TempUuid = uuidInfo.Uuid
|
||||||
op.MustSaveDriverStorage(y)
|
op.MustSaveDriverStorage(y)
|
||||||
|
|
||||||
// 展示二维码
|
// 展示二维码
|
||||||
@@ -257,10 +280,10 @@ func (y *Cloud189TV) login() (err error) {
|
|||||||
// Signature
|
// Signature
|
||||||
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
|
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action",
|
||||||
http.MethodGet))
|
http.MethodGet))
|
||||||
req.SetQueryParam("uuid", y.Addition.TempUuid)
|
req.SetQueryParam("uuid", y.TempUuid)
|
||||||
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
|
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
return &erron
|
return &erron
|
||||||
@@ -269,7 +292,6 @@ func (y *Cloud189TV) login() (err error) {
|
|||||||
return errors.New("E189AccessToken is empty")
|
return errors.New("E189AccessToken is empty")
|
||||||
}
|
}
|
||||||
y.Addition.AccessToken = accessTokenResp.E189AccessToken
|
y.Addition.AccessToken = accessTokenResp.E189AccessToken
|
||||||
y.Addition.TempUuid = ""
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// 获取SessionKey 和 SessionSecret
|
// 获取SessionKey 和 SessionSecret
|
||||||
@@ -281,7 +303,7 @@ func (y *Cloud189TV) login() (err error) {
|
|||||||
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
|
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
|
||||||
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
|
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
@@ -290,7 +312,45 @@ func (y *Cloud189TV) login() (err error) {
|
|||||||
|
|
||||||
y.tokenInfo = &tokenInfo
|
y.tokenInfo = &tokenInfo
|
||||||
op.MustSaveDriverStorage(y)
|
op.MustSaveDriverStorage(y)
|
||||||
return
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// refreshSession 尝试使用现有的 AccessToken 刷新会话
|
||||||
|
func (y *Cloud189TV) refreshSession() (err error) {
|
||||||
|
var erron RespErr
|
||||||
|
var tokenInfo AppSessionResp
|
||||||
|
reqb := y.client.R().SetQueryParams(clientSuffix())
|
||||||
|
reqb.SetResult(&tokenInfo).SetError(&erron)
|
||||||
|
// Signature
|
||||||
|
reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action",
|
||||||
|
http.MethodGet))
|
||||||
|
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
|
||||||
|
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if erron.HasError() {
|
||||||
|
return &erron
|
||||||
|
}
|
||||||
|
|
||||||
|
y.tokenInfo = &tokenInfo
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189TV) keepAlive() {
|
||||||
|
_, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) {
|
||||||
|
r.SetQueryParams(clientSuffix())
|
||||||
|
}, nil)
|
||||||
|
if err != nil {
|
||||||
|
utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err)
|
||||||
|
// 如果keepAlive失败,尝试刷新session
|
||||||
|
if refreshErr := y.refreshSession(); refreshErr != nil {
|
||||||
|
utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
utils.Log.Debugf("189tv: User session kept alive successfully.")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
|
||||||
@@ -314,7 +374,7 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
|
|||||||
// 旧版本上传,家庭云不支持覆盖
|
// 旧版本上传,家庭云不支持覆盖
|
||||||
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
||||||
fileMd5 := file.GetHash().GetHash(utils.MD5)
|
fileMd5 := file.GetHash().GetHash(utils.MD5)
|
||||||
var tempFile = file.GetFile()
|
tempFile := file.GetFile()
|
||||||
var err error
|
var err error
|
||||||
if len(fileMd5) != utils.MD5.Width {
|
if len(fileMd5) != utils.MD5.Width {
|
||||||
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
|
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
|
||||||
@@ -333,6 +393,10 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
|
|
||||||
// 网盘中不存在该文件,开始上传
|
// 网盘中不存在该文件,开始上传
|
||||||
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
|
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
|
||||||
|
// driver.RateLimitReader会尝试Close底层的reader
|
||||||
|
// 但这里的tempFile是一个*os.File,Close后就没法继续读了
|
||||||
|
// 所以这里用io.NopCloser包一层
|
||||||
|
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
|
||||||
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
|
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return nil, ctx.Err()
|
return nil, ctx.Err()
|
||||||
@@ -350,7 +414,7 @@ func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
|
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := y.put(ctx, status.FileUploadUrl, header, true, tempFile, isFamily)
|
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimitedRd, isFamily)
|
||||||
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
|
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -413,7 +477,6 @@ func (y *Cloud189TV) OldUploadCreate(ctx context.Context, parentID string, fileM
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}, &uploadInfo, isFamily)
|
}, &uploadInfo, isFamily)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -567,3 +630,15 @@ func (y *Cloud189TV) WaitBatchTask(aType string, taskID string, t time.Duration)
|
|||||||
time.Sleep(t)
|
time.Sleep(t)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189TV) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
|
||||||
|
fullUrl := ApiUrl + "/portal/getUserSizeInfo.action"
|
||||||
|
var resp CapacityResp
|
||||||
|
_, err := y.get(fullUrl, func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
@@ -21,11 +22,11 @@ type Cloud189PC struct {
|
|||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
|
|
||||||
identity string
|
|
||||||
|
|
||||||
client *resty.Client
|
client *resty.Client
|
||||||
|
|
||||||
loginParam *LoginParam
|
loginParam *LoginParam
|
||||||
|
qrcodeParam *QRLoginParam
|
||||||
|
|
||||||
tokenInfo *AppSessionResp
|
tokenInfo *AppSessionResp
|
||||||
|
|
||||||
uploadThread int
|
uploadThread int
|
||||||
@@ -35,6 +36,7 @@ type Cloud189PC struct {
|
|||||||
|
|
||||||
storageConfig driver.Config
|
storageConfig driver.Config
|
||||||
ref *Cloud189PC
|
ref *Cloud189PC
|
||||||
|
cron *cron.Cron
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Config() driver.Config {
|
func (y *Cloud189PC) Config() driver.Config {
|
||||||
@@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// 避免重复登陆
|
// 先尝试用Token刷新,之后尝试登陆
|
||||||
identity := utils.GetMD5EncodeStr(y.Username + y.Password)
|
if y.Addition.RefreshToken != "" {
|
||||||
if !y.isLogin() || y.identity != identity {
|
y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
|
||||||
y.identity = identity
|
if err = y.refreshToken(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
if err = y.login(); err != nil {
|
if err = y.login(); err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 初始化并启动 cron 任务
|
||||||
|
y.cron = cron.NewCron(time.Duration(time.Minute * 5))
|
||||||
|
// 每5分钟执行一次 keepAlive
|
||||||
|
y.cron.Do(y.keepAlive)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 处理家庭云ID
|
// 处理家庭云ID
|
||||||
@@ -114,7 +124,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
|
|||||||
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
|
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Cloud189PC) InitReference(storage driver.Driver) error {
|
func (d *Cloud189PC) InitReference(storage driver.Driver) error {
|
||||||
@@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error {
|
|||||||
|
|
||||||
func (y *Cloud189PC) Drop(ctx context.Context) error {
|
func (y *Cloud189PC) Drop(ctx context.Context) error {
|
||||||
y.ref = nil
|
y.ref = nil
|
||||||
|
if y.cron != nil {
|
||||||
|
y.cron.Stop()
|
||||||
|
y.cron = nil
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -291,7 +305,6 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
FileName: srcObj.GetName(),
|
FileName: srcObj.GetName(),
|
||||||
IsFolder: BoolToNumber(srcObj.IsDir()),
|
IsFolder: BoolToNumber(srcObj.IsDir()),
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -397,3 +410,24 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
return y.StreamUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
return y.StreamUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
capacityInfo, err := y.getCapacityInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var total, free uint64
|
||||||
|
if y.isFamily() {
|
||||||
|
total = capacityInfo.FamilyCapacityInfo.TotalSize
|
||||||
|
free = capacityInfo.FamilyCapacityInfo.FreeSize
|
||||||
|
} else {
|
||||||
|
total = capacityInfo.CloudCapacityInfo.TotalSize
|
||||||
|
free = capacityInfo.CloudCapacityInfo.FreeSize
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: total,
|
||||||
|
FreeSpace: free,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -80,6 +80,20 @@ func timestamp() int64 {
|
|||||||
return time.Now().UTC().UnixNano() / 1e6
|
return time.Now().UTC().UnixNano() / 1e6
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format.
|
||||||
|
func formatDate(t time.Time) string {
|
||||||
|
// The layout string "2006-01-0215:04:05.000" corresponds to:
|
||||||
|
// 2006 -> Year (YYYY)
|
||||||
|
// 01 -> Month (MM)
|
||||||
|
// 02 -> Day (DD)
|
||||||
|
// 15 -> Hour (HH)
|
||||||
|
// 04 -> Minute (mm)
|
||||||
|
// 05 -> Second (ss)
|
||||||
|
// 000 -> Millisecond (SSS) with leading zeros
|
||||||
|
// Note the lack of a separator between the date and hour, matching the desired output.
|
||||||
|
return t.Format("2006-01-0215:04:05.000")
|
||||||
|
}
|
||||||
|
|
||||||
func MustParseTime(str string) *time.Time {
|
func MustParseTime(str string) *time.Time {
|
||||||
lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
|
lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local)
|
||||||
return &lastOpTime
|
return &lastOpTime
|
||||||
|
|||||||
@@ -6,9 +6,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Addition struct {
|
type Addition struct {
|
||||||
|
LoginType string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"`
|
||||||
Username string `json:"username" required:"true"`
|
Username string `json:"username" required:"true"`
|
||||||
Password string `json:"password" required:"true"`
|
Password string `json:"password" required:"true"`
|
||||||
VCode string `json:"validate_code"`
|
VCode string `json:"validate_code"`
|
||||||
|
RefreshToken string `json:"refresh_token" help:"To switch accounts, please clear this field"`
|
||||||
driver.RootID
|
driver.RootID
|
||||||
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
|
OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"`
|
||||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||||
|
|||||||
@@ -68,15 +68,7 @@ func (e *RespErr) Error() string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
// 登陆需要的参数
|
type BaseLoginParam struct {
|
||||||
type LoginParam struct {
|
|
||||||
// 加密后的用户名和密码
|
|
||||||
RsaUsername string
|
|
||||||
RsaPassword string
|
|
||||||
|
|
||||||
// rsa密钥
|
|
||||||
jRsaKey string
|
|
||||||
|
|
||||||
// 请求头参数
|
// 请求头参数
|
||||||
Lt string
|
Lt string
|
||||||
ReqId string
|
ReqId string
|
||||||
@@ -88,6 +80,27 @@ type LoginParam struct {
|
|||||||
CaptchaToken string
|
CaptchaToken string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// QRLoginParam 用于暂存二维码登录过程中的参数
|
||||||
|
type QRLoginParam struct {
|
||||||
|
BaseLoginParam
|
||||||
|
|
||||||
|
UUID string `json:"uuid"`
|
||||||
|
EncodeUUID string `json:"encodeuuid"`
|
||||||
|
EncryUUID string `json:"encryuuid"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// 登陆需要的参数
|
||||||
|
type LoginParam struct {
|
||||||
|
// 加密后的用户名和密码
|
||||||
|
RsaUsername string
|
||||||
|
RsaPassword string
|
||||||
|
|
||||||
|
// rsa密钥
|
||||||
|
jRsaKey string
|
||||||
|
|
||||||
|
BaseLoginParam
|
||||||
|
}
|
||||||
|
|
||||||
// 登陆加密相关
|
// 登陆加密相关
|
||||||
type EncryptConfResp struct {
|
type EncryptConfResp struct {
|
||||||
Result int `json:"result"`
|
Result int `json:"result"`
|
||||||
@@ -396,3 +409,21 @@ func (p Params) Encode() string {
|
|||||||
}
|
}
|
||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CapacityResp struct {
|
||||||
|
ResCode int `json:"res_code"`
|
||||||
|
ResMessage string `json:"res_message"`
|
||||||
|
Account string `json:"account"`
|
||||||
|
CloudCapacityInfo struct {
|
||||||
|
FreeSize uint64 `json:"freeSize"`
|
||||||
|
MailUsedSize uint64 `json:"mail189UsedSize"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
UsedSize uint64 `json:"usedSize"`
|
||||||
|
} `json:"cloudCapacityInfo"`
|
||||||
|
FamilyCapacityInfo struct {
|
||||||
|
FreeSize uint64 `json:"freeSize"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
UsedSize uint64 `json:"usedSize"`
|
||||||
|
} `json:"familyCapacityInfo"`
|
||||||
|
TotalSize uint64 `json:"totalSize"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
||||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
|
"github.com/skip2/go-qrcode"
|
||||||
|
|
||||||
"github.com/avast/retry-go"
|
"github.com/avast/retry-go"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
@@ -54,6 +55,9 @@ const (
|
|||||||
MAC = "TELEMAC"
|
MAC = "TELEMAC"
|
||||||
|
|
||||||
CHANNEL_ID = "web_cloud.189.cn"
|
CHANNEL_ID = "web_cloud.189.cn"
|
||||||
|
|
||||||
|
// Error codes
|
||||||
|
UserInvalidOpenTokenError = "UserInvalidOpenToken"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
|
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
|
||||||
@@ -86,6 +90,9 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
|
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
|
||||||
|
if y.getTokenInfo() == nil {
|
||||||
|
return nil, fmt.Errorf("login failed")
|
||||||
|
}
|
||||||
req := y.getClient().R().SetQueryParams(clientSuffix())
|
req := y.getClient().R().SetQueryParams(clientSuffix())
|
||||||
|
|
||||||
// 设置params
|
// 设置params
|
||||||
@@ -185,6 +192,7 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
|
|||||||
}
|
}
|
||||||
return body, nil
|
return body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
|
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
|
||||||
res := make([]model.Obj, 0, 100)
|
res := make([]model.Obj, 0, 100)
|
||||||
for pageNum := 1; ; pageNum++ {
|
for pageNum := 1; ; pageNum++ {
|
||||||
@@ -264,7 +272,14 @@ func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, fold
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) login() (err error) {
|
func (y *Cloud189PC) login() error {
|
||||||
|
if y.LoginType == "qrcode" {
|
||||||
|
return y.loginByQRCode()
|
||||||
|
}
|
||||||
|
return y.loginByPassword()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) loginByPassword() (err error) {
|
||||||
// 初始化登陆所需参数
|
// 初始化登陆所需参数
|
||||||
if y.loginParam == nil {
|
if y.loginParam == nil {
|
||||||
if err = y.initLoginParam(); err != nil {
|
if err = y.initLoginParam(); err != nil {
|
||||||
@@ -278,11 +293,16 @@ func (y *Cloud189PC) login() (err error) {
|
|||||||
// 销毁登陆参数
|
// 销毁登陆参数
|
||||||
y.loginParam = nil
|
y.loginParam = nil
|
||||||
// 遇到错误,重新加载登陆参数(刷新验证码)
|
// 遇到错误,重新加载登陆参数(刷新验证码)
|
||||||
if err != nil && y.NoUseOcr {
|
if err != nil {
|
||||||
|
if y.NoUseOcr {
|
||||||
if err1 := y.initLoginParam(); err1 != nil {
|
if err1 := y.initLoginParam(); err1 != nil {
|
||||||
err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
|
err = fmt.Errorf("err1: %s \nerr2: %s", err, err1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
y.Status = err.Error()
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
param := y.loginParam
|
param := y.loginParam
|
||||||
@@ -326,7 +346,7 @@ func (y *Cloud189PC) login() (err error) {
|
|||||||
SetQueryParam("redirectURL", loginresp.ToUrl).
|
SetQueryParam("redirectURL", loginresp.ToUrl).
|
||||||
Post(API_URL + "/getSessionForPC.action")
|
Post(API_URL + "/getSessionForPC.action")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
@@ -334,16 +354,106 @@ func (y *Cloud189PC) login() (err error) {
|
|||||||
}
|
}
|
||||||
if tokenInfo.ResCode != 0 {
|
if tokenInfo.ResCode != 0 {
|
||||||
err = fmt.Errorf(tokenInfo.ResMessage)
|
err = fmt.Errorf(tokenInfo.ResMessage)
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
|
y.Addition.RefreshToken = tokenInfo.RefreshToken
|
||||||
y.tokenInfo = &tokenInfo
|
y.tokenInfo = &tokenInfo
|
||||||
return
|
op.MustSaveDriverStorage(y)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
/* 初始化登陆需要的参数
|
func (y *Cloud189PC) loginByQRCode() error {
|
||||||
* 如果遇到验证码返回错误
|
if y.qrcodeParam == nil {
|
||||||
*/
|
if err := y.initQRCodeParam(); err != nil {
|
||||||
func (y *Cloud189PC) initLoginParam() error {
|
// 二维码也通过错误返回
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var state struct {
|
||||||
|
Status int `json:"status"`
|
||||||
|
RedirectUrl string `json:"redirectUrl"`
|
||||||
|
Msg string `json:"msg"`
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
_, err := y.client.R().
|
||||||
|
SetHeaders(map[string]string{
|
||||||
|
"Referer": AUTH_URL,
|
||||||
|
"Reqid": y.qrcodeParam.ReqId,
|
||||||
|
"lt": y.qrcodeParam.Lt,
|
||||||
|
}).
|
||||||
|
SetFormData(map[string]string{
|
||||||
|
"appId": APP_ID,
|
||||||
|
"clientType": CLIENT_TYPE,
|
||||||
|
"returnUrl": RETURN_URL,
|
||||||
|
"paramId": y.qrcodeParam.ParamId,
|
||||||
|
"uuid": y.qrcodeParam.UUID,
|
||||||
|
"encryuuid": y.qrcodeParam.EncryUUID,
|
||||||
|
"date": formatDate(now),
|
||||||
|
"timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6),
|
||||||
|
}).
|
||||||
|
ForceContentType("application/json;charset=UTF-8").
|
||||||
|
SetResult(&state).
|
||||||
|
Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to check QR code state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch state.Status {
|
||||||
|
case 0: // 登录成功
|
||||||
|
var tokenInfo AppSessionResp
|
||||||
|
_, err = y.client.R().
|
||||||
|
SetResult(&tokenInfo).
|
||||||
|
SetQueryParams(clientSuffix()).
|
||||||
|
SetQueryParam("redirectURL", state.RedirectUrl).
|
||||||
|
Post(API_URL + "/getSessionForPC.action")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tokenInfo.ResCode != 0 {
|
||||||
|
return fmt.Errorf(tokenInfo.ResMessage)
|
||||||
|
}
|
||||||
|
y.Addition.RefreshToken = tokenInfo.RefreshToken
|
||||||
|
y.tokenInfo = &tokenInfo
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
return nil
|
||||||
|
case -11001: // 二维码过期
|
||||||
|
y.qrcodeParam = nil
|
||||||
|
return errors.New("QR code expired, please try again")
|
||||||
|
case -106: // 等待扫描
|
||||||
|
return y.genQRCode("QR code has not been scanned yet, please scan and save again")
|
||||||
|
case -11002: // 等待确认
|
||||||
|
return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again")
|
||||||
|
default: // 其他错误
|
||||||
|
y.qrcodeParam = nil
|
||||||
|
return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) genQRCode(text string) error {
|
||||||
|
// 展示二维码
|
||||||
|
qrTemplate := `<body>
|
||||||
|
state: %s
|
||||||
|
<br><img src="data:image/jpeg;base64,%s"/>
|
||||||
|
<br>Or Click here: <a href="%s">Login</a>
|
||||||
|
</body>`
|
||||||
|
|
||||||
|
// Generate QR code
|
||||||
|
qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to generate QR code: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode QR code to base64
|
||||||
|
qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode)
|
||||||
|
|
||||||
|
// Create the HTML page
|
||||||
|
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
|
||||||
|
return fmt.Errorf("need verify: \n%s", qrPage)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
|
||||||
// 清除cookie
|
// 清除cookie
|
||||||
jar, _ := cookiejar.New(nil)
|
jar, _ := cookiejar.New(nil)
|
||||||
y.client.SetCookieJar(jar)
|
y.client.SetCookieJar(jar)
|
||||||
@@ -357,17 +467,30 @@ func (y *Cloud189PC) initLoginParam() error {
|
|||||||
}).
|
}).
|
||||||
Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
|
Get(WEB_URL + "/api/portal/unifyLoginForPC.action")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
param := LoginParam{
|
return &BaseLoginParam{
|
||||||
CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
|
CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1],
|
||||||
Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
|
Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1],
|
||||||
ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
|
ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1],
|
||||||
ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
|
ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1],
|
||||||
// jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1],
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* 初始化登陆需要的参数
|
||||||
|
* 如果遇到验证码返回错误
|
||||||
|
*/
|
||||||
|
func (y *Cloud189PC) initLoginParam() error {
|
||||||
|
y.loginParam = nil
|
||||||
|
|
||||||
|
baseParam, err := y.initBaseParams()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
y.loginParam = &LoginParam{BaseLoginParam: *baseParam}
|
||||||
|
|
||||||
// 获取rsa公钥
|
// 获取rsa公钥
|
||||||
var encryptConf EncryptConfResp
|
var encryptConf EncryptConfResp
|
||||||
_, err = y.client.R().
|
_, err = y.client.R().
|
||||||
@@ -378,18 +501,17 @@ func (y *Cloud189PC) initLoginParam() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
|
y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey)
|
||||||
param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username)
|
y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username)
|
||||||
param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password)
|
y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password)
|
||||||
y.loginParam = ¶m
|
|
||||||
|
|
||||||
// 判断是否需要验证码
|
// 判断是否需要验证码
|
||||||
resp, err := y.client.R().
|
resp, err := y.client.R().
|
||||||
SetHeader("REQID", param.ReqId).
|
SetHeader("REQID", y.loginParam.ReqId).
|
||||||
SetFormData(map[string]string{
|
SetFormData(map[string]string{
|
||||||
"appKey": APP_ID,
|
"appKey": APP_ID,
|
||||||
"accountType": ACCOUNT_TYPE,
|
"accountType": ACCOUNT_TYPE,
|
||||||
"userName": param.RsaUsername,
|
"userName": y.loginParam.RsaUsername,
|
||||||
}).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
|
}).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -401,8 +523,8 @@ func (y *Cloud189PC) initLoginParam() error {
|
|||||||
// 拉取验证码
|
// 拉取验证码
|
||||||
imgRes, err := y.client.R().
|
imgRes, err := y.client.R().
|
||||||
SetQueryParams(map[string]string{
|
SetQueryParams(map[string]string{
|
||||||
"token": param.CaptchaToken,
|
"token": y.loginParam.CaptchaToken,
|
||||||
"REQID": param.ReqId,
|
"REQID": y.loginParam.ReqId,
|
||||||
"rnd": fmt.Sprint(timestamp()),
|
"rnd": fmt.Sprint(timestamp()),
|
||||||
}).
|
}).
|
||||||
Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")
|
Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do")
|
||||||
@@ -429,10 +551,38 @@ func (y *Cloud189PC) initLoginParam() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getQRCode 获取并返回二维码
|
||||||
|
func (y *Cloud189PC) initQRCodeParam() (err error) {
|
||||||
|
y.qrcodeParam = nil
|
||||||
|
|
||||||
|
baseParam, err := y.initBaseParams()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var qrcodeParam QRLoginParam
|
||||||
|
_, err = y.client.R().
|
||||||
|
SetFormData(map[string]string{"appId": APP_ID}).
|
||||||
|
ForceContentType("application/json;charset=UTF-8").
|
||||||
|
SetResult(&qrcodeParam).
|
||||||
|
Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
qrcodeParam.BaseLoginParam = *baseParam
|
||||||
|
y.qrcodeParam = &qrcodeParam
|
||||||
|
|
||||||
|
return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.")
|
||||||
|
}
|
||||||
|
|
||||||
// 刷新会话
|
// 刷新会话
|
||||||
func (y *Cloud189PC) refreshSession() (err error) {
|
func (y *Cloud189PC) refreshSession() (err error) {
|
||||||
|
return y.refreshSessionWithRetry(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) {
|
||||||
if y.ref != nil {
|
if y.ref != nil {
|
||||||
return y.ref.refreshSession()
|
return y.ref.refreshSessionWithRetry(retryCount)
|
||||||
}
|
}
|
||||||
var erron RespErr
|
var erron RespErr
|
||||||
var userSessionResp UserSessionResp
|
var userSessionResp UserSessionResp
|
||||||
@@ -449,37 +599,102 @@ func (y *Cloud189PC) refreshSession() (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// 错误影响正常访问,下线该储存
|
// token生效刷新token
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
y.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
|
|
||||||
op.MustSaveDriverStorage(y)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
if erron.ResCode == "UserInvalidOpenToken" {
|
if erron.ResCode == UserInvalidOpenTokenError {
|
||||||
if err = y.login(); err != nil {
|
return y.refreshTokenWithRetry(retryCount)
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return &erron
|
return &erron
|
||||||
}
|
}
|
||||||
y.tokenInfo.UserSessionResp = userSessionResp
|
y.tokenInfo.UserSessionResp = userSessionResp
|
||||||
return
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// refreshToken 刷新token,失败时返回错误,不再直接调用login
|
||||||
|
func (y *Cloud189PC) refreshToken() (err error) {
|
||||||
|
return y.refreshTokenWithRetry(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
|
||||||
|
if y.ref != nil {
|
||||||
|
return y.ref.refreshTokenWithRetry(retryCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 限制重试次数,避免无限递归
|
||||||
|
if retryCount >= 3 {
|
||||||
|
if y.Addition.RefreshToken != "" {
|
||||||
|
y.Addition.RefreshToken = ""
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
}
|
||||||
|
return errors.New("refresh token failed after maximum retries")
|
||||||
|
}
|
||||||
|
|
||||||
|
var erron RespErr
|
||||||
|
var tokenInfo AppSessionResp
|
||||||
|
_, err = y.client.R().
|
||||||
|
SetResult(&tokenInfo).
|
||||||
|
ForceContentType("application/json;charset=UTF-8").
|
||||||
|
SetError(&erron).
|
||||||
|
SetFormData(map[string]string{
|
||||||
|
"clientId": APP_ID,
|
||||||
|
"refreshToken": y.tokenInfo.RefreshToken,
|
||||||
|
"grantType": "refresh_token",
|
||||||
|
"format": "json",
|
||||||
|
}).
|
||||||
|
Post(AUTH_URL + "/api/oauth2/refreshToken.do")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 如果刷新失败,返回错误给上层处理
|
||||||
|
if erron.HasError() {
|
||||||
|
if y.Addition.RefreshToken != "" {
|
||||||
|
y.Addition.RefreshToken = ""
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 根据登录类型决定下一步行为
|
||||||
|
if y.LoginType == "qrcode" {
|
||||||
|
return errors.New("QR code session has expired, please re-scan the code to log in")
|
||||||
|
}
|
||||||
|
// 密码登录模式下,尝试回退到完整登录
|
||||||
|
return y.login()
|
||||||
|
}
|
||||||
|
|
||||||
|
y.Addition.RefreshToken = tokenInfo.RefreshToken
|
||||||
|
y.tokenInfo = &tokenInfo
|
||||||
|
op.MustSaveDriverStorage(y)
|
||||||
|
return y.refreshSessionWithRetry(retryCount + 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) keepAlive() {
|
||||||
|
_, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) {
|
||||||
|
r.SetQueryParams(clientSuffix())
|
||||||
|
}, nil)
|
||||||
|
if err != nil {
|
||||||
|
utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err)
|
||||||
|
// 如果keepAlive失败,尝试刷新session
|
||||||
|
if refreshErr := y.refreshSession(); refreshErr != nil {
|
||||||
|
utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
utils.Log.Debugf("189pc: User session kept alive successfully.")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 普通上传
|
// 普通上传
|
||||||
// 无法上传大小为0的文件
|
// 无法上传大小为0的文件
|
||||||
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
||||||
size := file.GetSize()
|
// 文件大小
|
||||||
sliceSize := min(size, partSize(size))
|
fileSize := file.GetSize()
|
||||||
|
// 分片大小,不得为文件大小
|
||||||
|
sliceSize := partSize(fileSize)
|
||||||
|
|
||||||
params := Params{
|
params := Params{
|
||||||
"parentFolderId": dstDir.GetID(),
|
"parentFolderId": dstDir.GetID(),
|
||||||
"fileName": url.QueryEscape(file.GetName()),
|
"fileName": url.QueryEscape(file.GetName()),
|
||||||
"fileSize": fmt.Sprint(file.GetSize()),
|
"fileSize": fmt.Sprint(fileSize),
|
||||||
"sliceSize": fmt.Sprint(sliceSize),
|
"sliceSize": fmt.Sprint(sliceSize), // 必须为特定分片大小
|
||||||
"lazyCheck": "1",
|
"lazyCheck": "1",
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -512,10 +727,10 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
retry.DelayType(retry.BackOffDelay))
|
retry.DelayType(retry.BackOffDelay))
|
||||||
|
|
||||||
count := 1
|
count := 1
|
||||||
if size > sliceSize {
|
if fileSize > sliceSize {
|
||||||
count = int((size + sliceSize - 1) / sliceSize)
|
count = int((fileSize + sliceSize - 1) / sliceSize)
|
||||||
}
|
}
|
||||||
lastPartSize := size % sliceSize
|
lastPartSize := fileSize % sliceSize
|
||||||
if lastPartSize == 0 {
|
if lastPartSize == 0 {
|
||||||
lastPartSize = sliceSize
|
lastPartSize = sliceSize
|
||||||
}
|
}
|
||||||
@@ -535,25 +750,25 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
offset := int64((i)-1) * sliceSize
|
offset := int64((i)-1) * sliceSize
|
||||||
size := sliceSize
|
partSize := sliceSize
|
||||||
if i == count {
|
if i == count {
|
||||||
size = lastPartSize
|
partSize = lastPartSize
|
||||||
}
|
}
|
||||||
partInfo := ""
|
partInfo := ""
|
||||||
var reader *stream.SectionReader
|
var reader io.ReadSeeker
|
||||||
var rateLimitedRd io.Reader
|
var rateLimitedRd io.Reader
|
||||||
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
||||||
Before: func(ctx context.Context) error {
|
Before: func(ctx context.Context) error {
|
||||||
if reader == nil {
|
if reader == nil {
|
||||||
var err error
|
var err error
|
||||||
reader, err = ss.GetSectionReader(offset, size)
|
reader, err = ss.GetSectionReader(offset, partSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
silceMd5.Reset()
|
silceMd5.Reset()
|
||||||
w, err := utils.CopyWithBuffer(writers, reader)
|
w, err := utils.CopyWithBuffer(writers, reader)
|
||||||
if w != size {
|
if w != partSize {
|
||||||
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", size, w, err)
|
return fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", partSize, w, err)
|
||||||
}
|
}
|
||||||
// 计算块md5并进行hex和base64编码
|
// 计算块md5并进行hex和base64编码
|
||||||
md5Bytes := silceMd5.Sum(nil)
|
md5Bytes := silceMd5.Sum(nil)
|
||||||
@@ -573,8 +788,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
|
|
||||||
// step.4 上传切片
|
// step.4 上传切片
|
||||||
uploadUrl := uploadUrls[0]
|
uploadUrl := uploadUrls[0]
|
||||||
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
|
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily)
|
||||||
driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -595,7 +809,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
fileMd5Hex = strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
||||||
}
|
}
|
||||||
sliceMd5Hex := fileMd5Hex
|
sliceMd5Hex := fileMd5Hex
|
||||||
if file.GetSize() > sliceSize {
|
if fileSize > sliceSize {
|
||||||
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
|
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -952,7 +1166,6 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}, &uploadInfo, isFamily)
|
}, &uploadInfo, isFamily)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -1262,3 +1475,15 @@ func (y *Cloud189PC) getClient() *resty.Client {
|
|||||||
}
|
}
|
||||||
return y.client
|
return y.client
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
|
||||||
|
fullUrl := API_URL + "/portal/getUserSizeInfo.action"
|
||||||
|
var resp CapacityResp
|
||||||
|
_, err := y.get(fullUrl, func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -23,6 +23,7 @@ import (
|
|||||||
type Alias struct {
|
type Alias struct {
|
||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
|
rootOrder []string
|
||||||
pathMap map[string][]string
|
pathMap map[string][]string
|
||||||
autoFlatten bool
|
autoFlatten bool
|
||||||
oneKey string
|
oneKey string
|
||||||
@@ -40,13 +41,18 @@ func (d *Alias) Init(ctx context.Context) error {
|
|||||||
if d.Paths == "" {
|
if d.Paths == "" {
|
||||||
return errors.New("paths is required")
|
return errors.New("paths is required")
|
||||||
}
|
}
|
||||||
|
paths := strings.Split(d.Paths, "\n")
|
||||||
|
d.rootOrder = make([]string, 0, len(paths))
|
||||||
d.pathMap = make(map[string][]string)
|
d.pathMap = make(map[string][]string)
|
||||||
for _, path := range strings.Split(d.Paths, "\n") {
|
for _, path := range paths {
|
||||||
path = strings.TrimSpace(path)
|
path = strings.TrimSpace(path)
|
||||||
if path == "" {
|
if path == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
k, v := getPair(path)
|
k, v := getPair(path)
|
||||||
|
if _, ok := d.pathMap[k]; !ok {
|
||||||
|
d.rootOrder = append(d.rootOrder, k)
|
||||||
|
}
|
||||||
d.pathMap[k] = append(d.pathMap[k], v)
|
d.pathMap[k] = append(d.pathMap[k], v)
|
||||||
}
|
}
|
||||||
if len(d.pathMap) == 1 {
|
if len(d.pathMap) == 1 {
|
||||||
@@ -62,6 +68,7 @@ func (d *Alias) Init(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Alias) Drop(ctx context.Context) error {
|
func (d *Alias) Drop(ctx context.Context) error {
|
||||||
|
d.rootOrder = nil
|
||||||
d.pathMap = nil
|
d.pathMap = nil
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -79,27 +86,51 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return nil, errs.ObjectNotFound
|
return nil, errs.ObjectNotFound
|
||||||
}
|
}
|
||||||
|
var ret *model.Object
|
||||||
|
provider := ""
|
||||||
for _, dst := range dsts {
|
for _, dst := range dsts {
|
||||||
obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true})
|
rawPath := stdpath.Join(dst, sub)
|
||||||
|
obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return &model.Object{
|
storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{})
|
||||||
|
if ret == nil {
|
||||||
|
ret = &model.Object{
|
||||||
Path: path,
|
Path: path,
|
||||||
Name: obj.GetName(),
|
Name: obj.GetName(),
|
||||||
Size: obj.GetSize(),
|
Size: obj.GetSize(),
|
||||||
Modified: obj.ModTime(),
|
Modified: obj.ModTime(),
|
||||||
IsFolder: obj.IsDir(),
|
IsFolder: obj.IsDir(),
|
||||||
HashInfo: obj.GetHash(),
|
HashInfo: obj.GetHash(),
|
||||||
|
}
|
||||||
|
if !d.ProviderPassThrough || err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
provider = storage.Config().Name
|
||||||
|
} else if err != nil || provider != storage.GetStorage().Driver {
|
||||||
|
provider = ""
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ret == nil {
|
||||||
|
return nil, errs.ObjectNotFound
|
||||||
|
}
|
||||||
|
if provider != "" {
|
||||||
|
return &model.ObjectProvider{
|
||||||
|
Object: *ret,
|
||||||
|
Provider: model.Provider{
|
||||||
|
Provider: provider,
|
||||||
|
},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
return nil, errs.ObjectNotFound
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
path := dir.GetPath()
|
path := dir.GetPath()
|
||||||
if utils.PathEqual(path, "/") && !d.autoFlatten {
|
if utils.PathEqual(path, "/") && !d.autoFlatten {
|
||||||
return d.listRoot(), nil
|
return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil
|
||||||
}
|
}
|
||||||
root, sub := d.getRootAndPath(path)
|
root, sub := d.getRootAndPath(path)
|
||||||
dsts, ok := d.pathMap[root]
|
dsts, ok := d.pathMap[root]
|
||||||
@@ -107,27 +138,35 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
|||||||
return nil, errs.ObjectNotFound
|
return nil, errs.ObjectNotFound
|
||||||
}
|
}
|
||||||
var objs []model.Obj
|
var objs []model.Obj
|
||||||
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
|
|
||||||
for _, dst := range dsts {
|
for _, dst := range dsts {
|
||||||
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
|
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), &fs.ListArgs{
|
||||||
|
NoLog: true,
|
||||||
|
Refresh: args.Refresh,
|
||||||
|
WithStorageDetails: args.WithStorageDetails && d.DetailsPassThrough,
|
||||||
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
|
tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
|
||||||
thumb, ok := model.GetThumb(obj)
|
|
||||||
objRes := model.Object{
|
objRes := model.Object{
|
||||||
Name: obj.GetName(),
|
Name: obj.GetName(),
|
||||||
Size: obj.GetSize(),
|
Size: obj.GetSize(),
|
||||||
Modified: obj.ModTime(),
|
Modified: obj.ModTime(),
|
||||||
IsFolder: obj.IsDir(),
|
IsFolder: obj.IsDir(),
|
||||||
}
|
}
|
||||||
if !ok {
|
if thumb, ok := model.GetThumb(obj); ok {
|
||||||
return &objRes, nil
|
|
||||||
}
|
|
||||||
return &model.ObjThumb{
|
return &model.ObjThumb{
|
||||||
Object: objRes,
|
Object: objRes,
|
||||||
Thumbnail: model.Thumbnail{
|
Thumbnail: model.Thumbnail{
|
||||||
Thumbnail: thumb,
|
Thumbnail: thumb,
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
|
}
|
||||||
|
if details, ok := model.GetStorageDetails(obj); ok {
|
||||||
|
return &model.ObjStorageDetails{
|
||||||
|
Obj: &objRes,
|
||||||
|
StorageDetailsWithName: *details,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return &objRes, nil
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -172,9 +211,6 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
if resultLink.ContentLength == 0 {
|
if resultLink.ContentLength == 0 {
|
||||||
resultLink.ContentLength = fi.GetSize()
|
resultLink.ContentLength = fi.GetSize()
|
||||||
}
|
}
|
||||||
if resultLink.MFile != nil {
|
|
||||||
return &resultLink, nil
|
|
||||||
}
|
|
||||||
if d.DownloadConcurrency > 0 {
|
if d.DownloadConcurrency > 0 {
|
||||||
resultLink.Concurrency = d.DownloadConcurrency
|
resultLink.Concurrency = d.DownloadConcurrency
|
||||||
}
|
}
|
||||||
@@ -186,6 +222,35 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
return nil, errs.ObjectNotFound
|
return nil, errs.ObjectNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||||
|
root, sub := d.getRootAndPath(args.Obj.GetPath())
|
||||||
|
dsts, ok := d.pathMap[root]
|
||||||
|
if !ok {
|
||||||
|
return nil, errs.ObjectNotFound
|
||||||
|
}
|
||||||
|
for _, dst := range dsts {
|
||||||
|
rawPath := stdpath.Join(dst, sub)
|
||||||
|
storage, actualPath, err := op.GetStorageAndActualPath(rawPath)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
other, ok := storage.(driver.Other)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
obj, err := op.GetUnwrap(ctx, storage, actualPath)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return other.Other(ctx, model.OtherArgs{
|
||||||
|
Obj: obj,
|
||||||
|
Method: args.Method,
|
||||||
|
Data: args.Data,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return nil, errs.NotImplement
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
if !d.Writable {
|
if !d.Writable {
|
||||||
return errs.PermissionDenied
|
return errs.PermissionDenied
|
||||||
@@ -197,7 +262,7 @@ func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name dirs cannot make sub-dir")
|
return errors.New("same-name dirs cannot make sub-dir")
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -208,14 +273,14 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
return errs.PermissionDenied
|
return errs.PermissionDenied
|
||||||
}
|
}
|
||||||
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name files cannot be moved")
|
return errors.New("same-name files cannot be moved")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name dirs cannot be moved to")
|
return errors.New("same-name dirs cannot be moved to")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -243,7 +308,7 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name files cannot be Rename")
|
return errors.New("same-name files cannot be Rename")
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -254,14 +319,14 @@ func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
return errs.PermissionDenied
|
return errs.PermissionDenied
|
||||||
}
|
}
|
||||||
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name files cannot be copied")
|
return errors.New("same-name files cannot be copied")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name dirs cannot be copied to")
|
return errors.New("same-name dirs cannot be copied to")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -295,7 +360,7 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name files cannot be Delete")
|
return errors.New("same-name files cannot be Delete")
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -339,7 +404,7 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name dirs cannot be Put")
|
return errors.New("same-name dirs cannot be Put")
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -356,7 +421,7 @@ func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string)
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name files cannot offline download")
|
return errors.New("same-name files cannot offline download")
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -429,14 +494,14 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
|
|||||||
return errs.PermissionDenied
|
return errs.PermissionDenied
|
||||||
}
|
}
|
||||||
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name files cannot be decompressed")
|
return errors.New("same-name files cannot be decompressed")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
||||||
if errs.IsNotImplement(err) {
|
if errs.IsNotImplementError(err) {
|
||||||
return errors.New("same-name dirs cannot be decompressed to")
|
return errors.New("same-name dirs cannot be decompressed to")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -459,4 +524,25 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
|
||||||
|
root, sub := d.getRootAndPath(path)
|
||||||
|
dsts, ok := d.pathMap[root]
|
||||||
|
if !ok {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
for _, dst := range dsts {
|
||||||
|
storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mode := storage.Config().LinkCacheMode
|
||||||
|
if mode == -1 {
|
||||||
|
return storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(actualPath)
|
||||||
|
} else {
|
||||||
|
return mode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Alias)(nil)
|
var _ driver.Driver = (*Alias)(nil)
|
||||||
|
|||||||
@@ -15,6 +15,8 @@ type Addition struct {
|
|||||||
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
|
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
|
||||||
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
|
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
|
||||||
Writable bool `json:"writable" type:"bool" default:"false"`
|
Writable bool `json:"writable" type:"bool" default:"false"`
|
||||||
|
ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
|
||||||
|
DetailsPassThrough bool `json:"details_pass_through" type:"bool" default:"false"`
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
@@ -24,6 +26,7 @@ var config = driver.Config{
|
|||||||
NoUpload: false,
|
NoUpload: false,
|
||||||
DefaultRoot: "/",
|
DefaultRoot: "/",
|
||||||
ProxyRangeOption: true,
|
ProxyRangeOption: true,
|
||||||
|
LinkCacheMode: driver.LinkCacheAuto,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
@@ -2,8 +2,11 @@ package alias
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
stdpath "path"
|
stdpath "path"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
@@ -11,18 +14,55 @@ import (
|
|||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
"github.com/OpenListTeam/OpenList/v4/server/common"
|
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (d *Alias) listRoot() []model.Obj {
|
func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
|
||||||
var objs []model.Obj
|
var objs []model.Obj
|
||||||
for k := range d.pathMap {
|
var wg sync.WaitGroup
|
||||||
|
for _, k := range d.rootOrder {
|
||||||
obj := model.Object{
|
obj := model.Object{
|
||||||
Name: k,
|
Name: k,
|
||||||
IsFolder: true,
|
IsFolder: true,
|
||||||
Modified: d.Modified,
|
Modified: d.Modified,
|
||||||
}
|
}
|
||||||
|
idx := len(objs)
|
||||||
objs = append(objs, &obj)
|
objs = append(objs, &obj)
|
||||||
|
v := d.pathMap[k]
|
||||||
|
if !withDetails || len(v) != 1 {
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
remoteDriver, err := op.GetStorageByMountPath(v[0])
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
_, ok := remoteDriver.(driver.WithDetails)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
objs[idx] = &model.ObjStorageDetails{
|
||||||
|
Obj: objs[idx],
|
||||||
|
StorageDetailsWithName: model.StorageDetailsWithName{
|
||||||
|
StorageDetails: nil,
|
||||||
|
DriverName: remoteDriver.Config().Name,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
c, cancel := context.WithTimeout(ctx, time.Second)
|
||||||
|
defer cancel()
|
||||||
|
details, e := op.GetStorageDetails(c, remoteDriver, refresh)
|
||||||
|
if e != nil {
|
||||||
|
if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
|
||||||
|
log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
objs[idx].(*model.ObjStorageDetails).StorageDetails = details
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
return objs
|
return objs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -171,7 +171,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
Mimetype: streamer.GetMimetype(),
|
Mimetype: streamer.GetMimetype(),
|
||||||
}
|
}
|
||||||
const DEFAULT int64 = 10485760
|
const DEFAULT int64 = 10485760
|
||||||
var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
|
count := int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
|
||||||
|
|
||||||
partInfoList := make([]base.Json, 0, count)
|
partInfoList := make([]base.Json, 0, count)
|
||||||
for i := 1; i <= count; i++ {
|
for i := 1; i <= count; i++ {
|
||||||
@@ -327,6 +327,20 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
return fmt.Errorf("%+v", resp2)
|
return fmt.Errorf("%+v", resp2)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *AliDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
res, err, _ := d.request("https://api.aliyundrive.com/adrive/v1/user/driveCapacityDetails", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
used := utils.Json.Get(res, "drive_used_size").ToUint64()
|
||||||
|
total := utils.Json.Get(res, "drive_total_size").ToUint64()
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *AliDrive) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
func (d *AliDrive) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||||
var resp base.Json
|
var resp base.Json
|
||||||
var url string
|
var url string
|
||||||
|
|||||||
@@ -291,6 +291,18 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
|
|||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
|
||||||
|
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*AliyundriveOpen)(nil)
|
var _ driver.Driver = (*AliyundriveOpen)(nil)
|
||||||
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
|
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
|
||||||
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
|
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
|
||||||
|
|||||||
@@ -20,9 +20,12 @@ import (
|
|||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/chunk"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/crypt"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/degoo"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/doubao_share"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/dropbox"
|
||||||
@@ -33,12 +36,14 @@ import (
|
|||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud_open"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/local"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/local"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediafire"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/mega"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/mega"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/misskey"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/misskey"
|
||||||
@@ -48,8 +53,10 @@ import (
|
|||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_app"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/onedrive_sharelink"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/proton_drive"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc_tv"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc_tv"
|
||||||
@@ -59,6 +66,7 @@ import (
|
|||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/smb"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/strm"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/teambition"
|
||||||
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/teldrive"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/terabox"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder"
|
||||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"
|
_ "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser"
|
||||||
|
|||||||
@@ -364,4 +364,12 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
du, err := d.quota(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{DiskUsage: du}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*BaiduNetdisk)(nil)
|
var _ driver.Driver = (*BaiduNetdisk)(nil)
|
||||||
|
|||||||
@@ -189,3 +189,12 @@ type PrecreateResp struct {
|
|||||||
// return_type=2
|
// return_type=2
|
||||||
File File `json:"info"`
|
File File `json:"info"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type QuotaResp struct {
|
||||||
|
Errno int `json:"errno"`
|
||||||
|
RequestId int64 `json:"request_id"`
|
||||||
|
Total uint64 `json:"total"`
|
||||||
|
Used uint64 `json:"used"`
|
||||||
|
//Free uint64 `json:"free"`
|
||||||
|
//Expire bool `json:"expire"`
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package baidu_netdisk
|
package baidu_netdisk
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -11,6 +12,7 @@ import (
|
|||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
@@ -381,6 +383,17 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
|||||||
return maxSliceSize
|
return maxSliceSize
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
|
||||||
|
var resp QuotaResp
|
||||||
|
_, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return model.DiskUsage{}, err
|
||||||
|
}
|
||||||
|
return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
|
||||||
|
}
|
||||||
|
|
||||||
// func encodeURIComponent(str string) string {
|
// func encodeURIComponent(str string) string {
|
||||||
// r := url.QueryEscape(str)
|
// r := url.QueryEscape(str)
|
||||||
// r = strings.ReplaceAll(r, "+", "%20")
|
// r = strings.ReplaceAll(r, "+", "%20")
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ type Addition struct {
|
|||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "BaiduPhoto",
|
Name: "BaiduPhoto",
|
||||||
LocalSort: true,
|
LocalSort: true,
|
||||||
|
LinkCacheMode: driver.LinkCacheUA,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
@@ -25,6 +25,7 @@ func InitClient() {
|
|||||||
}),
|
}),
|
||||||
).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
||||||
NoRedirectClient.SetHeader("user-agent", UserAgent)
|
NoRedirectClient.SetHeader("user-agent", UserAgent)
|
||||||
|
net.SetRestyProxyIfConfigured(NoRedirectClient)
|
||||||
|
|
||||||
RestyClient = NewRestyClient()
|
RestyClient = NewRestyClient()
|
||||||
HttpClient = net.NewHttpClient()
|
HttpClient = net.NewHttpClient()
|
||||||
@@ -37,5 +38,7 @@ func NewRestyClient() *resty.Client {
|
|||||||
SetRetryResetReaders(true).
|
SetRetryResetReaders(true).
|
||||||
SetTimeout(DefaultTimeout).
|
SetTimeout(DefaultTimeout).
|
||||||
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
||||||
|
|
||||||
|
net.SetRestyProxyIfConfigured(client)
|
||||||
return client
|
return client
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -239,7 +240,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
|
err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("Error writing param2 to request body:", err)
|
fmt.Println("Error writing param2 to request body:", err)
|
||||||
return err
|
return err
|
||||||
@@ -260,7 +261,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
|
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
|
||||||
resps, err := http.DefaultClient.Do(req)
|
resps, err := http.DefaultClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -258,7 +258,7 @@ type UploadDoneParam struct {
|
|||||||
func fileToObj(f File) *model.Object {
|
func fileToObj(f File) *model.Object {
|
||||||
if len(f.Content.FolderName) > 0 {
|
if len(f.Content.FolderName) > 0 {
|
||||||
return &model.Object{
|
return &model.Object{
|
||||||
ID: fmt.Sprintf("%d", f.ID),
|
ID: strconv.Itoa(f.ID),
|
||||||
Name: f.Content.FolderName,
|
Name: f.Content.FolderName,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
Modified: time.UnixMilli(f.Inserttime),
|
Modified: time.UnixMilli(f.Inserttime),
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
@@ -172,7 +173,7 @@ func (d *ChaoXing) Login() (string, error) {
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
|
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
|
||||||
resp, err := http.DefaultClient.Do(req)
|
resp, err := http.DefaultClient.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
|||||||
501
drivers/chunk/driver.go
Normal file
501
drivers/chunk/driver.go
Normal file
@@ -0,0 +1,501 @@
|
|||||||
|
package chunk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
stdpath "path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/fs"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/sign"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/server/common"
|
||||||
|
"github.com/avast/retry-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Chunk struct {
|
||||||
|
model.Storage
|
||||||
|
Addition
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Config() driver.Config {
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) GetAddition() driver.Additional {
|
||||||
|
return &d.Addition
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Init(ctx context.Context) error {
|
||||||
|
if d.PartSize <= 0 {
|
||||||
|
return errors.New("part size must be positive")
|
||||||
|
}
|
||||||
|
if len(d.ChunkPrefix) <= 0 {
|
||||||
|
return errors.New("chunk folder prefix must not be empty")
|
||||||
|
}
|
||||||
|
d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Drop(ctx context.Context) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||||
|
if utils.PathEqual(path, "/") {
|
||||||
|
return &model.Object{
|
||||||
|
Name: "Root",
|
||||||
|
IsFolder: true,
|
||||||
|
Path: "/",
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
remoteActualPath = stdpath.Join(remoteActualPath, path)
|
||||||
|
if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil {
|
||||||
|
return &model.Object{
|
||||||
|
Path: path,
|
||||||
|
Name: remoteObj.GetName(),
|
||||||
|
Size: remoteObj.GetSize(),
|
||||||
|
Modified: remoteObj.ModTime(),
|
||||||
|
IsFolder: remoteObj.IsDir(),
|
||||||
|
HashInfo: remoteObj.GetHash(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteActualDir, name := stdpath.Split(remoteActualPath)
|
||||||
|
chunkName := d.ChunkPrefix + name
|
||||||
|
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var totalSize int64 = 0
|
||||||
|
// 0号块默认为-1 以支持空文件
|
||||||
|
chunkSizes := []int64{-1}
|
||||||
|
h := make(map[*utils.HashType]string)
|
||||||
|
var first model.Obj
|
||||||
|
for _, o := range chunkObjs {
|
||||||
|
if o.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok {
|
||||||
|
hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_")
|
||||||
|
if ok {
|
||||||
|
ht, ok := utils.GetHashByName(hn)
|
||||||
|
if ok {
|
||||||
|
h[ht] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalSize += o.GetSize()
|
||||||
|
if len(chunkSizes) > idx {
|
||||||
|
if idx == 0 {
|
||||||
|
first = o
|
||||||
|
}
|
||||||
|
chunkSizes[idx] = o.GetSize()
|
||||||
|
} else if len(chunkSizes) == idx {
|
||||||
|
chunkSizes = append(chunkSizes, o.GetSize())
|
||||||
|
} else {
|
||||||
|
newChunkSizes := make([]int64, idx+1)
|
||||||
|
copy(newChunkSizes, chunkSizes)
|
||||||
|
chunkSizes = newChunkSizes
|
||||||
|
chunkSizes[idx] = o.GetSize()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reqDir, _ := stdpath.Split(path)
|
||||||
|
objRes := chunkObject{
|
||||||
|
Object: model.Object{
|
||||||
|
Path: stdpath.Join(reqDir, chunkName),
|
||||||
|
Name: name,
|
||||||
|
Size: totalSize,
|
||||||
|
Modified: first.ModTime(),
|
||||||
|
Ctime: first.CreateTime(),
|
||||||
|
},
|
||||||
|
chunkSizes: chunkSizes,
|
||||||
|
}
|
||||||
|
if len(h) > 0 {
|
||||||
|
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||||
|
}
|
||||||
|
return &objRes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
|
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath())
|
||||||
|
remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{
|
||||||
|
ReqPath: args.ReqPath,
|
||||||
|
Refresh: args.Refresh,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result := make([]model.Obj, 0, len(remoteObjs))
|
||||||
|
listG, listCtx := errgroup.NewGroupWithContext(ctx, d.NumListWorkers, retry.Attempts(3))
|
||||||
|
for _, obj := range remoteObjs {
|
||||||
|
if utils.IsCanceled(listCtx) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
rawName := obj.GetName()
|
||||||
|
if obj.IsDir() {
|
||||||
|
if name, ok := strings.CutPrefix(rawName, d.ChunkPrefix); ok {
|
||||||
|
resultIdx := len(result)
|
||||||
|
result = append(result, nil)
|
||||||
|
listG.Go(func(ctx context.Context) error {
|
||||||
|
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
|
||||||
|
ReqPath: stdpath.Join(args.ReqPath, rawName),
|
||||||
|
Refresh: args.Refresh,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
totalSize := int64(0)
|
||||||
|
h := make(map[*utils.HashType]string)
|
||||||
|
first := obj
|
||||||
|
for _, o := range chunkObjs {
|
||||||
|
if o.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
|
||||||
|
hn, value, ok := strings.Cut(after, "_")
|
||||||
|
if ok {
|
||||||
|
ht, ok := utils.GetHashByName(hn)
|
||||||
|
if ok {
|
||||||
|
h[ht] = value
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if idx == 0 {
|
||||||
|
first = o
|
||||||
|
}
|
||||||
|
totalSize += o.GetSize()
|
||||||
|
}
|
||||||
|
objRes := model.Object{
|
||||||
|
Name: name,
|
||||||
|
Size: totalSize,
|
||||||
|
Modified: first.ModTime(),
|
||||||
|
Ctime: first.CreateTime(),
|
||||||
|
}
|
||||||
|
if len(h) > 0 {
|
||||||
|
objRes.HashInfo = utils.NewHashInfoByMap(h)
|
||||||
|
}
|
||||||
|
if !d.Thumbnail {
|
||||||
|
result[resultIdx] = &objRes
|
||||||
|
} else {
|
||||||
|
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
|
||||||
|
thumb := fmt.Sprintf("%s/d%s?sign=%s",
|
||||||
|
common.GetApiUrl(ctx),
|
||||||
|
utils.EncodePath(thumbPath, true),
|
||||||
|
sign.Sign(thumbPath))
|
||||||
|
result[resultIdx] = &model.ObjThumb{
|
||||||
|
Object: objRes,
|
||||||
|
Thumbnail: model.Thumbnail{
|
||||||
|
Thumbnail: thumb,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !d.ShowHidden && strings.HasPrefix(rawName, ".") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
thumb, ok := model.GetThumb(obj)
|
||||||
|
objRes := model.Object{
|
||||||
|
Name: rawName,
|
||||||
|
Size: obj.GetSize(),
|
||||||
|
Modified: obj.ModTime(),
|
||||||
|
IsFolder: obj.IsDir(),
|
||||||
|
HashInfo: obj.GetHash(),
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
result = append(result, &objRes)
|
||||||
|
} else {
|
||||||
|
result = append(result, &model.ObjThumb{
|
||||||
|
Object: objRes,
|
||||||
|
Thumbnail: model.Thumbnail{
|
||||||
|
Thumbnail: thumb,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err = listG.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
|
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
chunkFile, ok := file.(*chunkObject)
|
||||||
|
remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath())
|
||||||
|
if !ok {
|
||||||
|
l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resultLink := *l
|
||||||
|
resultLink.SyncClosers = utils.NewSyncClosers(l)
|
||||||
|
return &resultLink, nil
|
||||||
|
}
|
||||||
|
// 检查0号块不等于-1 以支持空文件
|
||||||
|
// 如果块数量大于1 最后一块不可能为0
|
||||||
|
// 只检查中间块是否有0
|
||||||
|
for i, l := 0, len(chunkFile.chunkSizes)-2; ; i++ {
|
||||||
|
if i == 0 {
|
||||||
|
if chunkFile.chunkSizes[i] == -1 {
|
||||||
|
return nil, fmt.Errorf("chunk part[%d] are missing", i)
|
||||||
|
}
|
||||||
|
} else if chunkFile.chunkSizes[i] == 0 {
|
||||||
|
return nil, fmt.Errorf("chunk part[%d] are missing", i)
|
||||||
|
}
|
||||||
|
if i >= l {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fileSize := chunkFile.GetSize()
|
||||||
|
mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||||
|
start := httpRange.Start
|
||||||
|
length := httpRange.Length
|
||||||
|
if length < 0 || start+length > fileSize {
|
||||||
|
length = fileSize - start
|
||||||
|
}
|
||||||
|
if length == 0 {
|
||||||
|
return io.NopCloser(strings.NewReader("")), nil
|
||||||
|
}
|
||||||
|
rs := make([]io.Reader, 0)
|
||||||
|
cs := make(utils.Closers, 0)
|
||||||
|
var (
|
||||||
|
rc io.ReadCloser
|
||||||
|
readFrom bool
|
||||||
|
)
|
||||||
|
for idx, chunkSize := range chunkFile.chunkSizes {
|
||||||
|
if readFrom {
|
||||||
|
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
|
||||||
|
if err != nil {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cs = append(cs, l)
|
||||||
|
chunkSize2 := l.ContentLength
|
||||||
|
if chunkSize2 <= 0 {
|
||||||
|
chunkSize2 = o.GetSize()
|
||||||
|
}
|
||||||
|
if chunkSize2 != chunkSize {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
|
||||||
|
}
|
||||||
|
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
|
||||||
|
if err != nil {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
newLength := length - chunkSize2
|
||||||
|
if newLength >= 0 {
|
||||||
|
length = newLength
|
||||||
|
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1})
|
||||||
|
} else {
|
||||||
|
rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length})
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
rs = append(rs, rc)
|
||||||
|
cs = append(cs, rc)
|
||||||
|
if newLength <= 0 {
|
||||||
|
return utils.ReadCloser{
|
||||||
|
Reader: io.MultiReader(rs...),
|
||||||
|
Closer: &cs,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
} else if newStart := start - chunkSize; newStart >= 0 {
|
||||||
|
start = newStart
|
||||||
|
} else {
|
||||||
|
l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args)
|
||||||
|
if err != nil {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cs = append(cs, l)
|
||||||
|
chunkSize2 := l.ContentLength
|
||||||
|
if chunkSize2 <= 0 {
|
||||||
|
chunkSize2 = o.GetSize()
|
||||||
|
}
|
||||||
|
if chunkSize2 != chunkSize {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, fmt.Errorf("chunk part[%d] size not match", idx)
|
||||||
|
}
|
||||||
|
rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l)
|
||||||
|
if err != nil {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1})
|
||||||
|
if err != nil {
|
||||||
|
_ = cs.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
length -= chunkSize2 - start
|
||||||
|
cs = append(cs, rc)
|
||||||
|
if length <= 0 {
|
||||||
|
return utils.ReadCloser{
|
||||||
|
Reader: rc,
|
||||||
|
Closer: &cs,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
rs = append(rs, rc)
|
||||||
|
readFrom = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize)
|
||||||
|
}
|
||||||
|
return &model.Link{
|
||||||
|
RangeReader: stream.RangeReaderFunc(mergedRrf),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
|
path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName)
|
||||||
|
return fs.MakeDir(ctx, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
|
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||||
|
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||||
|
_, err := fs.Move(ctx, src, dst)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||||
|
if _, ok := srcObj.(*chunkObject); ok {
|
||||||
|
newName = d.ChunkPrefix + newName
|
||||||
|
}
|
||||||
|
return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
|
dst := stdpath.Join(d.RemotePath, dstDir.GetPath())
|
||||||
|
src := stdpath.Join(d.RemotePath, srcObj.GetPath())
|
||||||
|
_, err := fs.Copy(ctx, src, dst)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error {
|
||||||
|
return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
|
remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (d.Thumbnail && dstDir.GetName() == ".thumbnails") || (d.ChunkLargeFileOnly && file.GetSize() <= d.PartSize) {
|
||||||
|
return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
|
||||||
|
}
|
||||||
|
upReader := &driver.ReaderUpdatingProgress{
|
||||||
|
Reader: file,
|
||||||
|
UpdateProgress: up,
|
||||||
|
}
|
||||||
|
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), d.ChunkPrefix+file.GetName())
|
||||||
|
if d.StoreHash {
|
||||||
|
for ht, value := range file.GetHash().All() {
|
||||||
|
_ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||||
|
Obj: &model.Object{
|
||||||
|
Name: fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt),
|
||||||
|
Size: 1,
|
||||||
|
Modified: file.ModTime(),
|
||||||
|
},
|
||||||
|
Mimetype: "application/octet-stream",
|
||||||
|
Reader: bytes.NewReader([]byte{0}), // 兼容不支持空文件的驱动
|
||||||
|
}, nil, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fullPartCount := int(file.GetSize() / d.PartSize)
|
||||||
|
tailSize := file.GetSize() % d.PartSize
|
||||||
|
if tailSize == 0 && fullPartCount > 0 {
|
||||||
|
fullPartCount--
|
||||||
|
tailSize = d.PartSize
|
||||||
|
}
|
||||||
|
partIndex := 0
|
||||||
|
for partIndex < fullPartCount {
|
||||||
|
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||||
|
Obj: &model.Object{
|
||||||
|
Name: d.getPartName(partIndex),
|
||||||
|
Size: d.PartSize,
|
||||||
|
Modified: file.ModTime(),
|
||||||
|
},
|
||||||
|
Mimetype: file.GetMimetype(),
|
||||||
|
Reader: io.LimitReader(upReader, d.PartSize),
|
||||||
|
}, nil, true)
|
||||||
|
if err != nil {
|
||||||
|
_ = op.Remove(ctx, remoteStorage, dst)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
partIndex++
|
||||||
|
}
|
||||||
|
err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
|
||||||
|
Obj: &model.Object{
|
||||||
|
Name: d.getPartName(fullPartCount),
|
||||||
|
Size: tailSize,
|
||||||
|
Modified: file.ModTime(),
|
||||||
|
},
|
||||||
|
Mimetype: file.GetMimetype(),
|
||||||
|
Reader: upReader,
|
||||||
|
}, nil)
|
||||||
|
if err != nil {
|
||||||
|
_ = op.Remove(ctx, remoteStorage, dst)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) getPartName(part int) string {
|
||||||
|
return fmt.Sprintf("%d%s", part, d.CustomExt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errs.NotImplement
|
||||||
|
}
|
||||||
|
remoteDetails, err := op.GetStorageDetails(ctx, remoteStorage)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: remoteDetails.DiskUsage,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ driver.Driver = (*Chunk)(nil)
|
||||||
39
drivers/chunk/meta.go
Normal file
39
drivers/chunk/meta.go
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
package chunk

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
)

// Addition holds the user-configurable options of the Chunk driver.
type Addition struct {
	// RemotePath is the path on the wrapped storage where data is kept.
	RemotePath string `json:"remote_path" required:"true"`
	// PartSize is the chunk size in bytes.
	PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
	// ChunkLargeFileOnly leaves files no larger than PartSize unsplit.
	ChunkLargeFileOnly bool `json:"chunk_large_file_only" default:"false" help:"chunk only if file size > part_size"`
	// ChunkPrefix is the name prefix of the folder that holds a file's chunks.
	ChunkPrefix string `json:"chunk_prefix" type:"string" default:"[openlist_chunk]" help:"the prefix of chunk folder"`
	// CustomExt is an optional extension appended to each chunk name
	// (see getPartName).
	CustomExt string `json:"custom_ext" type:"string"`
	// StoreHash: whether to store file hashes — presumably persisted next to
	// the chunks; confirm against the driver implementation.
	StoreHash bool `json:"store_hash" type:"bool" default:"true"`
	// NumListWorkers bounds concurrency when listing — TODO confirm usage.
	NumListWorkers int `json:"num_list_workers" required:"true" type:"number" default:"5"`

	Thumbnail  bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
	ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
}

// config declares the driver's static properties: local sorting, proxy-only
// access, no listing cache, and no direct link URLs.
var config = driver.Config{
	Name:        "Chunk",
	LocalSort:   true,
	OnlyProxy:   true,
	NoCache:     true,
	DefaultRoot: "/",
	NoLinkURL:   true,
}

// init registers the driver; the defaults here mirror the Addition tags.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Chunk{
			Addition: Addition{
				ChunkPrefix:    "[openlist_chunk]",
				NumListWorkers: 5,
			},
		}
	})
}
|
||||||
8
drivers/chunk/obj.go
Normal file
8
drivers/chunk/obj.go
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
package chunk

import "github.com/OpenListTeam/OpenList/v4/internal/model"

// chunkObject represents one logical file that is stored as multiple
// chunk parts on the remote storage.
type chunkObject struct {
	model.Object
	// chunkSizes holds the size in bytes of each part, in part order.
	chunkSizes []int64
}
|
||||||
@@ -21,6 +21,8 @@ type CloudreveV4 struct {
|
|||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
ref *CloudreveV4
|
ref *CloudreveV4
|
||||||
|
AccessExpires string
|
||||||
|
RefreshExpires string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *CloudreveV4) Config() driver.Config {
|
func (d *CloudreveV4) Config() driver.Config {
|
||||||
@@ -44,13 +46,17 @@ func (d *CloudreveV4) Init(ctx context.Context) error {
|
|||||||
if d.ref != nil {
|
if d.ref != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if d.AccessToken == "" && d.RefreshToken != "" {
|
if d.canLogin() {
|
||||||
return d.refreshToken()
|
|
||||||
}
|
|
||||||
if d.Username != "" {
|
|
||||||
return d.login()
|
return d.login()
|
||||||
}
|
}
|
||||||
return nil
|
if d.RefreshToken != "" {
|
||||||
|
return d.refreshToken()
|
||||||
|
}
|
||||||
|
if d.AccessToken == "" {
|
||||||
|
return errors.New("no way to authenticate. At least AccessToken is required")
|
||||||
|
}
|
||||||
|
// ensure AccessToken is valid
|
||||||
|
return d.parseJWT(d.AccessToken, &AccessJWT{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
|
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
|
||||||
@@ -333,6 +339,20 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
|
|||||||
return nil, errs.NotImplement
|
return nil, errs.NotImplement
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
// TODO return storage details (total space, free space, etc.)
|
||||||
|
var r CapacityResp
|
||||||
|
err := d.request(http.MethodGet, "/user/capacity", func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: driver.DiskUsageFromUsedAndTotal(r.Used, r.Total),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||||
// return nil, errs.NotSupport
|
// return nil, errs.NotSupport
|
||||||
//}
|
//}
|
||||||
|
|||||||
@@ -66,11 +66,27 @@ type CaptchaResp struct {
|
|||||||
Ticket string `json:"ticket"`
|
Ticket string `json:"ticket"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AccessJWT is the decoded payload of the access-token JWT.
type AccessJWT struct {
	TokenType string `json:"token_type"`
	Sub       string `json:"sub"`
	// Exp and Nbf are Unix timestamps in seconds (see time.Unix usage in
	// isTokenExpired).
	Exp int64 `json:"exp"`
	Nbf int64 `json:"nbf"`
}

// RefreshJWT is the decoded payload of the refresh-token JWT.
// NOTE(review): Exp/Nbf are int here but int64 in AccessJWT — possibly
// deliberate since the refresh token's exp claim is noted elsewhere as
// non-standard; confirm before unifying the types.
type RefreshJWT struct {
	TokenType   string `json:"token_type"`
	Sub         string `json:"sub"`
	Exp         int    `json:"exp"`
	Nbf         int    `json:"nbf"`
	StateHash   string `json:"state_hash"`
	RootTokenID string `json:"root_token_id"`
}
|
||||||
|
|
||||||
type Token struct {
|
type Token struct {
|
||||||
AccessToken string `json:"access_token"`
|
AccessToken string `json:"access_token"`
|
||||||
RefreshToken string `json:"refresh_token"`
|
RefreshToken string `json:"refresh_token"`
|
||||||
AccessExpires time.Time `json:"access_expires"`
|
AccessExpires string `json:"access_expires"`
|
||||||
RefreshExpires time.Time `json:"refresh_expires"`
|
RefreshExpires string `json:"refresh_expires"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type TokenResponse struct {
|
type TokenResponse struct {
|
||||||
@@ -188,3 +204,9 @@ type FolderSummaryResp struct {
|
|||||||
CalculatedAt time.Time `json:"calculated_at"`
|
CalculatedAt time.Time `json:"calculated_at"`
|
||||||
} `json:"folder_summary"`
|
} `json:"folder_summary"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CapacityResp is the payload of GET /user/capacity; values are in bytes.
type CapacityResp struct {
	Total uint64 `json:"total"`
	Used  uint64 `json:"used"`
	// StoragePackTotal uint64 `json:"storage_pack_total"`
}
|
||||||
|
|||||||
@@ -28,6 +28,15 @@ import (
|
|||||||
|
|
||||||
// do others that not defined in Driver interface
|
// do others that not defined in Driver interface
|
||||||
|
|
||||||
|
// API result codes with driver-specific handling in request().
const (
	// CodeLoginRequired marks an unauthenticated session (HTTP 401).
	CodeLoginRequired = http.StatusUnauthorized
	// CodeCredentialInvalid is the server's code for a rejected token request.
	CodeCredentialInvalid = 40020 // Failed to issue token
)

var (
	// ErrorIssueToken signals that the server refused to issue a token.
	ErrorIssueToken = errors.New("failed to issue token")
)
|
||||||
|
|
||||||
func (d *CloudreveV4) getUA() string {
|
func (d *CloudreveV4) getUA() string {
|
||||||
if d.CustomUA != "" {
|
if d.CustomUA != "" {
|
||||||
return d.CustomUA
|
return d.CustomUA
|
||||||
@@ -39,6 +48,23 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
|||||||
if d.ref != nil {
|
if d.ref != nil {
|
||||||
return d.ref.request(method, path, callback, out)
|
return d.ref.request(method, path, callback, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ensure token
|
||||||
|
if d.isTokenExpired() {
|
||||||
|
err := d.refreshToken()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return d._request(method, path, callback, out)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CloudreveV4) _request(method string, path string, callback base.ReqCallback, out any) error {
|
||||||
|
if d.ref != nil {
|
||||||
|
return d.ref._request(method, path, callback, out)
|
||||||
|
}
|
||||||
|
|
||||||
u := d.Address + "/api/v4" + path
|
u := d.Address + "/api/v4" + path
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
@@ -65,15 +91,17 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
|||||||
}
|
}
|
||||||
|
|
||||||
if r.Code != 0 {
|
if r.Code != 0 {
|
||||||
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
|
if r.Code == CodeLoginRequired && d.canLogin() && path != "/session/token/refresh" {
|
||||||
// try to refresh token
|
err = d.login()
|
||||||
err = d.refreshToken()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return d.request(method, path, callback, out)
|
return d.request(method, path, callback, out)
|
||||||
}
|
}
|
||||||
return errors.New(r.Msg)
|
if r.Code == CodeCredentialInvalid {
|
||||||
|
return ErrorIssueToken
|
||||||
|
}
|
||||||
|
return fmt.Errorf("%d: %s", r.Code, r.Msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
if out != nil && r.Data != nil {
|
if out != nil && r.Data != nil {
|
||||||
@@ -91,14 +119,18 @@ func (d *CloudreveV4) request(method string, path string, callback base.ReqCallb
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *CloudreveV4) canLogin() bool {
|
||||||
|
return d.Username != "" && d.Password != ""
|
||||||
|
}
|
||||||
|
|
||||||
func (d *CloudreveV4) login() error {
|
func (d *CloudreveV4) login() error {
|
||||||
var siteConfig SiteLoginConfigResp
|
var siteConfig SiteLoginConfigResp
|
||||||
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
err := d._request(http.MethodGet, "/site/config/login", nil, &siteConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var prepareLogin PrepareLoginResp
|
var prepareLogin PrepareLoginResp
|
||||||
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
err = d._request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -128,7 +160,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
|||||||
}
|
}
|
||||||
if needCaptcha {
|
if needCaptcha {
|
||||||
var config BasicConfigResp
|
var config BasicConfigResp
|
||||||
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
|
err = d._request(http.MethodGet, "/site/config/basic", nil, &config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -136,7 +168,7 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
|||||||
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
|
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
|
||||||
}
|
}
|
||||||
var captcha CaptchaResp
|
var captcha CaptchaResp
|
||||||
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
|
err = d._request(http.MethodGet, "/site/captcha", nil, &captcha)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -162,20 +194,22 @@ func (d *CloudreveV4) doLogin(needCaptcha bool) error {
|
|||||||
loginBody["captcha"] = captchaCode
|
loginBody["captcha"] = captchaCode
|
||||||
}
|
}
|
||||||
var token TokenResponse
|
var token TokenResponse
|
||||||
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
|
err = d._request(http.MethodPost, "/session/token", func(req *resty.Request) {
|
||||||
req.SetBody(loginBody)
|
req.SetBody(loginBody)
|
||||||
}, &token)
|
}, &token)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
|
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
|
||||||
|
d.AccessExpires, d.RefreshExpires = token.Token.AccessExpires, token.Token.RefreshExpires
|
||||||
op.MustSaveDriverStorage(d)
|
op.MustSaveDriverStorage(d)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *CloudreveV4) refreshToken() error {
|
func (d *CloudreveV4) refreshToken() error {
|
||||||
|
// if no refresh token, try to login if possible
|
||||||
if d.RefreshToken == "" {
|
if d.RefreshToken == "" {
|
||||||
if d.Username != "" {
|
if d.canLogin() {
|
||||||
err := d.login()
|
err := d.login()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
|
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
|
||||||
@@ -183,20 +217,127 @@ func (d *CloudreveV4) refreshToken() error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parse jwt to check if refresh token is valid
|
||||||
|
var jwt RefreshJWT
|
||||||
|
err := d.parseJWT(d.RefreshToken, &jwt)
|
||||||
|
if err != nil {
|
||||||
|
// if refresh token is invalid, try to login if possible
|
||||||
|
if d.canLogin() {
|
||||||
|
return d.login()
|
||||||
|
}
|
||||||
|
d.GetStorage().SetStatus(fmt.Sprintf("Invalid RefreshToken: %s", err.Error()))
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
return fmt.Errorf("invalid refresh token: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// do refresh token
|
||||||
var token Token
|
var token Token
|
||||||
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
err = d._request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"refresh_token": d.RefreshToken,
|
"refresh_token": d.RefreshToken,
|
||||||
})
|
})
|
||||||
}, &token)
|
}, &token)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if errors.Is(err, ErrorIssueToken) {
|
||||||
|
if d.canLogin() {
|
||||||
|
// try to login again
|
||||||
|
return d.login()
|
||||||
|
}
|
||||||
|
d.GetStorage().SetStatus("This session is no longer valid")
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
return ErrorIssueToken
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
|
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
|
||||||
|
d.AccessExpires, d.RefreshExpires = token.AccessExpires, token.RefreshExpires
|
||||||
op.MustSaveDriverStorage(d)
|
op.MustSaveDriverStorage(d)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *CloudreveV4) parseJWT(token string, jwt any) error {
|
||||||
|
split := strings.Split(token, ".")
|
||||||
|
if len(split) != 3 {
|
||||||
|
return fmt.Errorf("invalid token length: %d, ensure the token is a valid JWT", len(split))
|
||||||
|
}
|
||||||
|
data, err := base64.RawURLEncoding.DecodeString(split[1])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid token encoding: %w, ensure the token is a valid JWT", err)
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(data, &jwt)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid token content: %w, ensure the token is a valid JWT", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if token is expired
|
||||||
|
// https://github.com/cloudreve/frontend/blob/ddfacc1c31c49be03beb71de4cc114c8811038d6/src/session/index.ts#L177-L200
|
||||||
|
func (d *CloudreveV4) isTokenExpired() bool {
|
||||||
|
if d.RefreshToken == "" {
|
||||||
|
// login again if username and password is set
|
||||||
|
if d.canLogin() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// no refresh token, cannot refresh
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if d.AccessToken == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
err error
|
||||||
|
expires time.Time
|
||||||
|
)
|
||||||
|
// check if token is expired
|
||||||
|
if d.AccessExpires != "" {
|
||||||
|
// use expires field if possible to prevent timezone issue
|
||||||
|
// only available after login or refresh token
|
||||||
|
// 2025-08-28T02:43:07.645109985+08:00
|
||||||
|
expires, err = time.Parse(time.RFC3339Nano, d.AccessExpires)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// fallback to parse jwt
|
||||||
|
// if failed, disable the storage
|
||||||
|
var jwt AccessJWT
|
||||||
|
err = d.parseJWT(d.AccessToken, &jwt)
|
||||||
|
if err != nil {
|
||||||
|
d.GetStorage().SetStatus(fmt.Sprintf("Invalid AccessToken: %s", err.Error()))
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// may be have timezone issue
|
||||||
|
expires = time.Unix(jwt.Exp, 0)
|
||||||
|
}
|
||||||
|
// add a 10 minutes safe margin
|
||||||
|
ddl := time.Now().Add(10 * time.Minute)
|
||||||
|
if expires.Before(ddl) {
|
||||||
|
// current access token expired, check if refresh token is expired
|
||||||
|
// warning: cannot parse refresh token from jwt, because the exp field is not standard
|
||||||
|
if d.RefreshExpires != "" {
|
||||||
|
refreshExpires, err := time.Parse(time.RFC3339Nano, d.RefreshExpires)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if refreshExpires.Before(time.Now()) {
|
||||||
|
// This session is no longer valid
|
||||||
|
if d.canLogin() {
|
||||||
|
// try to login again
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
d.GetStorage().SetStatus("This session is no longer valid")
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
|
||||||
var finish int64 = 0
|
var finish int64 = 0
|
||||||
var chunk int = 0
|
var chunk int = 0
|
||||||
|
|||||||
230
drivers/cnb_releases/driver.go
Normal file
230
drivers/cnb_releases/driver.go
Normal file
@@ -0,0 +1,230 @@
|
|||||||
|
package cnb_releases
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/errs"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
|
"github.com/go-resty/resty/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type CnbReleases struct {
|
||||||
|
model.Storage
|
||||||
|
Addition
|
||||||
|
ref *CnbReleases
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Config() driver.Config {
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) GetAddition() driver.Additional {
|
||||||
|
return &d.Addition
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Init(ctx context.Context) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) InitReference(storage driver.Driver) error {
|
||||||
|
refStorage, ok := storage.(*CnbReleases)
|
||||||
|
if ok {
|
||||||
|
d.ref = refStorage
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("ref: storage is not CnbReleases")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Drop(ctx context.Context) error {
|
||||||
|
d.ref = nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
|
if dir.GetPath() == "/" {
|
||||||
|
// get all releases for root dir
|
||||||
|
var resp ReleaseList
|
||||||
|
|
||||||
|
err := d.Request(http.MethodGet, "/{repo}/-/releases", func(req *resty.Request) {
|
||||||
|
req.SetPathParam("repo", d.Repo)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return utils.SliceConvert(resp, func(src Release) (model.Obj, error) {
|
||||||
|
name := src.Name
|
||||||
|
if d.UseTagName {
|
||||||
|
name = src.TagName
|
||||||
|
}
|
||||||
|
return &model.Object{
|
||||||
|
ID: src.ID,
|
||||||
|
Name: name,
|
||||||
|
Size: d.sumAssetsSize(src.Assets),
|
||||||
|
Ctime: src.CreatedAt,
|
||||||
|
Modified: src.UpdatedAt,
|
||||||
|
IsFolder: true,
|
||||||
|
}, nil
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
// get release info by release id
|
||||||
|
releaseID := dir.GetID()
|
||||||
|
if releaseID == "" {
|
||||||
|
return nil, errs.ObjectNotFound
|
||||||
|
}
|
||||||
|
var resp Release
|
||||||
|
err := d.Request(http.MethodGet, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
|
||||||
|
req.SetPathParam("repo", d.Repo)
|
||||||
|
req.SetPathParam("release_id", releaseID)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return utils.SliceConvert(resp.Assets, func(src ReleaseAsset) (model.Obj, error) {
|
||||||
|
return &Object{
|
||||||
|
Object: model.Object{
|
||||||
|
ID: src.ID,
|
||||||
|
Path: src.Path,
|
||||||
|
Name: src.Name,
|
||||||
|
Size: src.Size,
|
||||||
|
Ctime: src.CreatedAt,
|
||||||
|
Modified: src.UpdatedAt,
|
||||||
|
IsFolder: false,
|
||||||
|
},
|
||||||
|
ParentID: dir.GetID(),
|
||||||
|
}, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
|
return &model.Link{
|
||||||
|
URL: "https://cnb.cool" + file.GetPath(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
|
if parentDir.GetPath() == "/" {
|
||||||
|
// create a new release
|
||||||
|
branch := d.DefaultBranch
|
||||||
|
if branch == "" {
|
||||||
|
branch = "main" // fallback to "main" if not set
|
||||||
|
}
|
||||||
|
return d.Request(http.MethodPost, "/{repo}/-/releases", func(req *resty.Request) {
|
||||||
|
req.SetPathParam("repo", d.Repo)
|
||||||
|
req.SetBody(base.Json{
|
||||||
|
"name": dirName,
|
||||||
|
"tag_name": dirName,
|
||||||
|
"target_commitish": branch,
|
||||||
|
})
|
||||||
|
}, nil)
|
||||||
|
}
|
||||||
|
return errs.NotImplement
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||||
|
return nil, errs.NotImplement
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||||
|
if srcObj.IsDir() && !d.UseTagName {
|
||||||
|
return d.Request(http.MethodPatch, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
|
||||||
|
req.SetPathParam("repo", d.Repo)
|
||||||
|
req.SetPathParam("release_id", srcObj.GetID())
|
||||||
|
req.SetFormData(map[string]string{
|
||||||
|
"name": newName,
|
||||||
|
})
|
||||||
|
}, nil)
|
||||||
|
}
|
||||||
|
return errs.NotImplement
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||||
|
return nil, errs.NotImplement
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Remove(ctx context.Context, obj model.Obj) error {
|
||||||
|
if obj.IsDir() {
|
||||||
|
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}", func(req *resty.Request) {
|
||||||
|
req.SetPathParam("repo", d.Repo)
|
||||||
|
req.SetPathParam("release_id", obj.GetID())
|
||||||
|
}, nil)
|
||||||
|
}
|
||||||
|
if o, ok := obj.(*Object); ok {
|
||||||
|
return d.Request(http.MethodDelete, "/{repo}/-/releases/{release_id}/assets/{asset_id}", func(req *resty.Request) {
|
||||||
|
req.SetPathParam("repo", d.Repo)
|
||||||
|
req.SetPathParam("release_id", o.ParentID)
|
||||||
|
req.SetPathParam("asset_id", obj.GetID())
|
||||||
|
}, nil)
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("unable to get release ID")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
|
// 1. get upload info
|
||||||
|
var resp ReleaseAssetUploadURL
|
||||||
|
err := d.Request(http.MethodPost, "/{repo}/-/releases/{release_id}/asset-upload-url", func(req *resty.Request) {
|
||||||
|
req.SetPathParam("repo", d.Repo)
|
||||||
|
req.SetPathParam("release_id", dstDir.GetID())
|
||||||
|
req.SetBody(base.Json{
|
||||||
|
"asset_name": file.GetName(),
|
||||||
|
"overwrite": true,
|
||||||
|
"size": file.GetSize(),
|
||||||
|
})
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. upload file
|
||||||
|
// use multipart to create form file
|
||||||
|
var b bytes.Buffer
|
||||||
|
w := multipart.NewWriter(&b)
|
||||||
|
_, err = w.CreateFormFile("file", file.GetName())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
headSize := b.Len()
|
||||||
|
err = w.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
head := bytes.NewReader(b.Bytes()[:headSize])
|
||||||
|
tail := bytes.NewReader(b.Bytes()[headSize:])
|
||||||
|
rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))
|
||||||
|
|
||||||
|
// use net/http to upload file
|
||||||
|
ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Duration(resp.ExpiresInSec+1)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
req, err := http.NewRequestWithContext(ctxWithTimeout, http.MethodPost, resp.UploadURL, rateLimitedRd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", w.FormDataContentType())
|
||||||
|
req.Header.Set("User-Agent", base.UserAgent)
|
||||||
|
httpResp, err := base.HttpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer httpResp.Body.Close()
|
||||||
|
if httpResp.StatusCode != http.StatusNoContent {
|
||||||
|
return fmt.Errorf("upload file failed: %s", httpResp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. verify upload
|
||||||
|
return d.Request(http.MethodPost, resp.VerifyURL, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ driver.Driver = (*CnbReleases)(nil)
|
||||||
26
drivers/cnb_releases/meta.go
Normal file
26
drivers/cnb_releases/meta.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package cnb_releases

import (
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/op"
)

// Addition holds the user-configurable options of the CNB Releases driver.
type Addition struct {
	driver.RootPath
	// Repo identifies the CNB repository — presumably "owner/name"; confirm.
	Repo string `json:"repo" type:"string" required:"true"`
	// Token is the API bearer token (sent via Authorization header).
	Token string `json:"token" type:"string" required:"true"`
	UseTagName bool `json:"use_tag_name" type:"bool" default:"false" help:"Use tag name instead of release name"`
	DefaultBranch string `json:"default_branch" type:"string" default:"main" help:"Default branch for new releases"`
}

// config declares the driver's static properties.
var config = driver.Config{
	Name:        "CNB Releases",
	LocalSort:   true,
	DefaultRoot: "/",
}

// init registers the driver factory.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &CnbReleases{}
	})
}
|
||||||
100
drivers/cnb_releases/types.go
Normal file
100
drivers/cnb_releases/types.go
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
package cnb_releases

import (
	"time"

	"github.com/OpenListTeam/OpenList/v4/internal/model"
)

// Object is a model.Object for a release asset, extended with the ID of the
// owning release so asset-level API calls can be addressed.
type Object struct {
	model.Object
	ParentID string
}

// TagList is a list of tags — presumably the tag-listing response; confirm.
type TagList []Tag

// Tag describes a git tag as returned by the CNB API.
type Tag struct {
	Commit struct {
		Author    UserInfo       `json:"author"`
		Commit    CommitObject   `json:"commit"`
		Committer UserInfo       `json:"committer"`
		Parents   []CommitParent `json:"parents"`
		Sha       string         `json:"sha"`
	} `json:"commit"`
	Name         string                `json:"name"`
	Target       string                `json:"target"`
	TargetType   string                `json:"target_type"`
	Verification TagObjectVerification `json:"verification"`
}

// UserInfo identifies a CNB user account.
type UserInfo struct {
	Freeze   bool   `json:"freeze"`
	Nickname string `json:"nickname"`
	Username string `json:"username"`
}

// CommitObject is the commit payload embedded in a Tag.
type CommitObject struct {
	Author       Signature                `json:"author"`
	CommentCount int                      `json:"comment_count"`
	Committer    Signature                `json:"committer"`
	Message      string                   `json:"message"`
	Tree         CommitObjectTree         `json:"tree"`
	Verification CommitObjectVerification `json:"verification"`
}

// Signature is a git author/committer signature.
type Signature struct {
	Date  time.Time `json:"date"`
	Email string    `json:"email"`
	Name  string    `json:"name"`
}

// CommitObjectTree references a tree object by SHA.
type CommitObjectTree struct {
	Sha string `json:"sha"`
}

// CommitObjectVerification carries signature verification data.
type CommitObjectVerification struct {
	Payload    string `json:"payload"`
	Reason     string `json:"reason"`
	Signature  string `json:"signature"`
	Verified   bool   `json:"verified"`
	VerifiedAt string `json:"verified_at"`
}

// CommitParent has the same shape as a tree reference.
type CommitParent = CommitObjectTree

// TagObjectVerification mirrors commit verification.
type TagObjectVerification = CommitObjectVerification

// ReleaseList is the response of the release-listing endpoint.
type ReleaseList []Release

// Release describes one release: metadata plus its attached assets.
type Release struct {
	Assets       []ReleaseAsset `json:"assets"`
	Author       UserInfo       `json:"author"`
	Body         string         `json:"body"`
	CreatedAt    time.Time      `json:"created_at"`
	Draft        bool           `json:"draft"`
	ID           string         `json:"id"`
	IsLatest     bool           `json:"is_latest"`
	Name         string         `json:"name"`
	Prerelease   bool           `json:"prerelease"`
	PublishedAt  time.Time      `json:"published_at"`
	TagCommitish string         `json:"tag_commitish"`
	TagName      string         `json:"tag_name"`
	UpdatedAt    time.Time      `json:"updated_at"`
}

// ReleaseAsset describes one downloadable file attached to a release.
type ReleaseAsset struct {
	ContentType string    `json:"content_type"`
	CreatedAt   time.Time `json:"created_at"`
	ID          string    `json:"id"`
	Name        string    `json:"name"`
	Path        string    `json:"path"`
	Size        int64     `json:"size"`
	UpdatedAt   time.Time `json:"updated_at"`
	Uploader    UserInfo  `json:"uploader"`
}

// ReleaseAssetUploadURL is the pre-signed upload handle returned by the
// asset-upload-url endpoint: UploadURL is valid for ExpiresInSec seconds
// and the upload is finalized via VerifyURL.
type ReleaseAssetUploadURL struct {
	UploadURL    string `json:"upload_url"`
	ExpiresInSec int    `json:"expires_in_sec"`
	VerifyURL    string `json:"verify_url"`
}
|
||||||
58
drivers/cnb_releases/util.go
Normal file
58
drivers/cnb_releases/util.go
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
package cnb_releases
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// do others that not defined in Driver interface
|
||||||
|
|
||||||
|
func (d *CnbReleases) Request(method string, path string, callback base.ReqCallback, resp any) error {
|
||||||
|
if d.ref != nil {
|
||||||
|
return d.ref.Request(method, path, callback, resp)
|
||||||
|
}
|
||||||
|
var url string
|
||||||
|
if strings.HasPrefix(path, "http") {
|
||||||
|
url = path
|
||||||
|
} else {
|
||||||
|
url = "https://api.cnb.cool" + path
|
||||||
|
}
|
||||||
|
req := base.RestyClient.R()
|
||||||
|
req.SetHeader("Accept", "application/json")
|
||||||
|
req.SetAuthScheme("Bearer")
|
||||||
|
req.SetAuthToken(d.Token)
|
||||||
|
|
||||||
|
if callback != nil {
|
||||||
|
callback(req)
|
||||||
|
}
|
||||||
|
res, err := req.Execute(method, url)
|
||||||
|
log.Debugln(res.String())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if res.StatusCode() != http.StatusOK && res.StatusCode() != http.StatusCreated && res.StatusCode() != http.StatusNoContent {
|
||||||
|
return fmt.Errorf("failed to request %s, status code: %d, message: %s", url, res.StatusCode(), res.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp != nil {
|
||||||
|
err = json.Unmarshal(res.Body(), resp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *CnbReleases) sumAssetsSize(assets []ReleaseAsset) int64 {
|
||||||
|
var size int64
|
||||||
|
for _, asset := range assets {
|
||||||
|
size += asset.Size
|
||||||
|
}
|
||||||
|
return size
|
||||||
|
}
|
||||||
@@ -318,6 +318,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
return readSeeker, nil
|
return readSeeker, nil
|
||||||
}),
|
}),
|
||||||
SyncClosers: utils.NewSyncClosers(remoteLink),
|
SyncClosers: utils.NewSyncClosers(remoteLink),
|
||||||
|
RequireReference: remoteLink.RequireReference,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -366,7 +367,6 @@ func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||||
}
|
}
|
||||||
return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
|
return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
|
func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
|
||||||
@@ -411,6 +411,16 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
remoteDetails, err := op.GetStorageDetails(ctx, d.remoteStorage)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: remoteDetails.DiskUsage,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||||
// return nil, errs.NotSupport
|
// return nil, errs.NotSupport
|
||||||
//}
|
//}
|
||||||
|
|||||||
203
drivers/degoo/driver.go
Normal file
203
drivers/degoo/driver.go
Normal file
@@ -0,0 +1,203 @@
|
|||||||
|
package degoo
|
||||||
|
|
||||||
|
import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"time"

	"github.com/OpenListTeam/OpenList/v4/drivers/base"
	"github.com/OpenListTeam/OpenList/v4/internal/driver"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
|
||||||
|
|
||||||
|
// Degoo is the driver for the Degoo cloud storage service, talking to its
// GraphQL and REST endpoints.
type Degoo struct {
	model.Storage
	Addition
	client *http.Client // shared HTTP client used for all API calls (set in Init)
}
|
||||||
|
|
||||||
|
// Config returns the static driver configuration for Degoo.
func (d *Degoo) Config() driver.Config {
	return config
}
|
||||||
|
|
||||||
|
// GetAddition exposes the driver's additional (user-configurable) settings.
func (d *Degoo) GetAddition() driver.Additional {
	return &d.Addition
}
|
||||||
|
|
||||||
|
// Init prepares the driver: it wires up the shared HTTP client, makes sure a
// usable access token exists, and loads the account's device list.
func (d *Degoo) Init(ctx context.Context) error {

	d.client = base.HttpClient

	// Ensure we have a valid token (will login if needed or refresh if expired)
	if err := d.ensureValidToken(ctx); err != nil {
		return fmt.Errorf("failed to initialize token: %w", err)
	}

	return d.getDevices(ctx)
}
|
||||||
|
|
||||||
|
// Drop releases driver resources; Degoo keeps no per-mount state to clean up.
func (d *Degoo) Drop(ctx context.Context) error {
	return nil
}
|
||||||
|
|
||||||
|
func (d *Degoo) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
|
items, err := d.getAllFileChildren5(ctx, dir.GetID())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return utils.MustSliceConvert(items, func(s DegooFileItem) model.Obj {
|
||||||
|
isFolder := s.Category == 2 || s.Category == 1 || s.Category == 10
|
||||||
|
|
||||||
|
createTime, modTime, _ := humanReadableTimes(s.CreationTime, s.LastModificationTime, s.LastUploadTime)
|
||||||
|
|
||||||
|
size, err := strconv.ParseInt(s.Size, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
size = 0 // Default to 0 if size parsing fails
|
||||||
|
}
|
||||||
|
|
||||||
|
return &model.Object{
|
||||||
|
ID: s.ID,
|
||||||
|
Path: s.FilePath,
|
||||||
|
Name: s.Name,
|
||||||
|
Size: size,
|
||||||
|
Modified: modTime,
|
||||||
|
Ctime: createTime,
|
||||||
|
IsFolder: isFolder,
|
||||||
|
}
|
||||||
|
}), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Degoo) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
|
item, err := d.getOverlay4(ctx, file.GetID())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &model.Link{URL: item.URL}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Degoo) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
|
// This is done by calling the setUploadFile3 API with a special checksum and size.
|
||||||
|
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) { setUploadFile3(Token: $Token, FileInfos: $FileInfos) }`
|
||||||
|
|
||||||
|
variables := map[string]interface{}{
|
||||||
|
"Token": d.AccessToken,
|
||||||
|
"FileInfos": []map[string]interface{}{
|
||||||
|
{
|
||||||
|
"Checksum": folderChecksum,
|
||||||
|
"Name": dirName,
|
||||||
|
"CreationTime": time.Now().UnixMilli(),
|
||||||
|
"ParentID": parentDir.GetID(),
|
||||||
|
"Size": 0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Degoo) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||||
|
const query = `mutation SetMoveFile($Token: String!, $Copy: Boolean, $NewParentID: String!, $FileIDs: [String]!) { setMoveFile(Token: $Token, Copy: $Copy, NewParentID: $NewParentID, FileIDs: $FileIDs) }`
|
||||||
|
|
||||||
|
variables := map[string]interface{}{
|
||||||
|
"Token": d.AccessToken,
|
||||||
|
"Copy": false,
|
||||||
|
"NewParentID": dstDir.GetID(),
|
||||||
|
"FileIDs": []string{srcObj.GetID()},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := d.apiCall(ctx, "SetMoveFile", query, variables)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return srcObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Degoo) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||||
|
const query = `mutation SetRenameFile($Token: String!, $FileRenames: [FileRenameInfo]!) { setRenameFile(Token: $Token, FileRenames: $FileRenames) }`
|
||||||
|
|
||||||
|
variables := map[string]interface{}{
|
||||||
|
"Token": d.AccessToken,
|
||||||
|
"FileRenames": []DegooFileRenameInfo{
|
||||||
|
{
|
||||||
|
ID: srcObj.GetID(),
|
||||||
|
NewName: newName,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := d.apiCall(ctx, "SetRenameFile", query, variables)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy is not implemented: the Degoo API offers no direct server-side copy
// (setMoveFile's Copy flag is not used here).
func (d *Degoo) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	// Copy is not implemented, Degoo API does not support direct copy.
	return nil, errs.NotImplement
}
|
||||||
|
|
||||||
|
// Remove deletes a file or folder (moves it to the Degoo recycle bin) via
// the setDeleteFile5 mutation.
func (d *Degoo) Remove(ctx context.Context, obj model.Obj) error {
	// Remove deletes a file or folder (moves to trash).
	const query = `mutation SetDeleteFile5($Token: String!, $IsInRecycleBin: Boolean!, $IDs: [IDType]!) { setDeleteFile5(Token: $Token, IsInRecycleBin: $IsInRecycleBin, IDs: $IDs) }`

	variables := map[string]interface{}{
		"Token":          d.AccessToken,
		"IsInRecycleBin": false, // the object is not yet in the recycle bin
		"IDs":            []map[string]string{{"FileID": obj.GetID()}},
	}

	_, err := d.apiCall(ctx, "SetDeleteFile5", query, variables)
	return err
}
|
||||||
|
|
||||||
|
func (d *Degoo) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
|
tmpF, err := file.CacheFullAndWriter(&up, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
parentID := dstDir.GetID()
|
||||||
|
|
||||||
|
// Calculate the checksum for the file.
|
||||||
|
checksum, err := d.checkSum(tmpF)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. Get upload authorization via getBucketWriteAuth4.
|
||||||
|
auths, err := d.getBucketWriteAuth4(ctx, file, parentID, checksum)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Upload file.
|
||||||
|
// support rapid upload
|
||||||
|
if auths.GetBucketWriteAuth4[0].Error != "Already exist!" {
|
||||||
|
err = d.uploadS3(ctx, auths, tmpF, file, checksum)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Register metadata with setUploadFile3.
|
||||||
|
data, err := d.SetUploadFile3(ctx, file, parentID, checksum)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !data.SetUploadFile3 {
|
||||||
|
return fmt.Errorf("setUploadFile3 failed: %v", data)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
27
drivers/degoo/meta.go
Normal file
27
drivers/degoo/meta.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package degoo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Addition holds the user-configurable options of the Degoo driver.
type Addition struct {
	driver.RootID
	Username     string `json:"username" help:"Your Degoo account email"`
	Password     string `json:"password" help:"Your Degoo account password"`
	RefreshToken string `json:"refresh_token" help:"Refresh token for automatic token renewal, obtained automatically"`
	AccessToken  string `json:"access_token" help:"Access token for Degoo API, obtained automatically"`
}

// config is the static driver configuration registered with the op package.
var config = driver.Config{
	Name:              "Degoo",
	LocalSort:         true,
	DefaultRoot:       "0",
	NoOverwriteUpload: true,
}

// init registers the Degoo driver factory at program start-up.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Degoo{}
	})
}
|
||||||
110
drivers/degoo/types.go
Normal file
110
drivers/degoo/types.go
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
package degoo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DegooLoginRequest represents the login request body.
type DegooLoginRequest struct {
	GenerateToken bool   `json:"GenerateToken"` // ask the API to also return a refresh token
	Username      string `json:"Username"`
	Password      string `json:"Password"`
}

// DegooLoginResponse represents a successful login response.
type DegooLoginResponse struct {
	Token        string `json:"Token"`        // direct access token (legacy flow)
	RefreshToken string `json:"RefreshToken"` // long-lived token exchangeable for access tokens
}

// DegooAccessTokenRequest represents the token refresh request body.
type DegooAccessTokenRequest struct {
	RefreshToken string `json:"RefreshToken"`
}

// DegooAccessTokenResponse represents the token refresh response.
type DegooAccessTokenResponse struct {
	AccessToken string `json:"AccessToken"`
}

// DegooFileItem represents a Degoo file or folder.
type DegooFileItem struct {
	ID                   string `json:"ID"`
	ParentID             string `json:"ParentID"`
	Name                 string `json:"Name"`
	Category             int    `json:"Category"` // item kind; 1/2/10 are treated as folders by List
	Size                 string `json:"Size"`     // decimal string, parsed with strconv.ParseInt
	URL                  string `json:"URL"`      // direct download URL (populated by getOverlay4)
	CreationTime         string `json:"CreationTime"`
	LastModificationTime string `json:"LastModificationTime"`
	LastUploadTime       string `json:"LastUploadTime"`
	MetadataID           string `json:"MetadataID"`
	DeviceID             int64  `json:"DeviceID"`
	FilePath             string `json:"FilePath"`
	IsInRecycleBin       bool   `json:"IsInRecycleBin"`
}

// DegooErrors is a single error entry in a GraphQL response.
type DegooErrors struct {
	Path      []string    `json:"path"`
	Data      interface{} `json:"data"`
	ErrorType string      `json:"errorType"`
	ErrorInfo interface{} `json:"errorInfo"`
	Message   string      `json:"message"`
}

// DegooGraphqlResponse is the common structure for GraphQL API responses.
type DegooGraphqlResponse struct {
	Data   json.RawMessage `json:"data"` // decoded lazily by each caller
	Errors []DegooErrors   `json:"errors,omitempty"`
}

// DegooGetChildren5Data is the data field for getFileChildren5.
type DegooGetChildren5Data struct {
	GetFileChildren5 struct {
		Items     []DegooFileItem `json:"Items"`
		NextToken string          `json:"NextToken"` // pagination cursor; empty on last page
	} `json:"getFileChildren5"`
}

// DegooGetOverlay4Data is the data field for getOverlay4.
type DegooGetOverlay4Data struct {
	GetOverlay4 DegooFileItem `json:"getOverlay4"`
}

// DegooFileRenameInfo represents a file rename operation.
type DegooFileRenameInfo struct {
	ID      string `json:"ID"`
	NewName string `json:"NewName"`
}

// DegooFileIDs represents a list of file IDs for move operations.
type DegooFileIDs struct {
	FileIDs []string `json:"FileIDs"`
}

// DegooGetBucketWriteAuth4Data is the data field for GetBucketWriteAuth4.
type DegooGetBucketWriteAuth4Data struct {
	GetBucketWriteAuth4 []struct {
		AuthData struct {
			PolicyBase64 string `json:"PolicyBase64"` // base64-encoded S3 POST policy document
			Signature    string `json:"Signature"`
			BaseURL      string `json:"BaseURL"` // endpoint the multipart form is POSTed to
			KeyPrefix    string `json:"KeyPrefix"`
			AccessKey    struct {
				Key   string `json:"Key"` // form field name carrying the access key
				Value string `json:"Value"`
			} `json:"AccessKey"`
			ACL            string `json:"ACL"`
			AdditionalBody []struct {
				Key   string `json:"Key"`
				Value string `json:"Value"`
			} `json:"AdditionalBody"`
		} `json:"AuthData"`
		Error interface{} `json:"Error"` // e.g. "Already exist!" enables rapid upload
	} `json:"getBucketWriteAuth4"`
}

// DegooSetUploadFile3Data is the data field for SetUploadFile3.
type DegooSetUploadFile3Data struct {
	SetUploadFile3 bool `json:"setUploadFile3"`
}
|
||||||
198
drivers/degoo/upload.go
Normal file
198
drivers/degoo/upload.go
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
package degoo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getBucketWriteAuth4 requests S3 POST-upload authorization for a single file
// via the GetBucketWriteAuth4 GraphQL query. The returned AuthData carries the
// policy, signature and extra form fields consumed by uploadS3.
func (d *Degoo) getBucketWriteAuth4(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooGetBucketWriteAuth4Data, error) {
	const query = `query GetBucketWriteAuth4(
    $Token: String!
    $ParentID: String!
    $StorageUploadInfos: [StorageUploadInfo2]
  ) {
    getBucketWriteAuth4(
      Token: $Token
      ParentID: $ParentID
      StorageUploadInfos: $StorageUploadInfos
    ) {
      AuthData {
        PolicyBase64
        Signature
        BaseURL
        KeyPrefix
        AccessKey {
          Key
          Value
        }
        ACL
        AdditionalBody {
          Key
          Value
        }
      }
      Error
    }
  }`

	variables := map[string]interface{}{
		"Token":    d.AccessToken,
		"ParentID": parentID,
		"StorageUploadInfos": []map[string]string{{
			"FileName": file.GetName(),
			"Checksum": checksum,
			// Size is sent as a decimal string, matching the API schema.
			"Size": strconv.FormatInt(file.GetSize(), 10),
		}}}

	data, err := d.apiCall(ctx, "GetBucketWriteAuth4", query, variables)
	if err != nil {
		return nil, err
	}

	var resp DegooGetBucketWriteAuth4Data
	err = json.Unmarshal(data, &resp)
	if err != nil {
		return nil, err
	}

	return &resp, nil
}
|
||||||
|
|
||||||
|
// checkSum calculates the SHA1-based checksum for Degoo upload API.
//
// The hash is seeded with a fixed 16-byte sequence before the file content is
// streamed in. The digest is then wrapped in a small binary envelope (bytes
// 10/len prefix and 16/0 suffix — appears to be a protobuf-style encoding;
// TODO confirm against the Degoo protocol) and base64-encoded with "/"
// replaced by "_" so the value is safe inside object keys.
func (d *Degoo) checkSum(file io.Reader) (string, error) {
	seed := []byte{13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36}
	hasher := sha1.New()
	hasher.Write(seed)

	if _, err := utils.CopyWithBuffer(hasher, file); err != nil {
		return "", err
	}

	cs := hasher.Sum(nil)

	csBytes := []byte{10, byte(len(cs))}
	csBytes = append(csBytes, cs...)
	csBytes = append(csBytes, 16, 0)

	return strings.ReplaceAll(base64.StdEncoding.EncodeToString(csBytes), "/", "_"), nil
}
|
||||||
|
|
||||||
|
// uploadS3 performs the browser-style S3 multipart POST upload authorized by
// getBucketWriteAuth4. The multipart form is assembled in memory up to (and
// excluding) the file bytes; the file itself is streamed from tmpF between
// the form "head" and the closing boundary "tail", so large files are never
// fully buffered.
//
// NOTE(review): callers must guarantee auths.GetBucketWriteAuth4 is non-empty;
// the first element is indexed unconditionally.
func (d *Degoo) uploadS3(ctx context.Context, auths *DegooGetBucketWriteAuth4Data, tmpF model.File, file model.FileStreamer, checksum string) error {
	a := auths.GetBucketWriteAuth4[0].AuthData

	// Rewind the cache file: checkSum already consumed it.
	_, err := tmpF.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}

	// Object key layout: <KeyPrefix><ext>/<checksum>.<ext>
	ext := utils.Ext(file.GetName())
	key := fmt.Sprintf("%s%s/%s.%s", a.KeyPrefix, ext, checksum, ext)

	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	// S3 POST policies require all form fields to precede the file part; the
	// order below mirrors what the upload authorization expects.
	err = w.WriteField("key", key)
	if err != nil {
		return err
	}
	err = w.WriteField("acl", a.ACL)
	if err != nil {
		return err
	}
	err = w.WriteField("policy", a.PolicyBase64)
	if err != nil {
		return err
	}
	err = w.WriteField("signature", a.Signature)
	if err != nil {
		return err
	}
	err = w.WriteField(a.AccessKey.Key, a.AccessKey.Value)
	if err != nil {
		return err
	}
	for _, additional := range a.AdditionalBody {
		err = w.WriteField(additional.Key, additional.Value)
		if err != nil {
			return err
		}
	}
	err = w.WriteField("Content-Type", "")
	if err != nil {
		return err
	}

	// Open the file part but write no bytes yet: everything buffered so far
	// is the "head", everything written by Close() becomes the "tail".
	_, err = w.CreateFormFile("file", key)
	if err != nil {
		return err
	}

	headSize := b.Len()
	err = w.Close()
	if err != nil {
		return err
	}
	head := bytes.NewReader(b.Bytes()[:headSize])
	tail := bytes.NewReader(b.Bytes()[headSize:])

	// Stream head + file bytes + tail through the global upload rate limiter.
	rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, tmpF, tail))
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.BaseURL, rateLimitedRd)
	if err != nil {
		return err
	}
	req.Header.Add("ngsw-bypass", "1")
	req.Header.Add("Content-Type", w.FormDataContentType())

	res, err := d.client.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	// A successful S3 POST with no redirect configured returns 204.
	if res.StatusCode != http.StatusNoContent {
		return fmt.Errorf("upload failed with status code %d", res.StatusCode)
	}
	return nil
}
|
||||||
|
|
||||||
|
var _ driver.Driver = (*Degoo)(nil)
|
||||||
|
|
||||||
|
func (d *Degoo) SetUploadFile3(ctx context.Context, file model.FileStreamer, parentID string, checksum string) (*DegooSetUploadFile3Data, error) {
|
||||||
|
const query = `mutation SetUploadFile3($Token: String!, $FileInfos: [FileInfoUpload3]!) {
|
||||||
|
setUploadFile3(Token: $Token, FileInfos: $FileInfos)
|
||||||
|
}`
|
||||||
|
|
||||||
|
variables := map[string]interface{}{
|
||||||
|
"Token": d.AccessToken,
|
||||||
|
"FileInfos": []map[string]string{{
|
||||||
|
"Checksum": checksum,
|
||||||
|
"CreationTime": strconv.FormatInt(file.CreateTime().UnixMilli(), 10),
|
||||||
|
"Name": file.GetName(),
|
||||||
|
"ParentID": parentID,
|
||||||
|
"Size": strconv.FormatInt(file.GetSize(), 10),
|
||||||
|
}}}
|
||||||
|
|
||||||
|
data, err := d.apiCall(ctx, "SetUploadFile3", query, variables)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp DegooSetUploadFile3Data
|
||||||
|
err = json.Unmarshal(data, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
462
drivers/degoo/util.go
Normal file
462
drivers/degoo/util.go
Normal file
@@ -0,0 +1,462 @@
|
|||||||
|
package degoo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Thanks to https://github.com/bernd-wechner/Degoo for API research.

const (
	// API endpoints
	loginURL       = "https://rest-api.degoo.com/login"
	accessTokenURL = "https://rest-api.degoo.com/access-token/v2"
	apiURL         = "https://production-appsync.degoo.com/graphql"

	// API configuration
	apiKey         = "da2-vs6twz5vnjdavpqndtbzg3prra" // AppSync API key sent with every GraphQL call
	folderChecksum = "CgAQAg"                         // magic checksum that marks a folder in setUploadFile3

	// Token management: refresh this long before the JWT actually expires.
	tokenRefreshThreshold = 5 * time.Minute

	// Rate limiting: minimum spacing enforced between any two API requests.
	minRequestInterval = 1 * time.Second

	// Error messages
	errRateLimited  = "rate limited (429), please try again later"
	errUnauthorized = "unauthorized access"
)

var (
	// Global rate limiting - protects against concurrent API calls.
	// Both values are guarded by requestMutex.
	lastRequestTime time.Time
	requestMutex    sync.Mutex
)

// JWTPayload models the subset of the Degoo JWT payload needed for
// expiration checking.
type JWTPayload struct {
	UserID string `json:"userID"`
	Exp    int64  `json:"exp"` // expiry, Unix seconds
	Iat    int64  `json:"iat"` // issued-at, Unix seconds
}
|
||||||
|
|
||||||
|
// Rate limiting helper functions
|
||||||
|
|
||||||
|
// applyRateLimit ensures minimum interval between API requests
|
||||||
|
func applyRateLimit() {
|
||||||
|
requestMutex.Lock()
|
||||||
|
defer requestMutex.Unlock()
|
||||||
|
|
||||||
|
if !lastRequestTime.IsZero() {
|
||||||
|
if elapsed := time.Since(lastRequestTime); elapsed < minRequestInterval {
|
||||||
|
time.Sleep(minRequestInterval - elapsed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastRequestTime = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTP request helper functions
|
||||||
|
|
||||||
|
// createJSONRequest creates a new HTTP request with JSON body
|
||||||
|
func createJSONRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) {
|
||||||
|
jsonBody, err := json.Marshal(body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to marshal request body: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBuffer(jsonBody))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("User-Agent", base.UserAgent)
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkHTTPResponse checks for common HTTP error conditions
|
||||||
|
func checkHTTPResponse(resp *http.Response, operation string) error {
|
||||||
|
if resp.StatusCode == http.StatusTooManyRequests {
|
||||||
|
return fmt.Errorf("%s %s", operation, errRateLimited)
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return fmt.Errorf("%s failed: %s", operation, resp.Status)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isTokenExpired checks if the JWT token is expired or will expire soon
|
||||||
|
func (d *Degoo) isTokenExpired() bool {
|
||||||
|
if d.AccessToken == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
payload, err := extractJWTPayload(d.AccessToken)
|
||||||
|
if err != nil {
|
||||||
|
return true // Invalid token format
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if token expires within the threshold
|
||||||
|
expireTime := time.Unix(payload.Exp, 0)
|
||||||
|
return time.Now().Add(tokenRefreshThreshold).After(expireTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractJWTPayload extracts and parses JWT payload
|
||||||
|
func extractJWTPayload(token string) (*JWTPayload, error) {
|
||||||
|
parts := strings.Split(token, ".")
|
||||||
|
if len(parts) != 3 {
|
||||||
|
return nil, fmt.Errorf("invalid JWT format")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode the payload (second part)
|
||||||
|
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode JWT payload: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var jwtPayload JWTPayload
|
||||||
|
if err := json.Unmarshal(payload, &jwtPayload); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse JWT payload: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &jwtPayload, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// refreshToken attempts to refresh the access token using the refresh token
|
||||||
|
func (d *Degoo) refreshToken(ctx context.Context) error {
|
||||||
|
if d.RefreshToken == "" {
|
||||||
|
return fmt.Errorf("no refresh token available")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create request
|
||||||
|
tokenReq := DegooAccessTokenRequest{RefreshToken: d.RefreshToken}
|
||||||
|
req, err := createJSONRequest(ctx, "POST", accessTokenURL, tokenReq)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create refresh token request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute request
|
||||||
|
resp, err := d.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("refresh token request failed: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Check response
|
||||||
|
if err := checkHTTPResponse(resp, "refresh token"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var accessTokenResp DegooAccessTokenResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&accessTokenResp); err != nil {
|
||||||
|
return fmt.Errorf("failed to parse access token response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if accessTokenResp.AccessToken == "" {
|
||||||
|
return fmt.Errorf("empty access token received")
|
||||||
|
}
|
||||||
|
|
||||||
|
d.AccessToken = accessTokenResp.AccessToken
|
||||||
|
// Save the updated token to storage
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureValidToken ensures we have a valid, non-expired token
|
||||||
|
func (d *Degoo) ensureValidToken(ctx context.Context) error {
|
||||||
|
// Check if token is expired or will expire soon
|
||||||
|
if d.isTokenExpired() {
|
||||||
|
// Try to refresh token first if we have a refresh token
|
||||||
|
if d.RefreshToken != "" {
|
||||||
|
if refreshErr := d.refreshToken(ctx); refreshErr == nil {
|
||||||
|
return nil // Successfully refreshed
|
||||||
|
} else {
|
||||||
|
// If refresh failed, fall back to full login
|
||||||
|
fmt.Printf("Token refresh failed, falling back to full login: %v\n", refreshErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform full login
|
||||||
|
if d.Username != "" && d.Password != "" {
|
||||||
|
return d.login(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// login performs the login process and retrieves the access token.
|
||||||
|
func (d *Degoo) login(ctx context.Context) error {
|
||||||
|
if d.Username == "" || d.Password == "" {
|
||||||
|
return fmt.Errorf("username or password not provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
creds := DegooLoginRequest{
|
||||||
|
GenerateToken: true,
|
||||||
|
Username: d.Username,
|
||||||
|
Password: d.Password,
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonCreds, err := json.Marshal(creds)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to serialize login credentials: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "POST", loginURL, bytes.NewBuffer(jsonCreds))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create login request: %w", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("User-Agent", base.UserAgent)
|
||||||
|
req.Header.Set("Origin", "https://app.degoo.com")
|
||||||
|
|
||||||
|
resp, err := d.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("login request failed: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Handle rate limiting (429 Too Many Requests)
|
||||||
|
if resp.StatusCode == http.StatusTooManyRequests {
|
||||||
|
return fmt.Errorf("login rate limited (429), please try again later")
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return fmt.Errorf("login failed: %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var loginResp DegooLoginResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&loginResp); err != nil {
|
||||||
|
return fmt.Errorf("failed to parse login response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if loginResp.RefreshToken != "" {
|
||||||
|
tokenReq := DegooAccessTokenRequest{RefreshToken: loginResp.RefreshToken}
|
||||||
|
jsonTokenReq, err := json.Marshal(tokenReq)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to serialize access token request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenReqHTTP, err := http.NewRequestWithContext(ctx, "POST", accessTokenURL, bytes.NewBuffer(jsonTokenReq))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create access token request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenReqHTTP.Header.Set("User-Agent", base.UserAgent)
|
||||||
|
|
||||||
|
tokenResp, err := d.client.Do(tokenReqHTTP)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get access token: %w", err)
|
||||||
|
}
|
||||||
|
defer tokenResp.Body.Close()
|
||||||
|
|
||||||
|
var accessTokenResp DegooAccessTokenResponse
|
||||||
|
if err := json.NewDecoder(tokenResp.Body).Decode(&accessTokenResp); err != nil {
|
||||||
|
return fmt.Errorf("failed to parse access token response: %w", err)
|
||||||
|
}
|
||||||
|
d.AccessToken = accessTokenResp.AccessToken
|
||||||
|
d.RefreshToken = loginResp.RefreshToken // Save refresh token
|
||||||
|
} else if loginResp.Token != "" {
|
||||||
|
d.AccessToken = loginResp.Token
|
||||||
|
d.RefreshToken = "" // Direct token, no refresh token available
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("login failed, no valid token returned")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the updated tokens to storage
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// apiCall performs a Degoo GraphQL API request.
|
||||||
|
func (d *Degoo) apiCall(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
|
||||||
|
// Apply rate limiting
|
||||||
|
applyRateLimit()
|
||||||
|
|
||||||
|
// Ensure we have a valid token before making the API call
|
||||||
|
if err := d.ensureValidToken(ctx); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to ensure valid token: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the Token in variables if it exists (after potential refresh)
|
||||||
|
d.updateTokenInVariables(variables)
|
||||||
|
|
||||||
|
return d.executeGraphQLRequest(ctx, operationName, query, variables)
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateTokenInVariables updates the Token field in GraphQL variables
|
||||||
|
func (d *Degoo) updateTokenInVariables(variables map[string]interface{}) {
|
||||||
|
if variables != nil {
|
||||||
|
if _, hasToken := variables["Token"]; hasToken {
|
||||||
|
variables["Token"] = d.AccessToken
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// executeGraphQLRequest executes a GraphQL request with retry logic
|
||||||
|
func (d *Degoo) executeGraphQLRequest(ctx context.Context, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
|
||||||
|
reqBody := map[string]interface{}{
|
||||||
|
"operationName": operationName,
|
||||||
|
"query": query,
|
||||||
|
"variables": variables,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create and configure request
|
||||||
|
req, err := createJSONRequest(ctx, "POST", apiURL, reqBody)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set Degoo-specific headers
|
||||||
|
req.Header.Set("x-api-key", apiKey)
|
||||||
|
if d.AccessToken != "" {
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.AccessToken))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute request
|
||||||
|
resp, err := d.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("GraphQL API request failed: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Check for HTTP errors
|
||||||
|
if err := checkHTTPResponse(resp, "GraphQL API"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse GraphQL response
|
||||||
|
var degooResp DegooGraphqlResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(°ooResp); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to decode GraphQL response: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle GraphQL errors
|
||||||
|
if len(degooResp.Errors) > 0 {
|
||||||
|
return d.handleGraphQLError(ctx, degooResp.Errors[0], operationName, query, variables)
|
||||||
|
}
|
||||||
|
|
||||||
|
return degooResp.Data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleGraphQLError handles GraphQL-level errors with retry logic
|
||||||
|
func (d *Degoo) handleGraphQLError(ctx context.Context, gqlError DegooErrors, operationName, query string, variables map[string]interface{}) (json.RawMessage, error) {
|
||||||
|
if gqlError.ErrorType == "Unauthorized" {
|
||||||
|
// Re-login and retry
|
||||||
|
if err := d.login(ctx); err != nil {
|
||||||
|
return nil, fmt.Errorf("%s, login failed: %w", errUnauthorized, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update token in variables and retry
|
||||||
|
d.updateTokenInVariables(variables)
|
||||||
|
return d.apiCall(ctx, operationName, query, variables)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("GraphQL API error: %s", gqlError.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// humanReadableTimes converts Degoo timestamps to Go time.Time.
|
||||||
|
func humanReadableTimes(creation, modification, upload string) (cTime, mTime, uTime time.Time) {
|
||||||
|
cTime, _ = time.Parse(time.RFC3339, creation)
|
||||||
|
if modification != "" {
|
||||||
|
modMillis, _ := strconv.ParseInt(modification, 10, 64)
|
||||||
|
mTime = time.Unix(0, modMillis*int64(time.Millisecond))
|
||||||
|
}
|
||||||
|
if upload != "" {
|
||||||
|
upMillis, _ := strconv.ParseInt(upload, 10, 64)
|
||||||
|
uTime = time.Unix(0, upMillis*int64(time.Millisecond))
|
||||||
|
}
|
||||||
|
return cTime, mTime, uTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDevices fetches and caches top-level devices and folders.
|
||||||
|
func (d *Degoo) getDevices(ctx context.Context) error {
|
||||||
|
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ParentID } NextToken } }`
|
||||||
|
variables := map[string]interface{}{
|
||||||
|
"Token": d.AccessToken,
|
||||||
|
"ParentID": "0",
|
||||||
|
"Limit": 10,
|
||||||
|
"Order": 3,
|
||||||
|
}
|
||||||
|
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var resp DegooGetChildren5Data
|
||||||
|
if err := json.Unmarshal(data, &resp); err != nil {
|
||||||
|
return fmt.Errorf("failed to parse device list: %w", err)
|
||||||
|
}
|
||||||
|
if d.RootFolderID == "0" {
|
||||||
|
if len(resp.GetFileChildren5.Items) > 0 {
|
||||||
|
d.RootFolderID = resp.GetFileChildren5.Items[0].ParentID
|
||||||
|
}
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAllFileChildren5 fetches all children of a directory with pagination.
|
||||||
|
func (d *Degoo) getAllFileChildren5(ctx context.Context, parentID string) ([]DegooFileItem, error) {
|
||||||
|
const query = `query GetFileChildren5($Token: String! $ParentID: String $AllParentIDs: [String] $Limit: Int! $Order: Int! $NextToken: String ) { getFileChildren5(Token: $Token ParentID: $ParentID AllParentIDs: $AllParentIDs Limit: $Limit Order: $Order NextToken: $NextToken) { Items { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime FilePath IsInRecycleBin DeviceID MetadataID } NextToken } }`
|
||||||
|
var allItems []DegooFileItem
|
||||||
|
nextToken := ""
|
||||||
|
for {
|
||||||
|
variables := map[string]interface{}{
|
||||||
|
"Token": d.AccessToken,
|
||||||
|
"ParentID": parentID,
|
||||||
|
"Limit": 1000,
|
||||||
|
"Order": 3,
|
||||||
|
}
|
||||||
|
if nextToken != "" {
|
||||||
|
variables["NextToken"] = nextToken
|
||||||
|
}
|
||||||
|
data, err := d.apiCall(ctx, "GetFileChildren5", query, variables)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var resp DegooGetChildren5Data
|
||||||
|
if err := json.Unmarshal(data, &resp); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
allItems = append(allItems, resp.GetFileChildren5.Items...)
|
||||||
|
if resp.GetFileChildren5.NextToken == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nextToken = resp.GetFileChildren5.NextToken
|
||||||
|
}
|
||||||
|
return allItems, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getOverlay4 fetches metadata for a single item by ID.
|
||||||
|
func (d *Degoo) getOverlay4(ctx context.Context, id string) (DegooFileItem, error) {
|
||||||
|
const query = `query GetOverlay4($Token: String!, $ID: IDType!) { getOverlay4(Token: $Token, ID: $ID) { ID ParentID Name Category Size CreationTime LastModificationTime LastUploadTime URL FilePath IsInRecycleBin DeviceID MetadataID } }`
|
||||||
|
variables := map[string]interface{}{
|
||||||
|
"Token": d.AccessToken,
|
||||||
|
"ID": map[string]string{
|
||||||
|
"FileID": id,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
data, err := d.apiCall(ctx, "GetOverlay4", query, variables)
|
||||||
|
if err != nil {
|
||||||
|
return DegooFileItem{}, err
|
||||||
|
}
|
||||||
|
var resp DegooGetOverlay4Data
|
||||||
|
if err := json.Unmarshal(data, &resp); err != nil {
|
||||||
|
return DegooFileItem{}, fmt.Errorf("failed to parse item metadata: %w", err)
|
||||||
|
}
|
||||||
|
return resp.GetOverlay4, nil
|
||||||
|
}
|
||||||
@@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
|
|||||||
"Authorization": {storeInfo.Auth},
|
"Authorization": {storeInfo.Auth},
|
||||||
"Content-Type": {"application/octet-stream"},
|
"Content-Type": {"application/octet-stream"},
|
||||||
"Content-Crc32": {crc32Value},
|
"Content-Crc32": {crc32Value},
|
||||||
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
|
"Content-Length": {strconv.FormatInt(file.GetSize(), 10)},
|
||||||
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
|
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
|
||||||
}
|
}
|
||||||
res, err := base.HttpClient.Do(req)
|
res, err := base.HttpClient.Do(req)
|
||||||
@@ -577,7 +577,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
|
|||||||
if partIndex == totalParts-1 {
|
if partIndex == totalParts-1 {
|
||||||
size = fileSize - offset
|
size = fileSize - offset
|
||||||
}
|
}
|
||||||
var reader *stream.SectionReader
|
var reader io.ReadSeeker
|
||||||
var rateLimitedRd io.Reader
|
var rateLimitedRd io.Reader
|
||||||
crc32Value := ""
|
crc32Value := ""
|
||||||
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
threadG.GoWithLifecycle(errgroup.Lifecycle{
|
||||||
@@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
|
|||||||
"Authorization": {storeInfo.Auth},
|
"Authorization": {storeInfo.Auth},
|
||||||
"Content-Type": {"application/octet-stream"},
|
"Content-Type": {"application/octet-stream"},
|
||||||
"Content-Crc32": {crc32Value},
|
"Content-Crc32": {crc32Value},
|
||||||
"Content-Length": {fmt.Sprintf("%d", size)},
|
"Content-Length": {strconv.FormatInt(size, 10)},
|
||||||
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
|
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
|
||||||
}
|
}
|
||||||
res, err := base.HttpClient.Do(req)
|
res, err := base.HttpClient.Do(req)
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ type Addition struct {
|
|||||||
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
|
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
|
||||||
AccessToken string
|
AccessToken string
|
||||||
RefreshToken string `json:"refresh_token" required:"true"`
|
RefreshToken string `json:"refresh_token" required:"true"`
|
||||||
RootNamespaceId string
|
RootNamespaceId string `json:"RootNamespaceId" required:"false"`
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
|
|||||||
@@ -175,6 +175,13 @@ func (d *Dropbox) finishUploadSession(ctx context.Context, toPath string, offset
|
|||||||
}
|
}
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
|
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
|
||||||
|
if d.RootNamespaceId != "" {
|
||||||
|
apiPathRootJson, err := d.buildPathRootHeader()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
|
||||||
|
}
|
||||||
|
|
||||||
uploadFinishArgs := UploadFinishArgs{
|
uploadFinishArgs := UploadFinishArgs{
|
||||||
Commit: struct {
|
Commit: struct {
|
||||||
@@ -219,6 +226,13 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
|
|||||||
}
|
}
|
||||||
req.Header.Set("Content-Type", "application/octet-stream")
|
req.Header.Set("Content-Type", "application/octet-stream")
|
||||||
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
|
req.Header.Set("Authorization", "Bearer "+d.AccessToken)
|
||||||
|
if d.RootNamespaceId != "" {
|
||||||
|
apiPathRootJson, err := d.buildPathRootHeader()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
req.Header.Set("Dropbox-API-Path-Root", apiPathRootJson)
|
||||||
|
}
|
||||||
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
|
req.Header.Set("Dropbox-API-Arg", "{\"close\":false}")
|
||||||
|
|
||||||
res, err := base.HttpClient.Do(req)
|
res, err := base.HttpClient.Do(req)
|
||||||
@@ -233,3 +247,11 @@ func (d *Dropbox) startUploadSession(ctx context.Context) (string, error) {
|
|||||||
_ = res.Body.Close()
|
_ = res.Body.Close()
|
||||||
return sessionId, nil
|
return sessionId, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Dropbox) buildPathRootHeader() (string, error) {
|
||||||
|
return utils.Json.MarshalToString(map[string]interface{}{
|
||||||
|
".tag": "root",
|
||||||
|
"root": d.RootNamespaceId,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ var config = driver.Config{
|
|||||||
Name: "FebBox",
|
Name: "FebBox",
|
||||||
NoUpload: true,
|
NoUpload: true,
|
||||||
DefaultRoot: "0",
|
DefaultRoot: "0",
|
||||||
|
LinkCacheMode: driver.LinkCacheIP,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ type Addition struct {
|
|||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "FTP",
|
Name: "FTP",
|
||||||
LocalSort: true,
|
LocalSort: true,
|
||||||
OnlyLinkMFile: false,
|
OnlyProxy: true,
|
||||||
DefaultRoot: "/",
|
DefaultRoot: "/",
|
||||||
NoLinkURL: true,
|
NoLinkURL: true,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -167,4 +167,30 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *GoogleDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
if d.DisableDiskUsage {
|
||||||
|
return nil, errs.NotImplement
|
||||||
|
}
|
||||||
|
about, err := d.getAbout(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var total, used uint64
|
||||||
|
if about.StorageQuota.Limit == nil {
|
||||||
|
total = 0
|
||||||
|
} else {
|
||||||
|
total, err = strconv.ParseUint(*about.StorageQuota.Limit, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
used, err = strconv.ParseUint(about.StorageQuota.Usage, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*GoogleDrive)(nil)
|
var _ driver.Driver = (*GoogleDrive)(nil)
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ type Addition struct {
|
|||||||
ClientID string `json:"client_id"`
|
ClientID string `json:"client_id"`
|
||||||
ClientSecret string `json:"client_secret"`
|
ClientSecret string `json:"client_secret"`
|
||||||
ChunkSize int64 `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
|
ChunkSize int64 `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
|
||||||
|
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
|
|||||||
@@ -78,3 +78,12 @@ type Error struct {
|
|||||||
Message string `json:"message"`
|
Message string `json:"message"`
|
||||||
} `json:"error"`
|
} `json:"error"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type AboutResp struct {
|
||||||
|
StorageQuota struct {
|
||||||
|
Limit *string `json:"limit"`
|
||||||
|
Usage string `json:"usage"`
|
||||||
|
UsageInDrive string `json:"usageInDrive"`
|
||||||
|
UsageInDriveTrash string `json:"usageInDriveTrash"`
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -27,6 +27,14 @@ import (
|
|||||||
|
|
||||||
// do others that not defined in Driver interface
|
// do others that not defined in Driver interface
|
||||||
|
|
||||||
|
// Google Drive API field constants
|
||||||
|
const (
|
||||||
|
// File list query fields
|
||||||
|
FilesListFields = "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken"
|
||||||
|
// Single file query fields
|
||||||
|
FileInfoFields = "id,name,mimeType,size,md5Checksum,sha1Checksum,sha256Checksum"
|
||||||
|
)
|
||||||
|
|
||||||
type googleDriveServiceAccount struct {
|
type googleDriveServiceAccount struct {
|
||||||
// Type string `json:"type"`
|
// Type string `json:"type"`
|
||||||
// ProjectID string `json:"project_id"`
|
// ProjectID string `json:"project_id"`
|
||||||
@@ -235,7 +243,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
|
|||||||
}
|
}
|
||||||
query := map[string]string{
|
query := map[string]string{
|
||||||
"orderBy": orderBy,
|
"orderBy": orderBy,
|
||||||
"fields": "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
|
"fields": FilesListFields,
|
||||||
"pageSize": "1000",
|
"pageSize": "1000",
|
||||||
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
|
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
|
||||||
//"includeItemsFromAllDrives": "true",
|
//"includeItemsFromAllDrives": "true",
|
||||||
@@ -249,13 +257,84 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
pageToken = resp.NextPageToken
|
pageToken = resp.NextPageToken
|
||||||
|
|
||||||
|
// Batch process shortcuts, API calls only for file shortcuts
|
||||||
|
shortcutTargetIds := make([]string, 0)
|
||||||
|
shortcutIndices := make([]int, 0)
|
||||||
|
|
||||||
|
// Collect target IDs of all file shortcuts (skip folder shortcuts)
|
||||||
|
for i := range resp.Files {
|
||||||
|
if resp.Files[i].MimeType == "application/vnd.google-apps.shortcut" &&
|
||||||
|
resp.Files[i].ShortcutDetails.TargetId != "" &&
|
||||||
|
resp.Files[i].ShortcutDetails.TargetMimeType != "application/vnd.google-apps.folder" {
|
||||||
|
shortcutTargetIds = append(shortcutTargetIds, resp.Files[i].ShortcutDetails.TargetId)
|
||||||
|
shortcutIndices = append(shortcutIndices, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batch get target file info (only for file shortcuts)
|
||||||
|
if len(shortcutTargetIds) > 0 {
|
||||||
|
targetFiles := d.batchGetTargetFilesInfo(shortcutTargetIds)
|
||||||
|
// Update shortcut file info
|
||||||
|
for j, targetId := range shortcutTargetIds {
|
||||||
|
if targetFile, exists := targetFiles[targetId]; exists {
|
||||||
|
fileIndex := shortcutIndices[j]
|
||||||
|
if targetFile.Size != "" {
|
||||||
|
resp.Files[fileIndex].Size = targetFile.Size
|
||||||
|
}
|
||||||
|
if targetFile.MD5Checksum != "" {
|
||||||
|
resp.Files[fileIndex].MD5Checksum = targetFile.MD5Checksum
|
||||||
|
}
|
||||||
|
if targetFile.SHA1Checksum != "" {
|
||||||
|
resp.Files[fileIndex].SHA1Checksum = targetFile.SHA1Checksum
|
||||||
|
}
|
||||||
|
if targetFile.SHA256Checksum != "" {
|
||||||
|
resp.Files[fileIndex].SHA256Checksum = targetFile.SHA256Checksum
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
res = append(res, resp.Files...)
|
res = append(res, resp.Files...)
|
||||||
}
|
}
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getTargetFileInfo gets target file details for shortcuts
|
||||||
|
func (d *GoogleDrive) getTargetFileInfo(targetId string) (File, error) {
|
||||||
|
var targetFile File
|
||||||
|
url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s", targetId)
|
||||||
|
query := map[string]string{
|
||||||
|
"fields": FileInfoFields,
|
||||||
|
}
|
||||||
|
_, err := d.request(url, http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetQueryParams(query)
|
||||||
|
}, &targetFile)
|
||||||
|
if err != nil {
|
||||||
|
return File{}, err
|
||||||
|
}
|
||||||
|
return targetFile, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// batchGetTargetFilesInfo batch gets target file info, sequential processing to avoid concurrency complexity
|
||||||
|
func (d *GoogleDrive) batchGetTargetFilesInfo(targetIds []string) map[string]File {
|
||||||
|
if len(targetIds) == 0 {
|
||||||
|
return make(map[string]File)
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make(map[string]File)
|
||||||
|
// Sequential processing to avoid concurrency complexity
|
||||||
|
for _, targetId := range targetIds {
|
||||||
|
file, err := d.getTargetFileInfo(targetId)
|
||||||
|
if err == nil {
|
||||||
|
result[targetId] = file
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
|
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
|
||||||
var defaultChunkSize = d.ChunkSize * 1024 * 1024
|
defaultChunkSize := d.ChunkSize * 1024 * 1024
|
||||||
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
|
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -315,3 +394,18 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *GoogleDrive) getAbout(ctx context.Context) (*AboutResp, error) {
|
||||||
|
query := map[string]string{
|
||||||
|
"fields": "storageQuota",
|
||||||
|
}
|
||||||
|
var resp AboutResp
|
||||||
|
_, err := d.request("https://www.googleapis.com/drive/v3/about", http.MethodGet, func(req *resty.Request) {
|
||||||
|
req.SetQueryParams(query)
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|||||||
111
drivers/halalcloud_open/common.go
Normal file
111
drivers/halalcloud_open/common.go
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
slicePostErrorRetryInterval = time.Second * 120
|
||||||
|
retryTimes = 5
|
||||||
|
)
|
||||||
|
|
||||||
|
type halalCommon struct {
|
||||||
|
// *AuthService // 登录信息
|
||||||
|
UserInfo *sdkUser.User // 用户信息
|
||||||
|
refreshTokenFunc func(token string) error
|
||||||
|
// serv *AuthService
|
||||||
|
configs sync.Map
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *halalCommon) GetAccessToken() (string, error) {
|
||||||
|
value, exists := m.configs.Load("access_token")
|
||||||
|
if !exists {
|
||||||
|
return "", nil // 如果不存在,返回空字符串
|
||||||
|
}
|
||||||
|
return value.(string), nil // 返回配置项的值
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRefreshToken implements ConfigStore.
|
||||||
|
func (m *halalCommon) GetRefreshToken() (string, error) {
|
||||||
|
value, exists := m.configs.Load("refresh_token")
|
||||||
|
if !exists {
|
||||||
|
return "", nil // 如果不存在,返回空字符串
|
||||||
|
}
|
||||||
|
return value.(string), nil // 返回配置项的值
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccessToken implements ConfigStore.
|
||||||
|
func (m *halalCommon) SetAccessToken(token string) error {
|
||||||
|
m.configs.Store("access_token", token)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRefreshToken implements ConfigStore.
|
||||||
|
func (m *halalCommon) SetRefreshToken(token string) error {
|
||||||
|
m.configs.Store("refresh_token", token)
|
||||||
|
if m.refreshTokenFunc != nil {
|
||||||
|
return m.refreshTokenFunc(token)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetToken implements ConfigStore.
|
||||||
|
func (m *halalCommon) SetToken(accessToken string, refreshToken string, expiresIn int64) error {
|
||||||
|
m.configs.Store("access_token", accessToken)
|
||||||
|
m.configs.Store("refresh_token", refreshToken)
|
||||||
|
m.configs.Store("expires_in", expiresIn)
|
||||||
|
if m.refreshTokenFunc != nil {
|
||||||
|
return m.refreshTokenFunc(refreshToken)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearConfigs implements ConfigStore.
|
||||||
|
func (m *halalCommon) ClearConfigs() error {
|
||||||
|
m.configs = sync.Map{} // 清空map
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteConfig implements ConfigStore.
|
||||||
|
func (m *halalCommon) DeleteConfig(key string) error {
|
||||||
|
_, exists := m.configs.Load(key)
|
||||||
|
if !exists {
|
||||||
|
return nil // 如果不存在,直接返回
|
||||||
|
}
|
||||||
|
m.configs.Delete(key) // 删除指定的配置项
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConfig implements ConfigStore.
|
||||||
|
func (m *halalCommon) GetConfig(key string) (string, error) {
|
||||||
|
value, exists := m.configs.Load(key)
|
||||||
|
if !exists {
|
||||||
|
return "", nil // 如果不存在,返回空字符串
|
||||||
|
}
|
||||||
|
return value.(string), nil // 返回配置项的值
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListConfigs implements ConfigStore.
|
||||||
|
func (m *halalCommon) ListConfigs() (map[string]string, error) {
|
||||||
|
configs := make(map[string]string)
|
||||||
|
m.configs.Range(func(key, value interface{}) bool {
|
||||||
|
configs[key.(string)] = value.(string) // 将每个配置项添加到map中
|
||||||
|
return true // 继续遍历
|
||||||
|
})
|
||||||
|
return configs, nil // 返回所有配置项
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetConfig implements ConfigStore.
|
||||||
|
func (m *halalCommon) SetConfig(key string, value string) error {
|
||||||
|
m.configs.Store(key, value) // 使用Store方法设置或更新配置项
|
||||||
|
return nil // 成功设置配置项后返回nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHalalCommon() *halalCommon {
|
||||||
|
return &halalCommon{
|
||||||
|
configs: sync.Map{},
|
||||||
|
}
|
||||||
|
}
|
||||||
29
drivers/halalcloud_open/driver.go
Normal file
29
drivers/halalcloud_open/driver.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
sdkClient "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
|
||||||
|
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
|
||||||
|
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
|
||||||
|
)
|
||||||
|
|
||||||
|
type HalalCloudOpen struct {
|
||||||
|
*halalCommon
|
||||||
|
model.Storage
|
||||||
|
Addition
|
||||||
|
sdkClient *sdkClient.Client
|
||||||
|
sdkUserFileService *sdkUserFile.UserFileService
|
||||||
|
sdkUserService *sdkUser.UserService
|
||||||
|
uploadThread int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) Config() driver.Config {
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) GetAddition() driver.Additional {
|
||||||
|
return &d.Addition
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ driver.Driver = (*HalalCloudOpen)(nil)
|
||||||
131
drivers/halalcloud_open/driver_curd_impl.go
Normal file
131
drivers/halalcloud_open/driver_curd_impl.go
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
sdkModel "github.com/halalcloud/golang-sdk-lite/halalcloud/model"
|
||||||
|
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) getFiles(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
|
||||||
|
|
||||||
|
files := make([]model.Obj, 0)
|
||||||
|
limit := int64(100)
|
||||||
|
token := ""
|
||||||
|
|
||||||
|
for {
|
||||||
|
result, err := d.sdkUserFileService.List(ctx, &sdkUserFile.FileListRequest{
|
||||||
|
Parent: &sdkUserFile.File{Path: dir.GetPath()},
|
||||||
|
ListInfo: &sdkModel.ScanListRequest{
|
||||||
|
Limit: strconv.FormatInt(limit, 10),
|
||||||
|
Token: token,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; len(result.Files) > i; i++ {
|
||||||
|
files = append(files, NewObjFile(result.Files[i]))
|
||||||
|
}
|
||||||
|
|
||||||
|
if result.ListInfo == nil || result.ListInfo.Token == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
token = result.ListInfo.Token
|
||||||
|
|
||||||
|
}
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) makeDir(ctx context.Context, dir model.Obj, name string) (model.Obj, error) {
|
||||||
|
_, err := d.sdkUserFileService.Create(ctx, &sdkUserFile.File{
|
||||||
|
Path: dir.GetPath(),
|
||||||
|
Name: name,
|
||||||
|
})
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) move(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
|
||||||
|
oldDir := obj.GetPath()
|
||||||
|
newDir := dir.GetPath()
|
||||||
|
_, err := d.sdkUserFileService.Move(ctx, &sdkUserFile.BatchOperationRequest{
|
||||||
|
Source: []*sdkUserFile.File{
|
||||||
|
{
|
||||||
|
Path: oldDir,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Dest: &sdkUserFile.File{
|
||||||
|
Path: newDir,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) rename(ctx context.Context, obj model.Obj, name string) (model.Obj, error) {
|
||||||
|
|
||||||
|
_, err := d.sdkUserFileService.Rename(ctx, &sdkUserFile.File{
|
||||||
|
Path: obj.GetPath(),
|
||||||
|
Name: name,
|
||||||
|
})
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) copy(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
|
||||||
|
id := obj.GetID()
|
||||||
|
sourcePath := obj.GetPath()
|
||||||
|
if len(id) > 0 {
|
||||||
|
sourcePath = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
destID := dir.GetID()
|
||||||
|
destPath := dir.GetPath()
|
||||||
|
if len(destID) > 0 {
|
||||||
|
destPath = ""
|
||||||
|
}
|
||||||
|
dest := &sdkUserFile.File{
|
||||||
|
Path: destPath,
|
||||||
|
Identity: destID,
|
||||||
|
}
|
||||||
|
_, err := d.sdkUserFileService.Copy(ctx, &sdkUserFile.BatchOperationRequest{
|
||||||
|
Source: []*sdkUserFile.File{
|
||||||
|
{
|
||||||
|
Path: sourcePath,
|
||||||
|
Identity: id,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Dest: dest,
|
||||||
|
})
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) remove(ctx context.Context, obj model.Obj) error {
|
||||||
|
id := obj.GetID()
|
||||||
|
_, err := d.sdkUserFileService.Delete(ctx, &sdkUserFile.BatchOperationRequest{
|
||||||
|
Source: []*sdkUserFile.File{
|
||||||
|
{
|
||||||
|
Identity: id,
|
||||||
|
Path: obj.GetPath(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) details(ctx context.Context) (*model.StorageDetails, error) {
|
||||||
|
ret, err := d.sdkUserService.GetStatisticsAndQuota(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
total := uint64(ret.DiskStatisticsQuota.BytesQuota)
|
||||||
|
|
||||||
|
free := uint64(ret.DiskStatisticsQuota.BytesFree)
|
||||||
|
return &model.StorageDetails{
|
||||||
|
DiskUsage: model.DiskUsage{
|
||||||
|
TotalSpace: total,
|
||||||
|
FreeSpace: free,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
108
drivers/halalcloud_open/driver_get_link.go
Normal file
108
drivers/halalcloud_open/driver_get_link.go
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha1"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
|
||||||
|
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
|
||||||
|
"github.com/rclone/rclone/lib/readers"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) getLink(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
|
if args.Redirect {
|
||||||
|
// return nil, model.ErrUnsupported
|
||||||
|
fid := file.GetID()
|
||||||
|
fpath := file.GetPath()
|
||||||
|
if fid != "" {
|
||||||
|
fpath = ""
|
||||||
|
}
|
||||||
|
fi, err := d.sdkUserFileService.GetDirectDownloadAddress(ctx, &sdkUserFile.DirectDownloadRequest{
|
||||||
|
Identity: fid,
|
||||||
|
Path: fpath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
expireAt := fi.ExpireAt
|
||||||
|
duration := time.Until(time.UnixMilli(expireAt))
|
||||||
|
return &model.Link{
|
||||||
|
URL: fi.DownloadAddress,
|
||||||
|
Expiration: &duration,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
result, err := d.sdkUserFileService.ParseFileSlice(ctx, &sdkUserFile.File{
|
||||||
|
Identity: file.GetID(),
|
||||||
|
Path: file.GetPath(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fileAddrs := []*sdkUserFile.SliceDownloadInfo{}
|
||||||
|
var addressDuration int64
|
||||||
|
|
||||||
|
nodesNumber := len(result.RawNodes)
|
||||||
|
nodesIndex := nodesNumber - 1
|
||||||
|
startIndex, endIndex := 0, nodesIndex
|
||||||
|
for nodesIndex >= 0 {
|
||||||
|
if nodesIndex >= 200 {
|
||||||
|
endIndex = 200
|
||||||
|
} else {
|
||||||
|
endIndex = nodesNumber
|
||||||
|
}
|
||||||
|
for ; endIndex <= nodesNumber; endIndex += 200 {
|
||||||
|
if endIndex == 0 {
|
||||||
|
endIndex = 1
|
||||||
|
}
|
||||||
|
sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
|
||||||
|
Identity: result.RawNodes[startIndex:endIndex],
|
||||||
|
Version: 1,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
|
||||||
|
fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
|
||||||
|
startIndex = endIndex
|
||||||
|
nodesIndex -= 200
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
size, _ := strconv.ParseInt(result.FileSize, 10, 64)
|
||||||
|
chunks := getChunkSizes(result.Sizes)
|
||||||
|
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
|
||||||
|
length := httpRange.Length
|
||||||
|
if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
|
||||||
|
length = size - httpRange.Start
|
||||||
|
}
|
||||||
|
oo := &openObject{
|
||||||
|
ctx: ctx,
|
||||||
|
d: fileAddrs,
|
||||||
|
chunk: []byte{},
|
||||||
|
chunks: chunks,
|
||||||
|
skip: httpRange.Start,
|
||||||
|
sha: result.Sha1,
|
||||||
|
shaTemp: sha1.New(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return readers.NewLimitedReadCloser(oo, length), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var duration time.Duration
|
||||||
|
if addressDuration != 0 {
|
||||||
|
duration = time.Until(time.UnixMilli(addressDuration))
|
||||||
|
} else {
|
||||||
|
duration = time.Until(time.Now().Add(time.Hour))
|
||||||
|
}
|
||||||
|
|
||||||
|
return &model.Link{
|
||||||
|
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
|
||||||
|
Expiration: &duration,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
50
drivers/halalcloud_open/driver_init.go
Normal file
50
drivers/halalcloud_open/driver_init.go
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
|
"github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
|
||||||
|
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
|
||||||
|
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) Init(ctx context.Context) error {
|
||||||
|
if d.uploadThread < 1 || d.uploadThread > 32 {
|
||||||
|
d.uploadThread, d.UploadThread = 3, 3
|
||||||
|
}
|
||||||
|
if d.halalCommon == nil {
|
||||||
|
d.halalCommon = &halalCommon{
|
||||||
|
UserInfo: &sdkUser.User{},
|
||||||
|
refreshTokenFunc: func(token string) error {
|
||||||
|
d.Addition.RefreshToken = token
|
||||||
|
op.MustSaveDriverStorage(d)
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d.Addition.RefreshToken != "" {
|
||||||
|
d.halalCommon.SetRefreshToken(d.Addition.RefreshToken)
|
||||||
|
}
|
||||||
|
timeout := d.Addition.TimeOut
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = 60
|
||||||
|
}
|
||||||
|
host := d.Addition.Host
|
||||||
|
if host == "" {
|
||||||
|
host = "openapi.2dland.cn"
|
||||||
|
}
|
||||||
|
|
||||||
|
client := apiclient.NewClient(nil, host, d.Addition.ClientID, d.Addition.ClientSecret, d.halalCommon, apiclient.WithTimeout(time.Second*time.Duration(timeout)))
|
||||||
|
d.sdkClient = client
|
||||||
|
d.sdkUserFileService = sdkUserFile.NewUserFileService(client)
|
||||||
|
d.sdkUserService = sdkUser.NewUserService(client)
|
||||||
|
userInfo, err := d.sdkUserService.Get(ctx, &sdkUser.User{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.halalCommon.UserInfo = userInfo
|
||||||
|
// 能够获取到用户信息,已经检查了 RefreshToken 的有效性,无需再次检查
|
||||||
|
return nil
|
||||||
|
}
|
||||||
48
drivers/halalcloud_open/driver_interface.go
Normal file
48
drivers/halalcloud_open/driver_interface.go
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Drop releases driver resources on unload; this driver holds nothing
// that needs explicit teardown.
func (d *HalalCloudOpen) Drop(ctx context.Context) error {
	return nil
}

// List returns the entries contained in dir.
func (d *HalalCloudOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	return d.getFiles(ctx, dir)
}

// Link returns a download link for file.
func (d *HalalCloudOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	return d.getLink(ctx, file, args)
}

// MakeDir creates a directory named dirName under parentDir.
func (d *HalalCloudOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	return d.makeDir(ctx, parentDir, dirName)
}

// Move relocates srcObj into dstDir.
func (d *HalalCloudOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return d.move(ctx, srcObj, dstDir)
}

// Rename changes srcObj's name to newName.
func (d *HalalCloudOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	return d.rename(ctx, srcObj, newName)
}

// Copy duplicates srcObj into dstDir.
func (d *HalalCloudOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	return d.copy(ctx, srcObj, dstDir)
}

// Remove deletes obj.
func (d *HalalCloudOpen) Remove(ctx context.Context, obj model.Obj) error {
	return d.remove(ctx, obj)
}

// Put uploads stream into dstDir, reporting progress through up.
func (d *HalalCloudOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	return d.put(ctx, dstDir, stream, up)
}

// GetDetails reports the account's storage quota and usage.
func (d *HalalCloudOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
	return d.details(ctx)
}
|
||||||
258
drivers/halalcloud_open/halalcloud_upload.go
Normal file
258
drivers/halalcloud_open/halalcloud_upload.go
Normal file
@@ -0,0 +1,258 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (d *HalalCloudOpen) put(ctx context.Context, dstDir model.Obj, fileStream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
|
|
||||||
|
newPath := path.Join(dstDir.GetPath(), fileStream.GetName())
|
||||||
|
|
||||||
|
uploadTask, err := d.sdkUserFileService.CreateUploadTask(ctx, &sdkUserFile.File{
|
||||||
|
Path: newPath,
|
||||||
|
Size: fileStream.GetSize(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if uploadTask.Created {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
slicesList := make([]string, 0)
|
||||||
|
codec := uint64(0x55)
|
||||||
|
if uploadTask.BlockCodec > 0 {
|
||||||
|
codec = uint64(uploadTask.BlockCodec)
|
||||||
|
}
|
||||||
|
blockHashType := uploadTask.BlockHashType
|
||||||
|
mhType := uint64(0x12)
|
||||||
|
if blockHashType > 0 {
|
||||||
|
mhType = uint64(blockHashType)
|
||||||
|
}
|
||||||
|
prefix := cid.Prefix{
|
||||||
|
Codec: codec,
|
||||||
|
MhLength: -1,
|
||||||
|
MhType: mhType,
|
||||||
|
Version: 1,
|
||||||
|
}
|
||||||
|
blockSize := uploadTask.BlockSize
|
||||||
|
useSingleUpload := true
|
||||||
|
//
|
||||||
|
if fileStream.GetSize() <= int64(blockSize) || d.uploadThread <= 1 {
|
||||||
|
useSingleUpload = true
|
||||||
|
}
|
||||||
|
// Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.
|
||||||
|
// read file
|
||||||
|
if useSingleUpload {
|
||||||
|
bufferSize := int(blockSize)
|
||||||
|
buffer := make([]byte, bufferSize)
|
||||||
|
reader := driver.NewLimitedUploadStream(ctx, fileStream)
|
||||||
|
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
|
||||||
|
// fileStream.Seek(0, os.SEEK_SET)
|
||||||
|
for {
|
||||||
|
n, err := teeReader.Read(buffer)
|
||||||
|
if n > 0 {
|
||||||
|
data := buffer[:n]
|
||||||
|
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
slicesList = append(slicesList, uploadCid.String())
|
||||||
|
}
|
||||||
|
if err == io.EOF || n == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// TODO: implement multipart upload, currently using single-threaded upload to ensure safety.
|
||||||
|
bufferSize := int(blockSize)
|
||||||
|
buffer := make([]byte, bufferSize)
|
||||||
|
reader := driver.NewLimitedUploadStream(ctx, fileStream)
|
||||||
|
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
|
||||||
|
for {
|
||||||
|
n, err := teeReader.Read(buffer)
|
||||||
|
if n > 0 {
|
||||||
|
data := buffer[:n]
|
||||||
|
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
slicesList = append(slicesList, uploadCid.String())
|
||||||
|
}
|
||||||
|
if err == io.EOF || n == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
newFile, err := makeFile(ctx, slicesList, uploadTask.Task, uploadTask.UploadAddress, retryTimes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewObjFile(newFile), nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeFile(ctx context.Context, fileSlice []string, taskID string, uploadAddress string, retry int) (*sdkUserFile.File, error) {
|
||||||
|
var lastError error = nil
|
||||||
|
for range retry {
|
||||||
|
newFile, err := doMakeFile(fileSlice, taskID, uploadAddress)
|
||||||
|
if err == nil {
|
||||||
|
return newFile, nil
|
||||||
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if strings.Contains(err.Error(), "not found") {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
lastError = err
|
||||||
|
time.Sleep(slicePostErrorRetryInterval)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("mk file slice failed after %d times, error: %s", retry, lastError.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUserFile.File, error) {
|
||||||
|
accessUrl := uploadAddress + "/" + taskID
|
||||||
|
getTimeOut := time.Minute * 2
|
||||||
|
u, err := url.Parse(accessUrl)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
n, _ := json.Marshal(fileSlice)
|
||||||
|
httpRequest := http.Request{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: u,
|
||||||
|
Header: map[string][]string{
|
||||||
|
"Accept": {"application/json"},
|
||||||
|
"Content-Type": {"application/json"},
|
||||||
|
//"Content-Length": {strconv.Itoa(len(n))},
|
||||||
|
},
|
||||||
|
Body: io.NopCloser(bytes.NewReader(n)),
|
||||||
|
}
|
||||||
|
httpClient := http.Client{
|
||||||
|
Timeout: getTimeOut,
|
||||||
|
}
|
||||||
|
httpResponse, err := httpClient.Do(&httpRequest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer httpResponse.Body.Close()
|
||||||
|
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
|
||||||
|
b, _ := io.ReadAll(httpResponse.Body)
|
||||||
|
message := string(b)
|
||||||
|
return nil, fmt.Errorf("mk file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
|
||||||
|
}
|
||||||
|
b, _ := io.ReadAll(httpResponse.Body)
|
||||||
|
var result *sdkUserFile.File
|
||||||
|
err = json.Unmarshal(b, &result)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
func postFileSlice(ctx context.Context, fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix, retry int) (cid.Cid, error) {
|
||||||
|
var lastError error = nil
|
||||||
|
for range retry {
|
||||||
|
newCid, err := doPostFileSlice(fileSlice, taskID, uploadAddress, preix)
|
||||||
|
if err == nil {
|
||||||
|
return newCid, nil
|
||||||
|
}
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return cid.Undef, err
|
||||||
|
}
|
||||||
|
time.Sleep(slicePostErrorRetryInterval)
|
||||||
|
lastError = err
|
||||||
|
}
|
||||||
|
return cid.Undef, fmt.Errorf("upload file slice failed after %d times, error: %s", retry, lastError.Error())
|
||||||
|
}
|
||||||
|
func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, preix cid.Prefix) (cid.Cid, error) {
|
||||||
|
// 1. sum file slice
|
||||||
|
newCid, err := preix.Sum(fileSlice)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, err
|
||||||
|
}
|
||||||
|
// 2. post file slice
|
||||||
|
sliceCidString := newCid.String()
|
||||||
|
// /{taskID}/{sliceID}
|
||||||
|
accessUrl := uploadAddress + "/" + taskID + "/" + sliceCidString
|
||||||
|
getTimeOut := time.Second * 30
|
||||||
|
// get {accessUrl} in {getTimeOut}
|
||||||
|
u, err := url.Parse(accessUrl)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, err
|
||||||
|
}
|
||||||
|
// header: accept: application/json
|
||||||
|
// header: content-type: application/octet-stream
|
||||||
|
// header: content-length: {fileSlice.length}
|
||||||
|
// header: x-content-cid: {sliceCidString}
|
||||||
|
// header: x-task-id: {taskID}
|
||||||
|
httpRequest := http.Request{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: u,
|
||||||
|
Header: map[string][]string{
|
||||||
|
"Accept": {"application/json"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
httpClient := http.Client{
|
||||||
|
Timeout: getTimeOut,
|
||||||
|
}
|
||||||
|
httpResponse, err := httpClient.Do(&httpRequest)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, err
|
||||||
|
}
|
||||||
|
if httpResponse.StatusCode != http.StatusOK {
|
||||||
|
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d", httpResponse.StatusCode)
|
||||||
|
}
|
||||||
|
var result bool
|
||||||
|
b, err := io.ReadAll(httpResponse.Body)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, err
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(b, &result)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, err
|
||||||
|
}
|
||||||
|
if result {
|
||||||
|
return newCid, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
httpRequest = http.Request{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: u,
|
||||||
|
Header: map[string][]string{
|
||||||
|
"Accept": {"application/json"},
|
||||||
|
"Content-Type": {"application/octet-stream"},
|
||||||
|
// "Content-Length": {strconv.Itoa(len(fileSlice))},
|
||||||
|
},
|
||||||
|
Body: io.NopCloser(bytes.NewReader(fileSlice)),
|
||||||
|
}
|
||||||
|
httpResponse, err = httpClient.Do(&httpRequest)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, err
|
||||||
|
}
|
||||||
|
defer httpResponse.Body.Close()
|
||||||
|
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
|
||||||
|
b, _ := io.ReadAll(httpResponse.Body)
|
||||||
|
message := string(b)
|
||||||
|
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
|
||||||
|
}
|
||||||
|
//
|
||||||
|
|
||||||
|
return newCid, nil
|
||||||
|
}
|
||||||
32
drivers/halalcloud_open/meta.go
Normal file
32
drivers/halalcloud_open/meta.go
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Addition holds the user-facing configuration for the HalalCloudOpen driver.
type Addition struct {
	// Usually one of two
	driver.RootPath
	// define other
	// RefreshToken authenticates a personal account; optional when using
	// the personal-API (client credential) approach.
	RefreshToken string `json:"refresh_token" required:"false" help:"If using a personal API approach, the RefreshToken is not required."`
	// UploadThread is the requested upload concurrency; values outside
	// [1, 32] fall back to 3 in Init.
	UploadThread int `json:"upload_thread" type:"number" default:"3" help:"1 <= thread <= 32"`

	// ClientID and ClientSecret identify the API client.
	ClientID     string `json:"client_id" required:"true" default:""`
	ClientSecret string `json:"client_secret" required:"true" default:""`
	// Host is the OpenAPI endpoint; Init defaults it when left empty.
	Host string `json:"host" required:"false" default:"openapi.2dland.cn"`
	// TimeOut is the API client timeout in seconds (defaulted to 60 in Init).
	TimeOut int `json:"timeout" type:"number" default:"60" help:"timeout in seconds"`
}
|
||||||
|
|
||||||
|
// config describes this driver to the registry: display name, default
// root path, and that it may serve direct (non-proxied) links.
var config = driver.Config{
	Name:        "HalalCloudOpen",
	OnlyProxy:   false,
	DefaultRoot: "/",
	NoLinkURL:   false,
}
|
||||||
|
|
||||||
|
// init registers the HalalCloudOpen constructor with the driver registry.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &HalalCloudOpen{}
	})
}
|
||||||
60
drivers/halalcloud_open/obj_file.go
Normal file
60
drivers/halalcloud_open/obj_file.go
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
|
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ObjFile adapts an SDK file description to the model.Obj interface.
type ObjFile struct {
	sdkFile    *sdkUserFile.File
	fileSize   int64     // cached from sdkFile.Size
	modTime    time.Time // derived from sdkFile.UpdateTs (Unix milliseconds)
	createTime time.Time // derived from sdkFile.CreateTs (Unix milliseconds)
}
|
||||||
|
|
||||||
|
func NewObjFile(f *sdkUserFile.File) model.Obj {
|
||||||
|
ofile := &ObjFile{sdkFile: f}
|
||||||
|
ofile.fileSize = f.Size
|
||||||
|
modTimeTs := f.UpdateTs
|
||||||
|
ofile.modTime = time.UnixMilli(modTimeTs)
|
||||||
|
createTimeTs := f.CreateTs
|
||||||
|
ofile.createTime = time.UnixMilli(createTimeTs)
|
||||||
|
return ofile
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSize returns the file size in bytes.
func (f *ObjFile) GetSize() int64 {
	return f.fileSize
}

// GetName returns the file's name.
func (f *ObjFile) GetName() string {
	return f.sdkFile.Name
}

// ModTime returns the last-modification time.
func (f *ObjFile) ModTime() time.Time {
	return f.modTime
}

// IsDir reports whether the entry is a directory.
func (f *ObjFile) IsDir() bool {
	return f.sdkFile.Dir
}

// GetHash returns the file's hash info; none is exposed yet.
func (f *ObjFile) GetHash() utils.HashInfo {
	return utils.HashInfo{
		// TODO: support more hash types
	}
}

// GetID returns the server-side identity of the file.
func (f *ObjFile) GetID() string {
	return f.sdkFile.Identity
}

// GetPath returns the remote path of the file.
func (f *ObjFile) GetPath() string {
	return f.sdkFile.Path
}

// CreateTime returns the creation time.
func (f *ObjFile) CreateTime() time.Time {
	return f.createTime
}
|
||||||
185
drivers/halalcloud_open/utils.go
Normal file
185
drivers/halalcloud_open/utils.go
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
package halalcloudopen
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/md5"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||||
|
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// get the next chunk
|
||||||
|
func (oo *openObject) getChunk(_ context.Context) (err error) {
|
||||||
|
if oo.id >= len(oo.chunks) {
|
||||||
|
return io.EOF
|
||||||
|
}
|
||||||
|
var chunk []byte
|
||||||
|
err = utils.Retry(3, time.Second, func() (err error) {
|
||||||
|
chunk, err = getRawFiles(oo.d[oo.id])
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
oo.id++
|
||||||
|
oo.chunk = chunk
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads up to len(p) bytes into p.
|
||||||
|
// Read reads up to len(p) bytes into p, fetching slices on demand.
//
// Any pending skip offset is consumed first: whole chunks are skipped via
// their recorded sizes, and the remainder is trimmed off the next chunk
// actually downloaded. Every byte handed to the caller is also written to
// shaTemp so Close can verify the digest.
func (oo *openObject) Read(p []byte) (n int, err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return 0, fmt.Errorf("read on closed file")
	}
	// Skip data at the start if requested
	for oo.skip > 0 {
		//size := 1024 * 1024
		_, size, err := oo.ChunkLocation(oo.id)
		if err != nil {
			return 0, err
		}
		if oo.skip < int64(size) {
			break
		}
		oo.id++
		oo.skip -= int64(size)
	}
	if len(oo.chunk) == 0 {
		err = oo.getChunk(oo.ctx)
		if err != nil {
			return 0, err
		}
		if oo.skip > 0 {
			// NOTE(review): assumes the freshly fetched chunk is at least
			// oo.skip bytes long; a shorter chunk would panic here — confirm
			// downloaded slice lengths always match oo.chunks entries.
			oo.chunk = (oo.chunk)[oo.skip:]
			oo.skip = 0
		}
	}
	// Return at most one chunk's worth of data per call.
	n = copy(p, oo.chunk)
	oo.shaTemp.Write(p[:n])
	oo.chunk = (oo.chunk)[n:]
	return n, nil
}
|
||||||
|
|
||||||
|
// Close closed the file - MAC errors are reported here
|
||||||
|
// Close marks the object closed and verifies the accumulated SHA digest.
// Subsequent calls are no-ops; MAC errors are reported here.
func (oo *openObject) Close() (err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return nil
	}
	// Verify SHA1 digest.
	// NOTE(review): this compares the RAW digest bytes against oo.sha (the
	// server-supplied Sha1 string). If that string is hex-encoded this can
	// never match; a ranged/partial read also cannot reproduce the
	// full-file digest. Confirm the intended format before relying on it.
	if string(oo.shaTemp.Sum(nil)) != oo.sha {
		return fmt.Errorf("failed to finish download: SHA mismatch")
	}

	oo.closed = true
	return nil
}
|
||||||
|
|
||||||
|
// GetMD5Hash returns the lowercase hexadecimal MD5 digest of text.
func GetMD5Hash(text string) string {
	h := md5.New()
	io.WriteString(h, text)
	return hex.EncodeToString(h.Sum(nil))
}
|
||||||
|
|
||||||
|
// chunkSize describes one downloadable slice: its position (slice index
// as reported by the API) and its size in bytes.
type chunkSize struct {
	position int64
	size     int
}
|
||||||
|
|
||||||
|
// openObject is a sequential reader over a sliced remote file. It fetches
// slice payloads lazily and feeds everything read into a running hash for
// verification at Close. The mutex guards all mutable state.
type openObject struct {
	ctx     context.Context
	mu      sync.Mutex
	d       []*sdkUserFile.SliceDownloadInfo // per-slice download addresses
	id      int                              // index of the next slice to fetch
	skip    int64                            // bytes still to skip before serving data
	chunk   []byte                           // unread remainder of the current slice
	chunks  []chunkSize                      // declared size of every slice
	closed  bool
	sha     string    // expected digest, as supplied by the API
	shaTemp hash.Hash // running digest of all bytes served
}
|
||||||
|
|
||||||
|
func getChunkSizes(sliceSize []*sdkUserFile.SliceSize) (chunks []chunkSize) {
|
||||||
|
chunks = make([]chunkSize, 0)
|
||||||
|
for _, s := range sliceSize {
|
||||||
|
// 对最后一个做特殊处理
|
||||||
|
endIndex := s.EndIndex
|
||||||
|
startIndex := s.StartIndex
|
||||||
|
if endIndex == 0 {
|
||||||
|
endIndex = startIndex
|
||||||
|
}
|
||||||
|
for j := startIndex; j <= endIndex; j++ {
|
||||||
|
size := s.Size
|
||||||
|
chunks = append(chunks, chunkSize{position: j, size: int(size)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return chunks
|
||||||
|
}
|
||||||
|
|
||||||
|
func (oo *openObject) ChunkLocation(id int) (position int64, size int, err error) {
|
||||||
|
if id < 0 || id >= len(oo.chunks) {
|
||||||
|
return 0, 0, errors.New("invalid arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
return (oo.chunks)[id].position, (oo.chunks)[id].size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRawFiles(addr *sdkUserFile.SliceDownloadInfo) ([]byte, error) {
|
||||||
|
|
||||||
|
if addr == nil {
|
||||||
|
return nil, errors.New("addr is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
client := http.Client{
|
||||||
|
Timeout: time.Duration(60 * time.Second), // Set timeout to 60 seconds
|
||||||
|
}
|
||||||
|
resp, err := client.Get(addr.DownloadAddress)
|
||||||
|
if err != nil {
|
||||||
|
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("bad status: %s, body: %s", resp.Status, body)
|
||||||
|
}
|
||||||
|
|
||||||
|
if addr.Encrypt > 0 {
|
||||||
|
cd := uint8(addr.Encrypt)
|
||||||
|
for idx := 0; idx < len(body); idx++ {
|
||||||
|
body[idx] = body[idx] ^ cd
|
||||||
|
}
|
||||||
|
}
|
||||||
|
storeType := addr.StoreType
|
||||||
|
if storeType != 10 {
|
||||||
|
|
||||||
|
sourceCid, err := cid.Decode(addr.Identity)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
checkCid, err := sourceCid.Prefix().Sum(body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !checkCid.Equals(sourceCid) {
|
||||||
|
return nil, fmt.Errorf("bad cid: %s, body: %s", checkCid.String(), body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return body, nil
|
||||||
|
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user