Compare commits


60 Commits

Author SHA1 Message Date
千石
85fe4e5bb3 feat(alist_v3): add IntSlice type for JSON unmarshalling (#9247)
- Add `IntSlice` type to handle both single int and array in JSON.
- Modify `MeResp` struct to use `IntSlice` for `Role` field.
- Import `encoding/json` for JSON operations.
2025-08-04 12:02:45 +08:00
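For context, accepting "int or array of ints" is a standard Go custom-unmarshalling pattern. A minimal sketch of what such an `IntSlice` could look like, assuming only what the commit describes; everything beyond the `IntSlice` name is illustrative, not the actual source:

```go
package alist_v3

import "encoding/json"

// IntSlice accepts JSON that is either a single int (e.g. 2) or an
// array of ints (e.g. [2, 3]), normalizing both to a slice.
type IntSlice []int

func (s *IntSlice) UnmarshalJSON(data []byte) error {
	// Try the array form first.
	var many []int
	if err := json.Unmarshal(data, &many); err == nil {
		*s = many
		return nil
	}
	// Fall back to the scalar form.
	var one int
	if err := json.Unmarshal(data, &one); err != nil {
		return err
	}
	*s = IntSlice{one}
	return nil
}
```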
千石
52da07e8a7 feat(123_open): add new driver support for 123 Open (#9246)
- Implement new driver for 123 Open service, enabling file operations
  such as listing, uploading, moving, and removing files.
- Introduce token management for authentication and authorization.
- Add API integration for various file operations and actions.
- Include utility functions for handling API requests and responses.
- Register the new driver in the existing drivers' list.
2025-08-04 11:56:57 +08:00
Sky_slience
46de9e9ebb fix(driver): 123 download and modify request headers on the frontend (#9236)
Co-authored-by: Sky_slience <Skyslience@spdzy.com>
2025-08-03 20:00:09 +08:00
千石
ae90fb579b feat(log): enhance log formatter to respect NO_COLOR env variable (#9239)
- Adjust log formatter to disable colors when NO_COLOR or ALIST_NO_COLOR
  environment variables are set.
- Reorganize formatter settings for better readability.
2025-08-03 09:26:23 +08:00
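A hedged sketch of such a check with logrus, assuming a `TextFormatter`; the commit's exact formatter settings may differ:

```go
package bootstrap

import (
	"os"

	log "github.com/sirupsen/logrus"
)

// newLogFormatter disables colored output when NO_COLOR or
// ALIST_NO_COLOR is set, following the https://no-color.org convention.
func newLogFormatter() *log.TextFormatter {
	_, noColor := os.LookupEnv("NO_COLOR")
	_, alistNoColor := os.LookupEnv("ALIST_NO_COLOR")
	disable := noColor || alistNoColor
	return &log.TextFormatter{
		DisableColors: disable,
		ForceColors:   !disable,
		FullTimestamp: true,
	}
}
```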
Sky_slience
394a18cbd9 Fix 123 download (#9235)
* fix(driver): handle additional HTTP status code 210 for URL redirection

* fix(driver): 123 download url error

---------

Co-authored-by: Sky_slience <Skyslience@spdzy.com>
2025-07-30 16:55:32 +08:00
千石
280960ce3e feat(user-db): enhance user management with role-based queries (allow-edit-role-guest) (#9234)
- Add `GetUsersByRole` function to fetch users based on their roles.
- Extend `UpdateUserBasePathPrefix` to accept optional user lists.
- Ensure path cleaning in `UpdateUserBasePathPrefix` for consistency.
- Integrate guest role fetching in `auth.go` middleware.
- Utilize `GetUsersByRole` in `role.go` for base path modifications.
- Remove redundant line in `role.go` role modification logic.
2025-07-30 13:15:35 +08:00
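A self-contained sketch of the prefix rewrite with stdlib path cleaning; the real function operates on the user table and accepts optional user lists, so the types and signature here are assumptions:

```go
package db

import (
	"path"
	"strings"
)

// User is a stand-in for the real model; only the fields the sketch
// needs are included.
type User struct {
	Username string
	BasePath string
}

// UpdateUserBasePathPrefix rewrites every base path under oldPrefix to
// live under newPrefix, cleaning all paths first for consistency.
func UpdateUserBasePathPrefix(users []User, oldPrefix, newPrefix string) {
	oldPrefix = path.Clean("/" + oldPrefix)
	newPrefix = path.Clean("/" + newPrefix)
	for i := range users {
		base := path.Clean("/" + users[i].BasePath)
		if base == oldPrefix || strings.HasPrefix(base, oldPrefix+"/") {
			users[i].BasePath = newPrefix + strings.TrimPrefix(base, oldPrefix)
		}
	}
}
```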
Sky_slience
74332e91fb feat(ui): add new UI configuration option to settings (#9233)
* feat(ui): add new UI configuration option to settings

* fix(ui): disable new UI feature by default

---------

Co-authored-by: Sky_slience <Skyslience@spdzy.com>
2025-07-30 12:22:02 +08:00
Sky_slience
540d6c7064 fix(meta): update OAuth token URL and improve default client credentials (#9231) 2025-07-30 10:48:33 +08:00
千石
55b2bb6b80 feat(user-management): Enhance admin management and role handling 2025-07-29 19:45:28 +08:00
qianshi
d5df6fa4cf Merge branch 'main' into feat/allow-edit-role-guest 2025-07-29 19:13:01 +08:00
千石
3353055482 Update Dockerfile.ci (#9230)
chore(docker): Update base image from alpine:edge to alpine:3.20.7 in Dockerfile.ci
2025-07-29 18:35:47 +08:00
千石
4d7c2a09ce docs(README): Add API documentation links across multiple languages (#9225)
- Add API documentation section to `README.md` with link to Apifox
- Add API documentation section to `README_ja.md` with Japanese translation and link to Apifox
- Add API documentation section to `README_cn.md` with Chinese translation and link to Apifox
2025-07-29 09:42:34 +08:00
qianshi
5b8c26510b feat(user-management): Enhance admin management and role handling
- Add `CountEnabledAdminsExcluding` function to count enabled admins excluding a specific user.
- Implement `CountUsersByRoleAndEnabledExclude` in `internal/db/user.go` to support exclusion logic.
- Refactor role handling with switch-case for better readability in `server/handles/role.go`.
- Ensure at least one enabled admin remains when disabling an admin in `server/handles/user.go`.
- Maintain guest role name consistency when updating roles in `internal/op/role.go`.
2025-07-28 23:07:07 +08:00
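The invariant is simple to state: never disable the last enabled admin. A sketch of the guard over an in-memory list; the real code counts via the DB-layer `CountEnabledAdminsExcluding` named above:

```go
package handles

import "errors"

// User is illustrative; the real model lives in internal/model.
type User struct {
	ID       uint
	IsAdmin  bool
	Disabled bool
}

// countEnabledAdminsExcluding mirrors the DB helper from the commit:
// enabled admins other than the user being changed.
func countEnabledAdminsExcluding(users []User, exclude uint) int {
	n := 0
	for _, u := range users {
		if u.IsAdmin && !u.Disabled && u.ID != exclude {
			n++
		}
	}
	return n
}

// checkCanDisableAdmin refuses to disable the last enabled admin.
func checkCanDisableAdmin(users []User, id uint) error {
	if countEnabledAdminsExcluding(users, id) == 0 {
		return errors.New("at least one enabled admin must remain")
	}
	return nil
}
```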
千石
91cc7529a0 feat(user/role/storage): enhance user and storage operations with additional validations (#9223)
- Update `CreateUser` to adjust `BasePath` based on user roles and clean paths.
- Modify `UpdateUser` to incorporate role-based path changes.
- Add validation in `CreateStorage` and `UpdateStorage` to prevent root mount path.
- Prevent changes to admin user's role and username in user handler.
- Update `UpdateRole` to modify user base paths when role paths change, and clear user cache accordingly.
- Import `errors` package to handle error messages.
2025-07-27 22:25:45 +08:00
千石
f61d13d433 refactor(convert_role): Improve role conversion logic for legacy formats (#9219)
- Add new imports: `database/sql`, `encoding/json`, and `conf` package in `convert_role.go`.
- Simplify permission entry initialization by removing redundant struct formatting.
- Update error logging messages for better clarity.
- Replace `op.GetUsers` with direct database access for fetching user roles.
- Implement role update logic using `rawDb` and handle legacy int role conversion.
- Count the number of users whose roles are updated and log completion.
- Introduce `IsLegacyRoleDetected` function to check for legacy role formats.
- Modify `cmd/common.go` to invoke role conversion if legacy format is detected.
2025-07-26 15:20:08 +08:00
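A hedged sketch of what the detection could look like: the legacy format stores a bare integer where the new format stores a JSON array, so a stored value that decodes as an int signals the old format. Table and column names are assumptions:

```go
package v3_46_0

import (
	"database/sql"
	"encoding/json"
)

// IsLegacyRoleDetected reports whether any stored role value still uses
// the legacy bare-int format (e.g. `2`) instead of the JSON-array
// format (e.g. `[2]`).
func IsLegacyRoleDetected(db *sql.DB) bool {
	rows, err := db.Query(`SELECT role FROM users`) // table name assumed
	if err != nil {
		return false
	}
	defer rows.Close()
	for rows.Next() {
		var raw sql.NullString
		if rows.Scan(&raw) != nil || !raw.Valid {
			continue
		}
		var legacy int
		if json.Unmarshal([]byte(raw.String), &legacy) == nil {
			return true // a bare int decodes cleanly: legacy format found
		}
	}
	return false
}
```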
千石
00120cba27 feat: enhance permission control and label management (#9215)
* Label management

* Optimize PR checks

* feat(role): Implement role management functionality

- Add role management routes in `server/router.go` for listing, getting, creating, updating, and deleting roles
- Introduce `initRoles()` in `internal/bootstrap/data/data.go` for initializing roles during bootstrap
- Create `internal/op/role.go` to handle role operations including caching and singleflight
- Implement role handler functions in `server/handles/role.go` for API responses
- Define database operations for roles in `internal/db/role.go`
- Extend `internal/db/db.go` for role model auto-migration
- Design `internal/model/role.go` to represent role structure with ID, name, description, base path, and permissions
- Initialize default roles (`admin` and `guest`) in `internal/bootstrap/data/role.go` during startup

* refactor(user roles): Support multiple roles for users

- Change the `Role` field type from `int` to `[]int` in `drivers/alist_v3/types.go` and `drivers/quqi/types.go`.
- Update the `Role` field in `internal/model/user.go` to use a new `Roles` type with JSON and database support.
- Modify `IsGuest` and `IsAdmin` methods to check for roles using `Contains` method.
- Update `GetUserByRole` method in `internal/db/user.go` to handle multiple roles.
- Add `roles.go` to define a new `Roles` type with JSON marshalling and scanning capabilities.
- Adjust code in `server/handles/user.go` to compare roles with `utils.SliceEqual`.
- Change role initialization for users in `internal/bootstrap/data/dev.go` and `internal/bootstrap/data/user.go`.
- Update `Role` handling in `server/handles/task.go`, `server/handles/ssologin.go`, and `server/handles/ldap_login.go`.

* feat(user/role): Add path limit check for user and role permissions

- Add new permission bit for checking path limits in `user.go`
- Implement `CheckPathLimit` method in `User` struct to validate path access
- Modify `JoinPath` method in `User` to enforce path limit checks
- Update `role.go` to include path limit logic in `Role` struct
- Document new permission bit in `Role` and `User` comments for clarity

* feat(permission): Add role-based permission handling

- Introduce `role_perm.go` for managing user permissions based on roles.
- Implement `HasPermission` and `MergeRolePermissions` functions.
- Update `webdav.go` to utilize role-based permissions instead of direct user checks.
- Modify `fsup.go` to integrate `CanAccessWithRoles` function.
- Refactor `fsread.go` to use `common.HasPermission` for permission validation.
- Adjust `fsmanage.go` for role-based access control checks.
- Enhance `ftp.go` and `sftp.go` to manage FTP access via roles.
- Update `fsbatch.go` to employ `MergeRolePermissions` for batch operations.
- Replace direct user permission checks with role-based permission handling across various modules.

* refactor(user): Replace integer role values with role IDs

- Change `GetAdmin()` and `GetGuest()` functions to retrieve role by name and use role ID.
- Add patch for version `v3.45.2` to convert legacy integer roles to role IDs.
- Update `dev.go` and `user.go` to use role IDs instead of integer values for roles.
- Remove redundant code in `role.go` related to guest role creation.
- Modify `ssologin.go` and `ldap_login.go` to set user roles to nil instead of using integer roles.
- Introduce `convert_roles.go` to handle conversion of legacy roles and ensure role existence in the database.

* feat(role_perm): implement support for multiple base paths for roles

- Modify role permission checks to support multiple base paths
- Update role creation and update functions to handle multiple base paths
- Add migration script to convert old base_path to base_paths
- Define new Paths type for handling multiple paths in the model
- Adjust role model to replace BasePath with BasePaths
- Update existing patches to handle roles with multiple base paths
- Update bootstrap data to reflect the new base_paths field

* feat(role): Restrict modifications to default roles (admin and guest)

- Add validation to prevent changes to "admin" and "guest" roles in `UpdateRole` and `DeleteRole` functions.
- Introduce `ErrChangeDefaultRole` error in `internal/errs/role.go` to standardize error messaging.
- Update role-related API handlers in `server/handles/role.go` to enforce the new restriction.
- Enhance comments in `internal/bootstrap/data/role.go` to clarify the significance of default roles.
- Ensure consistent error responses for unauthorized role modifications across the application.

* 🔄 **refactor(role): Enhance role permission handling**

- Replaced `BasePaths` with `PermissionPaths` in `Role` struct for better permission granularity.
- Introduced JSON serialization for `PermissionPaths` using `RawPermission` field in `Role` struct.
- Implemented `BeforeSave` and `AfterFind` GORM hooks for handling `PermissionPaths` serialization.
- Refactored permission calculation logic in `role_perm.go` to work with `PermissionPaths`.
- Updated role creation logic to initialize `PermissionPaths` for `admin` and `guest` roles.
- Removed deprecated `CheckPathLimit` method from `Role` struct.

* fix(model/user/role): update permission settings for admin and role

- Change `RawPermission` field in `role.go` to hide JSON representation
- Update `Permission` field in `user.go` to `0xFFFF` for full access
- Modify `PermissionScopes` in `role.go` to `0xFFFF` for enhanced permissions

* 🔒 feat(role-permissions): Enhance role-based access control

- Introduce `canReadPathByRole` function in `role_perm.go` to verify path access based on user roles
- Modify `CanAccessWithRoles` to include role-based path read check
- Add `RoleNames` and `Permissions` to `UserResp` struct in `auth.go` for enhanced user role and permission details
- Implement role details aggregation in `auth.go` to populate `RoleNames` and `Permissions`
- Update `User` struct in `user.go` to include `RolesDetail` for more detailed role information
- Enhance middleware in `auth.go` to load and verify detailed role information for users
- Move `guest` user initialization logic in `user.go` to improve code organization and avoid repetition

* 🔒 fix(permissions): Add permission checks for archive operations

- Add `MergeRolePermissions` and `HasPermission` checks to validate user access for reading archives
- Ensure users have `PermReadArchives` before proceeding with `GetNearestMeta` in specific archive paths
- Implement permission checks for decompress operations, requiring `PermDecompress` for source paths
- Return `PermissionDenied` errors with 403 status if user lacks necessary permissions

* 🔒 fix(server): Add permission check for offline download

- Add permission merging logic for user roles
- Check user has permission for offline download addition
- Return error response with "permission denied" if check fails

* feat(role-permission): Implement path-based role permission checks

- Add `CheckPathLimitWithRoles` function to validate access based on `PermPathLimit` permission.
- Integrate `CheckPathLimitWithRoles` in `offline_download` to enforce path-based access control.
- Apply `CheckPathLimitWithRoles` across file system management operations (e.g., creation, movement, deletion).
- Ensure `CheckPathLimitWithRoles` is invoked for batch operations and archive-related actions.
- Update error handling to return `PermissionDenied` if the path validation fails.
- Import `errs` package in `offline_download` for consistent error responses.

* ♻️ refactor(access-control): Update access control logic to use role-based checks

- Remove deprecated logic from `CanAccess` function in `check.go`, replacing it with `CanAccessWithRoles` for improved role-based access control.
- Modify calls in `search.go` to use `CanAccessWithRoles` for more precise handling of permissions.
- Update `fsread.go` to utilize `CanAccessWithRoles`, ensuring accurate access validation based on user roles.
- Simplify import statements in `check.go` by removing unused packages to clean up the codebase.

* feat(fs): Improve visibility logic for hidden files

- Import `server/common` package to handle permissions more robustly
- Update `whetherHide` function to use `MergeRolePermissions` for user-specific path permissions
- Replace direct user checks with `HasPermission` for `PermSeeHides`
- Enhance logic to ensure `nil` user cases are handled explicitly

* Label management

* feat(db/auth/user): Enhance role handling and clean permission paths

- Comment out role modification checks in `server/handles/user.go` to allow flexible role changes.
- Improve permission path handling in `server/handles/auth.go` by normalizing and deduplicating paths.
- Introduce `addedPaths` map in `CurrentUser` to prevent duplicate permissions.

* feat(storage/db): Implement role permissions path prefix update

- Add `UpdateRolePermissionsPathPrefix` function in `role.go` to update role permissions paths.
- Modify `storage.go` to call the new function when the mount path is renamed.
- Introduce path cleaning and prefix matching logic for accurate path updates.
- Ensure roles are updated only if their permission scopes are modified.
- Handle potential errors with informative messages during database operations.

* feat(role-migration): Implement role conversion and introduce NEWGENERAL role

- Add `NEWGENERAL` to the roles enumeration in `user.go`
- Create new file `convert_role.go` for migrating legacy roles to new model
- Implement `ConvertLegacyRoles` function to handle role conversion with permission scopes
- Add `convert_role.go` patch to `all.go` under version `v3.46.0`

* feat(role/auth): Add role retrieval by user ID and update path prefixes

- Add `GetRolesByUserID` function for efficient role retrieval by user ID
- Implement `UpdateUserBasePathPrefix` to update user base paths
- Modify `UpdateRolePermissionsPathPrefix` to return modified role IDs
- Update `auth.go` middleware to use the new role retrieval function
- Refresh role and user caches upon path prefix updates to maintain consistency

---------

Co-authored-by: Leslie-Xy <540049476@qq.com>
2025-07-26 09:51:59 +08:00
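The permission model described above reduces to bitmask merging over path prefixes. A sketch under that reading; the function names come from the commit messages, but the bit layout and signatures are assumptions:

```go
package common

import "strings"

// Permission bits as described in the commits; the actual bit layout
// in alist may differ.
const (
	PermSeeHides int32 = 1 << iota
	PermReadArchives
	PermDecompress
	PermPathLimit
)

// PermissionScope pairs a path prefix with a permission bitmask,
// mirroring the PermissionPaths idea from the refactor above.
type PermissionScope struct {
	Path       string
	Permission int32
}

// MergeRolePermissions ORs the bits of every scope whose path prefix
// covers the requested path, yielding the user's effective permission.
func MergeRolePermissions(scopes []PermissionScope, reqPath string) int32 {
	var perm int32
	for _, s := range scopes {
		prefix := strings.TrimSuffix(s.Path, "/")
		if reqPath == s.Path || strings.HasPrefix(reqPath, prefix+"/") {
			perm |= s.Permission
		}
	}
	return perm
}

// HasPermission checks a single permission bit.
func HasPermission(perm, bit int32) bool {
	return perm&bit == bit
}
```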
Sakana
5e15a360b7 feat(github_releases): concurrently request the GitHub API (#9211) 2025-07-24 15:30:12 +08:00
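A typical way to fan out such requests in Go is `errgroup`; a hedged sketch, with the endpoint list, concurrency cap, and response handling all as assumptions:

```go
package github_releases

import (
	"context"
	"io"
	"net/http"

	"golang.org/x/sync/errgroup"
)

// fetchAll requests several GitHub API URLs concurrently, failing fast
// on the first error.
func fetchAll(ctx context.Context, urls []string) ([][]byte, error) {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(8) // stay polite to the API rate limit
	bodies := make([][]byte, len(urls))
	for i, u := range urls {
		i, u := i, u // capture loop variables (pre-Go 1.22 semantics)
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			bodies[i], err = io.ReadAll(resp.Body)
			return err
		})
	}
	return bodies, g.Wait()
}
```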
alist666
2bdc5bef9e Merge pull request #9207 from AlistGo/fix-aliyundirve
fix: update DriveId assignment to use DeviceID from Addition struct
2025-07-17 13:21:32 +08:00
AlistDev
13ea1c1405 fix: restore user-agent header in HTTP requests 2025-07-16 20:39:05 +08:00
AlistDev
fd41186679 fix: update DriveId assignment to use DeviceID from Addition struct 2025-07-14 23:04:40 +08:00
alist666
9da56bab4d Merge pull request #9171 from AlistGo/fix-189pc-login
fix: update documentation links to point to the new domain and fix 189pc getToken fail
2025-06-28 00:20:50 +08:00
alistgo
51eeb22465 fix: dead link 2025-06-27 23:58:52 +08:00
Alone
b1586612ca feat: add ghcr docker image (#8524) 2025-06-27 23:39:23 +08:00
AlistDev
7aeb0ab078 fix: update documentation links to point to the new domain and fix 189pc getToken fail 2025-06-27 16:28:09 +08:00
MadDogOwner
ffa03bfda1 feat(cloudreve_v4): add Cloudreve V4 driver (#8470 closes #8328 #8467)
* feat(cloudreve_v4): add Cloudreve V4 driver implementation

* fix(cloudreve_v4): update request handling to prevent token refresh loop

* feat(onedrive): implement retry logic for upload failures

* feat(cloudreve): implement retry logic for upload failures

* feat(cloudreve_v4): support cloud sorting

* fix(cloudreve_v4): improve token handling in Init method

* feat(cloudreve_v4): support share

* feat(cloudreve): support reference

* feat(cloudreve_v4): support version upload

* fix(cloudreve_v4): add SetBody in upLocal

* fix(cloudreve_v4): update URL structure in Link and FileUrlResp
2025-05-24 13:38:43 +08:00
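The upload retries mentioned in several of these commits follow a common shape; an illustrative wrapper, not the driver's actual policy:

```go
package cloudreve_v4

import "time"

// withRetry re-runs an upload step a few times before giving up; the
// attempt count and linear backoff here are illustrative.
func withRetry(attempts int, do func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = do(); err == nil {
			return nil
		}
		time.Sleep(time.Duration(i+1) * time.Second)
	}
	return err
}
```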
Andy Hsu
630cf30af5 feat(115_open): implement rate limiting for API requests 2025-05-11 13:39:32 +08:00
Andy Hsu
bc5117fa4f fix(115_open): add delay in MakeDir function to handle rate limiting 2025-05-02 16:53:39 +08:00
yoclo
11e7284824 fix: prevent guest user from updating profile (#8447) 2025-04-29 23:14:16 +08:00
MadDogOwner
b2b91a9281 feat(doubao): add get_download_info API and download_api option (#8428) 2025-04-27 20:00:25 +08:00
MadDogOwner
f541489d7d fix(netease_music): change ListResp size fields from string to int64 (#8417) 2025-04-27 19:59:30 +08:00
bigQY
6d9c554f6f feat: add UseLargeThumbnail for 139 (#8424) 2025-04-27 19:58:45 +08:00
Mmx
e532ab31ef fix: remove auth middleware for authn login (#8407) 2025-04-27 19:58:09 +08:00
Mmx
bf0705ec17 fix: shebang of entrypoint.sh (#8408) 2025-04-27 19:56:34 +08:00
gdm257
17b42b9fa4 fix(mega): use newest file for same filename (#8422 close #8344)
Mega supports duplicate names, but alist does not.
In the `List()` method, the driver returns multiple files with the same name,
which makes alist use the oldest version of a file for listing and downloading.
So it is necessary to filter out older files with the same name within a folder.
After the fix, all CRUD operations work normally.

Refs #8344
2025-04-27 19:56:04 +08:00
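A sketch of that filter, keeping only the newest entry per name; the listing type is a stand-in for the driver's own:

```go
package mega

import "time"

// entry is a stand-in for the driver's listing item; real field names
// differ.
type entry struct {
	Name    string
	ModTime time.Time
}

// keepNewest filters a listing so that, among files sharing a name,
// only the most recently modified one survives, matching the fix
// described in the commit above.
func keepNewest(files []entry) []entry {
	newest := make(map[string]entry, len(files))
	for _, f := range files {
		if cur, ok := newest[f.Name]; !ok || f.ModTime.After(cur.ModTime) {
			newest[f.Name] = f
		}
	}
	out := make([]entry, 0, len(newest))
	for _, f := range newest {
		out = append(out, f)
	}
	return out
}
```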
Sam- Pan(潘绍森)
41bdab49aa fix(139): incorrect host (#8368)
* fix: correct new personal cloud path for 139Driver

* Update drivers/139/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix bug

---------

Co-authored-by: panshaosen <19802021493@139.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: j2rong4cn <253551464@qq.com>
2025-04-19 14:29:12 +08:00
Lin Tianchuan
8f89c55aca perf(local): avoid duplicate parsing of VideoThumbPos (#7812)
* feat(local): support percent for video thumbnail

The percentage determines the point in the video (as a percentage of the total duration) at which the thumbnail will be generated.

* feat(local): support both time and percent for video thumbnail

* refactor(local): avoid duplicate parsing of VideoThumbPos
2025-04-19 14:27:13 +08:00
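The position can be a percentage of the total duration or an absolute time in seconds, and the refactor parses it once at driver init instead of on every request. A sketch of the parsing rule; names are illustrative:

```go
package local

import (
	"fmt"
	"strconv"
	"strings"
)

// parseThumbPos resolves a thumbnail position that is either a
// percentage of the duration ("20%") or an absolute time in seconds
// ("30"). The real parsing and its caching live in the local driver.
func parseThumbPos(pos string, durationSec float64) (float64, error) {
	if strings.HasSuffix(pos, "%") {
		p, err := strconv.ParseFloat(strings.TrimSuffix(pos, "%"), 64)
		if err != nil || p < 0 || p > 100 {
			return 0, fmt.Errorf("invalid percent %q", pos)
		}
		return durationSec * p / 100, nil
	}
	return strconv.ParseFloat(pos, 64)
}
```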
wxnq
b449312da8 fix(docker_release): avoid duplicate occupation in docker image (#8393 close #8388)
* fix(ci): modify the method of adding permissions

* fix(build): modify the method of adding permissions(to keep up with ci)
2025-04-19 14:26:19 +08:00
MadDogOwner
52d4e8ec47 fix(lanzou): remove JavaScript comments from response data (#8386)
* feat(lanzou): add RemoveJSComment function to clean JavaScript comments from HTML

* feat(lanzou): remove comments from share page data in getFilesByShareUrl function

* fix(lanzou): optimize RemoveJSComment function to improve comment removal logic
2025-04-19 14:24:43 +08:00
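A deliberately naive sketch of such a comment stripper; the actual `RemoveJSComment` logic may differ, in particular around `//` inside strings and URLs:

```go
package lanzou

import "regexp"

// jsComment matches block comments plus line comments not preceded by
// ':' (so "https://..." survives). Treat this as illustrative only.
var jsComment = regexp.MustCompile(`(?s)/\*.*?\*/|([^:])//[^\n]*`)

// RemoveJSComment strips JavaScript comments from embedded share-page
// scripts before the driver parses them.
func RemoveJSComment(src string) string {
	return jsComment.ReplaceAllString(src, "$1")
}
```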
New Future
28e5b5759e feat(azure_blob): implement GetRootId interface in Addition struct (#8389)
fix failure to get directories
2025-04-19 14:23:48 +08:00
asdfghjkl
477c43971f feat(doubao_share): support doubao_share link (#8376)
Co-authored-by: anobodys <anobodys@gmail.com>
2025-04-19 14:22:43 +08:00
Yifan Gao
0a9921fa79 fix(aliyundrive_open): resolve file duplication issues and improve path handling (#8358)
* fix(aliyundrive_open): resolve file duplication issues and improve path handling

1. Fix file duplication by implementing a new removeDuplicateFiles method that cleans up duplicate files after operations
2. Change Move operation to use "ignore" for check_name_mode instead of "refuse" to allow moves when destination has same filename
3. Set Copy operation to handle duplicates by removing them after successful copy
4. Improve path handling for all file operations (Move, Rename, Put, MakeDir) by properly maintaining the full path of objects
5. Implement GetRoot interface for proper root object initialization with correct path
6. Add proper path management in List operation to ensure objects have correct paths
7. Fix path handling in error cases and improve logging of failures

* refactor(aliyundrive_open): change error logging to warnings for duplicate file removal

Updated the Move, Rename, and Copy methods to log warnings instead of errors when duplicate file removal fails, as the primary operations have already completed successfully. This improves the clarity of logs without affecting the functionality.

* Update drivers/aliyundrive_open/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-04-19 14:22:12 +08:00
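A sketch of the duplicate-selection step; the real `removeDuplicateFiles` also lists the directory and issues the delete calls:

```go
package aliyundrive_open

// driveFile is a stand-in for the API's file object.
type driveFile struct {
	ID   string
	Name string
}

// selectDuplicates picks the IDs to trash after a Move/Copy: every
// file in the target directory sharing the kept file's name but not
// its ID.
func selectDuplicates(listing []driveFile, keepID, name string) []string {
	var ids []string
	for _, f := range listing {
		if f.Name == name && f.ID != keepID {
			ids = append(ids, f.ID)
		}
	}
	return ids
}
```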
Lee CQ
88abb323cb feat(url-tree): implement the Put interface to support adding links directly to the UrlTree on the web side (#8312)
* feat(url-tree): support PUT

* feat(url-tree): split the path and content when updating the UrlTree #8303

* fix: stdpath.Join call

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Andy Hsu <i@nn.ci>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-04-12 17:27:56 +08:00
asdfghjkl
f0b1aeaf8d feat(doubao): support upload (#8302 close #8335)
* feat(doubao): support upload

* fix(doubao): fix file list cursor

* fix: handle strconv.Atoi err

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: anobodys <anobodys@gmail.com>
Co-authored-by: Andy Hsu <i@nn.ci>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-04-12 17:12:40 +08:00
Yifan Gao
c8470b9a2a fix(fs): remove old target object from cache before updating (#8352) 2025-04-12 17:09:46 +08:00
Dgs
d0ee90cd11 fix(thunder): fix login issue (#8342 close #8288) 2025-04-12 17:05:58 +08:00
Dgs
544a7ea022 fix(pikpak&pikpak_share): fix WebPackageName (#8305) 2025-04-12 17:03:58 +08:00
j2rong4cn
4f5cabc725 feat: add h2c for http server (#8294)
* feat: add h2c for http server

* chore(config): add EnableH2c option
2025-04-12 17:02:51 +08:00
j2rong4cn
a2f266277c fix(net): unexpected write (#8291 close #8281) 2025-04-12 17:01:52 +08:00
jerry
a4bfbf8a83 fix(ipfs): fix problems (#8252)
* fix: 🐛 (ipfs): fix the list error caused by an improper path-join function

Use more standard path joining to fix access failures for paths containing Chinese characters or symbols

* refactor: normalize naming

* Remove redundant conditional checks

* fix: refactor the code using the withresult method and add a get method to improve performance

* fix: allow the get method to fetch directories

Remove redundant checks

* fix: allow copy, rename, and move to overwrite

* fix: fix directories being deleted by the move method

* refactor: tidy up the code around returning Path

* fix: fix ipfs paths made inaccessible by the get method

* fix: fix the get method's faulty path handling

Fix the get method and remove an accidentally added directory

* fix: fix path join

use path.Join instead of filepath.Join to avoid OS-specific separator problems

* fix: rm filepath ref

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2025-04-12 17:01:30 +08:00
j2rong4cn
ddffacf07b perf: optimize IO read/write usage (#8243)
* perf: optimize IO read/write usage

* .

* Update drivers/139/driver.go

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Co-authored-by: MadDogOwner <xiaoran@xrgzs.top>
2025-04-12 16:55:31 +08:00
xiaoQQya
3375c26c41 perf(quark_uc&quark_uc_tv): native proxy multithreading (#8287)
* perf(quark_uc): native proxy multithreading

* perf(quark_uc_tv): native proxy multithreading

* chore(fs): add id to file query results
2025-04-03 20:50:29 +08:00
asdfghjkl
ab68faef44 fix(baidu_netdisk): add another video crack api (#8275)
Co-authored-by: anobodys <anobodys@gmail.com>
2025-04-03 20:44:49 +08:00
New Future
2e21df0661 feat(driver): add Azure Blob Storage driver (#8261)
* add azure-blob driver

* fix nested folders copy

* feat(driver): add Azure Blob Storage driver

Implement the Azure Blob Storage driver with support for:
- Initializing the connection with shared-key authentication
- Listing directories and files
- Generating temporary SAS URLs for file access
- Creating directories
- Moving and renaming files/folders
- Copying files/folders
- Deleting files/folders
- Uploading files with progress tracking

This driver lets users seamlessly access and manage data in Azure Blob Storage through the AList platform.

* feat(driver): update help doc for Azure Blob

* doc(readme): add new driver

* Update drivers/azure_blob/driver.go

fix(azure): fix name check

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update README.md

doc(readme): fix the link

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix(azure): fix log and link

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-04-03 20:43:21 +08:00
MadDogOwner
af18cb138b feat(139): add option ReportRealSize (#8244 close #8141)
* feat(139): handle family upload errors

* feat(139): add option `ReportRealSize`

* Update drivers/139/driver.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-04-03 20:41:59 +08:00
j2rong4cn
31c55a2adf fix(archive): unable to preview (#8248)
* fix(archive): unable to preview

* fix bug
2025-04-03 20:41:05 +08:00
MadDogOwner
465dd1703d feat(cloudreve): s3 policy support (#8245)
* feat(cloudreve): s3 policy support

* fix(cloudreve): correct potential off-by-one error in `etags` initialization
2025-04-03 20:40:19 +08:00
j2rong4cn
a6304285b6 fix: revert "refactor(net): pass request header" (#8269)
5be50e77d9
2025-04-03 20:35:52 +08:00
YangXu
affd0cecd1 fix(pikpak&pikpak_share): update algorithms (#8278) 2025-04-03 20:35:14 +08:00
MadDogOwner
37640221c0 fix(doubao): update file size type to int64 (#8289) 2025-04-03 20:34:27 +08:00
Andy Hsu
e4bd223d1c fix(deps): update 115-sdk-go to v0.1.5 2025-04-03 20:29:53 +08:00
174 changed files with 9280 additions and 1098 deletions

.github/FUNDING.yml vendored

@@ -10,4 +10,4 @@ liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: ['https://alist.nn.ci/guide/sponsor.html']
custom: ['https://alistgo.com/guide/sponsor.html']


@@ -16,14 +16,14 @@ body:
您必须勾选以下所有内容否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
options:
- label: |
I have read the [documentation](https://alist.nn.ci).
我已经阅读了[文档](https://alist.nn.ci)。
I have read the [documentation](https://alistgo.com).
我已经阅读了[文档](https://alistgo.com)。
- label: |
I'm sure there are no duplicate issues or discussions.
我确定没有重复的issue或讨论。
- label: |
I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host)`依赖`或`操作`)。
I'm sure it's due to `AList` and not something else(such as [Network](https://alistgo.com/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alistgo.com/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host)`依赖`或`操作`)。
- label: |
I'm sure this issue is not fixed in the latest version.
我确定这个问题在最新版本中没有被修复。


@@ -7,7 +7,7 @@ body:
label: Please make sure of the following things
description: You may select more than one, even select all.
options:
- label: I have read the [documentation](https://alist.nn.ci).
- label: I have read the [documentation](https://alistgo.com).
- label: I'm sure there are no duplicate issues or discussions.
- label: I'm sure this feature is not implemented.
- label: I'm sure it's a reasonable and popular requirement.


@@ -119,7 +119,7 @@ jobs:
- name: Checkout repo
uses: actions/checkout@v4
with:
repository: alist-org/desktop-release
repository: AlistGo/desktop-release
ref: main
persist-credentials: false
fetch-depth: 0
@@ -135,4 +135,4 @@ jobs:
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: alist-org/desktop-release
repository: AlistGo/desktop-release


@@ -72,7 +72,7 @@ jobs:
- name: Checkout repo
uses: actions/checkout@v4
with:
repository: alist-org/desktop-release
repository: AlistGo/desktop-release
ref: main
persist-credentials: false
fetch-depth: 0
@@ -89,4 +89,4 @@ jobs:
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: alist-org/desktop-release
repository: AlistGo/desktop-release


@@ -18,6 +18,7 @@ env:
REGISTRY: 'xhofe/alist'
REGISTRY_USERNAME: 'xhofe'
REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
GITHUB_CR_REPO: ghcr.io/${{ github.repository }}
ARTIFACT_NAME: 'binaries_docker_release'
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
IMAGE_PUSH: ${{ github.event_name == 'push' }}
@@ -114,11 +115,21 @@ jobs:
username: ${{ env.REGISTRY_USERNAME }}
password: ${{ env.REGISTRY_PASSWORD }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
logout: true
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}
images: |
${{ env.REGISTRY }}
${{ env.GITHUB_CR_REPO }}
tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }}
flavor: |
${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }}


@@ -32,10 +32,9 @@ RUN apk update && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
COPY --from=builder /app/bin/alist ./
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /opt/alist/alist && \
chmod +x /entrypoint.sh && /entrypoint.sh version
COPY --chmod=755 --from=builder /app/bin/alist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/alist/data/


@@ -1,4 +1,4 @@
FROM alpine:edge
FROM alpine:3.20.7
ARG TARGETPLATFORM
ARG INSTALL_FFMPEG=false
@@ -24,12 +24,11 @@ RUN apk update && \
/opt/aria2/.aria2/tracker.sh ; \
rm -rf /var/cache/apk/*
COPY /build/${TARGETPLATFORM}/alist ./
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /opt/alist/alist && \
chmod +x /entrypoint.sh && /entrypoint.sh version
COPY --chmod=755 /build/${TARGETPLATFORM}/alist ./
COPY --chmod=755 entrypoint.sh /entrypoint.sh
RUN /entrypoint.sh version
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
VOLUME /opt/alist/data/
EXPOSE 5244 5245
CMD [ "/entrypoint.sh" ]
CMD [ "/entrypoint.sh" ]


@@ -1,5 +1,5 @@
<div align="center">
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a>
<a href="https://alist.nn.ci/guide/sponsor.html">
<a href="https://alistgo.com/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a>
</div>
@@ -77,6 +77,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] [Dropbox](https://www.dropbox.com/)
- [x] [FeijiPan](https://www.feijipan.com/)
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode
@@ -87,7 +88,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
- [x] Dark mode
- [x] I18n
- [x] Protected routes (password protection and authentication)
- [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
- [x] WebDav (see https://alistgo.com/guide/webdav.html for details)
- [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare Workers proxy
- [x] File/Folder package download
@@ -100,6 +101,10 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
<https://alistgo.com/>
## API Documentation (via Apifox):
<https://alist-public.apifox.cn/>
## Demo
<https://al.nn.ci>
@@ -111,7 +116,7 @@ Please go to our [discussion forum](https://github.com/alist-org/alist/discussio
## Sponsor
AList is an open-source software, if you happen to like this project and want me to keep going, please consider sponsoring me or providing a single donation! Thanks for all the love and support:
https://alist.nn.ci/guide/sponsor.html
https://alistgo.com/guide/sponsor.html
### Special sponsors


@@ -1,5 +1,5 @@
<div align="center">
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a>
<a href="https://alist.nn.ci/zh/guide/sponsor.html">
<a href="https://alistgo.com/zh/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a>
</div>
@@ -86,7 +86,7 @@
- [x] 黑暗模式
- [x] 国际化
- [x] 受保护的路由(密码保护和身份验证)
- [x] WebDav (具体见 https://alist.nn.ci/zh/guide/webdav.html)
- [x] WebDav (具体见 https://alistgo.com/zh/guide/webdav.html)
- [x] [Docker 部署](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare workers 中转
- [x] 文件/文件夹打包下载
@@ -97,7 +97,11 @@
## 文档
<https://alist.nn.ci/zh/>
<https://alistgo.com/zh/>
## API 文档(通过 Apifox 提供)
<https://alist-public.apifox.cn/>
## Demo
@@ -109,7 +113,7 @@
## 赞助
AList 是一个开源软件如果你碰巧喜欢这个项目并希望我继续下去请考虑赞助我或提供一个单一的捐款感谢所有的爱和支持https://alist.nn.ci/zh/guide/sponsor.html
AList 是一个开源软件如果你碰巧喜欢这个项目并希望我继续下去请考虑赞助我或提供一个单一的捐款感谢所有的爱和支持https://alistgo.com/zh/guide/sponsor.html
### 特别赞助


@@ -1,5 +1,5 @@
<div align="center">
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alistgo.com"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@@ -31,7 +31,7 @@
<a href="https://hub.docker.com/r/xhofe/alist">
<img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
</a>
<a href="https://alist.nn.ci/guide/sponsor.html">
<a href="https://alistgo.com/guide/sponsor.html">
<img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
</a>
</div>
@@ -87,7 +87,7 @@
- [x] ダークモード
- [x] 国際化
- [x] 保護されたルート (パスワード保護と認証)
- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
- [x] WebDav (詳細は https://alistgo.com/guide/webdav.html を参照)
- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare ワーカープロキシ
- [x] ファイル/フォルダパッケージのダウンロード
@@ -98,7 +98,11 @@
## ドキュメント
<https://alist.nn.ci/>
<https://alistgo.com/>
## APIドキュメントApifox 提供)
<https://alist-public.apifox.cn/>
## デモ
@@ -111,7 +115,7 @@
## スポンサー
AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討くださいすべての愛とサポートに感謝します:
https://alist.nn.ci/guide/sponsor.html
https://alistgo.com/guide/sponsor.html
### スペシャルスポンサー


@@ -93,7 +93,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://musl.cc/"
BASE="https://github.com/go-cross/musl-toolchain-archive/releases/latest/download/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
@@ -245,7 +245,7 @@ BuildReleaseFreeBSD() {
cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
echo building for freebsd-${os_arch}
sudo mkdir -p "/opt/freebsd/${os_arch}"
wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
wget -q https://download.freebsd.org/releases/${os_arch}/14.3-RELEASE/base.txz
sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
rm base.txz
export GOOS=freebsd


@@ -1,6 +1,7 @@
package cmd
import (
"github.com/alist-org/alist/v3/internal/bootstrap/patch/v3_46_0"
"os"
"path/filepath"
"strconv"
@@ -16,6 +17,12 @@ func Init() {
bootstrap.InitConfig()
bootstrap.Log()
bootstrap.InitDB()
if v3_46_0.IsLegacyRoleDetected() {
utils.Log.Warnf("Detected legacy role format, executing ConvertLegacyRoles patch early...")
v3_46_0.ConvertLegacyRoles()
}
data.InitData()
bootstrap.InitStreamLimit()
bootstrap.InitIndex()


@@ -16,7 +16,7 @@ var RootCmd = &cobra.Command{
Short: "A file list program that supports multiple storage.",
Long: `A file list program that supports multiple storage,
built with love by Xhofe and friends in Go/Solid.js.
Complete documentation is available at https://alist.nn.ci/`,
Complete documentation is available at https://alistgo.com/`,
}
func Execute() {


@@ -4,9 +4,6 @@ import (
"context"
"errors"
"fmt"
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
"github.com/KirCute/sftpd-alist"
"github.com/alist-org/alist/v3/internal/fs"
"net"
"net/http"
"os"
@@ -16,14 +13,19 @@ import (
"syscall"
"time"
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
"github.com/KirCute/sftpd-alist"
"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/internal/bootstrap"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
// ServerCmd represents the server command
@@ -47,11 +49,15 @@ the address is defined in config file`,
r := gin.New()
r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.Init(r)
var httpHandler http.Handler = r
if conf.Conf.Scheme.EnableH2c {
httpHandler = h2c.NewHandler(r, &http2.Server{})
}
var httpSrv, httpsSrv, unixSrv *http.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
utils.Log.Infof("start HTTP server @ %s", httpBase)
httpSrv = &http.Server{Addr: httpBase, Handler: r}
httpSrv = &http.Server{Addr: httpBase, Handler: httpHandler}
go func() {
err := httpSrv.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
@@ -72,7 +78,7 @@ the address is defined in config file`,
}
if conf.Conf.Scheme.UnixFile != "" {
utils.Log.Infof("start unix server @ %s", conf.Conf.Scheme.UnixFile)
unixSrv = &http.Server{Handler: r}
unixSrv = &http.Server{Handler: httpHandler}
go func() {
listener, err := net.Listen("unix", conf.Conf.Scheme.UnixFile)
if err != nil {


@@ -405,7 +405,7 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
continue
}
if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)),
if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)),
chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
break
}


@@ -16,12 +16,14 @@ import (
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
sdk "github.com/xhofe/115-sdk-go"
"golang.org/x/time/rate"
)
type Open115 struct {
model.Storage
Addition
client *sdk.Client
client *sdk.Client
limiter *rate.Limiter
}
func (d *Open115) Config() driver.Config {
@@ -47,6 +49,16 @@ func (d *Open115) Init(ctx context.Context) error {
if err != nil {
return err
}
if d.Addition.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.Addition.LimitRate), 1)
}
return nil
}
func (d *Open115) WaitLimit(ctx context.Context) error {
if d.limiter != nil {
return d.limiter.Wait(ctx)
}
return nil
}
@@ -59,6 +71,9 @@ func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs)
pageSize := int64(200)
offset := int64(0)
for {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
CID: dir.GetID(),
Limit: pageSize,
@@ -84,6 +99,9 @@ func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs)
}
func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var ua string
if args.Header != nil {
ua = args.Header.Get("User-Agent")
@@ -113,6 +131,9 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName)
if err != nil {
return nil, err
@@ -129,6 +150,9 @@ func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
}
func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
_, err := d.client.Move(ctx, &sdk.MoveReq{
FileIDs: srcObj.GetID(),
ToCid: dstDir.GetID(),
@@ -140,6 +164,9 @@ func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj
}
func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
_, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
FileID: srcObj.GetID(),
FileNma: newName,
@@ -155,6 +182,9 @@ func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
_, err := d.client.Copy(ctx, &sdk.CopyReq{
PID: dstDir.GetID(),
FileID: srcObj.GetID(),
@@ -167,6 +197,9 @@ func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj
}
func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
_obj, ok := obj.(*Obj)
if !ok {
return fmt.Errorf("can't convert obj")
@@ -182,6 +215,9 @@ func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
tempF, err := file.CacheFullInTempFile()
if err != nil {
return err


@@ -9,9 +9,10 @@ type Addition struct {
// Usually one of two
driver.RootID
// define other
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
LimitRate float64 `json:"limit_rate" type:"float" default:"1" help:"limit all api request rate ([limit]r/1s)"`
AccessToken string
}


@@ -2,11 +2,8 @@ package _123
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"sync"
@@ -18,6 +15,7 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
@@ -187,25 +185,12 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
etag := file.GetHash().GetHash(utils.MD5)
var err error
if len(etag) < utils.MD5.Width {
// const DEFAULT int64 = 10485760
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := file.CacheFullInTempFile()
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
}()
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return err
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
etag = hex.EncodeToString(h.Sum(nil))
}
data := base.Json{
"driveId": 0,


@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"math"
"net/http"
"strconv"
@@ -70,27 +69,33 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
}
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
chunkSize := int64(1024 * 1024 * 16)
tmpF, err := file.CacheFullInTempFile()
if err != nil {
return err
}
// fetch s3 pre signed urls
chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
size := file.GetSize()
chunkSize := min(size, 16*utils.MB)
chunkCount := int(size / chunkSize)
lastChunkSize := size % chunkSize
if lastChunkSize > 0 {
chunkCount++
} else {
lastChunkSize = chunkSize
}
// only 1 batch is allowed
isMultipart := chunkCount > 1
batchSize := 1
getS3UploadUrl := d.getS3Auth
if isMultipart {
if chunkCount > 1 {
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
limited := driver.NewLimitedUploadStream(ctx, file)
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i
end := i + batchSize
if end > chunkCount+1 {
end = chunkCount + 1
}
end := min(i+batchSize, chunkCount+1)
s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
if err != nil {
return err
@@ -102,9 +107,9 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
}
curSize := chunkSize
if j == chunkCount {
curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
curSize = lastChunkSize
}
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.NewSectionReader(tmpF, chunkSize*int64(j-1), curSize), curSize, false, getS3UploadUrl)
if err != nil {
return err
}
@@ -115,12 +120,12 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
return d.completeS3(ctx, upReq, file, chunkCount > 1)
}
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader *io.SectionReader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
if uploadUrl == "" {
return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
}
req, err := http.NewRequest("PUT", uploadUrl, reader)
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, reader))
if err != nil {
return err
}
@@ -143,6 +148,7 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
}
s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
// retry
reader.Seek(0, io.SeekStart)
return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
}
if res.StatusCode != http.StatusOK {


@@ -161,12 +161,12 @@ func (d *Pan123) login() error {
}
res, err := base.RestyClient.R().
SetHeaders(map[string]string{
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"user-agent": "Dart/2.19(dart:io)-alist",
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
//"user-agent": "Dart/2.19(dart:io)-alist",
"platform": "web",
"app-version": "3",
//"user-agent": base.UserAgent,
"user-agent": base.UserAgent,
}).
SetBody(body).Post(SignIn)
if err != nil {
@@ -202,7 +202,7 @@ do:
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"authorization": "Bearer " + d.AccessToken,
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
"platform": "web",
"app-version": "3",
//"user-agent": base.UserAgent,

drivers/123_open/api.go Normal file

@@ -0,0 +1,191 @@
package _123Open
import (
"fmt"
"github.com/go-resty/resty/v2"
"net/http"
)
const (
// baseurl
ApiBaseURL = "https://open-api.123pan.com"
// auth
ApiToken = "/api/v1/access_token"
// file list
ApiFileList = "/api/v2/file/list"
// direct link
ApiGetDirectLink = "/api/v1/direct-link/url"
// mkdir
ApiMakeDir = "/upload/v1/file/mkdir"
// remove
ApiRemove = "/api/v1/file/trash"
// upload
ApiUploadDomainURL = "/upload/v2/file/domain"
ApiSingleUploadURL = "/upload/v2/file/single/create"
ApiCreateUploadURL = "/upload/v2/file/create"
ApiUploadSliceURL = "/upload/v2/file/slice"
ApiUploadCompleteURL = "/upload/v2/file/upload_complete"
// move
ApiMove = "/api/v1/file/move"
// rename
ApiRename = "/api/v1/file/name"
)
type Response[T any] struct {
Code int `json:"code"`
Message string `json:"message"`
Data T `json:"data"`
}
type TokenResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data TokenData `json:"data"`
}
type TokenData struct {
AccessToken string `json:"accessToken"`
ExpiredAt string `json:"expiredAt"`
}
type FileListResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data FileListData `json:"data"`
}
type FileListData struct {
LastFileId int64 `json:"lastFileId"`
FileList []File `json:"fileList"`
}
type DirectLinkResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data DirectLinkData `json:"data"`
}
type DirectLinkData struct {
URL string `json:"url"`
}
type MakeDirRequest struct {
Name string `json:"name"`
ParentID int64 `json:"parentID"`
}
type MakeDirResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data MakeDirData `json:"data"`
}
type MakeDirData struct {
DirID int64 `json:"dirID"`
}
type RemoveRequest struct {
FileIDs []int64 `json:"fileIDs"`
}
type UploadCreateResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadCreateData `json:"data"`
}
type UploadCreateData struct {
FileID int64 `json:"fileId"`
Reuse bool `json:"reuse"`
PreuploadID string `json:"preuploadId"`
SliceSize int64 `json:"sliceSize"`
Servers []string `json:"servers"`
}
type UploadUrlResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadUrlData `json:"data"`
}
type UploadUrlData struct {
PresignedURL string `json:"presignedUrl"`
}
type UploadCompleteResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data UploadCompleteData `json:"data"`
}
type UploadCompleteData struct {
FileID int `json:"fileID"`
Completed bool `json:"completed"`
}
func (d *Open123) Request(endpoint string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
client := resty.New()
token, err := d.tm.getToken()
if err != nil {
return nil, err
}
req := client.R().
SetHeader("Authorization", "Bearer "+token).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", "application/json").
SetResult(result)
if setup != nil {
setup(req)
}
switch method {
case http.MethodGet:
return req.Get(ApiBaseURL + endpoint)
case http.MethodPost:
return req.Post(ApiBaseURL + endpoint)
case http.MethodPut:
return req.Put(ApiBaseURL + endpoint)
default:
return nil, fmt.Errorf("unsupported method: %s", method)
}
}
func (d *Open123) RequestTo(fullURL string, method string, setup func(*resty.Request), result any) (*resty.Response, error) {
client := resty.New()
token, err := d.tm.getToken()
if err != nil {
return nil, err
}
req := client.R().
SetHeader("Authorization", "Bearer "+token).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", "application/json").
SetResult(result)
if setup != nil {
setup(req)
}
switch method {
case http.MethodGet:
return req.Get(fullURL)
case http.MethodPost:
return req.Post(fullURL)
case http.MethodPut:
return req.Put(fullURL)
default:
return nil, fmt.Errorf("unsupported method: %s", method)
}
}

drivers/123_open/driver.go Normal file

@@ -0,0 +1,277 @@
package _123Open
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"net/http"
"strconv"
)
type Open123 struct {
model.Storage
Addition
UploadThread int
tm *tokenManager
}
func (d *Open123) Config() driver.Config {
return config
}
func (d *Open123) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Open123) Init(ctx context.Context) error {
d.tm = newTokenManager(d.ClientID, d.ClientSecret)
if _, err := d.tm.getToken(); err != nil {
return fmt.Errorf("token 初始化失败: %w", err)
}
return nil
}
func (d *Open123) Drop(ctx context.Context) error {
return nil
}
func (d *Open123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
parentFileId, err := strconv.ParseInt(dir.GetID(), 10, 64)
if err != nil {
return nil, err
}
fileLastId := int64(0)
var results []File
for fileLastId != -1 {
files, err := d.getFiles(parentFileId, 100, fileLastId)
if err != nil {
return nil, err
}
for _, f := range files.Data.FileList {
if f.Trashed == 0 {
results = append(results, f)
}
}
fileLastId = files.Data.LastFileId
}
objs := make([]model.Obj, 0, len(results))
for _, f := range results {
objs = append(objs, f)
}
return objs, nil
}
func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if file.IsDir() {
return nil, errs.LinkIsDir
}
fileID := file.GetID()
var result DirectLinkResp
url := fmt.Sprintf("%s?fileID=%s", ApiGetDirectLink, fileID)
_, err := d.Request(url, http.MethodGet, nil, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("get link failed: %s", result.Message)
}
return &model.Link{
URL: result.Data.URL,
}, nil
}
func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
parentID, err := strconv.ParseInt(parentDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid parent ID: %w", err)
}
var result MakeDirResp
reqBody := MakeDirRequest{
Name: dirName,
ParentID: parentID,
}
_, err = d.Request(ApiMakeDir, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("mkdir failed: %s", result.Message)
}
newDir := File{
FileId: result.Data.DirID,
FileName: dirName,
Type: 1,
ParentFileId: int(parentID),
Size: 0,
Trashed: 0,
}
return newDir, nil
}
func (d *Open123) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid src file ID: %w", err)
}
dstID, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid dest dir ID: %w", err)
}
var result Response[any]
reqBody := map[string]interface{}{
"fileIDs": []int64{srcID},
"toParentFileID": dstID,
}
_, err = d.Request(ApiMove, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("move failed: %s", result.Message)
}
files, err := d.getFiles(dstID, 100, 0)
if err != nil {
return nil, fmt.Errorf("move succeed but failed to get target dir: %w", err)
}
for _, f := range files.Data.FileList {
if f.FileId == srcID {
return f, nil
}
}
return nil, fmt.Errorf("move succeed but file not found in target dir")
}
func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
srcID, err := strconv.ParseInt(srcObj.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid file ID: %w", err)
}
var result Response[any]
reqBody := map[string]interface{}{
"fileId": srcID,
"fileName": newName,
}
_, err = d.Request(ApiRename, http.MethodPut, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("rename failed: %s", result.Message)
}
parentID := 0
if file, ok := srcObj.(File); ok {
parentID = file.ParentFileId
}
files, err := d.getFiles(int64(parentID), 100, 0)
if err != nil {
return nil, fmt.Errorf("rename succeed but failed to get parent dir: %w", err)
}
for _, f := range files.Data.FileList {
if f.FileId == srcID {
return f, nil
}
}
return nil, fmt.Errorf("rename succeed but file not found in parent dir")
}
func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return nil, errs.NotSupport
}
func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
idStr := obj.GetID()
id, err := strconv.ParseInt(idStr, 10, 64)
if err != nil {
return fmt.Errorf("invalid file ID: %w", err)
}
var result Response[any]
reqBody := RemoveRequest{
FileIDs: []int64{id},
}
_, err = d.Request(ApiRemove, http.MethodPost, func(r *resty.Request) {
r.SetBody(reqBody)
}, &result)
if err != nil {
return err
}
if result.Code != 0 {
return fmt.Errorf("remove failed: %s", result.Message)
}
return nil
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return fmt.Errorf("invalid parent ID: %w", err)
}
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
up = model.UpdateProgressWithRange(up, 50, 100)
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
}
}
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return err
}
if createResp.Data.Reuse {
return nil
}
return d.Upload(ctx, file, parentFileId, createResp, up)
}
func (d *Open123) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
return nil, errs.NotSupport
}
func (d *Open123) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
func (d *Open123) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
return nil, errs.NotSupport
}
func (d *Open123) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
//func (d *Open123) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Open123)(nil)

drivers/123_open/meta.go Normal file (33 lines)

@@ -0,0 +1,33 @@
package _123Open
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootID
ClientID string `json:"client_id" required:"true" label:"Client ID"`
ClientSecret string `json:"client_secret" required:"true" label:"Client Secret"`
}
var config = driver.Config{
Name: "123 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Open123{}
})
}
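The init above registers the driver factory with op; enabling it only takes a side-effect import, which this PR adds to drivers/all.go (shown further below). An illustrative sketch:

// Sketch: a blank import is enough to trigger the init() registration;
// no symbol from the package is referenced directly.
package main

import (
	_ "github.com/alist-org/alist/v3/drivers/123_open"
)

func main() {} // the driver registry is populated as a side effect of the import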

drivers/123_open/token.go Normal file (85 lines)

@@ -0,0 +1,85 @@
package _123Open
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
)
const tokenURL = ApiBaseURL + ApiToken
type tokenManager struct {
clientID string
clientSecret string
mu sync.Mutex
accessToken string
expireTime time.Time
}
func newTokenManager(clientID, clientSecret string) *tokenManager {
return &tokenManager{
clientID: clientID,
clientSecret: clientSecret,
}
}
func (tm *tokenManager) getToken() (string, error) {
tm.mu.Lock()
defer tm.mu.Unlock()
if tm.accessToken != "" && time.Now().Before(tm.expireTime.Add(-5*time.Minute)) {
return tm.accessToken, nil
}
reqBody := map[string]string{
"clientID": tm.clientID,
"clientSecret": tm.clientSecret,
}
body, _ := json.Marshal(reqBody)
req, err := http.NewRequest("POST", tokenURL, bytes.NewBuffer(body))
if err != nil {
return "", err
}
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
var result TokenResp
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", err
}
if result.Code != 0 {
return "", fmt.Errorf("get token failed: %s", result.Message)
}
tm.accessToken = result.Data.AccessToken
expireAt, err := time.Parse(time.RFC3339, result.Data.ExpiredAt)
if err != nil {
return "", fmt.Errorf("parse expire time failed: %w", err)
}
tm.expireTime = expireAt
return tm.accessToken, nil
}
func (tm *tokenManager) buildHeaders() (http.Header, error) {
token, err := tm.getToken()
if err != nil {
return nil, err
}
header := http.Header{}
header.Set("Authorization", "Bearer "+token)
header.Set("Platform", "open_platform")
header.Set("Content-Type", "application/json")
return header, nil
}
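getToken holds the mutex for the whole refresh and renews the token five minutes before expiry, so concurrent callers trigger at most one HTTP refresh. A minimal usage sketch (not part of the commit; credentials are placeholders):

package _123Open

import (
	"log"
	"sync"
)

// demoTokenReuse shows concurrent getToken calls sharing one cached token.
func demoTokenReuse() {
	tm := newTokenManager("client-id", "client-secret") // placeholder credentials
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := tm.getToken(); err != nil {
				log.Println(err)
			}
		}()
	}
	wg.Wait()
}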

drivers/123_open/types.go Normal file (70 lines)

@@ -0,0 +1,70 @@
package _123Open
import (
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
type File struct {
FileName string `json:"filename"`
Size int64 `json:"size"`
CreateAt string `json:"createAt"`
UpdateAt string `json:"updateAt"`
FileId int64 `json:"fileId"`
Type int `json:"type"`
Etag string `json:"etag"`
S3KeyFlag string `json:"s3KeyFlag"`
ParentFileId int `json:"parentFileId"`
Category int `json:"category"`
Status int `json:"status"`
Trashed int `json:"trashed"`
}
func (f File) GetID() string {
return fmt.Sprint(f.FileId)
}
func (f File) GetName() string {
return f.FileName
}
func (f File) GetSize() int64 {
return f.Size
}
func (f File) IsDir() bool {
return f.Type == 1
}
func (f File) GetModified() string {
return f.UpdateAt
}
func (f File) GetThumb() string {
return ""
}
func (f File) ModTime() time.Time {
t, err := time.Parse("2006-01-02 15:04:05", f.UpdateAt)
if err != nil {
return time.Time{}
}
return t
}
func (f File) CreateTime() time.Time {
t, err := time.Parse("2006-01-02 15:04:05", f.CreateAt)
if err != nil {
return time.Time{}
}
return t
}
func (f File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, f.Etag)
}
func (f File) GetPath() string {
return ""
}
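File satisfies alist's model.Obj interface through the value-receiver methods above; a compile-time assertion (a sketch, not in the commit) would document that and catch regressions:

// Sketch (not in the commit): fails to compile if File stops implementing model.Obj.
var _ model.Obj = File{}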

drivers/123_open/upload.go Normal file (282 lines)

@@ -0,0 +1,282 @@
package _123Open
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"golang.org/x/sync/errgroup"
"io"
"mime/multipart"
"net/http"
"runtime"
"strconv"
"time"
)
func (d *Open123) create(parentFileID int64, filename, etag string, size int64, duplicate int, containDir bool) (*UploadCreateResp, error) {
var resp UploadCreateResp
_, err := d.Request(ApiCreateUploadURL, http.MethodPost, func(req *resty.Request) {
body := base.Json{
"parentFileID": parentFileID,
"filename": filename,
"etag": etag,
"size": size,
}
if duplicate > 0 {
body["duplicate"] = duplicate
}
if containDir {
body["containDir"] = true
}
req.SetBody(body)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) GetUploadDomains() ([]string, error) {
var resp struct {
Code int `json:"code"`
Message string `json:"message"`
Data []string `json:"data"`
}
_, err := d.Request(ApiUploadDomainURL, http.MethodGet, nil, &resp)
if err != nil {
return nil, err
}
if resp.Code != 0 {
return nil, fmt.Errorf("get upload domain failed: %s", resp.Message)
}
return resp.Data, nil
}
func (d *Open123) UploadSingle(ctx context.Context, createResp *UploadCreateResp, file model.FileStreamer, parentID int64) error {
domain := createResp.Data.Servers[0]
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
// keep the computed hash; discarding it would leave etag empty in the form fields
var err error
_, etag, err = stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return err
}
}
reader, err := file.RangeRead(http_range.Range{Start: 0, Length: file.GetSize()})
if err != nil {
return err
}
reader = driver.NewLimitedUploadStream(ctx, reader)
var b bytes.Buffer
mw := multipart.NewWriter(&b)
mw.WriteField("parentFileID", fmt.Sprint(parentID))
mw.WriteField("filename", file.GetName())
mw.WriteField("etag", etag)
mw.WriteField("size", fmt.Sprint(file.GetSize()))
fw, err := mw.CreateFormFile("file", file.GetName())
if err != nil {
return err
}
if _, err = io.Copy(fw, reader); err != nil {
return err
}
mw.Close()
req, err := http.NewRequestWithContext(ctx, "POST", domain+ApiSingleUploadURL, &b)
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", mw.FormDataContentType())
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
var result struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
FileID int64 `json:"fileID"`
Completed bool `json:"completed"`
} `json:"data"`
}
body, _ := io.ReadAll(resp.Body)
if err := json.Unmarshal(body, &result); err != nil {
return fmt.Errorf("unmarshal response error: %v, body: %s", err, string(body))
}
if result.Code != 0 {
return fmt.Errorf("upload failed: %s", result.Message)
}
if !result.Data.Completed || result.Data.FileID == 0 {
return fmt.Errorf("upload incomplete or missing fileID")
}
return nil
}
func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, parentID int64, createResp *UploadCreateResp, up driver.UpdateProgress) error {
if cacher, ok := file.(interface{ CacheFullInTempFile() (model.File, error) }); ok {
if _, err := cacher.CacheFullInTempFile(); err != nil {
return err
}
}
size := file.GetSize()
chunkSize := createResp.Data.SliceSize
uploadNums := (size + chunkSize - 1) / chunkSize
uploadDomain := createResp.Data.Servers[0]
if d.UploadThread <= 0 {
cpuCores := runtime.NumCPU()
threads := cpuCores * 2
if threads < 4 {
threads = 4
}
if threads > 16 {
threads = 16
}
d.UploadThread = threads
fmt.Printf("[Upload] Auto set upload concurrency: %d (CPU cores=%d)\n", d.UploadThread, cpuCores)
}
fmt.Printf("[Upload] File size: %d bytes, chunk size: %d bytes, total slices: %d, concurrency: %d\n",
size, chunkSize, uploadNums, d.UploadThread)
// check rapid-upload reuse before choosing an upload path
if createResp.Data.Reuse {
up(100)
return nil
}
if size <= 1<<30 {
return d.UploadSingle(ctx, createResp, file, parentID)
}
client := resty.New()
semaphore := make(chan struct{}, d.UploadThread)
threadG, _ := errgroup.WithContext(ctx)
var progressArr = make([]int64, uploadNums)
for partIndex := int64(0); partIndex < uploadNums; partIndex++ {
partIndex := partIndex
semaphore <- struct{}{}
threadG.Go(func() error {
defer func() { <-semaphore }()
offset := partIndex * chunkSize
length := min(chunkSize, size-offset)
partNumber := partIndex + 1
fmt.Printf("[Slice %d] Starting read from offset %d, length %d\n", partNumber, offset, length)
reader, err := file.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return fmt.Errorf("[Slice %d] RangeRead error: %v", partNumber, err)
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err != nil && err != io.EOF {
return fmt.Errorf("[Slice %d] Read error: %v", partNumber, err)
}
buf = buf[:n]
hash := md5.Sum(buf)
sliceMD5Str := hex.EncodeToString(hash[:])
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
writer.WriteField("preuploadID", createResp.Data.PreuploadID)
writer.WriteField("sliceNo", strconv.FormatInt(partNumber, 10))
writer.WriteField("sliceMD5", sliceMD5Str)
partName := fmt.Sprintf("%s.part%d", file.GetName(), partNumber)
fw, _ := writer.CreateFormFile("slice", partName)
fw.Write(buf)
writer.Close()
resp, err := client.R().
SetHeader("Authorization", "Bearer "+d.tm.accessToken).
SetHeader("Platform", "open_platform").
SetHeader("Content-Type", writer.FormDataContentType()).
SetBody(body.Bytes()).
Post(uploadDomain + ApiUploadSliceURL)
if err != nil {
return fmt.Errorf("[Slice %d] Upload HTTP error: %v", partNumber, err)
}
if resp.StatusCode() != 200 {
return fmt.Errorf("[Slice %d] Upload failed with status: %s, resp: %s", partNumber, resp.Status(), resp.String())
}
progressArr[partIndex] = length
var totalUploaded int64 = 0
for _, v := range progressArr {
totalUploaded += v
}
if up != nil {
percent := float64(totalUploaded) / float64(size) * 100
up(percent)
}
fmt.Printf("[Slice %d] MD5: %s\n", partNumber, sliceMD5Str)
fmt.Printf("[Slice %d] Upload finished\n", partNumber)
return nil
})
}
if err := threadG.Wait(); err != nil {
return err
}
var completeResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
Completed bool `json:"completed"`
FileID int64 `json:"fileID"`
} `json:"data"`
}
for {
reqBody := fmt.Sprintf(`{"preuploadID":"%s"}`, createResp.Data.PreuploadID)
req, err := http.NewRequestWithContext(ctx, "POST", uploadDomain+ApiUploadCompleteURL, bytes.NewBufferString(reqBody))
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+d.tm.accessToken)
req.Header.Set("Platform", "open_platform")
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
if err := json.Unmarshal(body, &completeResp); err != nil {
return fmt.Errorf("completion response unmarshal error: %v, body: %s", err, string(body))
}
if completeResp.Code != 0 {
return fmt.Errorf("completion API returned error code %d: %s", completeResp.Code, completeResp.Message)
}
if completeResp.Data.Completed && completeResp.Data.FileID != 0 {
fmt.Printf("[Upload] Upload completed successfully. FileID: %d\n", completeResp.Data.FileID)
break
}
time.Sleep(time.Second)
}
up(100)
return nil
}
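The completion loop above polls uploadComplete once per second with no upper bound; a bounded variant (a sketch, not part of the commit, assuming the request is wrapped in a closure) could look like:

// pollComplete retries fn until it reports done, with a capped doubling delay.
// fn is a hypothetical closure issuing the uploadComplete request above.
func pollComplete(ctx context.Context, maxAttempts int, fn func() (bool, error)) error {
	delay := time.Second
	for attempt := 0; attempt < maxAttempts; attempt++ {
		done, err := fn()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
		if delay < 10*time.Second {
			delay *= 2
		}
	}
	return fmt.Errorf("upload not confirmed after %d attempts", maxAttempts)
}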

drivers/123_open/util.go Normal file (20 lines)

@@ -0,0 +1,20 @@
package _123Open
import (
"fmt"
"net/http"
)
func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*FileListResp, error) {
var result FileListResp
url := fmt.Sprintf("%s?parentFileId=%d&limit=%d&lastFileId=%d", ApiFileList, parentFileId, limit, lastFileId)
_, err := d.Request(url, http.MethodGet, nil, &result)
if err != nil {
return nil, err
}
if result.Code != 0 {
return nil, fmt.Errorf("list error: %s", result.Message)
}
return &result, nil
}


@@ -2,19 +2,19 @@ package _139
import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/pkg/utils/random"
@@ -24,9 +24,10 @@ import (
type Yun139 struct {
model.Storage
Addition
cron *cron.Cron
Account string
ref *Yun139
PersonalCloudHost string
}
func (d *Yun139) Config() driver.Config {
@@ -39,13 +40,36 @@ func (d *Yun139) GetAddition() driver.Additional {
func (d *Yun139) Init(ctx context.Context) error {
if d.ref == nil {
if d.Authorization == "" {
if len(d.Authorization) == 0 {
return fmt.Errorf("authorization is empty")
}
err := d.refreshToken()
if err != nil {
return err
}
// Query Route Policy
var resp QueryRoutePolicyResp
_, err = d.requestRoute(base.Json{
"userInfo": base.Json{
"userType": 1,
"accountType": 1,
"accountName": d.Account},
"modAddrType": 1,
}, &resp)
if err != nil {
return err
}
for _, policyItem := range resp.Data.RoutePolicyList {
if policyItem.ModName == "personal" {
d.PersonalCloudHost = policyItem.HttpsUrl
break
}
}
if len(d.PersonalCloudHost) == 0 {
return fmt.Errorf("PersonalCloudHost is empty")
}
d.cron = cron.NewCron(time.Hour * 12)
d.cron.Do(func() {
err := d.refreshToken()
@@ -71,28 +95,7 @@ func (d *Yun139) Init(ctx context.Context) error {
default:
return errs.NotImplement
}
if d.ref != nil {
return nil
}
return nil
}
func (d *Yun139) InitReference(storage driver.Driver) error {
@@ -159,7 +162,7 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
"type": "folder",
"fileRenameMode": "force_rename",
}
pathname := "/hcy/file/create"
pathname := "/file/create"
_, err = d.personalPost(pathname, data, nil)
case MetaPersonal:
data := base.Json{
@@ -212,7 +215,7 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
"fileIds": []string{srcObj.GetID()},
"toParentFileId": dstDir.GetID(),
}
pathname := "/hcy/file/batchMove"
pathname := "/file/batchMove"
_, err := d.personalPost(pathname, data, nil)
if err != nil {
return nil, err
@@ -289,7 +292,7 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e
"name": newName,
"description": "",
}
pathname := "/hcy/file/update"
pathname := "/file/update"
_, err = d.personalPost(pathname, data, nil)
case MetaPersonal:
var data base.Json
@@ -389,7 +392,7 @@ func (d *Yun139) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
"fileIds": []string{srcObj.GetID()},
"toParentFileId": dstDir.GetID(),
}
pathname := "/hcy/file/batchCopy"
pathname := "/file/batchCopy"
_, err := d.personalPost(pathname, data, nil)
return err
case MetaPersonal:
@@ -429,7 +432,7 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
data := base.Json{
"fileIds": []string{obj.GetID()},
}
pathname := "/hcy/recyclebin/batchTrash"
pathname := "/recyclebin/batchTrash"
_, err := d.personalPost(pathname, data, nil)
return err
case MetaGroup:
@@ -502,23 +505,15 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
}
}
func (d *Yun139) getPartSize(size int64) int64 {
if d.CustomUploadPartSize != 0 {
return d.CustomUploadPartSize
}
// the cloud drive caps the number of parts per upload
if size/utils.GB > 30 {
return 512 * utils.MB
}
return 100 * utils.MB
}
func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
@@ -526,29 +521,28 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
case MetaPersonalNew:
var err error
fullHash := stream.GetHash().GetHash(utils.SHA256)
if len(fullHash) != utils.SHA256.Width {
_, fullHash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA256)
if err != nil {
return err
}
}
size := stream.GetSize()
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
}
partInfos := make([]PartInfo, 0, part)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i * partSize
byteSize := size - start
if byteSize > partSize {
byteSize = partSize
}
@@ -576,13 +570,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
"contentType": "application/octet-stream",
"parallelUpload": false,
"partInfos": firstPartInfos,
"size": stream.GetSize(),
"size": size,
"parentFileId": dstDir.GetID(),
"name": stream.GetName(),
"type": "file",
"fileRenameMode": "auto_rename",
}
pathname := "/hcy/file/create"
pathname := "/file/create"
var resp PersonalUploadResp
_, err = d.personalPost(pathname, data, &resp)
if err != nil {
@@ -619,7 +613,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
"accountType": 1,
},
}
pathname := "/hcy/file/getUploadUrl"
pathname := "/file/getUploadUrl"
var moreresp PersonalUploadUrlResp
_, err = d.personalPost(pathname, moredata, &moreresp)
if err != nil {
@@ -629,7 +623,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
// Progress
p := driver.NewProgress(size, up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// upload all parts
@@ -670,7 +664,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
"fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId,
}
_, err = d.personalPost("/hcy/file/complete", data, nil)
_, err = d.personalPost("/file/complete", data, nil)
if err != nil {
return err
}
@@ -740,14 +734,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
break
}
}
var reportSize int64
if d.ReportRealSize {
reportSize = stream.GetSize()
} else {
reportSize = 0
}
data := base.Json{
"manualRename": 2,
"operation": 0,
"fileCount": 1,
"totalSize": 0, // 去除上传大小限制
"totalSize": reportSize,
"uploadContentList": []base.Json{{
"contentName": stream.GetName(),
"contentSize": 0, // 去除上传大小限制
"contentSize": reportSize,
// "digest": "5a3231986ce7a6b46e408612d385bafa"
}},
"parentCatalogID": dstDir.GetID(),
@@ -765,10 +765,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
"operation": 0,
"path": path.Join(dstDir.GetPath(), dstDir.GetID()),
"seqNo": random.String(32), //序列号不能为空
"totalSize": 0,
"totalSize": reportSize,
"uploadContentList": []base.Json{{
"contentName": stream.GetName(),
"contentSize": 0,
"contentSize": reportSize,
// "digest": "5a3231986ce7a6b46e408612d385bafa"
}},
})
@@ -779,13 +779,18 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
if resp.Data.Result.ResultCode != "0" {
return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc)
}
size := stream.GetSize()
// Progress
p := driver.NewProgress(size, up)
var partSize = d.getPartSize(size)
part := size / partSize
if size%partSize > 0 {
part++
} else if part == 0 {
part = 1
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
@@ -795,10 +800,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
start := i * partSize
byteSize := min(size-start, partSize)
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
@@ -810,7 +812,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "text/plain;name="+unicode(stream.GetName()))
req.Header.Set("contentSize", strconv.FormatInt(stream.GetSize(), 10))
req.Header.Set("contentSize", strconv.FormatInt(size, 10))
req.Header.Set("range", fmt.Sprintf("bytes=%d-%d", start, start+byteSize-1))
req.Header.Set("uploadtaskID", resp.Data.UploadResult.UploadTaskID)
req.Header.Set("rangeType", "0")
@@ -820,13 +822,23 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
if res.StatusCode != http.StatusOK {
res.Body.Close()
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
bodyBytes, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("error reading response body: %v", err)
}
var result InterLayerUploadResult
err = xml.Unmarshal(bodyBytes, &result)
if err != nil {
return fmt.Errorf("error parsing XML: %v", err)
}
if result.ResultCode != 0 {
return fmt.Errorf("upload failed with result code: %d, message: %s", result.ResultCode, result.Msg)
}
}
return nil
default:
return errs.NotImplement
@@ -844,7 +856,7 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{},
}
switch args.Method {
case "video_preview":
uri = "/hcy/videoPreview/getPreviewInfo"
uri = "/videoPreview/getPreviewInfo"
default:
return nil, errs.NotSupport
}


@@ -12,6 +12,8 @@ type Addition struct {
Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
CloudID string `json:"cloud_id"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
UseLargeThumbnail bool `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`
}
var config = driver.Config{


@@ -143,6 +143,13 @@ type UploadResp struct {
} `json:"data"`
}
type InterLayerUploadResult struct {
XMLName xml.Name `xml:"result"`
Text string `xml:",chardata"`
ResultCode int `xml:"resultCode"`
Msg string `xml:"msg"`
}
type CloudContent struct {
ContentID string `json:"contentID"`
//Modifier string `json:"modifier"`
@@ -278,11 +285,30 @@ type PersonalUploadUrlResp struct {
}
}
type QueryRoutePolicyResp struct {
Success bool `json:"success"`
Code string `json:"code"`
Message string `json:"message"`
Data struct {
RoutePolicyList []struct {
SiteID string `json:"siteID"`
SiteCode string `json:"siteCode"`
ModName string `json:"modName"`
HttpUrl string `json:"httpUrl"`
HttpsUrl string `json:"httpsUrl"`
EnvID string `json:"envID"`
ExtInfo string `json:"extInfo"`
HashName string `json:"hashName"`
ModAddrType int `json:"modAddrType"`
} `json:"routePolicyList"`
} `json:"data"`
}
type RefreshTokenResp struct {
XMLName xml.Name `xml:"root"`
Return string `xml:"return"`
Token string `xml:"token"`
Expiretime int32 `xml:"expiretime"`
AccessToken string `xml:"accessToken"`
Desc string `xml:"desc"`
}


@@ -67,6 +67,7 @@ func (d *Yun139) refreshToken() error {
if len(splits) < 3 {
return fmt.Errorf("authorization is invalid, splits < 3")
}
d.Account = splits[1]
strs := strings.Split(splits[2], "|")
if len(strs) < 4 {
return fmt.Errorf("authorization is invalid, strs < 4")
@@ -156,6 +157,64 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba
}
return res.Body(), nil
}
func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error) {
url := "https://user-njs.yun.139.com/user/route/qryRoutePolicy"
req := base.RestyClient.R()
randStr := random.String(16)
ts := time.Now().Format("2006-01-02 15:04:05")
req.SetBody(data)
body, err := utils.Json.Marshal(req.Body)
if err != nil {
return nil, err
}
sign := calSign(string(body), ts, randStr)
svcType := "1"
if d.isFamily() {
svcType = "2"
}
req.SetHeaders(map[string]string{
"Accept": "application/json, text/plain, */*",
"CMS-DEVICE": "default",
"Authorization": "Basic " + d.getAuthorization(),
"mcloud-channel": "1000101",
"mcloud-client": "10701",
//"mcloud-route": "001",
"mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
//"mcloud-skey":"",
"mcloud-version": "7.14.0",
"Origin": "https://yun.139.com",
"Referer": "https://yun.139.com/w/",
"x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
"x-huawei-channelSrc": "10000034",
"x-inner-ntwk": "2",
"x-m4c-caller": "PC",
"x-m4c-src": "10002",
"x-SvcType": svcType,
"Inner-Hcy-Router-Https": "1",
})
var e BaseResp
req.SetResult(&e)
res, err := req.Execute(http.MethodPost, url)
if err != nil {
return nil, err
}
log.Debugln(res.String())
if !e.Success {
return nil, errors.New(e.Message)
}
if resp != nil {
err = utils.Json.Unmarshal(res.Body(), resp)
if err != nil {
return nil, err
}
}
return res.Body(), nil
}
func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
return d.request(pathname, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
@@ -390,7 +449,7 @@ func unicode(str string) string {
}
func (d *Yun139) personalRequest(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
url := "https://personal-kd-njs.yun.139.com" + pathname
url := d.getPersonalCloudHost() + pathname
req := base.RestyClient.R()
randStr := random.String(16)
ts := time.Now().Format("2006-01-02 15:04:05")
@@ -416,8 +475,6 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
"Mcloud-Route": "001",
"Mcloud-Sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
"Mcloud-Version": "7.14.0",
"Origin": "https://yun.139.com",
"Referer": "https://yun.139.com/w/",
"x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
"x-huawei-channelSrc": "10000034",
"x-inner-ntwk": "2",
@@ -479,7 +536,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
"parentFileId": fileId,
}
var resp PersonalListResp
_, err := d.personalPost("/hcy/file/list", data, &resp)
_, err := d.personalPost("/file/list", data, &resp)
if err != nil {
return nil, err
}
@@ -499,7 +556,15 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
} else {
var Thumbnails = item.Thumbnails
var ThumbnailUrl string
if d.UseLargeThumbnail {
for _, thumb := range Thumbnails {
if strings.Contains(thumb.Style, "Large") {
ThumbnailUrl = thumb.Url
break
}
}
}
if ThumbnailUrl == "" && len(Thumbnails) > 0 {
ThumbnailUrl = Thumbnails[len(Thumbnails)-1].Url
}
f = &model.ObjThumb{
@@ -527,7 +592,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
data := base.Json{
"fileId": fileId,
}
res, err := d.personalPost("/hcy/file/getDownloadUrl",
res, err := d.personalPost("/file/getDownloadUrl",
data, nil)
if err != nil {
return "", err
@@ -552,3 +617,9 @@ func (d *Yun139) getAccount() string {
}
return d.Account
}
func (d *Yun139) getPersonalCloudHost() string {
if d.ref != nil {
return d.ref.getPersonalCloudHost()
}
return d.PersonalCloudHost
}


@@ -3,16 +3,15 @@ package _189pc
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"math"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"regexp"
"sort"
"strconv"
@@ -28,6 +27,7 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
@@ -324,7 +324,7 @@ func (y *Cloud189PC) login() (err error) {
_, err = y.client.R().
SetResult(&tokenInfo).SetError(&erron).
SetQueryParams(clientSuffix()).
SetQueryParam("redirectURL", url.QueryEscape(loginresp.ToUrl)).
SetQueryParam("redirectURL", loginresp.ToUrl).
Post(API_URL + "/getSessionForPC.action")
if err != nil {
return
@@ -473,12 +473,8 @@ func (y *Cloud189PC) refreshSession() (err error) {
// regular upload
// zero-size files cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
size := file.GetSize()
sliceSize := partSize(size)
params := Params{
"parentFolderId": dstDir.GetID(),
@@ -512,22 +508,29 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
count := int(size / sliceSize)
lastPartSize := size % sliceSize
if lastPartSize > 0 {
count++
} else {
lastPartSize = sliceSize
}
fileMd5 := utils.MD5.NewFunc()
silceMd5 := utils.MD5.NewFunc()
silceMd5Hexs := make([]string, 0, count)
teeReader := io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5))
byteSize := sliceSize
for i := 1; i <= count; i++ {
if utils.IsCanceled(upCtx) {
break
}
if i == count {
byteSize = lastPartSize
}
byteData := make([]byte, byteSize)
// read the chunk
silceMd5.Reset()
if _, err := io.ReadFull(teeReader, byteData); err != io.EOF && err != nil {
sem.Release(1)
return nil, err
}
@@ -607,24 +610,43 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// rapid upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
var (
cache = file.GetFile()
tmpF *os.File
err error
)
size := file.GetSize()
if _, ok := cache.(io.ReaderAt); !ok && size > 0 {
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
if err != nil {
return nil, err
}
defer func() {
_ = tmpF.Close()
_ = os.Remove(tmpF.Name())
}()
cache = tmpF
}
sliceSize := partSize(size)
count := int(size / sliceSize)
lastSliceSize := size % sliceSize
if lastSliceSize > 0 {
count++
} else {
lastSliceSize = sliceSize
}
// step.1 compute the required hashes up front
byteSize := sliceSize
fileMd5 := utils.MD5.NewFunc()
sliceMd5 := utils.MD5.NewFunc()
sliceMd5Hexs := make([]string, 0, count)
partInfos := make([]string, 0, count)
writers := []io.Writer{fileMd5, sliceMd5}
if tmpF != nil {
writers = append(writers, tmpF)
}
written := int64(0)
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@@ -634,19 +656,31 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
byteSize = lastSliceSize
}
n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), file, byteSize)
written += n
if err != nil && err != io.EOF {
return nil, err
}
md5Byte := sliceMd5.Sum(nil)
sliceMd5Hexs = append(sliceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
sliceMd5.Reset()
}
if tmpF != nil {
if size > 0 && written != size {
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, size)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
}
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
sliceMd5Hex := fileMd5Hex
if size > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(sliceMd5Hexs, "\n")))
}
fullUrl := UPLOAD_URL
@@ -712,7 +746,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
// step.4 upload the slices
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(cache, offset, byteSize), isFamily)
if err != nil {
return err
}
@@ -794,11 +828,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
// legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, fileMd5, err := stream.CacheFullInTempFileAndHash(file, utils.MD5)
if err != nil {
return nil, err
}
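The FastUpload rewrite above hashes the stream while (when the source is not seekable) spilling it to a temp file, all in a single pass; the core fan-out pattern is just io.MultiWriter. A standalone sketch (not the commit's code):

package _189pc

import (
	"crypto/md5"
	"encoding/hex"
	"io"
	"strings"
)

// hashWhileCaching streams src once, feeding the MD5 hash and an optional
// spill writer (e.g. a temp file kept for later ranged re-reads).
func hashWhileCaching(src io.Reader, spill io.Writer) (string, error) {
	h := md5.New()
	writers := []io.Writer{h}
	if spill != nil {
		writers = append(writers, spill)
	}
	if _, err := io.Copy(io.MultiWriter(writers...), src); err != nil {
		return "", err
	}
	return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}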


@@ -56,7 +56,7 @@ func (d *AListV3) Init(ctx context.Context) error {
if err != nil {
return err
}
if utils.SliceContains(resp.Data.Role, model.GUEST) {
u := d.Address + "/api/public/settings"
res, err := base.RestyClient.R().Get(u)
if err != nil {


@@ -1,6 +1,7 @@
package alist_v3
import (
"encoding/json"
"time"
"github.com/alist-org/alist/v3/internal/model"
@@ -72,15 +73,15 @@ type LoginResp struct {
}
type MeResp struct {
Id int `json:"id"`
Username string `json:"username"`
Password string `json:"password"`
BasePath string `json:"base_path"`
Role IntSlice `json:"role"`
Disabled bool `json:"disabled"`
Permission int `json:"permission"`
SsoId string `json:"sso_id"`
Otp bool `json:"otp"`
}
type ArchiveMetaReq struct {
@@ -168,3 +169,17 @@ type DecompressReq struct {
PutIntoNewDir bool `json:"put_into_new_dir"`
SrcDir string `json:"src_dir"`
}
type IntSlice []int
func (s *IntSlice) UnmarshalJSON(data []byte) error {
if len(data) > 0 && data[0] == '[' {
return json.Unmarshal(data, (*[]int)(s))
}
var single int
if err := json.Unmarshal(data, &single); err != nil {
return err
}
*s = []int{single}
return nil
}
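IntSlice accepts both the legacy scalar role and the new array form; a test-style sketch (not part of the commit) of the two accepted payloads:

package alist_v3

import (
	"encoding/json"
	"testing"
)

// TestIntSliceUnmarshal checks that a bare int and an int array both decode.
func TestIntSliceUnmarshal(t *testing.T) {
	var legacy, current MeResp
	if err := json.Unmarshal([]byte(`{"role": 2}`), &legacy); err != nil {
		t.Fatal(err)
	}
	if err := json.Unmarshal([]byte(`{"role": [1, 2]}`), &current); err != nil {
		t.Fatal(err)
	}
	if len(legacy.Role) != 1 || legacy.Role[0] != 2 {
		t.Fatalf("single int not normalized: %v", legacy.Role)
	}
	if len(current.Role) != 2 {
		t.Fatalf("array not decoded: %v", current.Role)
	}
}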


@@ -55,7 +55,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
if err != nil {
return err
}
d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
d.DriveId = d.Addition.DeviceID
d.UserID = utils.Json.Get(res, "user_id").ToString()
d.cron = cron.NewCron(time.Hour * 2)
d.cron.Do(func() {


@@ -7,8 +7,8 @@ import (
type Addition struct {
driver.RootID
RefreshToken string `json:"refresh_token" required:"true"`
DeviceID string `json:"device_id" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
RapidUpload bool `json:"rapid_upload"`


@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"net/http"
"path/filepath"
"time"
"github.com/Xhofe/rateg"
@@ -14,6 +15,7 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
type AliyundriveOpen struct {
@@ -72,6 +74,18 @@ func (d *AliyundriveOpen) Drop(ctx context.Context) error {
return nil
}
// GetRoot implements the driver.GetRooter interface to properly set up the root object
func (d *AliyundriveOpen) GetRoot(ctx context.Context) (model.Obj, error) {
return &model.Object{
ID: d.RootFolderID,
Path: "/",
Name: "root",
Size: 0,
Modified: d.Modified,
IsFolder: true,
}, nil
}
func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.limitList == nil {
return nil, fmt.Errorf("driver not init")
@@ -80,9 +94,17 @@ func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.Li
if err != nil {
return nil, err
}
objs, err := utils.SliceConvert(files, func(src File) (model.Obj, error) {
obj := fileToObj(src)
// Set the correct path for the object
if dir.GetPath() != "" {
obj.Path = filepath.Join(dir.GetPath(), obj.GetName())
}
return obj, nil
})
return objs, err
}
func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link, error) {
@@ -132,7 +154,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
if err != nil {
return nil, err
}
obj := fileToObj(newDir)
// Set the correct Path for the returned directory object
if parentDir.GetPath() != "" {
obj.Path = filepath.Join(parentDir.GetPath(), dirName)
} else {
obj.Path = "/" + dirName
}
return obj, nil
}
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
@@ -142,20 +173,24 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (m
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
"to_parent_file_id": dstDir.GetID(),
"check_name_mode": "refuse", // optional:ignore,auto_rename,refuse
"check_name_mode": "ignore", // optional:ignore,auto_rename,refuse
//"new_name": "newName", // The new name to use when a file of the same name exists
}).SetResult(&resp)
})
if err != nil {
return nil, err
}
if srcObj, ok := srcObj.(*model.ObjThumb); ok {
srcObj.ID = resp.FileID
srcObj.Modified = time.Now()
srcObj.Path = filepath.Join(dstDir.GetPath(), srcObj.GetName())
// Check for duplicate files in the destination directory
if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), srcObj.GetID()); err != nil {
// Only log a warning instead of returning an error since the move operation has already completed successfully
log.Warnf("Failed to remove duplicate files after move: %v", err)
}
return srcObj, nil
}
return nil, nil
@@ -173,19 +208,47 @@ func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName
if err != nil {
return nil, err
}
// Check for duplicate files in the parent directory
parentPath := filepath.Dir(srcObj.GetPath())
if err := d.removeDuplicateFiles(ctx, parentPath, newName, newFile.FileId); err != nil {
// Only log a warning instead of returning an error since the rename operation has already completed successfully
log.Warnf("Failed to remove duplicate files after rename: %v", err)
}
obj := fileToObj(newFile)
// Set the correct Path for the renamed object
if parentPath != "" && parentPath != "." {
obj.Path = filepath.Join(parentPath, newName)
} else {
obj.Path = "/" + newName
}
return obj, nil
}
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
"to_parent_file_id": dstDir.GetID(),
"auto_rename": true,
})
"auto_rename": false,
}).SetResult(&resp)
})
if err != nil {
return err
}
// Check for duplicate files in the destination directory
if err := d.removeDuplicateFiles(ctx, dstDir.GetPath(), srcObj.GetName(), resp.FileID); err != nil {
// Only log a warning instead of returning an error since the copy operation has already completed successfully
log.Warnf("Failed to remove duplicate files after copy: %v", err)
}
return nil
}
func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
@@ -203,7 +266,18 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
obj, err := d.upload(ctx, dstDir, stream, up)
// Set the correct Path for the returned file object
if obj != nil && obj.GetPath() == "" {
if dstDir.GetPath() != "" {
if objWithPath, ok := obj.(model.SetPath); ok {
objWithPath.SetPath(filepath.Join(dstDir.GetPath(), obj.GetName()))
}
}
}
return obj, err
}
func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@@ -235,3 +309,4 @@ var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
var _ driver.RenameResult = (*AliyundriveOpen)(nil)
var _ driver.PutResult = (*AliyundriveOpen)(nil)
var _ driver.GetRooter = (*AliyundriveOpen)(nil)


@@ -11,7 +11,7 @@ type Addition struct {
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
OauthTokenURL string `json:"oauth_token_url" default:"https://api.alistgo.com/alist/ali_open/token"`
ClientID string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
RemoveWay string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`


@@ -1,7 +1,6 @@
package aliyundrive_open
import (
"bytes"
"context"
"encoding/base64"
"fmt"
@@ -15,6 +14,7 @@ import (
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
@@ -131,16 +131,19 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
return "", err
}
length := proofRange.End - proofRange.Start
reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
if err != nil {
return "", err
}
buf := make([]byte, length)
n, err := io.ReadFull(reader, buf)
if err == io.ErrUnexpectedEOF {
return "", fmt.Errorf("can't read data, expected=%d, got=%d", len(buf), n)
}
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(buf), nil
}
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
@@ -183,25 +186,18 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
if e.Code != "PreHashMatched" || !rapidUpload {
return nil, err
}
log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width {
_, hash, err = streamPkg.CacheFullInTempFileAndHash(stream, utils.SHA1)
if err != nil {
return nil, err
}
}
delete(createData, "pre_hash")


@@ -10,6 +10,7 @@ import (
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
@@ -186,3 +187,36 @@ func (d *AliyundriveOpen) getAccessToken() string {
}
return d.AccessToken
}
// Remove duplicate files with the same name in the given directory path,
// preserving the file with the given skipID if provided
func (d *AliyundriveOpen) removeDuplicateFiles(ctx context.Context, parentPath string, fileName string, skipID string) error {
// Handle empty path (root directory) case
if parentPath == "" {
parentPath = "/"
}
// List all files in the parent directory
files, err := op.List(ctx, d, parentPath, model.ListArgs{})
if err != nil {
return err
}
// Find all files with the same name
var duplicates []model.Obj
for _, file := range files {
if file.GetName() == fileName && file.GetID() != skipID {
duplicates = append(duplicates, file)
}
}
// Remove all duplicate files except the one with the given ID
for _, file := range duplicates {
err := d.Remove(ctx, file)
if err != nil {
return err
}
}
return nil
}


@@ -6,6 +6,7 @@ import (
_ "github.com/alist-org/alist/v3/drivers/115_share"
_ "github.com/alist-org/alist/v3/drivers/123"
_ "github.com/alist-org/alist/v3/drivers/123_link"
_ "github.com/alist-org/alist/v3/drivers/123_open"
_ "github.com/alist-org/alist/v3/drivers/123_share"
_ "github.com/alist-org/alist/v3/drivers/139"
_ "github.com/alist-org/alist/v3/drivers/189"
@@ -16,13 +17,16 @@ import (
_ "github.com/alist-org/alist/v3/drivers/aliyundrive"
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
_ "github.com/alist-org/alist/v3/drivers/azure_blob"
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
_ "github.com/alist-org/alist/v3/drivers/baidu_share"
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
_ "github.com/alist-org/alist/v3/drivers/cloudreve_v4"
_ "github.com/alist-org/alist/v3/drivers/crypt"
_ "github.com/alist-org/alist/v3/drivers/doubao"
_ "github.com/alist-org/alist/v3/drivers/doubao_share"
_ "github.com/alist-org/alist/v3/drivers/dropbox"
_ "github.com/alist-org/alist/v3/drivers/febbox"
_ "github.com/alist-org/alist/v3/drivers/ftp"


@@ -0,0 +1,313 @@
package azure_blob
import (
"context"
"fmt"
"io"
"path"
"regexp"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
)
// Azure Blob Storage based on the blob APIs
// Link: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
type AzureBlob struct {
model.Storage
Addition
client *azblob.Client
containerClient *container.Client
config driver.Config
}
// Config returns the driver configuration.
func (d *AzureBlob) Config() driver.Config {
return d.config
}
// GetAddition returns additional settings specific to Azure Blob Storage.
func (d *AzureBlob) GetAddition() driver.Additional {
return &d.Addition
}
// Init initializes the Azure Blob Storage client using shared key authentication.
func (d *AzureBlob) Init(ctx context.Context) error {
// Validate the endpoint URL
accountName := extractAccountName(d.Addition.Endpoint)
if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) {
return fmt.Errorf("invalid storage account name: must be chars of lowercase letters or numbers only")
}
credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey)
if err != nil {
return fmt.Errorf("failed to create credential: %w", err)
}
// Check if Endpoint is just account name
endpoint := d.Addition.Endpoint
if accountName == endpoint {
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
}
// Initialize Azure Blob client with retry policy
client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential,
&azblob.ClientOptions{ClientOptions: azcore.ClientOptions{
Retry: policy.RetryOptions{
MaxRetries: MaxRetries,
RetryDelay: RetryDelay,
},
}})
if err != nil {
return fmt.Errorf("failed to create client: %w", err)
}
d.client = client
// Ensure container exists or create it
containerName := strings.Trim(d.Addition.ContainerName, "/ \\")
if containerName == "" {
return fmt.Errorf("container name cannot be empty")
}
return d.createContainerIfNotExists(ctx, containerName)
}
// Drop releases resources associated with the Azure Blob client.
func (d *AzureBlob) Drop(ctx context.Context) error {
d.client = nil
return nil
}
// List retrieves blobs and directories under the specified path.
func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
prefix := ensureTrailingSlash(dir.GetPath())
pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
Prefix: &prefix,
})
var objs []model.Obj
for pager.More() {
page, err := pager.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list blobs: %w", err)
}
// Process directories
for _, blobPrefix := range page.Segment.BlobPrefixes {
objs = append(objs, &model.Object{
Name: path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")),
Path: *blobPrefix.Name,
Modified: *blobPrefix.Properties.LastModified,
Ctime: *blobPrefix.Properties.CreationTime,
IsFolder: true,
})
}
// Process files
for _, blob := range page.Segment.BlobItems {
if strings.HasSuffix(*blob.Name, "/") {
continue
}
objs = append(objs, &model.Object{
Name: path.Base(*blob.Name),
Path: *blob.Name,
Size: *blob.Properties.ContentLength,
Modified: *blob.Properties.LastModified,
Ctime: *blob.Properties.CreationTime,
IsFolder: false,
})
}
}
return objs, nil
}
// Link generates a temporary SAS URL for accessing a blob.
func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
blobClient := d.containerClient.NewBlobClient(file.GetPath())
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
if err != nil {
return nil, fmt.Errorf("failed to generate SAS URL: %w", err)
}
return &model.Link{URL: sasURL}, nil
}
// MakeDir creates a virtual directory by uploading an empty blob as a marker.
func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
dirPath := path.Join(parentDir.GetPath(), dirName)
if err := d.mkDir(ctx, dirPath); err != nil {
return nil, fmt.Errorf("failed to create directory marker: %w", err)
}
return &model.Object{
Path: dirPath,
Name: dirName,
IsFolder: true,
}, nil
}
// Move relocates an object (file or directory) to a new directory.
func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
srcPath := srcObj.GetPath()
dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
return nil, fmt.Errorf("move operation failed: %w", err)
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Modified: time.Now(),
IsFolder: srcObj.IsDir(),
Size: srcObj.GetSize(),
}, nil
}
// Rename changes the name of an existing object.
func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
srcPath := srcObj.GetPath()
dstPath := path.Join(path.Dir(srcPath), newName)
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
return nil, fmt.Errorf("rename operation failed: %w", err)
}
return &model.Object{
Path: dstPath,
Name: newName,
Modified: time.Now(),
IsFolder: srcObj.IsDir(),
Size: srcObj.GetSize(),
}, nil
}
// Copy duplicates an object (file or directory) to a specified destination directory.
func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
// Handle directory copying using flat listing
if srcObj.IsDir() {
srcPrefix := srcObj.GetPath()
srcPrefix = ensureTrailingSlash(srcPrefix)
// Get all blobs under the source directory
blobs, err := d.flattenListBlobs(ctx, srcPrefix)
if err != nil {
return nil, fmt.Errorf("failed to list source directory contents: %w", err)
}
// Process each blob - copy to destination
for _, blob := range blobs {
// Skip the directory marker itself
if *blob.Name == srcPrefix {
continue
}
// Calculate relative path from source
relPath := strings.TrimPrefix(*blob.Name, srcPrefix)
itemDstPath := path.Join(dstPath, relPath)
if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") {
// Create directory marker at destination
err := d.mkDir(ctx, itemDstPath)
if err != nil {
return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
}
} else {
// Copy the blob
if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil {
return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err)
}
}
}
// Create directory marker at destination if needed
if len(blobs) == 0 {
err := d.mkDir(ctx, dstPath)
if err != nil {
return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
}
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Modified: time.Now(),
IsFolder: true,
}, nil
}
// Copy a single file
if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil {
return nil, fmt.Errorf("failed to copy blob: %w", err)
}
return &model.Object{
Path: dstPath,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: time.Now(),
IsFolder: false,
}, nil
}
// Remove deletes a specified blob or recursively deletes a directory and its contents.
func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error {
path := obj.GetPath()
// Handle recursive directory deletion
if obj.IsDir() {
return d.deleteFolder(ctx, path)
}
// Delete single file
return d.deleteFile(ctx, path, false)
}
// Put uploads a file stream to Azure Blob Storage with progress tracking.
func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
blobPath := path.Join(dstDir.GetPath(), stream.GetName())
blobClient := d.containerClient.NewBlockBlobClient(blobPath)
// Determine optimal upload options based on file size
options := optimizedUploadOptions(stream.GetSize())
// Track upload progress
progressTracker := &progressTracker{
total: stream.GetSize(),
updateProgress: up,
}
// Wrap stream to handle context cancellation and progress tracking
limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker))
// Upload the stream to Azure Blob Storage
_, err := blobClient.UploadStream(ctx, limitedStream, options)
if err != nil {
return nil, fmt.Errorf("failed to upload file: %w", err)
}
return &model.Object{
Path: blobPath,
Name: stream.GetName(),
Size: stream.GetSize(),
Modified: time.Now(),
IsFolder: false,
}, nil
}
// The following methods related to archive handling are not implemented yet.
// func (d *AzureBlob) GetArchiveMeta(...) {...}
// func (d *AzureBlob) ListArchive(...) {...}
// func (d *AzureBlob) Extract(...) {...}
// func (d *AzureBlob) ArchiveDecompress(...) {...}
// Ensure AzureBlob implements the driver.Driver interface.
var _ driver.Driver = (*AzureBlob)(nil)

View File

@@ -0,0 +1,32 @@
package azure_blob
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
Endpoint string `json:"endpoint" required:"true" default:"https://<accountname>.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3 ~ 24 numbers and lowercase letters only)."`
AccessKey string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"`
ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"`
SignURLExpire int `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."`
}
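// Example values for these fields (placeholders, for illustration only — not part of this commit):
//
//	Addition{
//		Endpoint:      "https://myaccount.blob.core.windows.net/",
//		AccessKey:     "<storage-account-key>",
//		ContainerName: "alist",
//		SignURLExpire: 4, // SAS links valid for 4 hours
//	}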
// implement GetRootId interface
func (r Addition) GetRootId() string {
return r.ContainerName
}
var config = driver.Config{
Name: "Azure Blob Storage",
LocalSort: true,
CheckStatus: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &AzureBlob{
config: config,
}
})
}

View File

@@ -0,0 +1,20 @@
package azure_blob
import "github.com/alist-org/alist/v3/internal/driver"
// progressTracker is used to track upload progress
type progressTracker struct {
total int64
current int64
updateProgress driver.UpdateProgress
}
// Write implements io.Writer to track progress
func (pt *progressTracker) Write(p []byte) (n int, err error) {
n = len(p)
pt.current += int64(n)
if pt.updateProgress != nil && pt.total > 0 {
pt.updateProgress(float64(pt.current) * 100 / float64(pt.total))
}
return n, nil
}
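// Usage sketch (illustrative, not part of this commit): the tracker is wired in
// through io.TeeReader so that every read from the upload stream also reports progress:
//
//	pt := &progressTracker{total: size, updateProgress: up}
//	r := io.TeeReader(stream, pt) // reading from r copies byte counts into pt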

401
drivers/azure_blob/util.go Normal file
View File

@@ -0,0 +1,401 @@
package azure_blob
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"path"
"sort"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
log "github.com/sirupsen/logrus"
)
const (
// MaxRetries defines the maximum number of retry attempts for Azure operations
MaxRetries = 3
// RetryDelay defines the base delay between retries
RetryDelay = 3 * time.Second
// MaxBatchSize defines the maximum number of operations in a single batch request
MaxBatchSize = 128
)
// extractAccountName extracts the storage account name from an Azure endpoint URL.
func extractAccountName(endpoint string) string {
// Strip the protocol prefix.
endpoint = strings.TrimPrefix(endpoint, "https://")
endpoint = strings.TrimPrefix(endpoint, "http://")
// The segment before the first dot is the account name.
parts := strings.Split(endpoint, ".")
if len(parts) > 0 {
// to lower case
return strings.ToLower(parts[0])
}
return ""
}
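// Illustration (not part of this commit):
//
//	extractAccountName("https://MyAccount.blob.core.windows.net/") == "myaccount"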
// isNotFoundError checks if the error is a "not found" type error
func isNotFoundError(err error) bool {
var storageErr *azcore.ResponseError
if errors.As(err, &storageErr) {
return storageErr.StatusCode == 404
}
// Fallback to string matching for backwards compatibility
return err != nil && strings.Contains(err.Error(), "BlobNotFound")
}
// flattenListBlobs - Optimize blob listing to handle pagination better
func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) {
// Standardize prefix format
prefix = ensureTrailingSlash(prefix)
var blobItems []container.BlobItem
pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
Prefix: &prefix,
Include: container.ListBlobsInclude{
Metadata: true,
},
})
for pager.More() {
page, err := pager.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list blobs: %w", err)
}
for _, blob := range page.Segment.BlobItems {
blobItems = append(blobItems, *blob)
}
}
return blobItems, nil
}
// batchDeleteBlobs - Simplify batch deletion logic
func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error {
if len(blobPaths) == 0 {
return nil
}
// Process in batches of MaxBatchSize
for i := 0; i < len(blobPaths); i += MaxBatchSize {
end := min(i+MaxBatchSize, len(blobPaths))
currentBatch := blobPaths[i:end]
// Create batch builder
batchBuilder, err := d.containerClient.NewBatchBuilder()
if err != nil {
return fmt.Errorf("failed to create batch builder: %w", err)
}
// Add delete operations
for _, blobPath := range currentBatch {
if err := batchBuilder.Delete(blobPath, nil); err != nil {
return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err)
}
}
// Submit batch
responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil)
if err != nil {
return fmt.Errorf("batch delete request failed: %w", err)
}
// Check responses
for _, resp := range responses.Responses {
if resp.Error != nil && !isNotFoundError(resp.Error) {
// Resolve the blob name for a more helpful error message.
blobName := "unknown"
if resp.BlobName != nil {
blobName = *resp.BlobName
}
return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error)
}
}
}
return nil
}
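// Illustration (not part of this commit): with 300 blob paths and MaxBatchSize = 128,
// the loop above submits three batch requests of 128, 128, and 44 delete sub-operations.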
// deleteFolder recursively deletes a directory and all its contents
func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error {
// Ensure directory path ends with slash
prefix = ensureTrailingSlash(prefix)
// Get all blobs under the directory using flattenListBlobs
globs, err := d.flattenListBlobs(ctx, prefix)
if err != nil {
return fmt.Errorf("failed to list blobs for deletion: %w", err)
}
// If there are blobs in the directory, delete them
if len(globs) > 0 {
// Separate file blobs from directory markers.
var filePaths []string
var dirPaths []string
for _, blob := range globs {
blobName := *blob.Name
if isDirectory(blob) {
// remove trailing slash for directory names
dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/"))
} else {
filePaths = append(filePaths, blobName)
}
}
// Delete files first, then directory markers.
if len(filePaths) > 0 {
if err := d.batchDeleteBlobs(ctx, filePaths); err != nil {
return err
}
}
if len(dirPaths) > 0 {
// Group directory markers by path depth.
depthMap := make(map[int][]string)
for _, dir := range dirPaths {
depth := strings.Count(dir, "/") // directory depth
depthMap[depth] = append(depthMap[depth], dir)
}
// Sort depths in descending order.
var depths []int
for depth := range depthMap {
depths = append(depths, depth)
}
sort.Sort(sort.Reverse(sort.IntSlice(depths)))
// Batch-delete one level at a time, deepest first.
for _, depth := range depths {
batch := depthMap[depth]
if err := d.batchDeleteBlobs(ctx, batch); err != nil {
return err
}
}
}
}
// Finally, delete the directory marker itself.
return d.deleteEmptyDirectory(ctx, prefix)
}
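// Illustration (not part of this commit): for markers "a", "a/b", and "a/b/c",
// depthMap is {0: ["a"], 1: ["a/b"], 2: ["a/b/c"]}; reverse depth order deletes
// "a/b/c" before "a/b" before "a", so no marker is removed while its children remain.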
// deleteFile deletes a single file or blob with better error handling
func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error {
blobClient := d.containerClient.NewBlobClient(path)
_, err := blobClient.Delete(ctx, nil)
if err != nil && !(isDir && isNotFoundError(err)) {
return err
}
return nil
}
// copyFile copies a single blob from source path to destination path
func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error {
srcBlob := d.containerClient.NewBlobClient(srcPath)
dstBlob := d.containerClient.NewBlobClient(dstPath)
// Use configured expiration time for SAS URL
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
if err != nil {
return fmt.Errorf("failed to generate source SAS URL: %w", err)
}
_, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil)
return err
}
// createContainerIfNotExists - Create container if not exists
// Clean up commented code
func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error {
serviceClient := d.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(containerName)
var options = service.CreateContainerOptions{}
_, err := containerClient.Create(ctx, &options)
if err != nil {
var responseErr *azcore.ResponseError
if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" {
return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err)
}
}
d.containerClient = containerClient
return nil
}
// mkDir creates a virtual directory marker by uploading an empty blob with metadata.
func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error {
dirPath := ensureTrailingSlash(fullDirName)
blobClient := d.containerClient.NewBlockBlobClient(dirPath)
// Upload an empty blob with metadata indicating it's a directory
_, err := blobClient.Upload(ctx, struct {
*bytes.Reader
io.Closer
}{
Reader: bytes.NewReader([]byte{}),
Closer: io.NopCloser(nil),
}, &blockblob.UploadOptions{
Metadata: map[string]*string{
"hdi_isfolder": to.Ptr("true"),
},
})
return err
}
// ensureTrailingSlash ensures the provided path ends with a trailing slash.
func ensureTrailingSlash(path string) string {
if !strings.HasSuffix(path, "/") {
return path + "/"
}
return path
}
// moveOrRename moves or renames blobs or directories from source to destination.
func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error {
if isDir {
// Normalize paths for directory operations
srcPath = ensureTrailingSlash(srcPath)
dstPath = ensureTrailingSlash(dstPath)
// List all blobs under the source directory
blobs, err := d.flattenListBlobs(ctx, srcPath)
if err != nil {
return fmt.Errorf("failed to list blobs: %w", err)
}
// Iterate and copy each blob to the destination
for _, item := range blobs {
srcBlobName := *item.Name
relPath := strings.TrimPrefix(srcBlobName, srcPath)
itemDstPath := path.Join(dstPath, relPath)
if isDirectory(item) {
// Create directory marker at destination
if err := d.mkDir(ctx, itemDstPath); err != nil {
return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
}
} else {
// Copy file blob to destination
if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil {
return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err)
}
}
}
// Handle empty directories by creating a marker at destination
if len(blobs) == 0 {
if err := d.mkDir(ctx, dstPath); err != nil {
return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
}
}
// Delete source directory and its contents
if err := d.deleteFolder(ctx, srcPath); err != nil {
log.Warnf("failed to delete source directory [%s]: %v\n, and try again", srcPath, err)
// Retry deletion once more and ignore the result
if err := d.deleteFolder(ctx, srcPath); err != nil {
log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err)
}
}
return nil
}
// Single file move or rename operation
if err := d.copyFile(ctx, srcPath, dstPath); err != nil {
return fmt.Errorf("failed to copy file: %w", err)
}
// Delete source file after successful copy
if err := d.deleteFile(ctx, srcPath, false); err != nil {
log.Errorf("Error deleting source file [%s]: %v", srcPath, err)
}
return nil
}
// optimizedUploadOptions returns the optimal upload options based on file size
func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions {
options := &azblob.UploadStreamOptions{
BlockSize: 4 * 1024 * 1024, // 4MB block size
Concurrency: 4, // Default concurrency
}
// For large files, increase block size and concurrency
if fileSize > 256*1024*1024 { // For files larger than 256MB
options.BlockSize = 8 * 1024 * 1024 // 8MB blocks
options.Concurrency = 8 // More concurrent uploads
}
// For very large files (>1GB)
if fileSize > 1024*1024*1024 {
options.BlockSize = 16 * 1024 * 1024 // 16MB blocks
options.Concurrency = 16 // Higher concurrency
}
return options
}
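// Summary of the tiers above (illustrative): <=256MB -> 4MB blocks, 4 workers;
// >256MB -> 8MB blocks, 8 workers; >1GB -> 16MB blocks, 16 workers.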
// isDirectory determines if a blob represents a directory
// Checks multiple indicators: path suffix, metadata, and content type
func isDirectory(blob container.BlobItem) bool {
// Check path suffix
if strings.HasSuffix(*blob.Name, "/") {
return true
}
// Check metadata for directory marker
if blob.Metadata != nil {
if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" {
return true
}
// Azure Storage Explorer and other tools may use different metadata keys
if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" {
return true
}
}
// Check content type (some tools mark directories with specific content types)
if blob.Properties != nil && blob.Properties.ContentType != nil {
contentType := strings.ToLower(*blob.Properties.ContentType)
if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") {
return true
}
}
return false
}
// deleteEmptyDirectory deletes a directory only if it's empty
func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error {
// Directory is empty, delete the directory marker
blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/"))
_, err := blobClient.Delete(ctx, nil)
// Also try deleting with trailing slash (for different directory marker formats)
if err != nil && isNotFoundError(err) {
blobClient = d.containerClient.NewBlobClient(dirPath)
_, err = blobClient.Delete(ctx, nil)
}
// Ignore not found errors
if err != nil && isNotFoundError(err) {
log.Infof("Directory [%s] not found during deletion: %v", dirPath, err)
return nil
}
return err
}

View File

@@ -6,8 +6,8 @@ import (
"encoding/hex"
"errors"
"io"
"math"
"net/url"
"os"
stdpath "path"
"strconv"
"time"
@@ -15,6 +15,7 @@ import (
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
@@ -78,6 +79,8 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA
func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if d.DownloadAPI == "crack" {
return d.linkCrack(file, args)
} else if d.DownloadAPI == "crack_video" {
return d.linkCrackVideo(file, args)
}
return d.linkOfficial(file, args)
}
@@ -183,16 +186,30 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
return newObj, nil
}
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
var (
cache = stream.GetFile()
tmpF *os.File
err error
)
if _, ok := cache.(io.ReaderAt); !ok {
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
if err != nil {
return nil, err
}
defer func() {
_ = tmpF.Close()
_ = os.Remove(tmpF.Name())
}()
cache = tmpF
}
streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
count := int(streamSize / sliceSize)
lastBlockSize := streamSize % sliceSize
if streamSize > 0 && lastBlockSize == 0 {
if lastBlockSize > 0 {
count++
} else {
lastBlockSize = sliceSize
}
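// Illustration (not part of this commit): for streamSize = 10 MiB and
// sliceSize = 4 MiB, count starts at 2 and lastBlockSize = 2 MiB, so count
// becomes 3; for streamSize = 8 MiB exactly, count stays 2 and lastBlockSize
// is promoted to a full 4 MiB slice.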
@@ -205,6 +222,11 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
sliceMd5H := md5.New()
sliceMd5H2 := md5.New()
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
if tmpF != nil {
writers = append(writers, tmpF)
}
written := int64(0)
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
@@ -213,13 +235,23 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
if i == count {
byteSize = lastBlockSize
}
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize)
written += n
if err != nil && err != io.EOF {
return nil, err
}
blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
sliceMd5H.Reset()
}
if tmpF != nil {
if written != streamSize {
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
}
}
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
blockListStr, _ := utils.Json.MarshalToString(blockList)
@@ -289,7 +321,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
"partseq": strconv.Itoa(partseq),
}
err := d.uploadSlice(ctx, params, stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
if err != nil {
return err
}

View File

@@ -10,15 +10,16 @@ type Addition struct {
driver.RootPath
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
ClientID string `json:"client_id" required:"true" default:"hq9yQ9w9kR4YHj1kyYafLygVocobh7Sf"`
ClientSecret string `json:"client_secret" required:"true" default:"YH2VpZcFJHYNnV6vLfHQXDBhcE7ZChyE"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
}
var config = driver.Config{

View File

@@ -17,7 +17,7 @@ type TokenErrResp struct {
type File struct {
//TkbindId int `json:"tkbind_id"`
//OwnerType int `json:"owner_type"`
//Category int `json:"category"`
Category int `json:"category"`
//RealCategory string `json:"real_category"`
FsId int64 `json:"fs_id"`
//OperId int `json:"oper_id"`

View File

@@ -79,6 +79,12 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
return retry.Unrecoverable(err2)
}
}
if 31023 == errno && d.DownloadAPI == "crack_video" {
result = res.Body()
return nil
}
return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
}
result = res.Body()
@@ -131,7 +137,16 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
if len(resp.List) == 0 {
break
}
res = append(res, resp.List...)
if d.OnlyListVideoFile {
for _, file := range resp.List {
if file.Isdir == 1 || file.Category == 1 {
res = append(res, file)
}
}
} else {
res = append(res, resp.List...)
}
}
return res, nil
}
@@ -187,6 +202,34 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link,
}, nil
}
func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
param := map[string]string{
"type": "VideoURL",
"path": fmt.Sprintf("%s", file.GetPath()),
"fs_id": file.GetID(),
"devuid": "0%1",
"clienttype": "1",
"channel": "android_15_25010PN30C_bd-netdisk_1523a",
"nom3u8": "1",
"dlink": "1",
"media": "1",
"origin": "dlna",
}
resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(param)
}, nil)
if err != nil {
return nil, err
}
return &model.Link{
URL: utils.Json.Get(resp, "info", "dlink").ToString(),
Header: http.Header{
"User-Agent": []string{d.CustomCrackUA},
},
}, nil
}
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
params := map[string]string{
"method": "filemanager",

View File

@@ -7,7 +7,7 @@ import (
"errors"
"fmt"
"io"
"math"
"os"
"regexp"
"strconv"
"strings"
@@ -16,6 +16,7 @@ import (
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
@@ -241,11 +242,21 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// TODO:
// No rapid-upload (instant upload) method has been found yet.
// Computing the full-file MD5 requires a stream that supports io.Seek.
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
var (
cache = stream.GetFile()
tmpF *os.File
err error
)
if _, ok := cache.(io.ReaderAt); !ok {
tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
if err != nil {
return nil, err
}
defer func() {
_ = tmpF.Close()
_ = os.Remove(tmpF.Name())
}()
cache = tmpF
}
const DEFAULT int64 = 1 << 22
@@ -253,9 +264,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
// Compute the values needed for the upload.
streamSize := stream.GetSize()
count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
count := int(streamSize / DEFAULT)
lastBlockSize := streamSize % DEFAULT
if lastBlockSize == 0 {
if lastBlockSize > 0 {
count++
} else {
lastBlockSize = DEFAULT
}
@@ -266,6 +279,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
sliceMd5H := md5.New()
sliceMd5H2 := md5.New()
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
if tmpF != nil {
writers = append(writers, tmpF)
}
written := int64(0)
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@@ -273,13 +291,23 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if i == count {
byteSize = lastBlockSize
}
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize)
written += n
if err != nil && err != io.EOF {
return nil, err
}
sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
sliceMd5H.Reset()
}
if tmpF != nil {
if written != streamSize {
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
}
}
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)
@@ -291,7 +319,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
"rtype": "1",
"ctype": "11",
"path": fmt.Sprintf("/%s", stream.GetName()),
"size": fmt.Sprint(stream.GetSize()),
"size": fmt.Sprint(streamSize),
"slice-md5": sliceMd5,
"content-md5": contentMd5,
"block_list": blockListStr,
@@ -343,7 +371,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
r.SetContext(ctx)
r.SetQueryParams(uploadParams)
r.SetFileReader("file", stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
}, nil)
if err != nil {
return err

View File

@@ -18,6 +18,7 @@ import (
type Cloudreve struct {
model.Storage
Addition
ref *Cloudreve
}
func (d *Cloudreve) Config() driver.Config {
@@ -37,8 +38,18 @@ func (d *Cloudreve) Init(ctx context.Context) error {
return d.login()
}
func (d *Cloudreve) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*Cloudreve)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (d *Cloudreve) Drop(ctx context.Context) error {
d.Cookie = ""
d.ref = nil
return nil
}
@@ -162,6 +173,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
switch r.Policy.Type {
case "onedrive":
err = d.upOneDrive(ctx, stream, u, up)
case "s3":
err = d.upS3(ctx, stream, u, up)
case "remote": // 从机存储
err = d.upRemote(ctx, stream, u, up)
case "local": // 本机存储

View File

@@ -21,11 +21,12 @@ type Policy struct {
}
type UploadInfo struct {
SessionID string `json:"sessionID"`
ChunkSize int `json:"chunkSize"`
Expires int `json:"expires"`
UploadURLs []string `json:"uploadURLs"`
Credential string `json:"credential,omitempty"`
SessionID string `json:"sessionID"`
ChunkSize int `json:"chunkSize"`
Expires int `json:"expires"`
UploadURLs []string `json:"uploadURLs"`
Credential string `json:"credential,omitempty"` // local
CompleteURL string `json:"completeURL,omitempty"` // s3
}
type DirectoryResp struct {

View File

@@ -4,12 +4,14 @@ import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
@@ -19,7 +21,6 @@ import (
"github.com/alist-org/alist/v3/pkg/cookie"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
json "github.com/json-iterator/go"
jsoniter "github.com/json-iterator/go"
)
@@ -35,6 +36,9 @@ func (d *Cloudreve) getUA() string {
}
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
u := d.Address + "/api/v3" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
@@ -79,11 +83,11 @@ func (d *Cloudreve) request(method string, path string, callback base.ReqCallbac
}
if out != nil && r.Data != nil {
var marshal []byte
marshal, err = json.Marshal(r.Data)
marshal, err = jsoniter.Marshal(r.Data)
if err != nil {
return err
}
err = json.Unmarshal(marshal, out)
err = jsoniter.Unmarshal(marshal, out)
if err != nil {
return err
}
@@ -187,12 +191,9 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
@@ -204,10 +205,27 @@ func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u Up
req.SetContentLength(true)
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetHeader("User-Agent", d.getUA())
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req.AddRetryCondition(func(r *resty.Response, err error) bool {
if err != nil {
return true
}
if r.IsError() {
return true
}
var retryResp Resp
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
if jErr != nil {
return true
}
if retryResp.Code != 0 {
return true
}
return false
})
}, nil)
if err != nil {
break
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
@@ -222,16 +240,15 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
@@ -239,7 +256,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@@ -248,14 +265,43 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
backoff := time.Duration(1<<retryCount) * time.Second
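// e.g. retryCount 1 -> 2s, 2 -> 4s, 3 -> 8s (exponential backoff)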
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
return nil
}
@@ -264,23 +310,22 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
uploadUrl := u.UploadURLs[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@@ -295,18 +340,121 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
_ = res.Body.Close()
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
// Send the completion callback after a successful upload.
err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
return d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := stream.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Cloudreve-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-S3] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("faild to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
bodyBuilder := &strings.Builder{}
bodyBuilder.WriteString("<CompleteMultipartUpload>")
for i, etag := range etags {
bodyBuilder.WriteString(fmt.Sprintf(
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
i+1, // PartNumber is 1-based
etag,
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/xml")
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
}
// Send the completion callback after a successful upload.
err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil)
if err != nil {
return err
}

View File

@@ -0,0 +1,305 @@
package cloudreve_v4
import (
"context"
"errors"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
type CloudreveV4 struct {
model.Storage
Addition
ref *CloudreveV4
}
func (d *CloudreveV4) Config() driver.Config {
if d.ref != nil {
return d.ref.Config()
}
if d.EnableVersionUpload {
config.NoOverwriteUpload = false
}
return config
}
func (d *CloudreveV4) GetAddition() driver.Additional {
return &d.Addition
}
func (d *CloudreveV4) Init(ctx context.Context) error {
// removing trailing slash
d.Address = strings.TrimSuffix(d.Address, "/")
op.MustSaveDriverStorage(d)
if d.ref != nil {
return nil
}
if d.AccessToken == "" && d.RefreshToken != "" {
return d.refreshToken()
}
if d.Username != "" {
return d.login()
}
return nil
}
func (d *CloudreveV4) InitReference(storage driver.Driver) error {
refStorage, ok := storage.(*CloudreveV4)
if ok {
d.ref = refStorage
return nil
}
return errs.NotSupport
}
func (d *CloudreveV4) Drop(ctx context.Context) error {
d.ref = nil
return nil
}
func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
const pageSize int = 100
var f []File
var r FileResp
params := map[string]string{
"page_size": strconv.Itoa(pageSize),
"uri": dir.GetPath(),
"order_by": d.OrderBy,
"order_direction": d.OrderDirection,
"page": "0",
}
for {
err := d.request(http.MethodGet, "/file", func(req *resty.Request) {
req.SetQueryParams(params)
}, &r)
if err != nil {
return nil, err
}
f = append(f, r.Files...)
if r.Pagination.NextToken == "" || len(r.Files) < pageSize {
break
}
params["next_page_token"] = r.Pagination.NextToken
}
return utils.SliceConvert(f, func(src File) (model.Obj, error) {
if d.EnableFolderSize && src.Type == 1 {
var ds FolderSummaryResp
err := d.request(http.MethodGet, "/file/info", func(req *resty.Request) {
req.SetQueryParam("uri", src.Path)
req.SetQueryParam("folder_summary", "true")
}, &ds)
if err == nil && ds.FolderSummary.Size > 0 {
src.Size = ds.FolderSummary.Size
}
}
var thumb model.Thumbnail
if d.EnableThumb && src.Type == 0 {
var t FileThumbResp
err := d.request(http.MethodGet, "/file/thumb", func(req *resty.Request) {
req.SetQueryParam("uri", src.Path)
}, &t)
if err == nil && t.URL != "" {
thumb = model.Thumbnail{
Thumbnail: t.URL,
}
}
}
return &model.ObjThumb{
Object: model.Object{
ID: src.ID,
Path: src.Path,
Name: src.Name,
Size: src.Size,
Modified: src.UpdatedAt,
Ctime: src.CreatedAt,
IsFolder: src.Type == 1,
},
Thumbnail: thumb,
}, nil
})
}
func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var url FileUrlResp
err := d.request(http.MethodPost, "/file/url", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{file.GetPath()},
"download": true,
})
}, &url)
if err != nil {
return nil, err
}
if len(url.Urls) == 0 {
return nil, errors.New("server returns no url")
}
exp := time.Until(url.Expires)
return &model.Link{
URL: url.Urls[0].URL,
Expiration: &exp,
}, nil
}
func (d *CloudreveV4) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"type": "folder",
"uri": parentDir.GetPath() + "/" + dirName,
"error_on_conflict": true,
})
}, nil)
}
func (d *CloudreveV4) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{srcObj.GetPath()},
"dst": dstDir.GetPath(),
"copy": false,
})
}, nil)
}
func (d *CloudreveV4) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"new_name": newName,
"uri": srcObj.GetPath(),
})
}, nil)
}
func (d *CloudreveV4) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
return d.request(http.MethodPost, "/file/move", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{srcObj.GetPath()},
"dst": dstDir.GetPath(),
"copy": true,
})
}, nil)
}
func (d *CloudreveV4) Remove(ctx context.Context, obj model.Obj) error {
return d.request(http.MethodDelete, "/file", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{obj.GetPath()},
"unlink": false,
"skip_soft_delete": true,
})
}, nil)
}
func (d *CloudreveV4) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
if file.GetSize() == 0 {
// For empty files, call the create-file endpoint instead to avoid a stuck upload session.
return d.request(http.MethodPost, "/file/create", func(req *resty.Request) {
req.SetBody(base.Json{
"type": "file",
"uri": dstDir.GetPath() + "/" + file.GetName(),
"error_on_conflict": true,
})
}, nil)
}
var p StoragePolicy
var r FileResp
var u FileUploadResp
var err error
params := map[string]string{
"page_size": "10",
"uri": dstDir.GetPath(),
"order_by": "created_at",
"order_direction": "asc",
"page": "0",
}
err = d.request(http.MethodGet, "/file", func(req *resty.Request) {
req.SetQueryParams(params)
}, &r)
if err != nil {
return err
}
p = r.StoragePolicy
body := base.Json{
"uri": dstDir.GetPath() + "/" + file.GetName(),
"size": file.GetSize(),
"policy_id": p.ID,
"last_modified": file.ModTime().UnixMilli(),
"mime_type": "",
}
if d.EnableVersionUpload {
body["entity_type"] = "version"
}
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
req.SetBody(body)
}, &u)
if err != nil {
return err
}
if u.StoragePolicy.Relay {
err = d.upLocal(ctx, file, u, up)
} else {
switch u.StoragePolicy.Type {
case "local":
err = d.upLocal(ctx, file, u, up)
case "remote":
err = d.upRemote(ctx, file, u, up)
case "onedrive":
err = d.upOneDrive(ctx, file, u, up)
case "s3":
err = d.upS3(ctx, file, u, up)
default:
return errs.NotImplement
}
}
if err != nil {
// Clean up the failed upload session.
_ = d.request(http.MethodDelete, "/file/upload", func(req *resty.Request) {
req.SetBody(base.Json{
"id": u.SessionID,
"uri": u.URI,
})
}, nil)
return err
}
return nil
}
func (d *CloudreveV4) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*CloudreveV4)(nil)

View File

@@ -0,0 +1,44 @@
package cloudreve_v4
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootPath
// driver.RootID
// define other
Address string `json:"address" required:"true"`
Username string `json:"username"`
Password string `json:"password"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
CustomUA string `json:"custom_ua"`
EnableFolderSize bool `json:"enable_folder_size"`
EnableThumb bool `json:"enable_thumb"`
EnableVersionUpload bool `json:"enable_version_upload"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at" default:"name" required:"true"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc" required:"true"`
}
var config = driver.Config{
Name: "Cloudreve V4",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "cloudreve://my",
CheckStatus: true,
Alert: "",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &CloudreveV4{}
})
}

View File

@@ -0,0 +1,164 @@
package cloudreve_v4
import (
"time"
"github.com/alist-org/alist/v3/internal/model"
)
type Object struct {
model.Object
StoragePolicy StoragePolicy
}
type Resp struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data any `json:"data"`
}
type BasicConfigResp struct {
InstanceID string `json:"instance_id"`
// Title string `json:"title"`
// Themes string `json:"themes"`
// DefaultTheme string `json:"default_theme"`
User struct {
ID string `json:"id"`
// Nickname string `json:"nickname"`
// CreatedAt time.Time `json:"created_at"`
// Anonymous bool `json:"anonymous"`
Group struct {
ID string `json:"id"`
Name string `json:"name"`
Permission string `json:"permission"`
} `json:"group"`
} `json:"user"`
// Logo string `json:"logo"`
// LogoLight string `json:"logo_light"`
// CaptchaReCaptchaKey string `json:"captcha_ReCaptchaKey"`
CaptchaType string `json:"captcha_type"` // support 'normal' only
// AppPromotion bool `json:"app_promotion"`
}
type SiteLoginConfigResp struct {
LoginCaptcha bool `json:"login_captcha"`
Authn bool `json:"authn"`
}
type PrepareLoginResp struct {
WebauthnEnabled bool `json:"webauthn_enabled"`
PasswordEnabled bool `json:"password_enabled"`
}
type CaptchaResp struct {
Image string `json:"image"`
Ticket string `json:"ticket"`
}
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
AccessExpires time.Time `json:"access_expires"`
RefreshExpires time.Time `json:"refresh_expires"`
}
type TokenResponse struct {
User struct {
ID string `json:"id"`
// Email string `json:"email"`
// Nickname string `json:"nickname"`
Status string `json:"status"`
// CreatedAt time.Time `json:"created_at"`
Group struct {
ID string `json:"id"`
Name string `json:"name"`
Permission string `json:"permission"`
// DirectLinkBatchSize int `json:"direct_link_batch_size"`
// TrashRetention int `json:"trash_retention"`
} `json:"group"`
// Language string `json:"language"`
} `json:"user"`
Token Token `json:"token"`
}
type File struct {
Type int `json:"type"` // 0: file, 1: folder
ID string `json:"id"`
Name string `json:"name"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
Size int64 `json:"size"`
Metadata interface{} `json:"metadata"`
Path string `json:"path"`
Capability string `json:"capability"`
Owned bool `json:"owned"`
PrimaryEntity string `json:"primary_entity"`
}
type StoragePolicy struct {
ID string `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
MaxSize int64 `json:"max_size"`
Relay bool `json:"relay,omitempty"`
}
type Pagination struct {
Page int `json:"page"`
PageSize int `json:"page_size"`
IsCursor bool `json:"is_cursor"`
NextToken string `json:"next_token,omitempty"`
}
type Props struct {
Capability string `json:"capability"`
MaxPageSize int `json:"max_page_size"`
OrderByOptions []string `json:"order_by_options"`
OrderDirectionOptions []string `json:"order_direction_options"`
}
type FileResp struct {
Files []File `json:"files"`
Parent File `json:"parent"`
Pagination Pagination `json:"pagination"`
Props Props `json:"props"`
ContextHint string `json:"context_hint"`
MixedType bool `json:"mixed_type"`
StoragePolicy StoragePolicy `json:"storage_policy"`
}
type FileUrlResp struct {
Urls []struct {
URL string `json:"url"`
} `json:"urls"`
Expires time.Time `json:"expires"`
}
type FileUploadResp struct {
// UploadID string `json:"upload_id"`
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"`
Expires int64 `json:"expires"`
StoragePolicy StoragePolicy `json:"storage_policy"`
URI string `json:"uri"`
CompleteURL string `json:"completeURL,omitempty"` // for S3-like
CallbackSecret string `json:"callback_secret,omitempty"` // for S3-like, OneDrive
UploadUrls []string `json:"upload_urls,omitempty"` // for not-local
Credential string `json:"credential,omitempty"` // for local
}
type FileThumbResp struct {
URL string `json:"url"`
Expires time.Time `json:"expires"`
}
type FolderSummaryResp struct {
File
FolderSummary struct {
Size int64 `json:"size"`
Files int64 `json:"files"`
Folders int64 `json:"folders"`
Completed bool `json:"completed"`
CalculatedAt time.Time `json:"calculated_at"`
} `json:"folder_summary"`
}

View File

@@ -0,0 +1,476 @@
package cloudreve_v4
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
// do others that not defined in Driver interface
func (d *CloudreveV4) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
}
return base.UserAgent
}
func (d *CloudreveV4) request(method string, path string, callback base.ReqCallback, out any) error {
if d.ref != nil {
return d.ref.request(method, path, callback, out)
}
u := d.Address + "/api/v4" + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Accept": "application/json, text/plain, */*",
"User-Agent": d.getUA(),
})
if d.AccessToken != "" {
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
}
var r Resp
req.SetResult(&r)
if callback != nil {
callback(req)
}
resp, err := req.Execute(method, u)
if err != nil {
return err
}
if !resp.IsSuccess() {
return errors.New(resp.String())
}
if r.Code != 0 {
if r.Code == 401 && d.RefreshToken != "" && path != "/session/token/refresh" {
// try to refresh token
err = d.refreshToken()
if err != nil {
return err
}
return d.request(method, path, callback, out)
}
return errors.New(r.Msg)
}
if out != nil && r.Data != nil {
var marshal []byte
marshal, err = json.Marshal(r.Data)
if err != nil {
return err
}
err = json.Unmarshal(marshal, out)
if err != nil {
return err
}
}
return nil
}
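// Note (illustrative, not part of this commit): a 401 response triggers one
// refreshToken() call and the original request is replayed; the
// path != "/session/token/refresh" guard prevents infinite recursion when the
// refresh request itself fails with 401.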
func (d *CloudreveV4) login() error {
var siteConfig SiteLoginConfigResp
err := d.request(http.MethodGet, "/site/config/login", nil, &siteConfig)
if err != nil {
return err
}
if !siteConfig.Authn {
return errors.New("authn not support")
}
var prepareLogin PrepareLoginResp
err = d.request(http.MethodGet, "/session/prepare?email="+d.Addition.Username, nil, &prepareLogin)
if err != nil {
return err
}
if !prepareLogin.PasswordEnabled {
return errors.New("password not enabled")
}
if prepareLogin.WebauthnEnabled {
return errors.New("webauthn not support")
}
for range 5 {
err = d.doLogin(siteConfig.LoginCaptcha)
if err == nil {
break
}
if err.Error() != "CAPTCHA not match." {
break
}
}
return err
}
func (d *CloudreveV4) doLogin(needCaptcha bool) error {
var err error
loginBody := base.Json{
"email": d.Username,
"password": d.Password,
}
if needCaptcha {
var config BasicConfigResp
err = d.request(http.MethodGet, "/site/config/basic", nil, &config)
if err != nil {
return err
}
if config.CaptchaType != "normal" {
return fmt.Errorf("captcha type %s not support", config.CaptchaType)
}
var captcha CaptchaResp
err = d.request(http.MethodGet, "/site/captcha", nil, &captcha)
if err != nil {
return err
}
if !strings.HasPrefix(captcha.Image, "data:image/png;base64,") {
return errors.New("can not get captcha")
}
loginBody["ticket"] = captcha.Ticket
i := strings.Index(captcha.Image, ",")
dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(captcha.Image[i+1:]))
vRes, err := base.RestyClient.R().SetMultipartField(
"image", "validateCode.png", "image/png", dec).
Post(setting.GetStr(conf.OcrApi))
if err != nil {
return err
}
if jsoniter.Get(vRes.Body(), "status").ToInt() != 200 {
return errors.New("ocr error:" + jsoniter.Get(vRes.Body(), "msg").ToString())
}
captchaCode := jsoniter.Get(vRes.Body(), "result").ToString()
if captchaCode == "" {
return errors.New("ocr error: empty result")
}
loginBody["captcha"] = captchaCode
}
var token TokenResponse
err = d.request(http.MethodPost, "/session/token", func(req *resty.Request) {
req.SetBody(loginBody)
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.Token.AccessToken, token.Token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) refreshToken() error {
var token Token
if d.RefreshToken == "" {
if d.Username != "" {
err := d.login()
if err != nil {
return fmt.Errorf("cannot login to get refresh token, error: %s", err)
}
}
return nil
}
err := d.request(http.MethodPost, "/session/token/refresh", func(req *resty.Request) {
req.SetBody(base.Json{
"refresh_token": d.RefreshToken,
})
}, &token)
if err != nil {
return err
}
d.AccessToken, d.RefreshToken = token.AccessToken, token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *CloudreveV4) upLocal(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
if DEFAULT == 0 {
// support relay
DEFAULT = file.GetSize()
}
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Local] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetContentLength(true)
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
req.AddRetryCondition(func(r *resty.Response, err error) bool {
if err != nil {
return true
}
if r.IsError() {
return true
}
var retryResp Resp
jErr := base.RestyClient.JSONUnmarshal(r.Body(), &retryResp)
if jErr != nil {
return true
}
if retryResp.Code != 0 {
return true
}
return false
})
}, nil)
if err != nil {
return err
}
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
return nil
}
func (d *CloudreveV4) upRemote(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
uploadUrl := u.UploadUrls[0]
credential := u.Credential
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-Remote] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
err = func() error {
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return errors.New(res.Status)
}
body, err := io.ReadAll(res.Body)
if err != nil {
return err
}
var up Resp
err = json.Unmarshal(body, &up)
if err != nil {
return err
}
if up.Code != 0 {
return errors.New(up.Msg)
}
return nil
}()
if err == nil {
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
} else {
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors, error: %s", maxRetries, err)
}
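// exponential backoff: 2s, 4s, 8s between attempts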
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Cloudreve-Remote] server errors while uploading, retrying after %v...", backoff)
time.Sleep(backoff)
}
}
return nil
}
func (d *CloudreveV4) upOneDrive(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
uploadUrl := u.UploadUrls[0]
var finish int64 = 0
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-OneDrive] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, file.GetSize()))
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[CloudreveV4-OneDrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
}
}
// send the callback request after a successful upload
return d.request(http.MethodPost, "/callback/onedrive/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}
func (d *CloudreveV4) upS3(ctx context.Context, file model.FileStreamer, u FileUploadResp, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
var etags []string
DEFAULT := int64(u.ChunkSize)
retryCount := 0
maxRetries := 3
for finish < file.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
left := file.GetSize() - finish
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[CloudreveV4-S3] upload range: %d-%d/%d", finish, finish+byteSize-1, file.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest(http.MethodPut, u.UploadUrls[chunk],
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.ContentLength = byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
etag := res.Header.Get("ETag")
res.Body.Close()
switch {
case res.StatusCode != 200:
retryCount++
if retryCount > maxRetries {
return fmt.Errorf("upload failed after %d retries due to server errors", maxRetries)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("server error %d, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case etag == "":
return errors.New("faild to get ETag from header")
default:
retryCount = 0
etags = append(etags, etag)
finish += byteSize
up(float64(finish) * 100 / float64(file.GetSize()))
chunk++
}
}
// s3LikeFinishUpload
bodyBuilder := &strings.Builder{}
bodyBuilder.WriteString("<CompleteMultipartUpload>")
for i, etag := range etags {
bodyBuilder.WriteString(fmt.Sprintf(
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
i+1, // PartNumber starts at 1
etag,
))
}
bodyBuilder.WriteString("</CompleteMultipartUpload>")
req, err := http.NewRequest(
"POST",
u.CompleteURL,
strings.NewReader(bodyBuilder.String()),
)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/xml")
req.Header.Set("User-Agent", d.getUA())
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
body, _ := io.ReadAll(res.Body)
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
}
// send the callback request after a successful upload
return d.request(http.MethodPost, "/callback/s3/"+u.SessionID+"/"+u.CallbackSecret, func(req *resty.Request) {
req.SetBody("{}")
}, nil)
}
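// For illustration (not part of the driver): with two uploaded parts, the
// CompleteMultipartUpload body assembled above looks like
//
// <CompleteMultipartUpload>
// <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
// <Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part>
// </CompleteMultipartUpload>
//
// where each ETag is the value echoed back by the corresponding part PUT
// (the ETag values here are placeholders).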

View File

@@ -3,12 +3,16 @@ package doubao
import (
"context"
"errors"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)
@@ -16,6 +20,9 @@ import (
type Doubao struct {
model.Storage
Addition
*UploadToken
UserId string
uploadThread int
}
func (d *Doubao) Config() driver.Config {
@@ -29,6 +36,31 @@ func (d *Doubao) GetAddition() driver.Additional {
func (d *Doubao) Init(ctx context.Context) error {
// TODO login / refresh token
//op.MustSaveDriverStorage(d)
uploadThread, err := strconv.Atoi(d.UploadThread)
if err != nil || uploadThread < 1 {
d.uploadThread, d.UploadThread = 3, "3" // Set default value
} else {
d.uploadThread = uploadThread
}
if d.UserId == "" {
userInfo, err := d.getUserInfo()
if err != nil {
return err
}
d.UserId = strconv.FormatInt(userInfo.UserID, 10)
}
if d.UploadToken == nil {
uploadToken, err := d.initUploadToken()
if err != nil {
return err
}
d.UploadToken = uploadToken
}
return nil
}
@@ -38,56 +70,98 @@ func (d *Doubao) Drop(ctx context.Context) error {
func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var files []model.Obj
var r NodeInfoResp
_, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_id": dir.GetID(),
"need_full_path": false,
})
}, &r)
fileList, err := d.getFiles(dir.GetID(), "")
if err != nil {
return nil, err
}
for _, child := range r.Data.Children {
for _, child := range fileList {
files = append(files, &Object{
Object: model.Object{
ID: child.ID,
Path: child.ParentID,
Name: child.Name,
Size: int64(child.Size),
Modified: time.Unix(int64(child.UpdateTime), 0),
Ctime: time.Unix(int64(child.CreateTime), 0),
Size: child.Size,
Modified: time.Unix(child.UpdateTime, 0),
Ctime: time.Unix(child.CreateTime, 0),
IsFolder: child.NodeType == 1,
},
Key: child.Key,
Key: child.Key,
NodeType: child.NodeType,
})
}
return files, nil
}
func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var downloadUrl string
if u, ok := file.(*Object); ok {
var r GetFileUrlResp
_, err := d.request("/alice/message/get_file_url", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{u.Key},
"type": "file",
})
}, &r)
if err != nil {
return nil, err
switch d.DownloadApi {
case "get_download_info":
var r GetDownloadInfoResp
_, err := d.request("/samantha/aispace/get_download_info", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"requests": []base.Json{{"node_id": file.GetID()}},
})
}, &r)
if err != nil {
return nil, err
}
downloadUrl = r.Data.DownloadInfos[0].MainURL
case "get_file_url":
switch u.NodeType {
case VideoType, AudioType:
var r GetVideoFileUrlResp
_, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"key": u.Key,
"node_id": file.GetID(),
})
}, &r)
if err != nil {
return nil, err
}
downloadUrl = r.Data.OriginalMediaInfo.MainURL
default:
var r GetFileUrlResp
_, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{u.Key},
"type": FileNodeType[u.NodeType],
})
}, &r)
if err != nil {
return nil, err
}
downloadUrl = r.Data.FileUrls[0].MainURL
}
default:
return nil, errs.NotImplement
}
// generate a standards-compliant Content-Disposition header
contentDisposition := generateContentDisposition(u.Name)
return &model.Link{
URL: r.Data.FileUrls[0].MainURL,
URL: downloadUrl,
Header: http.Header{
"User-Agent": []string{UserAgent},
"Content-Disposition": []string{contentDisposition},
},
}, nil
}
return nil, errors.New("can't convert obj to URL")
}
func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
var r UploadNodeResp
_, err := d.request("/samantha/aispace/upload_node", "POST", func(req *resty.Request) {
_, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"node_list": []base.Json{
{
@@ -104,7 +178,7 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
var r UploadNodeResp
_, err := d.request("/samantha/aispace/move_node", "POST", func(req *resty.Request) {
_, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"node_list": []base.Json{
{"id": srcObj.GetID()},
@@ -118,7 +192,7 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
var r BaseResp
_, err := d.request("/samantha/aispace/rename_node", "POST", func(req *resty.Request) {
_, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"node_id": srcObj.GetID(),
"node_name": newName,
@@ -134,15 +208,38 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
var r BaseResp
_, err := d.request("/samantha/aispace/delete_node", "POST", func(req *resty.Request) {
_, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
}, &r)
return err
}
func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// TODO upload file, optional
return nil, errs.NotImplement
// determine the data type from the MIME type
mimetype := file.GetMimetype()
dataType := FileDataType
switch {
case strings.HasPrefix(mimetype, "video/"):
dataType = VideoDataType
case strings.HasPrefix(mimetype, "audio/"):
dataType = VideoDataType // audio is handled the same way as video
case strings.HasPrefix(mimetype, "image/"):
dataType = ImgDataType
}
// fetch the upload configuration
uploadConfig := UploadConfig{}
if err := d.getUploadConfig(&uploadConfig, dataType, file); err != nil {
return nil, err
}
// choose the upload method based on file size
if file.GetSize() <= 1*utils.MB { // files up to 1MB use the simple upload mode
return d.Upload(&uploadConfig, dstDir, file, up, dataType)
}
// larger files use multipart upload
return d.UploadByMultipart(ctx, &uploadConfig, file.GetSize(), dstDir, file, up, dataType)
}
func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {

View File

@@ -10,7 +10,9 @@ type Addition struct {
// driver.RootPath
driver.RootID
// define other
Cookie string `json:"cookie" type:"text"`
Cookie string `json:"cookie" type:"text"`
UploadThread string `json:"upload_thread" default:"3"`
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
}
var config = driver.Config{
@@ -19,7 +21,7 @@ var config = driver.Config{
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,

View File

@@ -1,6 +1,12 @@
package doubao
import "github.com/alist-org/alist/v3/internal/model"
import (
"encoding/json"
"fmt"
"time"
"github.com/alist-org/alist/v3/internal/model"
)
type BaseResp struct {
Code int `json:"code"`
@@ -10,27 +16,38 @@ type BaseResp struct {
type NodeInfoResp struct {
BaseResp
Data struct {
NodeInfo NodeInfo `json:"node_info"`
Children []NodeInfo `json:"children"`
NextCursor string `json:"next_cursor"`
HasMore bool `json:"has_more"`
NodeInfo File `json:"node_info"`
Children []File `json:"children"`
NextCursor string `json:"next_cursor"`
HasMore bool `json:"has_more"`
} `json:"data"`
}
type NodeInfo struct {
type File struct {
ID string `json:"id"`
Name string `json:"name"`
Key string `json:"key"`
NodeType int `json:"node_type"` // 0: file, 1: folder
Size int `json:"size"`
Size int64 `json:"size"`
Source int `json:"source"`
NameReviewStatus int `json:"name_review_status"`
ContentReviewStatus int `json:"content_review_status"`
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
}
type GetDownloadInfoResp struct {
BaseResp
Data struct {
DownloadInfos []struct {
NodeID string `json:"node_id"`
MainURL string `json:"main_url"`
BackupURL string `json:"backup_url"`
} `json:"download_infos"`
} `json:"data"`
}
type GetFileUrlResp struct {
@@ -44,6 +61,39 @@ type GetFileUrlResp struct {
} `json:"data"`
}
type GetVideoFileUrlResp struct {
BaseResp
Data struct {
MediaType string `json:"media_type"`
MediaInfo []struct {
Meta struct {
Height string `json:"height"`
Width string `json:"width"`
Format string `json:"format"`
Duration float64 `json:"duration"`
CodecType string `json:"codec_type"`
Definition string `json:"definition"`
} `json:"meta"`
MainURL string `json:"main_url"`
BackupURL string `json:"backup_url"`
} `json:"media_info"`
OriginalMediaInfo struct {
Meta struct {
Height string `json:"height"`
Width string `json:"width"`
Format string `json:"format"`
Duration float64 `json:"duration"`
CodecType string `json:"codec_type"`
Definition string `json:"definition"`
} `json:"meta"`
MainURL string `json:"main_url"`
BackupURL string `json:"backup_url"`
} `json:"original_media_info"`
PosterURL string `json:"poster_url"`
PlayableStatus int `json:"playable_status"`
} `json:"data"`
}
type UploadNodeResp struct {
BaseResp
Data struct {
@@ -60,5 +110,306 @@ type UploadNodeResp struct {
type Object struct {
model.Object
Key string
Key string
NodeType int
}
type UserInfoResp struct {
Data UserInfo `json:"data"`
Message string `json:"message"`
}
type AppUserInfo struct {
BuiAuditInfo string `json:"bui_audit_info"`
}
type AuditInfo struct {
}
type Details struct {
}
type BuiAuditInfo struct {
AuditInfo AuditInfo `json:"audit_info"`
IsAuditing bool `json:"is_auditing"`
AuditStatus int `json:"audit_status"`
LastUpdateTime int `json:"last_update_time"`
UnpassReason string `json:"unpass_reason"`
Details Details `json:"details"`
}
type Connects struct {
Platform string `json:"platform"`
ProfileImageURL string `json:"profile_image_url"`
ExpiredTime int `json:"expired_time"`
ExpiresIn int `json:"expires_in"`
PlatformScreenName string `json:"platform_screen_name"`
UserID int64 `json:"user_id"`
PlatformUID string `json:"platform_uid"`
SecPlatformUID string `json:"sec_platform_uid"`
PlatformAppID int `json:"platform_app_id"`
ModifyTime int `json:"modify_time"`
AccessToken string `json:"access_token"`
OpenID string `json:"open_id"`
}
type OperStaffRelationInfo struct {
HasPassword int `json:"has_password"`
Mobile string `json:"mobile"`
SecOperStaffUserID string `json:"sec_oper_staff_user_id"`
RelationMobileCountryCode int `json:"relation_mobile_country_code"`
}
type UserInfo struct {
AppID int `json:"app_id"`
AppUserInfo AppUserInfo `json:"app_user_info"`
AvatarURL string `json:"avatar_url"`
BgImgURL string `json:"bg_img_url"`
BuiAuditInfo BuiAuditInfo `json:"bui_audit_info"`
CanBeFoundByPhone int `json:"can_be_found_by_phone"`
Connects []Connects `json:"connects"`
CountryCode int `json:"country_code"`
Description string `json:"description"`
DeviceID int `json:"device_id"`
Email string `json:"email"`
EmailCollected bool `json:"email_collected"`
Gender int `json:"gender"`
HasPassword int `json:"has_password"`
HmRegion int `json:"hm_region"`
IsBlocked int `json:"is_blocked"`
IsBlocking int `json:"is_blocking"`
IsRecommendAllowed int `json:"is_recommend_allowed"`
IsVisitorAccount bool `json:"is_visitor_account"`
Mobile string `json:"mobile"`
Name string `json:"name"`
NeedCheckBindStatus bool `json:"need_check_bind_status"`
OdinUserType int `json:"odin_user_type"`
OperStaffRelationInfo OperStaffRelationInfo `json:"oper_staff_relation_info"`
PhoneCollected bool `json:"phone_collected"`
RecommendHintMessage string `json:"recommend_hint_message"`
ScreenName string `json:"screen_name"`
SecUserID string `json:"sec_user_id"`
SessionKey string `json:"session_key"`
UseHmRegion bool `json:"use_hm_region"`
UserCreateTime int `json:"user_create_time"`
UserID int64 `json:"user_id"`
UserIDStr string `json:"user_id_str"`
UserVerified bool `json:"user_verified"`
VerifiedContent string `json:"verified_content"`
}
// UploadToken holds the upload token configuration
type UploadToken struct {
Alice map[string]UploadAuthToken
Samantha MediaUploadAuthToken
}
// UploadAuthToken is the upload configuration for the various data types (image/file)
type UploadAuthToken struct {
ServiceID string `json:"service_id"`
UploadPathPrefix string `json:"upload_path_prefix"`
Auth struct {
AccessKeyID string `json:"access_key_id"`
SecretAccessKey string `json:"secret_access_key"`
SessionToken string `json:"session_token"`
ExpiredTime time.Time `json:"expired_time"`
CurrentTime time.Time `json:"current_time"`
} `json:"auth"`
UploadHost string `json:"upload_host"`
}
// MediaUploadAuthToken is the media upload configuration
type MediaUploadAuthToken struct {
StsToken struct {
AccessKeyID string `json:"access_key_id"`
SecretAccessKey string `json:"secret_access_key"`
SessionToken string `json:"session_token"`
ExpiredTime time.Time `json:"expired_time"`
CurrentTime time.Time `json:"current_time"`
} `json:"sts_token"`
UploadInfo struct {
VideoHost string `json:"video_host"`
SpaceName string `json:"space_name"`
} `json:"upload_info"`
}
type UploadAuthTokenResp struct {
BaseResp
Data UploadAuthToken `json:"data"`
}
type MediaUploadAuthTokenResp struct {
BaseResp
Data MediaUploadAuthToken `json:"data"`
}
type ResponseMetadata struct {
RequestID string `json:"RequestId"`
Action string `json:"Action"`
Version string `json:"Version"`
Service string `json:"Service"`
Region string `json:"Region"`
Error struct {
CodeN int `json:"CodeN,omitempty"`
Code string `json:"Code,omitempty"`
Message string `json:"Message,omitempty"`
} `json:"Error,omitempty"`
}
type UploadConfig struct {
UploadAddress UploadAddress `json:"UploadAddress"`
FallbackUploadAddress FallbackUploadAddress `json:"FallbackUploadAddress"`
InnerUploadAddress InnerUploadAddress `json:"InnerUploadAddress"`
RequestID string `json:"RequestId"`
SDKParam interface{} `json:"SDKParam"`
}
type UploadConfigResp struct {
ResponseMetadata `json:"ResponseMetadata"`
Result UploadConfig `json:"Result"`
}
// StoreInfo describes storage information
type StoreInfo struct {
StoreURI string `json:"StoreUri"`
Auth string `json:"Auth"`
UploadID string `json:"UploadID"`
UploadHeader map[string]interface{} `json:"UploadHeader,omitempty"`
StorageHeader map[string]interface{} `json:"StorageHeader,omitempty"`
}
// UploadAddress describes the upload address
type UploadAddress struct {
StoreInfos []StoreInfo `json:"StoreInfos"`
UploadHosts []string `json:"UploadHosts"`
UploadHeader map[string]interface{} `json:"UploadHeader"`
SessionKey string `json:"SessionKey"`
Cloud string `json:"Cloud"`
}
// FallbackUploadAddress is the fallback upload address
type FallbackUploadAddress struct {
StoreInfos []StoreInfo `json:"StoreInfos"`
UploadHosts []string `json:"UploadHosts"`
UploadHeader map[string]interface{} `json:"UploadHeader"`
SessionKey string `json:"SessionKey"`
Cloud string `json:"Cloud"`
}
// UploadNode describes an upload node
type UploadNode struct {
Vid string `json:"Vid"`
Vids []string `json:"Vids"`
StoreInfos []StoreInfo `json:"StoreInfos"`
UploadHost string `json:"UploadHost"`
UploadHeader map[string]interface{} `json:"UploadHeader"`
Type string `json:"Type"`
Protocol string `json:"Protocol"`
SessionKey string `json:"SessionKey"`
NodeConfig struct {
UploadMode string `json:"UploadMode"`
} `json:"NodeConfig"`
Cluster string `json:"Cluster"`
}
// AdvanceOption holds advanced upload options
type AdvanceOption struct {
Parallel int `json:"Parallel"`
Stream int `json:"Stream"`
SliceSize int `json:"SliceSize"`
EncryptionKey string `json:"EncryptionKey"`
}
// InnerUploadAddress is the internal upload address
type InnerUploadAddress struct {
UploadNodes []UploadNode `json:"UploadNodes"`
AdvanceOption AdvanceOption `json:"AdvanceOption"`
}
// UploadPart describes an uploaded part
type UploadPart struct {
UploadId string `json:"uploadid,omitempty"`
PartNumber string `json:"part_number,omitempty"`
Crc32 string `json:"crc32,omitempty"`
Etag string `json:"etag,omitempty"`
Mode string `json:"mode,omitempty"`
}
// UploadResp is the upload response body
type UploadResp struct {
Code int `json:"code"`
ApiVersion string `json:"apiversion"`
Message string `json:"message"`
Data UploadPart `json:"data"`
}
type VideoCommitUpload struct {
Vid string `json:"Vid"`
VideoMeta struct {
URI string `json:"Uri"`
Height int `json:"Height"`
Width int `json:"Width"`
OriginHeight int `json:"OriginHeight"`
OriginWidth int `json:"OriginWidth"`
Duration float64 `json:"Duration"`
Bitrate int `json:"Bitrate"`
Md5 string `json:"Md5"`
Format string `json:"Format"`
Size int `json:"Size"`
FileType string `json:"FileType"`
Codec string `json:"Codec"`
} `json:"VideoMeta"`
WorkflowInput struct {
TemplateID string `json:"TemplateId"`
} `json:"WorkflowInput"`
GetPosterMode string `json:"GetPosterMode"`
}
type VideoCommitUploadResp struct {
ResponseMetadata ResponseMetadata `json:"ResponseMetadata"`
Result struct {
RequestID string `json:"RequestId"`
Results []VideoCommitUpload `json:"Results"`
} `json:"Result"`
}
type CommonResp struct {
Code int `json:"code"`
Msg string `json:"msg,omitempty"`
Message string `json:"message,omitempty"` // message returned on error
Data json.RawMessage `json:"data,omitempty"` // raw payload, parsed later
Error *struct {
Code int `json:"code"`
Message string `json:"message"`
Locale string `json:"locale"`
} `json:"error,omitempty"`
}
// IsSuccess reports whether the response succeeded
func (r *CommonResp) IsSuccess() bool {
return r.Code == 0
}
// GetError returns the error carried by the response
func (r *CommonResp) GetError() error {
if r.IsSuccess() {
return nil
}
// prefer the message field
errMsg := r.Message
if errMsg == "" {
errMsg = r.Msg
}
// if the error object exists and carries a detailed message, use that instead
if r.Error != nil && r.Error.Message != "" {
errMsg = r.Error.Message
}
return fmt.Errorf("[doubao] API error (code: %d): %s", r.Code, errMsg)
}
// UnmarshalData parses the data field into the given type
func (r *CommonResp) UnmarshalData(v interface{}) error {
if !r.IsSuccess() {
return r.GetError()
}
if len(r.Data) == 0 {
return nil
}
return json.Unmarshal(r.Data, v)
}
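// Minimal usage sketch (illustrative, mirroring how the driver's request
// helper consumes this type): decode the raw body into CommonResp first,
// then extract the typed payload on success.
//
// var cr CommonResp
// if err := json.Unmarshal(body, &cr); err != nil {
// 	return err
// }
// if !cr.IsSuccess() {
// 	return cr.GetError()
// }
// var info UserInfo
// return cr.UnmarshalData(&info)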

View File

@@ -1,38 +1,970 @@
package doubao
import (
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
"hash/crc32"
"io"
"math"
"math/rand"
"net/http"
"net/url"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
const (
DirectoryType = 1
FileType = 2
LinkType = 3
ImageType = 4
PagesType = 5
VideoType = 6
AudioType = 7
MeetingMinutesType = 8
)
var FileNodeType = map[int]string{
1: "directory",
2: "file",
3: "link",
4: "image",
5: "pages",
6: "video",
7: "audio",
8: "meeting_minutes",
}
const (
BaseURL = "https://www.doubao.com"
FileDataType = "file"
ImgDataType = "image"
VideoDataType = "video"
DefaultChunkSize = int64(5 * 1024 * 1024) // 5MB
MaxRetryAttempts = 3 // maximum retry attempts
UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
Region = "cn-north-1"
UploadTimeout = 3 * time.Minute
)
// helper methods not defined in the Driver interface
func (d *Doubao) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
url := "https://www.doubao.com" + path
reqUrl := BaseURL + path
req := base.RestyClient.R()
req.SetHeader("Cookie", d.Cookie)
if callback != nil {
callback(req)
}
var r BaseResp
req.SetResult(&r)
res, err := req.Execute(method, url)
var commonResp CommonResp
res, err := req.Execute(method, reqUrl)
log.Debugln(res.String())
if err != nil {
return nil, err
}
// the business status code check takes precedence over the HTTP status code
if r.Code != 0 {
return res.Body(), errors.New(r.Msg)
body := res.Body()
// parse as the common response first
if err = json.Unmarshal(body, &commonResp); err != nil {
return nil, err
}
// check whether the response succeeded
if !commonResp.IsSuccess() {
return body, commonResp.GetError()
}
if resp != nil {
err = utils.Json.Unmarshal(res.Body(), resp)
if err = json.Unmarshal(body, resp); err != nil {
return body, err
}
}
return body, nil
}
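// getFiles walks the cursor-based pagination; the server signals the last
// page with next_cursor == "-1".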
func (d *Doubao) getFiles(dirId, cursor string) (resp []File, err error) {
var r NodeInfoResp
var body = base.Json{
"node_id": dirId,
}
// if a cursor is present, pass the cursor and page size
if cursor != "" {
body["cursor"] = cursor
body["size"] = 50
} else {
body["need_full_path"] = false
}
_, err = d.request("/samantha/aispace/node_info", http.MethodPost, func(req *resty.Request) {
req.SetBody(body)
}, &r)
if err != nil {
return nil, err
}
if r.Data.Children != nil {
resp = r.Data.Children
}
if r.Data.NextCursor != "-1" {
// recursively fetch the next page
nextFiles, err := d.getFiles(dirId, r.Data.NextCursor)
if err != nil {
return nil, err
}
resp = append(r.Data.Children, nextFiles...)
}
return resp, err
}
func (d *Doubao) getUserInfo() (UserInfo, error) {
var r UserInfoResp
_, err := d.request("/passport/account/info/v2/", http.MethodGet, nil, &r)
if err != nil {
return UserInfo{}, err
}
return r.Data, err
}
// signRequest signs the request with AWS SigV4
func (d *Doubao) signRequest(req *resty.Request, method, tokenType, uploadUrl string) error {
parsedUrl, err := url.Parse(uploadUrl)
if err != nil {
return fmt.Errorf("invalid URL format: %w", err)
}
var accessKeyId, secretAccessKey, sessionToken string
var serviceName string
if tokenType == VideoDataType {
accessKeyId = d.UploadToken.Samantha.StsToken.AccessKeyID
secretAccessKey = d.UploadToken.Samantha.StsToken.SecretAccessKey
sessionToken = d.UploadToken.Samantha.StsToken.SessionToken
serviceName = "vod"
} else {
accessKeyId = d.UploadToken.Alice[tokenType].Auth.AccessKeyID
secretAccessKey = d.UploadToken.Alice[tokenType].Auth.SecretAccessKey
sessionToken = d.UploadToken.Alice[tokenType].Auth.SessionToken
serviceName = "imagex"
}
// current time in ISO8601 basic format
now := time.Now().UTC()
amzDate := now.Format("20060102T150405Z")
dateStamp := now.Format("20060102")
req.SetHeader("X-Amz-Date", amzDate)
if sessionToken != "" {
req.SetHeader("X-Amz-Security-Token", sessionToken)
}
// compute the SHA256 hash of the request body
var bodyHash string
if req.Body != nil {
bodyBytes, ok := req.Body.([]byte)
if !ok {
return fmt.Errorf("request body must be []byte")
}
bodyHash = hashSHA256(string(bodyBytes))
req.SetHeader("X-Amz-Content-Sha256", bodyHash)
} else {
bodyHash = hashSHA256("")
}
// build the canonical request
canonicalURI := parsedUrl.Path
if canonicalURI == "" {
canonicalURI = "/"
}
// query parameters sorted alphabetically
canonicalQueryString := getCanonicalQueryString(req.QueryParam)
// canonical headers
canonicalHeaders, signedHeaders := getCanonicalHeadersFromMap(req.Header)
canonicalRequest := method + "\n" +
canonicalURI + "\n" +
canonicalQueryString + "\n" +
canonicalHeaders + "\n" +
signedHeaders + "\n" +
bodyHash
algorithm := "AWS4-HMAC-SHA256"
credentialScope := fmt.Sprintf("%s/%s/%s/aws4_request", dateStamp, Region, serviceName)
stringToSign := algorithm + "\n" +
amzDate + "\n" +
credentialScope + "\n" +
hashSHA256(canonicalRequest)
// derive the signing key
signingKey := getSigningKey(secretAccessKey, dateStamp, Region, serviceName)
// compute the signature
signature := hmacSHA256Hex(signingKey, stringToSign)
// build the Authorization header
authorizationHeader := fmt.Sprintf(
"%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
algorithm,
accessKeyId,
credentialScope,
signedHeaders,
signature,
)
req.SetHeader("Authorization", authorizationHeader)
return nil
}
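// The chain above is standard AWS SigV4 (see getSigningKey and
// hmacSHA256Hex further down):
//
// kDate     = HMAC("AWS4"+secret, yyyymmdd)
// kRegion   = HMAC(kDate, region)
// kService  = HMAC(kRegion, service)
// kSigning  = HMAC(kService, "aws4_request")
// signature = hex(HMAC(kSigning, stringToSign))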
func (d *Doubao) requestApi(url, method, tokenType string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"user-agent": UserAgent,
})
if method == http.MethodPost {
req.SetHeader("Content-Type", "text/plain;charset=UTF-8")
}
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
// sign with the custom AWS SigV4 implementation
err := d.signRequest(req, method, tokenType, url)
if err != nil {
return nil, err
}
res, err := req.Execute(method, url)
if err != nil {
return nil, err
}
return res.Body(), nil
}
func (d *Doubao) initUploadToken() (*UploadToken, error) {
uploadToken := &UploadToken{
Alice: make(map[string]UploadAuthToken),
Samantha: MediaUploadAuthToken{},
}
fileAuthToken, err := d.getUploadAuthToken(FileDataType)
if err != nil {
return nil, err
}
imgAuthToken, err := d.getUploadAuthToken(ImgDataType)
if err != nil {
return nil, err
}
mediaAuthToken, err := d.getSamantaUploadAuthToken()
if err != nil {
return nil, err
}
uploadToken.Alice[FileDataType] = fileAuthToken
uploadToken.Alice[ImgDataType] = imgAuthToken
uploadToken.Samantha = mediaAuthToken
return uploadToken, nil
}
func (d *Doubao) getUploadAuthToken(dataType string) (ut UploadAuthToken, err error) {
var r UploadAuthTokenResp
_, err = d.request("/alice/upload/auth_token", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"scene": "bot_chat",
"data_type": dataType,
})
}, &r)
return r.Data, err
}
func (d *Doubao) getSamantaUploadAuthToken() (mt MediaUploadAuthToken, err error) {
var r MediaUploadAuthTokenResp
_, err = d.request("/samantha/media/get_upload_token", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{})
}, &r)
return r.Data, err
}
// getUploadConfig fetches the upload configuration
func (d *Doubao) getUploadConfig(upConfig *UploadConfig, dataType string, file model.FileStreamer) error {
tokenType := dataType
// builds the upload URL and query parameters
configureParams := func() (string, map[string]string) {
var uploadUrl string
var params map[string]string
// set upload parameters according to the data type
switch dataType {
case VideoDataType:
// audio/video types use the uploadToken.Samantha configuration
uploadUrl = d.UploadToken.Samantha.UploadInfo.VideoHost
params = map[string]string{
"Action": "ApplyUploadInner",
"Version": "2020-11-19",
"SpaceName": d.UploadToken.Samantha.UploadInfo.SpaceName,
"FileType": "video",
"IsInner": "1",
"NeedFallback": "true",
"FileSize": strconv.FormatInt(file.GetSize(), 10),
"s": randomString(),
}
case ImgDataType, FileDataType:
// image and other file types use the matching uploadToken.Alice configuration
uploadUrl = "https://" + d.UploadToken.Alice[dataType].UploadHost
params = map[string]string{
"Action": "ApplyImageUpload",
"Version": "2018-08-01",
"ServiceId": d.UploadToken.Alice[dataType].ServiceID,
"NeedFallback": "true",
"FileSize": strconv.FormatInt(file.GetSize(), 10),
"FileExtension": filepath.Ext(file.GetName()),
"s": randomString(),
}
}
return uploadUrl, params
}
// get the initial parameters
uploadUrl, params := configureParams()
tokenRefreshed := false
var configResp UploadConfigResp
err := d._retryOperation("get upload_config", func() error {
configResp = UploadConfigResp{}
_, err := d.requestApi(uploadUrl, http.MethodGet, tokenType, func(req *resty.Request) {
req.SetQueryParams(params)
}, &configResp)
if err != nil {
return err
}
if configResp.ResponseMetadata.Error.Code == "" {
*upConfig = configResp.Result
return nil
}
// 100028: credential expired
if configResp.ResponseMetadata.Error.CodeN == 100028 && !tokenRefreshed {
log.Debugln("[doubao] Upload token expired, re-fetching...")
newToken, err := d.initUploadToken()
if err != nil {
return fmt.Errorf("failed to refresh token: %w", err)
}
d.UploadToken = newToken
tokenRefreshed = true
uploadUrl, params = configureParams()
return retry.Error{errors.New("token refreshed, retry needed")}
}
return fmt.Errorf("get upload_config failed: %s", configResp.ResponseMetadata.Error.Message)
})
return err
}
// uploadNode registers the uploaded file's metadata
func (d *Doubao) uploadNode(uploadConfig *UploadConfig, dir model.Obj, file model.FileStreamer, dataType string) (UploadNodeResp, error) {
reqUuid := uuid.New().String()
var key string
var nodeType int
mimetype := file.GetMimetype()
switch dataType {
case VideoDataType:
key = uploadConfig.InnerUploadAddress.UploadNodes[0].Vid
if strings.HasPrefix(mimetype, "audio/") {
nodeType = AudioType // audio
} else {
nodeType = VideoType // video
}
case ImgDataType:
key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI
nodeType = ImageType // image
default: // FileDataType
key = uploadConfig.InnerUploadAddress.UploadNodes[0].StoreInfos[0].StoreURI
nodeType = FileType // file
}
var r UploadNodeResp
_, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"node_list": []base.Json{
{
"local_id": reqUuid,
"parent_id": dir.GetID(),
"name": file.GetName(),
"key": key,
"node_content": base.Json{},
"node_type": nodeType,
"size": file.GetSize(),
},
},
"request_id": reqUuid,
})
}, &r)
return r, err
}
// Upload implements the simple (single-request) upload
func (d *Doubao) Upload(config *UploadConfig, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
data, err := io.ReadAll(file)
if err != nil {
return nil, err
}
// compute the CRC32 checksum
crc32Hash := crc32.NewIEEE()
crc32Hash.Write(data)
crc32Value := hex.EncodeToString(crc32Hash.Sum(nil))
// build the request URL
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
uploadResp := UploadResp{}
if _, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetBody(data)
}, &uploadResp); err != nil {
return nil, err
}
if uploadResp.Code != 2000 {
return nil, fmt.Errorf("upload failed: %s", uploadResp.Message)
}
uploadNodeResp, err := d.uploadNode(config, dstDir, file, dataType)
if err != nil {
return nil, err
}
return &model.Object{
ID: uploadNodeResp.Data.NodeList[0].ID,
Name: uploadNodeResp.Data.NodeList[0].Name,
Size: file.GetSize(),
IsFolder: false,
}, nil
}
// UploadByMultipart uploads the file in parts
func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fileSize int64, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, dataType string) (model.Obj, error) {
// build the request URL
uploadNode := config.InnerUploadAddress.UploadNodes[0]
storeInfo := uploadNode.StoreInfos[0]
uploadUrl := fmt.Sprintf("https://%s/upload/v1/%s", uploadNode.UploadHost, storeInfo.StoreURI)
// initialize the multipart upload
var uploadID string
err := d._retryOperation("Initialize multipart upload", func() error {
var err error
uploadID, err = d.initMultipartUpload(config, uploadUrl, storeInfo)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
}
// prepare the chunking parameters
chunkSize := DefaultChunkSize
if config.InnerUploadAddress.AdvanceOption.SliceSize > 0 {
chunkSize = int64(config.InnerUploadAddress.AdvanceOption.SliceSize)
}
totalParts := (fileSize + chunkSize - 1) / chunkSize
// create the slice of part records
parts := make([]UploadPart, totalParts)
// cache the file to a temp file
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, fmt.Errorf("failed to cache file: %w", err)
}
defer tempFile.Close()
up(10.0) // report initial progress
// set up parallel uploads
threadG, uploadCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
var partsMutex sync.Mutex
// upload all parts in parallel
for partIndex := int64(0); partIndex < totalParts; partIndex++ {
if utils.IsCanceled(uploadCtx) {
break
}
partIndex := partIndex
partNumber := partIndex + 1 // part numbers start at 1
threadG.Go(func(ctx context.Context) error {
// compute this part's size and offset
offset := partIndex * chunkSize
size := chunkSize
if partIndex == totalParts-1 {
size = fileSize - offset
}
limitedReader := driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, size))
// read the part into memory
data, err := io.ReadAll(limitedReader)
if err != nil {
return fmt.Errorf("failed to read part %d: %w", partNumber, err)
}
// compute the CRC32 checksum
crc32Value := calculateCRC32(data)
// upload the part via _retryOperation
var uploadPart UploadPart
if err = d._retryOperation(fmt.Sprintf("Upload part %d", partNumber), func() error {
var err error
uploadPart, err = d.uploadPart(config, uploadUrl, uploadID, partNumber, data, crc32Value)
return err
}); err != nil {
return fmt.Errorf("part %d upload failed: %w", partNumber, err)
}
// record the successfully uploaded part
partsMutex.Lock()
parts[partIndex] = UploadPart{
PartNumber: strconv.FormatInt(partNumber, 10),
Etag: uploadPart.Etag,
Crc32: crc32Value,
}
partsMutex.Unlock()
// update progress
progress := 10.0 + 90.0*float64(threadG.Success()+1)/float64(totalParts)
up(math.Min(progress, 95.0))
return nil
})
}
if err = threadG.Wait(); err != nil {
return nil, err
}
// finish the upload by merging the parts
if err = d._retryOperation("Complete multipart upload", func() error {
return d.completeMultipartUpload(config, uploadUrl, uploadID, parts)
}); err != nil {
return nil, fmt.Errorf("failed to complete multipart upload: %w", err)
}
// commit the upload
if err = d._retryOperation("Commit upload", func() error {
return d.commitMultipartUpload(config)
}); err != nil {
return nil, fmt.Errorf("failed to commit upload: %w", err)
}
up(98.0) // bump progress to 98%
// register the node metadata
var uploadNodeResp UploadNodeResp
if err = d._retryOperation("Upload node", func() error {
var err error
uploadNodeResp, err = d.uploadNode(config, dstDir, file, dataType)
return err
}); err != nil {
return nil, fmt.Errorf("failed to upload node: %w", err)
}
up(100.0) // upload complete
return &model.Object{
ID: uploadNodeResp.Data.NodeList[0].ID,
Name: uploadNodeResp.Data.NodeList[0].Name,
Size: file.GetSize(),
IsFolder: false,
}, nil
}
// uploadRequest is the shared helper for upload requests
func (d *Doubao) uploadRequest(uploadUrl string, method string, storeInfo StoreInfo, callback base.ReqCallback, resp interface{}) ([]byte, error) {
client := resty.New()
client.SetTransport(&http.Transport{
DisableKeepAlives: true, // disable connection reuse
ForceAttemptHTTP2: false, // force HTTP/1.1
})
client.SetTimeout(UploadTimeout)
req := client.R()
req.SetHeaders(map[string]string{
"Host": strings.Split(uploadUrl, "/")[2],
"Referer": BaseURL + "/",
"Origin": BaseURL,
"User-Agent": UserAgent,
"X-Storage-U": d.UserId,
"Authorization": storeInfo.Auth,
})
if method == http.MethodPost {
req.SetHeader("Content-Type", "text/plain;charset=UTF-8")
}
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
res, err := req.Execute(method, uploadUrl)
if err != nil && err != io.EOF {
return nil, fmt.Errorf("upload request failed: %w", err)
}
return res.Body(), nil
}
// initMultipartUpload starts a multipart upload session
func (d *Doubao) initMultipartUpload(config *UploadConfig, uploadUrl string, storeInfo StoreInfo) (uploadId string, err error) {
uploadResp := UploadResp{}
_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"uploadmode": "part",
"phase": "init",
})
}, &uploadResp)
if err != nil {
return uploadId, err
}
if uploadResp.Code != 2000 {
return uploadId, fmt.Errorf("init upload failed: %s", uploadResp.Message)
}
return uploadResp.Data.UploadId, nil
}
// uploadPart uploads a single part
func (d *Doubao) uploadPart(config *UploadConfig, uploadUrl, uploadID string, partNumber int64, data []byte, crc32Value string) (resp UploadPart, err error) {
uploadResp := UploadResp{}
storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Type": "application/octet-stream",
"Content-Crc32": crc32Value,
"Content-Length": fmt.Sprintf("%d", len(data)),
"Content-Disposition": fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI)),
})
req.SetQueryParams(map[string]string{
"uploadid": uploadID,
"part_number": strconv.FormatInt(partNumber, 10),
"phase": "transfer",
})
req.SetBody(data)
req.SetContentLength(true)
}, &uploadResp)
if err != nil {
return resp, err
}
if uploadResp.Code != 2000 {
return resp, fmt.Errorf("upload part failed: %s", uploadResp.Message)
} else if uploadResp.Data.Crc32 != crc32Value {
return resp, fmt.Errorf("upload part failed: crc32 mismatch, expected %s, got %s", crc32Value, uploadResp.Data.Crc32)
}
return uploadResp.Data, nil
}
// completeMultipartUpload merges the uploaded parts
func (d *Doubao) completeMultipartUpload(config *UploadConfig, uploadUrl, uploadID string, parts []UploadPart) error {
uploadResp := UploadResp{}
storeInfo := config.InnerUploadAddress.UploadNodes[0].StoreInfos[0]
body := _convertUploadParts(parts)
err := utils.Retry(MaxRetryAttempts, time.Second, func() (err error) {
_, err = d.uploadRequest(uploadUrl, http.MethodPost, storeInfo, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"uploadid": uploadID,
"phase": "finish",
"uploadmode": "part",
})
req.SetBody(body)
}, &uploadResp)
if err != nil {
return err
}
// response codes: 2000 success, 4024 parts still merging
if uploadResp.Code != 2000 && uploadResp.Code != 4024 {
return fmt.Errorf("finish upload failed: %s", uploadResp.Message)
}
return err
})
if err != nil {
return fmt.Errorf("failed to complete multipart upload: %w", err)
}
return nil
}
func (d *Doubao) commitMultipartUpload(uploadConfig *UploadConfig) error {
uploadUrl := d.UploadToken.Samantha.UploadInfo.VideoHost
params := map[string]string{
"Action": "CommitUploadInner",
"Version": "2020-11-19",
"SpaceName": d.UploadToken.Samantha.UploadInfo.SpaceName,
}
tokenType := VideoDataType
videoCommitUploadResp := VideoCommitUploadResp{}
jsonBytes, err := json.Marshal(base.Json{
"SessionKey": uploadConfig.InnerUploadAddress.UploadNodes[0].SessionKey,
"Functions": []base.Json{},
})
if err != nil {
return fmt.Errorf("failed to marshal request data: %w", err)
}
_, err = d.requestApi(uploadUrl, http.MethodPost, tokenType, func(req *resty.Request) {
req.SetHeader("Content-Type", "application/json")
req.SetQueryParams(params)
req.SetBody(jsonBytes)
}, &videoCommitUploadResp)
if err != nil {
return err
}
return nil
}
// calculateCRC32 returns the hex-encoded CRC32 of data
func calculateCRC32(data []byte) string {
hash := crc32.NewIEEE()
hash.Write(data)
return hex.EncodeToString(hash.Sum(nil))
}
// _retryOperation retries an operation with backoff and jitter
func (d *Doubao) _retryOperation(operation string, fn func() error) error {
return retry.Do(
fn,
retry.Attempts(MaxRetryAttempts),
retry.Delay(500*time.Millisecond),
retry.DelayType(retry.BackOffDelay),
retry.MaxJitter(200*time.Millisecond),
retry.OnRetry(func(n uint, err error) {
log.Debugf("[doubao] %s retry #%d: %v", operation, n+1, err)
}),
)
}
// _convertUploadParts serializes the part records into a string
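// For example (illustrative values): parts with PartNumber/Crc32 pairs
// ("1","0a1b") and ("2","2c3d") serialize to "1:0a1b,2:2c3d".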
func _convertUploadParts(parts []UploadPart) string {
if len(parts) == 0 {
return ""
}
var result strings.Builder
for i, part := range parts {
if i > 0 {
result.WriteString(",")
}
result.WriteString(fmt.Sprintf("%s:%s", part.PartNumber, part.Crc32))
}
return result.String()
}
// getCanonicalQueryString builds the canonical query string
func getCanonicalQueryString(query url.Values) string {
if len(query) == 0 {
return ""
}
keys := make([]string, 0, len(query))
for k := range query {
keys = append(keys, k)
}
sort.Strings(keys)
parts := make([]string, 0, len(keys))
for _, k := range keys {
values := query[k]
for _, v := range values {
parts = append(parts, urlEncode(k)+"="+urlEncode(v))
}
}
return strings.Join(parts, "&")
}
func urlEncode(s string) string {
s = url.QueryEscape(s)
s = strings.ReplaceAll(s, "+", "%20")
return s
}
// getCanonicalHeadersFromMap builds the canonical header block and the signed-header list
func getCanonicalHeadersFromMap(headers map[string][]string) (string, string) {
// headers excluded from signing
unsignableHeaders := map[string]bool{
"authorization": true,
"content-type": true,
"content-length": true,
"user-agent": true,
"presigned-expires": true,
"expect": true,
"x-amzn-trace-id": true,
}
headerValues := make(map[string]string)
var signedHeadersList []string
for k, v := range headers {
if len(v) == 0 {
continue
}
lowerKey := strings.ToLower(k)
// check whether the header is signable
if strings.HasPrefix(lowerKey, "x-amz-") || !unsignableHeaders[lowerKey] {
value := strings.TrimSpace(v[0])
value = strings.Join(strings.Fields(value), " ")
headerValues[lowerKey] = value
signedHeadersList = append(signedHeadersList, lowerKey)
}
}
sort.Strings(signedHeadersList)
var canonicalHeadersStr strings.Builder
for _, key := range signedHeadersList {
canonicalHeadersStr.WriteString(key)
canonicalHeadersStr.WriteString(":")
canonicalHeadersStr.WriteString(headerValues[key])
canonicalHeadersStr.WriteString("\n")
}
signedHeaders := strings.Join(signedHeadersList, ";")
return canonicalHeadersStr.String(), signedHeaders
}
// hmacSHA256 computes an HMAC-SHA256 digest
func hmacSHA256(key []byte, data string) []byte {
h := hmac.New(sha256.New, key)
h.Write([]byte(data))
return h.Sum(nil)
}
// hmacSHA256Hex computes an HMAC-SHA256 digest and returns it hex-encoded
func hmacSHA256Hex(key []byte, data string) string {
return hex.EncodeToString(hmacSHA256(key, data))
}
// hashSHA256 hashes a string with SHA-256 and returns the hex digest
func hashSHA256(data string) string {
h := sha256.New()
h.Write([]byte(data))
return hex.EncodeToString(h.Sum(nil))
}
// getSigningKey derives the AWS SigV4 signing key
func getSigningKey(secretKey, dateStamp, region, service string) []byte {
kDate := hmacSHA256([]byte("AWS4"+secretKey), dateStamp)
kRegion := hmacSHA256(kDate, region)
kService := hmacSHA256(kRegion, service)
kSigning := hmacSHA256(kService, "aws4_request")
return kSigning
}
// generateContentDisposition builds an RFC 5987-compliant Content-Disposition header
func generateContentDisposition(filename string) string {
// percent-encode the plain filename parameter
encodedName := urlEncode(filename)
// encode the filename* parameter per RFC 5987
encodedNameRFC5987 := encodeRFC5987(filename)
return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s",
encodedName, encodedNameRFC5987)
}
// encodeRFC5987 encodes a string per RFC 5987, for non-ASCII characters in HTTP header parameters
func encodeRFC5987(s string) string {
var buf strings.Builder
for _, r := range []byte(s) {
// per RFC 5987, only letters, digits, and a few symbols may stay unencoded
if (r >= 'a' && r <= 'z') ||
(r >= 'A' && r <= 'Z') ||
(r >= '0' && r <= '9') ||
r == '-' || r == '.' || r == '_' || r == '~' {
buf.WriteByte(r)
} else {
// everything else must be percent-encoded
fmt.Fprintf(&buf, "%%%02X", r)
}
}
return buf.String()
}
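// For example (illustrative): generateContentDisposition("资料.txt") yields
//
// attachment; filename="%E8%B5%84%E6%96%99.txt"; filename*=utf-8''%E8%B5%84%E6%96%99.txt
//
// so both plain and RFC 5987-aware clients can recover the filename.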
func randomString() string {
const charset = "0123456789abcdefghijklmnopqrstuvwxyz"
const length = 11 // 11-character random string
var sb strings.Builder
sb.Grow(length)
for i := 0; i < length; i++ {
sb.WriteByte(charset[rand.Intn(len(charset))])
}
return sb.String()
}

View File

@@ -0,0 +1,177 @@
package doubao_share
import (
"context"
"errors"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/go-resty/resty/v2"
"net/http"
)
type DoubaoShare struct {
model.Storage
Addition
RootFiles []RootFileList
}
func (d *DoubaoShare) Config() driver.Config {
return config
}
func (d *DoubaoShare) GetAddition() driver.Additional {
return &d.Addition
}
func (d *DoubaoShare) Init(ctx context.Context) error {
// initialize the virtual share list
if err := d.initShareList(); err != nil {
return err
}
return nil
}
func (d *DoubaoShare) Drop(ctx context.Context) error {
return nil
}
func (d *DoubaoShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
// check whether this is the root directory
if dir.GetID() == "" && dir.GetPath() == "/" {
return d.listRootDirectory(ctx)
}
// not the root directory: handle the different cases
if fo, ok := dir.(*FileObject); ok {
if fo.ShareID == "" {
// virtual directory: list its children
return d.listVirtualDirectoryContent(dir)
} else {
// a directory with a share ID: fetch the files under that share
shareId, relativePath, err := d._findShareAndPath(dir)
if err != nil {
return nil, err
}
return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath)
}
}
// fall back to the generic lookup
shareId, relativePath, err := d._findShareAndPath(dir)
if err != nil {
return nil, err
}
// fetch the files under the given path
return d.getFilesInPath(ctx, shareId, dir.GetID(), relativePath)
}
func (d *DoubaoShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var downloadUrl string
if u, ok := file.(*FileObject); ok {
switch u.NodeType {
case VideoType, AudioType:
var r GetVideoFileUrlResp
_, err := d.request("/samantha/media/get_play_info", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"key": u.Key,
"share_id": u.ShareID,
"node_id": file.GetID(),
})
}, &r)
if err != nil {
return nil, err
}
downloadUrl = r.Data.OriginalMediaInfo.MainURL
default:
var r GetFileUrlResp
_, err := d.request("/alice/message/get_file_url", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{u.Key},
"type": FileNodeType[u.NodeType],
})
}, &r)
if err != nil {
return nil, err
}
downloadUrl = r.Data.FileUrls[0].MainURL
}
// generate a standards-compliant Content-Disposition header
contentDisposition := generateContentDisposition(u.Name)
return &model.Link{
URL: downloadUrl,
Header: http.Header{
"User-Agent": []string{UserAgent},
"Content-Disposition": []string{contentDisposition},
},
}, nil
}
return nil, errors.New("can't convert obj to URL")
}
func (d *DoubaoShare) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
// TODO create folder, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// TODO move obj, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
// TODO rename obj, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// TODO copy obj, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) Remove(ctx context.Context, obj model.Obj) error {
// TODO remove obj, optional
return errs.NotImplement
}
func (d *DoubaoShare) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// TODO upload file, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *DoubaoShare) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *DoubaoShare) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*DoubaoShare)(nil)

View File

@@ -0,0 +1,32 @@
package doubao_share
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootPath
Cookie string `json:"cookie" type:"text"`
ShareIds string `json:"share_ids" type:"text" required:"true"`
}
var config = driver.Config{
Name: "DoubaoShare",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &DoubaoShare{}
})
}

View File

@@ -0,0 +1,207 @@
package doubao_share
import (
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
)
type BaseResp struct {
Code int `json:"code"`
Msg string `json:"msg"`
}
type NodeInfoData struct {
Share ShareInfo `json:"share,omitempty"`
Creator CreatorInfo `json:"creator,omitempty"`
NodeList []File `json:"node_list,omitempty"`
NodeInfo File `json:"node_info,omitempty"`
Children []File `json:"children,omitempty"`
Path FilePath `json:"path,omitempty"`
NextCursor string `json:"next_cursor,omitempty"`
HasMore bool `json:"has_more,omitempty"`
}
type NodeInfoResp struct {
BaseResp
NodeInfoData `json:"data"`
}
type RootFileList struct {
ShareID string
VirtualPath string
NodeInfo NodeInfoData
Child *[]RootFileList
}
type File struct {
ID string `json:"id"`
Name string `json:"name"`
Key string `json:"key"`
NodeType int `json:"node_type"`
Size int64 `json:"size"`
Source int `json:"source"`
NameReviewStatus int `json:"name_review_status"`
ContentReviewStatus int `json:"content_review_status"`
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
}
type FileObject struct {
model.Object
ShareID string
Key string
NodeID string
NodeType int
}
type ShareInfo struct {
ShareID string `json:"share_id"`
FirstNode struct {
ID string `json:"id"`
Name string `json:"name"`
Key string `json:"key"`
NodeType int `json:"node_type"`
Size int `json:"size"`
Source int `json:"source"`
Content struct {
LinkFileType string `json:"link_file_type"`
ImageWidth int `json:"image_width"`
ImageHeight int `json:"image_height"`
AiSkillStatus int `json:"ai_skill_status"`
} `json:"content"`
NameReviewStatus int `json:"name_review_status"`
ContentReviewStatus int `json:"content_review_status"`
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
} `json:"first_node"`
NodeCount int `json:"node_count"`
CreateTime int `json:"create_time"`
Channel string `json:"channel"`
InfluencerType int `json:"influencer_type"`
}
type CreatorInfo struct {
EntityID string `json:"entity_id"`
UserName string `json:"user_name"`
NickName string `json:"nick_name"`
Avatar struct {
OriginURL string `json:"origin_url"`
TinyURL string `json:"tiny_url"`
URI string `json:"uri"`
} `json:"avatar"`
}
type FilePath []struct {
ID string `json:"id"`
Name string `json:"name"`
Key string `json:"key"`
NodeType int `json:"node_type"`
Size int `json:"size"`
Source int `json:"source"`
NameReviewStatus int `json:"name_review_status"`
ContentReviewStatus int `json:"content_review_status"`
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
}
type GetFileUrlResp struct {
BaseResp
Data struct {
FileUrls []struct {
URI string `json:"uri"`
MainURL string `json:"main_url"`
BackURL string `json:"back_url"`
} `json:"file_urls"`
} `json:"data"`
}
type GetVideoFileUrlResp struct {
BaseResp
Data struct {
MediaType string `json:"media_type"`
MediaInfo []struct {
Meta struct {
Height string `json:"height"`
Width string `json:"width"`
Format string `json:"format"`
Duration float64 `json:"duration"`
CodecType string `json:"codec_type"`
Definition string `json:"definition"`
} `json:"meta"`
MainURL string `json:"main_url"`
BackupURL string `json:"backup_url"`
} `json:"media_info"`
OriginalMediaInfo struct {
Meta struct {
Height string `json:"height"`
Width string `json:"width"`
Format string `json:"format"`
Duration float64 `json:"duration"`
CodecType string `json:"codec_type"`
Definition string `json:"definition"`
} `json:"meta"`
MainURL string `json:"main_url"`
BackupURL string `json:"backup_url"`
} `json:"original_media_info"`
PosterURL string `json:"poster_url"`
PlayableStatus int `json:"playable_status"`
} `json:"data"`
}
type CommonResp struct {
Code int `json:"code"`
Msg string `json:"msg,omitempty"`
Message string `json:"message,omitempty"` // message used in error cases
Data json.RawMessage `json:"data,omitempty"` // raw payload, parsed later
Error *struct {
Code int `json:"code"`
Message string `json:"message"`
Locale string `json:"locale"`
} `json:"error,omitempty"`
}
// IsSuccess reports whether the response indicates success
func (r *CommonResp) IsSuccess() bool {
return r.Code == 0
}
// GetError returns the error information carried by the response
func (r *CommonResp) GetError() error {
if r.IsSuccess() {
return nil
}
// prefer the message field
errMsg := r.Message
if errMsg == "" {
errMsg = r.Msg
}
// if the error object exists and carries a detailed message, use that instead
if r.Error != nil && r.Error.Message != "" {
errMsg = r.Error.Message
}
return fmt.Errorf("[doubao] API error (code: %d): %s", r.Code, errMsg)
}
// UnmarshalData unmarshals the data field into the given type
func (r *CommonResp) UnmarshalData(v interface{}) error {
if !r.IsSuccess() {
return r.GetError()
}
if len(r.Data) == 0 {
return nil
}
return json.Unmarshal(r.Data, v)
}
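A minimal usage sketch (parseNodeInfo is a hypothetical helper, not part of this commit; CommonResp and NodeInfoData are the types defined above): decode the envelope first, then let UnmarshalData either surface the API error or fill the typed payload.

func parseNodeInfo(body []byte) (*NodeInfoData, error) {
    var common CommonResp
    if err := json.Unmarshal(body, &common); err != nil {
        return nil, err
    }
    var data NodeInfoData
    // UnmarshalData returns GetError() for non-zero codes, nil for an empty
    // payload, and otherwise unmarshals the raw data field into the target.
    if err := common.UnmarshalData(&data); err != nil {
        return nil, err
    }
    return &data, nil
}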

View File

@@ -0,0 +1,744 @@
package doubao_share
import (
"context"
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/model"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"net/http"
"net/url"
"path"
"regexp"
"strings"
"time"
)
const (
DirectoryType = 1
FileType = 2
LinkType = 3
ImageType = 4
PagesType = 5
VideoType = 6
AudioType = 7
MeetingMinutesType = 8
)
var FileNodeType = map[int]string{
1: "directory",
2: "file",
3: "link",
4: "image",
5: "pages",
6: "video",
7: "audio",
8: "meeting_minutes",
}
const (
BaseURL = "https://www.doubao.com"
FileDataType = "file"
ImgDataType = "image"
VideoDataType = "video"
UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
)
func (d *DoubaoShare) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
reqUrl := BaseURL + path
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Cookie": d.Cookie,
"User-Agent": UserAgent,
})
req.SetQueryParams(map[string]string{
"version_code": "20800",
"device_platform": "web",
})
if callback != nil {
callback(req)
}
var commonResp CommonResp
res, err := req.Execute(method, reqUrl)
log.Debugln(res.String())
if err != nil {
return nil, err
}
body := res.Body()
// parse into the common envelope first
if err = json.Unmarshal(body, &commonResp); err != nil {
return nil, err
}
// check whether the response succeeded
if !commonResp.IsSuccess() {
return body, commonResp.GetError()
}
if resp != nil {
if err = json.Unmarshal(body, resp); err != nil {
return body, err
}
}
return body, nil
}
func (d *DoubaoShare) getFiles(dirId, nodeId, cursor string) (resp []File, err error) {
var r NodeInfoResp
var body = base.Json{
"share_id": dirId,
"node_id": nodeId,
}
// if a cursor is given, set cursor and page size
if cursor != "" {
body["cursor"] = cursor
body["size"] = 50
} else {
body["need_full_path"] = false
}
_, err = d.request("/samantha/aispace/share/node_info", http.MethodPost, func(req *resty.Request) {
req.SetBody(body)
}, &r)
if err != nil {
return nil, err
}
if r.NodeInfoData.Children != nil {
resp = r.NodeInfoData.Children
}
if r.NodeInfoData.NextCursor != "-1" {
// recursively fetch the next page
nextFiles, err := d.getFiles(dirId, nodeId, r.NodeInfoData.NextCursor)
if err != nil {
return nil, err
}
resp = append(resp, nextFiles...)
}
return resp, err
}
func (d *DoubaoShare) getShareOverview(shareId, cursor string) (resp []File, err error) {
return d.getShareOverviewWithHistory(shareId, cursor, make(map[string]bool))
}
func (d *DoubaoShare) getShareOverviewWithHistory(shareId, cursor string, cursorHistory map[string]bool) (resp []File, err error) {
var r NodeInfoResp
var body = base.Json{
"share_id": shareId,
}
// if a cursor is given, set cursor and page size
if cursor != "" {
body["cursor"] = cursor
body["size"] = 50
} else {
body["need_full_path"] = false
}
_, err = d.request("/samantha/aispace/share/overview", http.MethodPost, func(req *resty.Request) {
req.SetBody(body)
}, &r)
if err != nil {
return nil, err
}
if r.NodeInfoData.NodeList != nil {
resp = r.NodeInfoData.NodeList
}
if r.NodeInfoData.NextCursor != "-1" {
// stop if this cursor has appeared before, to prevent an infinite loop
if cursorHistory[r.NodeInfoData.NextCursor] {
return resp, nil
}
// record the current cursor
cursorHistory[r.NodeInfoData.NextCursor] = true
// recursively fetch the next page
nextFiles, err := d.getShareOverviewWithHistory(shareId, r.NodeInfoData.NextCursor, cursorHistory)
if err != nil {
return nil, err
}
resp = append(resp, nextFiles...)
}
return resp, nil
}
func (d *DoubaoShare) initShareList() error {
if d.Addition.ShareIds == "" {
return fmt.Errorf("share_ids is empty")
}
// parse the share configuration
shareConfigs, rootShares, err := d._parseShareConfigs()
if err != nil {
return err
}
// check for path conflicts
if err := d._detectPathConflicts(shareConfigs); err != nil {
return err
}
// build the tree structure
rootMap := d._buildTreeStructure(shareConfigs, rootShares)
// extract the top-level nodes
topLevelNodes := d._extractTopLevelNodes(rootMap, rootShares)
if len(topLevelNodes) == 0 {
return fmt.Errorf("no valid share_ids found")
}
// store the result
d.RootFiles = topLevelNodes
return nil
}
// parse share IDs and paths from the configuration
func (d *DoubaoShare) _parseShareConfigs() (map[string]string, []string, error) {
shareConfigs := make(map[string]string) // path -> share ID
rootShares := make([]string, 0) // share IDs shown at the root directory
lines := strings.Split(strings.TrimSpace(d.Addition.ShareIds), "\n")
if len(lines) == 0 {
return nil, nil, fmt.Errorf("no share_ids found")
}
for _, line := range lines {
line = strings.TrimSpace(line)
if line == "" {
continue
}
// parse the share ID and path
parts := strings.Split(line, "|")
var shareId, sharePath string
if len(parts) == 1 {
// a share without a path is shown directly at the root
shareId = _extractShareId(parts[0])
if shareId != "" {
rootShares = append(rootShares, shareId)
}
continue
} else if len(parts) >= 2 {
shareId = _extractShareId(parts[0])
sharePath = strings.Trim(parts[1], "/")
}
if shareId == "" {
log.Warnf("[doubao_share] Invalid Share_id Format: %s", line)
continue
}
// an empty path is also shown at the root
if sharePath == "" {
rootShares = append(rootShares, shareId)
continue
}
// add to the path mapping
shareConfigs[sharePath] = shareId
}
return shareConfigs, rootShares, nil
}
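From the parsing above, each line of ShareIds is either a bare share ID or share URL (shown at the root) or an id-or-url|path pair mounted under a virtual path. A plausible configuration, using hypothetical IDs, would be:

https://www.doubao.com/drive/s/abc123
def456|docs/manuals
ghi789|docs/specs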
// detect path conflicts
func (d *DoubaoShare) _detectPathConflicts(shareConfigs map[string]string) error {
// check for direct path conflicts
pathToShareIds := make(map[string][]string)
for sharePath, id := range shareConfigs {
pathToShareIds[sharePath] = append(pathToShareIds[sharePath], id)
}
for sharePath, ids := range pathToShareIds {
if len(ids) > 1 {
return fmt.Errorf("路径冲突: 路径 '%s' 被多个不同的分享ID使用: %s",
sharePath, strings.Join(ids, ", "))
}
}
// check for hierarchy conflicts
for path1, id1 := range shareConfigs {
for path2, id2 := range shareConfigs {
if path1 == path2 || id1 == id2 {
continue
}
// check for prefix conflicts
if strings.HasPrefix(path2, path1+"/") || strings.HasPrefix(path1, path2+"/") {
return fmt.Errorf("路径冲突: 路径 '%s' (ID: %s) 与路径 '%s' (ID: %s) 存在层次冲突",
path1, id1, path2, id2)
}
}
}
return nil
}
// build the tree structure
func (d *DoubaoShare) _buildTreeStructure(shareConfigs map[string]string, rootShares []string) map[string]*RootFileList {
rootMap := make(map[string]*RootFileList)
// add all share nodes
for sharePath, shareId := range shareConfigs {
children := make([]RootFileList, 0)
rootMap[sharePath] = &RootFileList{
ShareID: shareId,
VirtualPath: sharePath,
NodeInfo: NodeInfoData{},
Child: &children,
}
}
// build parent-child relationships
for sharePath, node := range rootMap {
if sharePath == "" {
continue
}
pathParts := strings.Split(sharePath, "/")
if len(pathParts) > 1 {
parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")
// make sure every parent path has been created
_ensurePathExists(rootMap, parentPath)
// attach the current node to its parent
if parent, exists := rootMap[parentPath]; exists {
*parent.Child = append(*parent.Child, *node)
}
}
}
return rootMap
}
// extract the top-level nodes
func (d *DoubaoShare) _extractTopLevelNodes(rootMap map[string]*RootFileList, rootShares []string) []RootFileList {
var topLevelNodes []RootFileList
// add root-directory shares
for _, shareId := range rootShares {
children := make([]RootFileList, 0)
topLevelNodes = append(topLevelNodes, RootFileList{
ShareID: shareId,
VirtualPath: "",
NodeInfo: NodeInfoData{},
Child: &children,
})
}
// add top-level directories
for rootPath, node := range rootMap {
if rootPath == "" {
continue
}
isTopLevel := true
pathParts := strings.Split(rootPath, "/")
if len(pathParts) > 1 {
parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")
if _, exists := rootMap[parentPath]; exists {
isTopLevel = false
}
}
if isTopLevel {
topLevelNodes = append(topLevelNodes, *node)
}
}
return topLevelNodes
}
// ensure the path exists, creating all necessary intermediate nodes
func _ensurePathExists(rootMap map[string]*RootFileList, path string) {
if path == "" {
return
}
// nothing to do if the path already exists
if _, exists := rootMap[path]; exists {
return
}
// create the node for the current path
children := make([]RootFileList, 0)
rootMap[path] = &RootFileList{
ShareID: "",
VirtualPath: path,
NodeInfo: NodeInfoData{},
Child: &children,
}
// handle the parent path
pathParts := strings.Split(path, "/")
if len(pathParts) > 1 {
parentPath := strings.Join(pathParts[:len(pathParts)-1], "/")
// make sure the parent path exists
_ensurePathExists(rootMap, parentPath)
// add the current node as a child of its parent
if parent, exists := rootMap[parentPath]; exists {
*parent.Child = append(*parent.Child, *rootMap[path])
}
}
}
// _extractShareId extracts the share ID from a URL or a raw ID
func _extractShareId(input string) string {
input = strings.TrimSpace(input)
if strings.HasPrefix(input, "http") {
regex := regexp.MustCompile(`/drive/s/([a-zA-Z0-9]+)`)
if matches := regex.FindStringSubmatch(input); len(matches) > 1 {
return matches[1]
}
return ""
}
return input // already a raw ID
}
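For illustration (hypothetical values), both accepted input forms resolve to the same ID:

_extractShareId("https://www.doubao.com/drive/s/abc123") // "abc123"
_extractShareId("abc123")                                // "abc123", returned as-is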
// _findRootFileByShareID finds the config entry for the given ShareID
func _findRootFileByShareID(rootFiles []RootFileList, shareID string) *RootFileList {
for i, rf := range rootFiles {
if rf.ShareID == shareID {
return &rootFiles[i]
}
if rf.Child != nil && len(*rf.Child) > 0 {
if found := _findRootFileByShareID(*rf.Child, shareID); found != nil {
return found
}
}
}
return nil
}
// _findNodeByPath finds the node at the given path
func _findNodeByPath(rootFiles []RootFileList, path string) *RootFileList {
for i, rf := range rootFiles {
if rf.VirtualPath == path {
return &rootFiles[i]
}
if rf.Child != nil && len(*rf.Child) > 0 {
if found := _findNodeByPath(*rf.Child, path); found != nil {
return found
}
}
}
return nil
}
// _findShareByPath resolves a path to its share and the path relative to that share
func _findShareByPath(rootFiles []RootFileList, path string) (*RootFileList, string) {
// exact match or sub-path match
for i, rf := range rootFiles {
if rf.VirtualPath == path {
return &rootFiles[i], ""
}
if rf.VirtualPath != "" && strings.HasPrefix(path, rf.VirtualPath+"/") {
relPath := strings.TrimPrefix(path, rf.VirtualPath+"/")
// check the children first
if rf.Child != nil && len(*rf.Child) > 0 {
if child, childPath := _findShareByPath(*rf.Child, path); child != nil {
return child, childPath
}
}
return &rootFiles[i], relPath
}
// recurse into the children
if rf.Child != nil && len(*rf.Child) > 0 {
if child, childPath := _findShareByPath(*rf.Child, path); child != nil {
return child, childPath
}
}
}
// check the root-directory shares
for i, rf := range rootFiles {
if rf.VirtualPath == "" && rf.ShareID != "" {
parts := strings.SplitN(path, "/", 2)
if len(parts) > 0 && parts[0] == rf.ShareID {
if len(parts) > 1 {
return &rootFiles[i], parts[1]
}
return &rootFiles[i], ""
}
}
}
return nil, ""
}
// _findShareAndPath resolves the given path to its ShareID and relative path
func (d *DoubaoShare) _findShareAndPath(dir model.Obj) (string, string, error) {
dirPath := dir.GetPath()
// the root directory returns empty values, meaning all shares should be listed
if dirPath == "/" || dirPath == "" {
return "", "", nil
}
// if this is a FileObject, take the ShareID stored on it
if fo, ok := dir.(*FileObject); ok && fo.ShareID != "" {
// use the ShareID stored on the object directly
// compute the relative path (strip the leading slash)
relativePath := strings.TrimPrefix(dirPath, "/")
// recursively look up the matching RootFile
found := _findRootFileByShareID(d.RootFiles, fo.ShareID)
if found != nil {
if found.VirtualPath != "" {
// if this share is configured with a path prefix, account for it when computing the relative path
if strings.HasPrefix(relativePath, found.VirtualPath) {
return fo.ShareID, strings.TrimPrefix(relativePath, found.VirtualPath+"/"), nil
}
}
return fo.ShareID, relativePath, nil
}
// if no matching RootFile config is found, still use the object's ShareID
return fo.ShareID, relativePath, nil
}
// strip the leading slash
cleanPath := strings.TrimPrefix(dirPath, "/")
// first check for a directly matching root-directory share
for _, rootFile := range d.RootFiles {
if rootFile.VirtualPath == "" && rootFile.ShareID != "" {
// check whether the first segment of the current path matches
parts := strings.SplitN(cleanPath, "/", 2)
if len(parts) > 0 && parts[0] == rootFile.ShareID {
if len(parts) > 1 {
return rootFile.ShareID, parts[1], nil
}
return rootFile.ShareID, "", nil
}
}
}
// look up the share or virtual directory matching this path
share, relPath := _findShareByPath(d.RootFiles, cleanPath)
if share != nil {
return share.ShareID, relPath, nil
}
log.Warnf("[doubao_share] No matching share path found: %s", dirPath)
return "", "", fmt.Errorf("no matching share path found: %s", dirPath)
}
// convertToFileObject converts a File into a FileObject
func (d *DoubaoShare) convertToFileObject(file File, shareId string, relativePath string) *FileObject {
// build the file object
obj := &FileObject{
Object: model.Object{
ID: file.ID,
Name: file.Name,
Size: file.Size,
Modified: time.Unix(file.UpdateTime, 0),
Ctime: time.Unix(file.CreateTime, 0),
IsFolder: file.NodeType == DirectoryType,
Path: path.Join(relativePath, file.Name),
},
ShareID: shareId,
Key: file.Key,
NodeID: file.ID,
NodeType: file.NodeType,
}
return obj
}
// getFilesInPath fetches the files under the given share and path
func (d *DoubaoShare) getFilesInPath(ctx context.Context, shareId, nodeId, relativePath string) ([]model.Obj, error) {
var (
files []File
err error
)
// with no nodeId, call the overview endpoint for the share link information
if nodeId == "" {
files, err = d.getShareOverview(shareId, "")
if err != nil {
return nil, fmt.Errorf("failed to get share link information: %w", err)
}
result := make([]model.Obj, 0, len(files))
for _, file := range files {
result = append(result, d.convertToFileObject(file, shareId, "/"))
}
return result, nil
} else {
files, err = d.getFiles(shareId, nodeId, "")
if err != nil {
return nil, fmt.Errorf("failed to get share file: %w", err)
}
result := make([]model.Obj, 0, len(files))
for _, file := range files {
result = append(result, d.convertToFileObject(file, shareId, path.Join("/", relativePath)))
}
return result, nil
}
}
// listRootDirectory renders the contents of the root directory
func (d *DoubaoShare) listRootDirectory(ctx context.Context) ([]model.Obj, error) {
objects := make([]model.Obj, 0)
// two groups: share contents shown directly vs. virtual directories
var directShareIDs []string
addedDirs := make(map[string]bool)
// walk all root nodes
for _, rootFile := range d.RootFiles {
if rootFile.VirtualPath == "" && rootFile.ShareID != "" {
// a share without a path: record its ShareID so the contents can be fetched below
directShareIDs = append(directShareIDs, rootFile.ShareID)
} else {
// a share with a path: show its first-level directory
parts := strings.SplitN(rootFile.VirtualPath, "/", 2)
firstLevel := parts[0]
// avoid adding duplicate directories with the same name
if _, exists := addedDirs[firstLevel]; exists {
continue
}
// create a virtual directory object
obj := &FileObject{
Object: model.Object{
ID: "",
Name: firstLevel,
Modified: time.Now(),
Ctime: time.Now(),
IsFolder: true,
Path: path.Join("/", firstLevel),
},
ShareID: rootFile.ShareID,
Key: "",
NodeID: "",
NodeType: DirectoryType,
}
objects = append(objects, obj)
addedDirs[firstLevel] = true
}
}
// fetch the contents of the directly shown shares
for _, shareID := range directShareIDs {
shareFiles, err := d.getFilesInPath(ctx, shareID, "", "")
if err != nil {
log.Warnf("[doubao_share] Failed to get list of files in share %s: %s", shareID, err)
continue
}
objects = append(objects, shareFiles...)
}
return objects, nil
}
// listVirtualDirectoryContent lists the contents of a virtual directory
func (d *DoubaoShare) listVirtualDirectoryContent(dir model.Obj) ([]model.Obj, error) {
dirPath := strings.TrimPrefix(dir.GetPath(), "/")
objects := make([]model.Obj, 0)
// recursively find the node for this path
node := _findNodeByPath(d.RootFiles, dirPath)
if node != nil && node.Child != nil {
// show all children of this node
for _, child := range *node.Child {
// compute the display name (the last segment of the path)
displayName := child.VirtualPath
if child.VirtualPath != "" {
parts := strings.Split(child.VirtualPath, "/")
displayName = parts[len(parts)-1]
} else if child.ShareID != "" {
displayName = child.ShareID
}
obj := &FileObject{
Object: model.Object{
ID: "",
Name: displayName,
Modified: time.Now(),
Ctime: time.Now(),
IsFolder: true,
Path: path.Join("/", child.VirtualPath),
},
ShareID: child.ShareID,
Key: "",
NodeID: "",
NodeType: DirectoryType,
}
objects = append(objects, obj)
}
}
return objects, nil
}
// generateContentDisposition builds a Content-Disposition header conforming to RFC 5987
func generateContentDisposition(filename string) string {
// URL-encode the filename for the plain (quoted) filename parameter
encodedName := urlEncode(filename)
// encode per RFC 5987 for the filename* parameter
encodedNameRFC5987 := encodeRFC5987(filename)
return fmt.Sprintf("attachment; filename=\"%s\"; filename*=utf-8''%s",
encodedName, encodedNameRFC5987)
}
// encodeRFC5987 encodes a string per RFC 5987, for non-ASCII characters in HTTP header parameters
func encodeRFC5987(s string) string {
var buf strings.Builder
for _, r := range []byte(s) {
// per RFC 5987, only letters, digits and a few special characters may be left unencoded
if (r >= 'a' && r <= 'z') ||
(r >= 'A' && r <= 'Z') ||
(r >= '0' && r <= '9') ||
r == '-' || r == '.' || r == '_' || r == '~' {
buf.WriteByte(r)
} else {
// every other character must be percent-encoded
fmt.Fprintf(&buf, "%%%02X", r)
}
}
return buf.String()
}
func urlEncode(s string) string {
s = url.QueryEscape(s)
s = strings.ReplaceAll(s, "+", "%20")
return s
}
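A worked example with a hypothetical filename 报告 v1.pdf: urlEncode yields %E6%8A%A5%E5%91%8A%20v1.pdf (QueryEscape's + for the space is rewritten to %20), encodeRFC5987 produces the same percent-encoded form here, and the resulting header is:

attachment; filename="%E6%8A%A5%E5%91%8A%20v1.pdf"; filename*=utf-8''%E6%8A%A5%E5%91%8A%20v1.pdf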

View File

@@ -5,7 +5,6 @@ import (
"context"
"errors"
"fmt"
"io"
"strings"
"text/template"
"time"
@@ -159,7 +158,7 @@ func signCommit(m *map[string]interface{}, entity *openpgp.Entity) (string, erro
if err != nil {
return "", err
}
if _, err = io.Copy(armorWriter, &sigBuffer); err != nil {
if _, err = utils.CopyWithBuffer(armorWriter, &sigBuffer); err != nil {
return "", err
}
_ = armorWriter.Close()

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"net/http"
"strings"
"sync"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@@ -36,88 +37,130 @@ func (d *GithubReleases) Drop(ctx context.Context) error {
return nil
}
// processPoint builds the file list for a single mount point
func (d *GithubReleases) processPoint(point *MountPoint, path string, args model.ListArgs) []File {
var pointFiles []File
if !d.Addition.ShowAllVersion { // latest
point.RequestLatestRelease(d.GetRequest, args.Refresh)
pointFiles = d.processLatestVersion(point, path)
} else { // all version
point.RequestReleases(d.GetRequest, args.Refresh)
pointFiles = d.processAllVersions(point, path)
}
return pointFiles
}
// processLatestVersion handles the latest-release case
func (d *GithubReleases) processLatestVersion(point *MountPoint, path string) []File {
var pointFiles []File
if point.Point == path { // same as the repository path
pointFiles = append(pointFiles, point.GetLatestRelease()...)
if d.Addition.ShowReadme {
files := point.GetOtherFile(d.GetRequest, false)
pointFiles = append(pointFiles, files...)
}
} else if strings.HasPrefix(point.Point, path) { // a parent directory of the repository path
nextDir := GetNextDir(point.Point, path)
if nextDir != "" {
dirFile := File{
Path: path + "/" + nextDir,
FileName: nextDir,
Size: point.GetLatestSize(),
UpdateAt: point.Release.PublishedAt,
CreateAt: point.Release.CreatedAt,
Type: "dir",
Url: "",
}
pointFiles = append(pointFiles, dirFile)
}
}
return pointFiles
}
// processAllVersions handles the all-releases case
func (d *GithubReleases) processAllVersions(point *MountPoint, path string) []File {
var pointFiles []File
if point.Point == path { // same as the repository path
pointFiles = append(pointFiles, point.GetAllVersion()...)
if d.Addition.ShowReadme {
files := point.GetOtherFile(d.GetRequest, false)
pointFiles = append(pointFiles, files...)
}
} else if strings.HasPrefix(point.Point, path) { // a parent directory of the repository path
nextDir := GetNextDir(point.Point, path)
if nextDir != "" {
dirFile := File{
FileName: nextDir,
Path: path + "/" + nextDir,
Size: point.GetAllVersionSize(),
UpdateAt: (*point.Releases)[0].PublishedAt,
CreateAt: (*point.Releases)[0].CreatedAt,
Type: "dir",
Url: "",
}
pointFiles = append(pointFiles, dirFile)
}
} else if strings.HasPrefix(path, point.Point) { // a subdirectory of the repository path
tagName := GetNextDir(path, point.Point)
if tagName != "" {
pointFiles = append(pointFiles, point.GetReleaseByTagName(tagName)...)
}
}
return pointFiles
}
// mergeFiles merges file lists, combining duplicate directories
func (d *GithubReleases) mergeFiles(files *[]File, newFiles []File) {
for _, newFile := range newFiles {
if newFile.Type == "dir" {
hasSameDir := false
for index := range *files {
if (*files)[index].GetName() == newFile.GetName() && (*files)[index].Type == "dir" {
hasSameDir = true
(*files)[index].Size += newFile.Size
break
}
}
if !hasSameDir {
*files = append(*files, newFile)
}
} else {
*files = append(*files, newFile)
}
}
}
func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files := make([]File, 0)
path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))
for i := range d.points {
point := &d.points[i]
if d.Addition.ConcurrentRequests && d.Addition.Token != "" { // concurrent handling
var mu sync.Mutex
var wg sync.WaitGroup
if !d.Addition.ShowAllVersion { // latest
point.RequestRelease(d.GetRequest, args.Refresh)
for i := range d.points {
wg.Add(1)
go func(point *MountPoint) {
defer wg.Done()
pointFiles := d.processPoint(point, path, args)
if point.Point == path { // same as the repository path
files = append(files, point.GetLatestRelease()...)
if d.Addition.ShowReadme {
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
} else if strings.HasPrefix(point.Point, path) { // a parent directory of the repository path
nextDir := GetNextDir(point.Point, path)
if nextDir == "" {
continue
}
hasSameDir := false
for index := range files {
if files[index].GetName() == nextDir {
hasSameDir = true
files[index].Size += point.GetLatestSize()
break
}
}
if !hasSameDir {
files = append(files, File{
Path: path + "/" + nextDir,
FileName: nextDir,
Size: point.GetLatestSize(),
UpdateAt: point.Release.PublishedAt,
CreateAt: point.Release.CreatedAt,
Type: "dir",
Url: "",
})
}
}
} else { // all version
point.RequestReleases(d.GetRequest, args.Refresh)
if point.Point == path { // same as the repository path
files = append(files, point.GetAllVersion()...)
if d.Addition.ShowReadme {
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
} else if strings.HasPrefix(point.Point, path) { // a parent directory of the repository path
nextDir := GetNextDir(point.Point, path)
if nextDir == "" {
continue
}
hasSameDir := false
for index := range files {
if files[index].GetName() == nextDir {
hasSameDir = true
files[index].Size += point.GetAllVersionSize()
break
}
}
if !hasSameDir {
files = append(files, File{
FileName: nextDir,
Path: path + "/" + nextDir,
Size: point.GetAllVersionSize(),
UpdateAt: (*point.Releases)[0].PublishedAt,
CreateAt: (*point.Releases)[0].CreatedAt,
Type: "dir",
Url: "",
})
}
} else if strings.HasPrefix(path, point.Point) { // a subdirectory of the repository path
tagName := GetNextDir(path, point.Point)
if tagName == "" {
continue
}
files = append(files, point.GetReleaseByTagName(tagName)...)
}
mu.Lock()
d.mergeFiles(&files, pointFiles)
mu.Unlock()
}(&d.points[i])
}
wg.Wait()
} else { // serial handling
for i := range d.points {
point := &d.points[i]
pointFiles := d.processPoint(point, path, args)
d.mergeFiles(&files, pointFiles)
}
}
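Distilled from the hunk above (a sketch of the new concurrent path only; the removed inline logic is elided): each mount point is processed in its own goroutine, and results are merged under a mutex because the files slice is shared across goroutines.

var mu sync.Mutex
var wg sync.WaitGroup
for i := range d.points {
    wg.Add(1)
    go func(point *MountPoint) {
        defer wg.Done()
        // network requests for each mount point run concurrently
        pointFiles := d.processPoint(point, path, args)
        mu.Lock()
        d.mergeFiles(&files, pointFiles) // serialize writes to the shared slice
        mu.Unlock()
    }(&d.points[i])
}
wg.Wait()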

View File

@@ -7,11 +7,12 @@ import (
type Addition struct {
driver.RootID
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README and LICENSE files"`
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README and LICENSE files"`
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
ConcurrentRequests bool `json:"concurrent_requests" type:"bool" default:"false" help:"To concurrently request the GitHub API, you must enter a GitHub token"`
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
}
var config = driver.Config{

View File

@@ -18,7 +18,7 @@ type MountPoint struct {
}
// request the latest release
func (m *MountPoint) RequestRelease(get func(url string) (*resty.Response, error), refresh bool) {
func (m *MountPoint) RequestLatestRelease(get func(url string) (*resty.Response, error), refresh bool) {
if m.Repo == "" {
return
}

View File

@@ -6,8 +6,8 @@ import (
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
// send a GET request
@@ -23,7 +23,7 @@ func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) {
return nil, err
}
if res.StatusCode() != 200 {
log.Warn("failed to get request: ", res.StatusCode(), res.String())
utils.Log.Warnf("failed to get request: %s %d %s", url, res.StatusCode(), res.String())
}
return res, nil
}

View File

@@ -2,7 +2,6 @@ package template
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
@@ -17,6 +16,7 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/foxxorcat/mopan-sdk-go"
"github.com/go-resty/resty/v2"
@@ -273,23 +273,14 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error {
const DefaultPartSize = 1024 * 1024 * 8
func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := s.CacheFullInTempFile()
if err != nil {
return nil, err
etag := s.GetHash().GetHash(utils.MD5)
var err error
if len(etag) != utils.MD5.Width {
_, etag, err = stream.CacheFullInTempFileAndHash(s, utils.MD5)
if err != nil {
return nil, err
}
}
defer func() {
_ = tempFile.Close()
}()
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return nil, err
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return nil, err
}
etag := hex.EncodeToString(h.Sum(nil))
// get upToken
res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -309,7 +300,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: tempFile,
Reader: s,
Size: s.GetSize(),
},
UpdateProgress: up,

View File

@@ -4,8 +4,7 @@ import (
"context"
"fmt"
"net/url"
"path/filepath"
"strings"
"path"
shell "github.com/ipfs/go-ipfs-api"
@@ -43,78 +42,115 @@ func (d *IPFS) Drop(ctx context.Context) error {
}
func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
switch d.Mode {
case "ipfs":
path, _ = url.JoinPath("/ipfs", path)
case "ipns":
path, _ = url.JoinPath("/ipns", path)
case "mfs":
fileStat, err := d.sh.FilesStat(ctx, path)
if err != nil {
return nil, err
var ipfsPath string
cid := dir.GetID()
if cid != "" {
ipfsPath = path.Join("/ipfs", cid)
} else {
// IPNS DNS resolution can occasionally fail, so the CID may need to be fetched again; other modes should not error here
ipfsPath = dir.GetPath()
switch d.Mode {
case "ipfs":
ipfsPath = path.Join("/ipfs", ipfsPath)
case "ipns":
ipfsPath = path.Join("/ipns", ipfsPath)
case "mfs":
fileStat, err := d.sh.FilesStat(ctx, ipfsPath)
if err != nil {
return nil, err
}
ipfsPath = path.Join("/ipfs", fileStat.Hash)
default:
return nil, fmt.Errorf("mode error")
}
path, _ = url.JoinPath("/ipfs", fileStat.Hash)
default:
return nil, fmt.Errorf("mode error")
}
dirs, err := d.sh.List(path)
dirs, err := d.sh.List(ipfsPath)
if err != nil {
return nil, err
}
objlist := []model.Obj{}
for _, file := range dirs {
gateurl := *d.gateURL.JoinPath("/ipfs/" + file.Hash)
gateurl.RawQuery = "filename=" + url.PathEscape(file.Name)
objlist = append(objlist, &model.ObjectURL{
Object: model.Object{ID: "/ipfs/" + file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
Url: model.Url{Url: gateurl.String()},
})
objlist = append(objlist, &model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1})
}
return objlist, nil
}
func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
gateurl := d.gateURL.JoinPath(file.GetID())
gateurl.RawQuery = "filename=" + url.PathEscape(file.GetName())
gateurl := d.gateURL.JoinPath("/ipfs/", file.GetID())
gateurl.RawQuery = "filename=" + url.QueryEscape(file.GetName())
return &model.Link{URL: gateurl.String()}, nil
}
func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
func (d *IPFS) Get(ctx context.Context, rawPath string) (model.Obj, error) {
rawPath = path.Join(d.GetRootPath(), rawPath)
var ipfsPath string
switch d.Mode {
case "ipfs":
ipfsPath = path.Join("/ipfs", rawPath)
case "ipns":
ipfsPath = path.Join("/ipns", rawPath)
case "mfs":
fileStat, err := d.sh.FilesStat(ctx, rawPath)
if err != nil {
return nil, err
}
ipfsPath = path.Join("/ipfs", fileStat.Hash)
default:
return nil, fmt.Errorf("mode error")
}
path := parentDir.GetPath()
if path[len(path):] != "/" {
path += "/"
file, err := d.sh.FilesStat(ctx, ipfsPath)
if err != nil {
return nil, err
}
return d.sh.FilesMkdir(ctx, path+dirName)
return &model.Object{ID: file.Hash, Name: path.Base(rawPath), Path: rawPath, Size: int64(file.Size), IsFolder: file.Type == "directory"}, nil
}
func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
return nil, fmt.Errorf("only write in mfs mode")
}
return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
dirPath := parentDir.GetPath()
err := d.sh.FilesMkdir(ctx, path.Join(dirPath, dirName), shell.FilesMkdir.Parents(true))
if err != nil {
return nil, err
}
file, err := d.sh.FilesStat(ctx, path.Join(dirPath, dirName))
if err != nil {
return nil, err
}
return &model.Object{ID: file.Hash, Name: dirName, Path: path.Join(dirPath, dirName), Size: int64(file.Size), IsFolder: true}, nil
}
func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
return nil, fmt.Errorf("only write in mfs mode")
}
newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName
return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath()))
d.sh.FilesRm(ctx, dstPath, true)
return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()},
d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
}
func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
return nil, fmt.Errorf("only write in mfs mode")
}
newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath())
return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
dstPath := path.Join(path.Dir(srcObj.GetPath()), newName)
d.sh.FilesRm(ctx, dstPath, true)
return &model.Object{ID: srcObj.GetID(), Name: newName, Path: dstPath, Size: int64(srcObj.GetSize()),
IsFolder: srcObj.IsDir()}, d.sh.FilesMv(ctx, srcObj.GetPath(), dstPath)
}
func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.Mode != "mfs" {
return nil, fmt.Errorf("only write in mfs mode")
}
dstPath := path.Join(dstDir.GetPath(), path.Base(srcObj.GetPath()))
d.sh.FilesRm(ctx, dstPath, true)
return &model.Object{ID: srcObj.GetID(), Name: srcObj.GetName(), Path: dstPath, Size: int64(srcObj.GetSize()), IsFolder: srcObj.IsDir()},
d.sh.FilesCp(ctx, path.Join("/ipfs/", srcObj.GetID()), dstPath, shell.FilesCp.Parents(true))
}
func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
@@ -124,19 +160,25 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
return d.sh.FilesRm(ctx, obj.GetPath(), true)
}
func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
return nil, fmt.Errorf("only write in mfs mode")
}
outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}))
if err != nil {
return err
return nil, err
}
err = d.sh.FilesCp(ctx, "/ipfs/"+outHash, dstDir.GetPath()+"/"+strings.ReplaceAll(s.GetName(), "\\", "/"))
return err
dstPath := path.Join(dstDir.GetPath(), s.GetName())
if s.GetExist() != nil {
d.sh.FilesRm(ctx, dstPath, true)
}
err = d.sh.FilesCp(ctx, path.Join("/ipfs/", outHash), dstPath, shell.FilesCp.Parents(true))
gateurl := d.gateURL.JoinPath("/ipfs/", outHash)
gateurl.RawQuery = "filename=" + url.QueryEscape(s.GetName())
return &model.Object{ID: outHash, Name: s.GetName(), Path: dstPath, Size: int64(s.GetSize()), IsFolder: s.IsDir()}, err
}
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
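A note on the rewritten Put above: the stream is first added to IPFS to obtain a CID, the destination is removed first when the stream reports an existing object (s.GetExist()), and the CID is then linked into the MFS tree via FilesCp with the Parents(true) option, so intermediate directories are created as needed.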

View File

@@ -9,8 +9,8 @@ type Addition struct {
// Usually one of two
driver.RootPath
Mode string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"`
Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"`
Gateway string `json:"gateway" default:"http://127.0.0.1:8080"`
Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001" required:"true"`
Gateway string `json:"gateway" default:"http://127.0.0.1:8080" required:"true"`
}
var config = driver.Config{

View File

@@ -78,6 +78,42 @@ func RemoveNotes(html string) string {
})
}
// RemoveJSComment strips JS comments
func RemoveJSComment(data string) string {
var result strings.Builder
inComment := false
inSingleLineComment := false
for i := 0; i < len(data); i++ {
v := data[i]
if inSingleLineComment && (v == '\n' || v == '\r') {
inSingleLineComment = false
result.WriteByte(v)
continue
}
if inComment && v == '*' && i+1 < len(data) && data[i+1] == '/' {
inComment = false
i++ // also skip the closing '/'
continue
}
if v == '/' && i+1 < len(data) {
nextChar := data[i+1]
if nextChar == '*' {
inComment = true
i++
continue
} else if nextChar == '/' {
inSingleLineComment = true
i++
continue
}
}
if inComment || inSingleLineComment {
continue // drop bytes that are inside a comment
}
result.WriteByte(v)
}
return result.String()
}
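One caveat, as an observation on the code above: the byte scanner does not track string literals, so a // inside a string (for example in "https://...") is treated as a comment start and the rest of the line is dropped. For the LanZou share pages this is applied to, that is presumably acceptable.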
var findAcwScV2Reg = regexp.MustCompile(`arg1='([0-9A-Z]+)'`)
// When a page is accessed too frequently (or in certain other cases), an obfuscated page may be returned first; it computes an acw_sc__v2 value and places it in the page, and only after requesting the page again with that value is the normal page returned.

View File

@@ -348,6 +348,10 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
file FileOrFolderByShareUrl
)
// strip comments
sharePageData = RemoveNotes(sharePageData)
sharePageData = RemoveJSComment(sharePageData)
// password required
if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") {
sharePageData, err := getJSFunctionByName(sharePageData, "down_p")

View File

@@ -35,6 +35,10 @@ type Local struct {
// zero means no limit
thumbConcurrency int
thumbTokenBucket TokenBucket
// video thumb position
videoThumbPos float64
videoThumbPosIsPercentage bool
}
func (d *Local) Config() driver.Config {
@@ -92,6 +96,8 @@ func (d *Local) Init(ctx context.Context) error {
if val < 0 || val > 100 {
return fmt.Errorf("invalid video_thumb_pos value: %s, the precentage must be a number between 0 and 100", d.VideoThumbPos)
}
d.videoThumbPosIsPercentage = true
d.videoThumbPos = val / 100
} else {
val, err := strconv.ParseFloat(d.VideoThumbPos, 64)
if err != nil {
@@ -100,6 +106,8 @@ func (d *Local) Init(ctx context.Context) error {
if val < 0 {
return fmt.Errorf("invalid video_thumb_pos value: %s, the time must be a positive number", d.VideoThumbPos)
}
d.videoThumbPosIsPercentage = false
d.videoThumbPos = val
}
return nil
}

View File

@@ -61,22 +61,14 @@ func (d *Local) GetSnapshot(videoPath string) (imgData *bytes.Buffer, err error)
}
var ss string
if strings.HasSuffix(d.VideoThumbPos, "%") {
percentage, err := strconv.ParseFloat(strings.TrimSuffix(d.VideoThumbPos, "%"), 64)
if err != nil {
return nil, err
}
ss = fmt.Sprintf("%f", totalDuration*percentage/100)
if d.videoThumbPosIsPercentage {
ss = fmt.Sprintf("%f", totalDuration*d.videoThumbPos)
} else {
val, err := strconv.ParseFloat(d.VideoThumbPos, 64)
if err != nil {
return nil, err
}
// If the value is greater than the total duration, use the total duration
if val > totalDuration {
if d.videoThumbPos > totalDuration {
ss = fmt.Sprintf("%f", totalDuration)
} else {
ss = d.VideoThumbPos
ss = fmt.Sprintf("%f", d.videoThumbPos)
}
}

View File

@@ -56,12 +56,21 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
if err != nil {
return nil, err
}
res := make([]model.Obj, 0)
fn := make(map[string]model.Obj)
for i := range nodes {
n := nodes[i]
if n.GetType() == mega.FILE || n.GetType() == mega.FOLDER {
res = append(res, &MegaNode{n})
if n.GetType() != mega.FILE && n.GetType() != mega.FOLDER {
continue
}
if _, ok := fn[n.GetName()]; !ok {
fn[n.GetName()] = &MegaNode{n}
} else if sameNameObj := fn[n.GetName()]; (&MegaNode{n}).ModTime().After(sameNameObj.ModTime()) {
fn[n.GetName()] = &MegaNode{n}
}
}
res := make([]model.Obj, 0)
for _, v := range fn {
res = append(res, v)
}
return res, nil
}
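A note on the rewrite above: Mega permits multiple entries with the same name in one folder, while a path-addressed listing needs unique names, so the map keeps only the most recently modified node for each name (presumably the intended survivor).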

View File

@@ -269,9 +269,6 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
}()
// step.1
uploadPartData, err := mopan.InitUploadPartData(ctx, mopan.UpdloadFileParam{

View File

@@ -2,13 +2,13 @@ package netease_music
import (
"context"
"github.com/alist-org/alist/v3/internal/driver"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/sign"
"github.com/alist-org/alist/v3/pkg/http_range"
@@ -28,8 +28,8 @@ type SongResp struct {
}
type ListResp struct {
Size string `json:"size"`
MaxSize string `json:"maxSize"`
Size int64 `json:"size"`
MaxSize int64 `json:"maxSize"`
Data []struct {
AddTime int64 `json:"addTime"`
FileName string `json:"fileName"`

View File

@@ -227,7 +227,6 @@ func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStrea
if err != nil {
return err
}
defer tmp.Close()
u := uploader{driver: d, file: tmp}

View File

@@ -11,7 +11,7 @@ type Addition struct {
IsSharepoint bool `json:"is_sharepoint"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://alist.nn.ci/tool/onedrive/callback"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://alistgo.com/tool/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`

View File

@@ -8,6 +8,7 @@ import (
"io"
"net/http"
stdpath "path"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@@ -17,7 +18,6 @@ import (
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
log "github.com/sirupsen/logrus"
)
var onedriveHostMap = map[string]Host{
@@ -204,23 +204,22 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
log.Debugf("upload: %d", finish)
var byteSize int64 = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[Onedrive] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
log.Debug(err, n)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@@ -228,19 +227,31 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[Onedrive] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
return nil
}
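With maxRetries = 3, the backoff sequence on persistent 5xx responses is 2s, 4s, then 8s (time.Duration(1<<retryCount) * time.Second); a fourth consecutive server error aborts the upload. The OnedriveAPP hunk below applies the identical pattern.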

View File

@@ -8,6 +8,7 @@ import (
"io"
"net/http"
stdpath "path"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@@ -17,7 +18,6 @@ import (
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
log "github.com/sirupsen/logrus"
)
var onedriveHostMap = map[string]Host{
@@ -154,23 +154,22 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
var finish int64 = 0
DEFAULT := d.ChunkSize * 1024 * 1024
retryCount := 0
maxRetries := 3
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
log.Debugf("upload: %d", finish)
var byteSize int64 = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteSize := min(left, DEFAULT)
utils.Log.Debugf("[OnedriveAPP] upload range: %d-%d/%d", finish, finish+byteSize-1, stream.GetSize())
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
log.Debug(err, n)
utils.Log.Debug(err, n)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@@ -178,19 +177,31 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
switch {
case res.StatusCode >= 500 && res.StatusCode <= 504:
retryCount++
if retryCount > maxRetries {
res.Body.Close()
return fmt.Errorf("upload failed after %d retries due to server errors, error %d", maxRetries, res.StatusCode)
}
backoff := time.Duration(1<<retryCount) * time.Second
utils.Log.Warnf("[OnedriveAPP] server errors %d while uploading, retrying after %v...", res.StatusCode, backoff)
time.Sleep(backoff)
case res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200:
data, _ := io.ReadAll(res.Body)
res.Body.Close()
return errors.New(string(data))
default:
res.Body.Close()
retryCount = 0
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
}
res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
return nil
}

View File

@@ -69,7 +69,7 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
d.ClientVersion = PCClientVersion
d.PackageName = PCPackageName
d.Algorithms = PCAlgorithms
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
}
if d.Addition.CaptchaToken != "" && d.Addition.RefreshToken == "" {

View File

@@ -7,13 +7,6 @@ import (
"crypto/sha1"
"encoding/hex"
"fmt"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
"io"
"net/http"
"path/filepath"
@@ -24,38 +17,43 @@ import (
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
)
var AndroidAlgorithms = []string{
"7xOq4Z8s",
"QE9/9+IQco",
"WdX5J9CPLZp",
"NmQ5qFAXqH3w984cYhMeC5TJR8j",
"cc44M+l7GDhav",
"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
"wla81BUVSmDkctHDpUT",
"c6wMr1sm1WxiR3i8LDAm3W",
"hRLrEQCFNYi0PFPV",
"o1J41zIraDtJPNuhBu7Ifb/q3",
"U",
"RrbZvV0CTu3gaZJ56PVKki4IeP",
"NNuRbLckJqUp1Do0YlrKCUP",
"UUwnBbipMTvInA0U0E9",
"VzGc",
"SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx",
"nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl",
"Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA",
"VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz",
"u5ujk5sM62gpJOsB/1Gu/zsfgfZO",
"dXYIiBOAHZgzSruaQ2Nhrqc2im",
"z5jUTBSIpBN9g4qSJGlidNAutX6",
"KJE2oveZ34du/g1tiimm",
}
var WebAlgorithms = []string{
"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
"uSUX02HYJ1IkyLdhINEFcCf7l2",
"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
"3binT1s/5a1pu3fGsN",
"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
"DYS3StqnAEKdGddRP8CJrxUSFh",
"crquW+4",
"ryKqvW9B9hly+JAymXCIfag5Z",
"Hr08T/NDTX1oSJfHk90c",
"i",
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
"+r6CQVxjzJV6LCV",
"F",
"pFJRC",
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
"/750aCr4lm/Sly/c",
"RB+DT/gZCrbV",
"",
"CyLsf7hdkIRxRm215hl",
"7xHvLi2tOYP0Y92b",
"ZGTXXxu8E/MIWaEDB+Sm/",
"1UI3",
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
"ihtqpG6FMt65+Xk+tWUH2",
"NhXXU9rg4XXdzo7u5o",
}
var PCAlgorithms = []string{
@@ -80,17 +78,17 @@ const (
const (
AndroidClientID = "YNxT9w7GMdWvEOKa"
AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
AndroidClientVersion = "1.49.3"
AndroidClientVersion = "1.53.2"
AndroidPackageName = "com.pikcloud.pikpak"
AndroidSdkVersion = "2.0.4.204101"
AndroidSdkVersion = "2.0.6.206003"
WebClientID = "YUMx5nI8ZU8Ap8pm"
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
WebClientVersion = "undefined"
WebPackageName = "drive.mypikpak.com"
WebClientVersion = "2.0.0"
WebPackageName = "mypikpak.com"
WebSdkVersion = "8.0.3"
PCClientID = "YvtoWO6GNHiuCl7x"
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
PCClientVersion = "undefined" // 2.5.6.4831
PCClientVersion = "undefined" // 2.6.11.4955
PCPackageName = "mypikpak.com"
PCSdkVersion = "8.0.3"
)
@@ -518,7 +516,7 @@ func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSi
continue
}
b := driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf))
b := driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf))
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
break
}

View File

@@ -66,7 +66,7 @@ func (d *PikPakShare) Init(ctx context.Context) error {
d.ClientVersion = PCClientVersion
d.PackageName = PCPackageName
d.Algorithms = PCAlgorithms
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
}
// fetch the CaptchaToken

View File

@@ -17,34 +17,32 @@ import (
)
var AndroidAlgorithms = []string{
"7xOq4Z8s",
"QE9/9+IQco",
"WdX5J9CPLZp",
"NmQ5qFAXqH3w984cYhMeC5TJR8j",
"cc44M+l7GDhav",
"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
"wla81BUVSmDkctHDpUT",
"c6wMr1sm1WxiR3i8LDAm3W",
"hRLrEQCFNYi0PFPV",
"o1J41zIraDtJPNuhBu7Ifb/q3",
"U",
"RrbZvV0CTu3gaZJ56PVKki4IeP",
"NNuRbLckJqUp1Do0YlrKCUP",
"UUwnBbipMTvInA0U0E9",
"VzGc",
"SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx",
"nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl",
"Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA",
"VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz",
"u5ujk5sM62gpJOsB/1Gu/zsfgfZO",
"dXYIiBOAHZgzSruaQ2Nhrqc2im",
"z5jUTBSIpBN9g4qSJGlidNAutX6",
"KJE2oveZ34du/g1tiimm",
}
var WebAlgorithms = []string{
"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
"uSUX02HYJ1IkyLdhINEFcCf7l2",
"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
"3binT1s/5a1pu3fGsN",
"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
"DYS3StqnAEKdGddRP8CJrxUSFh",
"crquW+4",
"ryKqvW9B9hly+JAymXCIfag5Z",
"Hr08T/NDTX1oSJfHk90c",
"i",
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
"+r6CQVxjzJV6LCV",
"F",
"pFJRC",
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
"/750aCr4lm/Sly/c",
"RB+DT/gZCrbV",
"",
"CyLsf7hdkIRxRm215hl",
"7xHvLi2tOYP0Y92b",
"ZGTXXxu8E/MIWaEDB+Sm/",
"1UI3",
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
"ihtqpG6FMt65+Xk+tWUH2",
"NhXXU9rg4XXdzo7u5o",
}
var PCAlgorithms = []string{
@@ -63,17 +61,17 @@ var PCAlgorithms = []string{
const (
AndroidClientID = "YNxT9w7GMdWvEOKa"
AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
AndroidClientVersion = "1.49.3"
AndroidClientVersion = "1.53.2"
AndroidPackageName = "com.pikcloud.pikpak"
AndroidSdkVersion = "2.0.4.204101"
AndroidSdkVersion = "2.0.6.206003"
WebClientID = "YUMx5nI8ZU8Ap8pm"
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
WebClientVersion = "undefined"
WebPackageName = "drive.mypikpak.com"
WebClientVersion = "2.0.0"
WebPackageName = "mypikpak.com"
WebSdkVersion = "8.0.3"
PCClientID = "YvtoWO6GNHiuCl7x"
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
PCClientVersion = "undefined" // 2.5.6.4831
PCClientVersion = "undefined" // 2.6.11.4955
PCPackageName = "mypikpak.com"
PCSdkVersion = "8.0.3"
)

View File

@@ -3,9 +3,8 @@ package quark
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"hash"
"io"
"net/http"
"time"
@@ -14,6 +13,7 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
@@ -74,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
"Referer": []string{d.conf.referer},
"User-Agent": []string{ua},
},
Concurrency: 2,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
}
@@ -136,33 +136,33 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
md5Str, sha1Str := stream.GetHash().GetHash(utils.MD5), stream.GetHash().GetHash(utils.SHA1)
var (
md5 hash.Hash
sha1 hash.Hash
)
writers := []io.Writer{}
if len(md5Str) != utils.MD5.Width {
md5 = utils.MD5.NewFunc()
writers = append(writers, md5)
}
defer func() {
_ = tempFile.Close()
}()
m := md5.New()
_, err = utils.CopyWithBuffer(m, tempFile)
if err != nil {
return err
if len(sha1Str) != utils.SHA1.Width {
sha1 = utils.SHA1.NewFunc()
writers = append(writers, sha1)
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
if len(writers) > 0 {
_, err := streamPkg.CacheFullInTempFileAndWriter(stream, io.MultiWriter(writers...))
if err != nil {
return err
}
if md5 != nil {
md5Str = hex.EncodeToString(md5.Sum(nil))
}
if sha1 != nil {
sha1Str = hex.EncodeToString(sha1.Sum(nil))
}
}
md5Str := hex.EncodeToString(m.Sum(nil))
s := sha1.New()
_, err = utils.CopyWithBuffer(s, tempFile)
if err != nil {
return err
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
sha1Str := hex.EncodeToString(s.Sum(nil))
// pre
pre, err := d.upPre(stream, dstDir.GetID())
if err != nil {
@@ -178,27 +178,28 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
return nil
}
// part up
partSize := pre.Metadata.PartSize
var part []byte
md5s := make([]string, 0)
defaultBytes := make([]byte, partSize)
total := stream.GetSize()
left := total
partSize := int64(pre.Metadata.PartSize)
part := make([]byte, partSize)
count := int(total / partSize)
if total%partSize > 0 {
count++
}
md5s := make([]string, 0, count)
partNumber := 1
for left > 0 {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
if left > int64(partSize) {
part = defaultBytes
} else {
part = make([]byte, left)
if left < partSize {
part = part[:left]
}
_, err := io.ReadFull(tempFile, part)
n, err := io.ReadFull(stream, part)
if err != nil {
return err
}
left -= int64(len(part))
left -= int64(n)
log.Debugf("left: %d", left)
reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part))
m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader)

View File

@@ -125,7 +125,6 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs
}
func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
files := &model.Link{}
var fileLink FileLink
_, err := d.request(ctx, "/file", "GET", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
@@ -139,8 +138,12 @@ func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArg
if err != nil {
return nil, err
}
files.URL = fileLink.Data.DownloadURL
return files, nil
return &model.Link{
URL: fileLink.Data.DownloadURL,
Concurrency: 3,
PartSize: 10 * utils.MB,
}, nil
}
func (d *QuarkUCTV) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {

View File

@@ -83,7 +83,7 @@ type Group struct {
Type int `json:"type"`
Name string `json:"name"`
IsAdministrator int `json:"is_administrator"`
-	Role            int   `json:"role"`
+	Role            []int `json:"role"`
Avatar string `json:"avatar_url"`
IsStick int `json:"is_stick"`
Nickname string `json:"nickname"`

View File
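
Changing Role from int to []int breaks peers that still send a bare integer in that field. One tolerant approach, shown as an illustrative sketch (not necessarily the type this codebase uses), is a slice type whose UnmarshalJSON accepts both shapes:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// RoleList accepts both `1` and `[1,2]` in JSON (illustrative type).
type RoleList []int

func (r *RoleList) UnmarshalJSON(b []byte) error {
	var one int
	if err := json.Unmarshal(b, &one); err == nil {
		*r = RoleList{one}
		return nil
	}
	var many []int
	if err := json.Unmarshal(b, &many); err != nil {
		return err
	}
	*r = RoleList(many)
	return nil
}

func main() {
	var a, b RoleList
	_ = json.Unmarshal([]byte(`1`), &a)
	_ = json.Unmarshal([]byte(`[1,2]`), &b)
	fmt.Println(a, b) // [1] [1 2]
}
```
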

@@ -12,6 +12,7 @@ import (
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
"github.com/aws/aws-sdk-go/aws"
@@ -44,26 +45,29 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
Common: &Common{
client: base.NewRestyClient(),
Algorithms: []string{
"HPxr4BVygTQVtQkIMwQH33ywbgYG5l4JoR",
"GzhNkZ8pOBsCY+7",
"v+l0ImTpG7c7/",
"e5ztohgVXNP",
"t",
"EbXUWyVVqQbQX39Mbjn2geok3/0WEkAVxeqhtx857++kjJiRheP8l77gO",
"o7dvYgbRMOpHXxCs",
"6MW8TD8DphmakaxCqVrfv7NReRRN7ck3KLnXBculD58MvxjFRqT+",
"kmo0HxCKVfmxoZswLB4bVA/dwqbVAYghSb",
"j",
"4scKJNdd7F27Hv7tbt",
"9uJNVj/wLmdwKrJaVj/omlQ",
"Oz64Lp0GigmChHMf/6TNfxx7O9PyopcczMsnf",
"Eb+L7Ce+Ej48u",
"jKY0",
"ASr0zCl6v8W4aidjPK5KHd1Lq3t+vBFf41dqv5+fnOd",
"wQlozdg6r1qxh0eRmt3QgNXOvSZO6q/GXK",
"gmirk+ciAvIgA/cxUUCema47jr/YToixTT+Q6O",
"5IiCoM9B1/788ntB",
"P07JH0h6qoM6TSUAK2aL9T5s2QBVeY9JWvalf",
"+oK0AN",
},
DeviceID: utils.GetMD5EncodeStr(x.Username + x.Password),
DeviceID: func() string {
if len(x.DeviceID) != 32 {
return utils.GetMD5EncodeStr(x.DeviceID)
}
return x.DeviceID
}(),
ClientID: "Xp6vsxz_7IYVw2BB",
ClientSecret: "Xp6vsy4tN9toTVdMSpomVdXpRmES",
ClientVersion: "7.51.0.8196",
ClientVersion: "8.31.0.9726",
PackageName: "com.xunlei.downloadprovider",
UserAgent: "ANDROID-com.xunlei.downloadprovider/7.51.0.8196 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/220200 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)",
UserAgent: "ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)",
DownloadUserAgent: "Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)",
refreshCTokenCk: func(token string) {
x.CaptchaToken = token
op.MustSaveDriverStorage(x)
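
The DeviceID closure above keeps any value that already looks like a 32-character MD5 hex digest and hashes anything else into one, so a stable device ID survives arbitrary user input. The same normalization in isolation (a sketch, assuming utils.GetMD5EncodeStr is an MD5-hex helper):

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// normalizeDeviceID mirrors the closure above: a 32-char value is assumed
// to already be an MD5 hex digest; anything else is hashed into one.
func normalizeDeviceID(id string) string {
	if len(id) == 32 {
		return id
	}
	sum := md5.Sum([]byte(id))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(normalizeDeviceID("my-laptop"))                        // derived digest
	fmt.Println(normalizeDeviceID("9aa5c268e7bcfc197a9ad88e2fb330e5")) // kept as-is
}
```
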
@@ -79,6 +83,8 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
x.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
op.MustSaveDriverStorage(x)
}
+			// clear the credit (trust) key
+			x.Addition.CreditKey = ""
}
x.SetTokenResp(token)
return err
@@ -92,6 +98,17 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
x.SetCaptchaToken(ctoekn)
}
if x.Addition.CreditKey != "" {
x.SetCreditKey(x.Addition.CreditKey)
}
if x.Addition.DeviceID != "" {
x.Common.DeviceID = x.Addition.DeviceID
} else {
x.Addition.DeviceID = x.Common.DeviceID
op.MustSaveDriverStorage(x)
}
// prevent duplicate logins
identity := x.GetIdentity()
if x.identity != identity || !x.IsLogin() {
@@ -101,6 +118,8 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
if err != nil {
return err
}
+		// clear the credit (trust) key
+		x.Addition.CreditKey = ""
x.SetTokenResp(token)
}
return nil
@@ -160,6 +179,17 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {
x.SetCaptchaToken(x.CaptchaToken)
}
if x.ExpertAddition.CreditKey != "" {
x.SetCreditKey(x.ExpertAddition.CreditKey)
}
if x.ExpertAddition.DeviceID != "" {
x.Common.DeviceID = x.ExpertAddition.DeviceID
} else {
x.ExpertAddition.DeviceID = x.Common.DeviceID
op.MustSaveDriverStorage(x)
}
// signing method
if x.SignType == "captcha_sign" {
x.Common.Timestamp = x.Timestamp
@@ -193,6 +223,8 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {
if err != nil {
return err
}
+	// clear the credit (trust) key
+	x.ExpertAddition.CreditKey = ""
x.SetTokenResp(token)
x.SetRefreshTokenFunc(func() error {
token, err := x.XunLeiCommon.RefreshToken(x.TokenResp.RefreshToken)
@@ -201,6 +233,8 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {
if err != nil {
x.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error()))
}
+		// clear the credit (trust) key
+		x.ExpertAddition.CreditKey = ""
}
x.SetTokenResp(token)
op.MustSaveDriverStorage(x)
@@ -232,7 +266,8 @@ func (x *ThunderExpert) SetTokenResp(token *TokenResp) {
type XunLeiCommon struct {
*Common
-	*TokenResp // login info
+	*TokenResp     // login info
+	*CoreLoginResp // core login info
refreshTokenFunc func() error
}
@@ -333,22 +368,17 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
}
func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
-	hi := file.GetHash()
-	gcid := hi.GetHash(hash_extend.GCID)
+	gcid := file.GetHash().GetHash(hash_extend.GCID)
+	var err error
if len(gcid) < hash_extend.GCID.Width {
-		tFile, err := file.CacheFullInTempFile()
-		if err != nil {
-			return err
-		}
-		gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
+		_, gcid, err = stream.CacheFullInTempFileAndHash(file, hash_extend.GCID, file.GetSize())
if err != nil {
return err
}
}
var resp UploadTaskResponse
-	_, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
+	_, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&base.Json{
"kind": FILE,
@@ -437,6 +467,10 @@ func (xc *XunLeiCommon) SetTokenResp(tr *TokenResp) {
xc.TokenResp = tr
}
func (xc *XunLeiCommon) SetCoreTokenResp(tr *CoreLoginResp) {
xc.CoreLoginResp = tr
}
// request that carries Authorization and CaptchaToken
func (xc *XunLeiCommon) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
data, err := xc.Common.Request(url, method, func(req *resty.Request) {
@@ -465,7 +499,7 @@ func (xc *XunLeiCommon) Request(url string, method string, callback base.ReqCall
}
return nil, err
	case 9: // captcha token expired
-		if err = xc.RefreshCaptchaTokenAtLogin(GetAction(method, url), xc.UserID); err != nil {
+		if err = xc.RefreshCaptchaTokenAtLogin(GetAction(method, url), xc.TokenResp.UserID); err != nil {
return nil, err
}
default:
@@ -497,20 +531,25 @@ func (xc *XunLeiCommon) RefreshToken(refreshToken string) (*TokenResp, error) {
// login
func (xc *XunLeiCommon) Login(username, password string) (*TokenResp, error) {
-	url := XLUSER_API_URL + "/auth/signin"
-	err := xc.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), username)
+	// v3 login obtains the sessionID
+	sessionID, err := xc.CoreLogin(username, password)
	if err != nil {
		return nil, err
	}
+	// v1 login exchanges it for the token
+	url := XLUSER_API_URL + "/auth/signin/token"
+	if err = xc.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), username); err != nil {
+		return nil, err
+	}
var resp TokenResp
_, err = xc.Common.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetPathParam("client_id", xc.ClientID)
req.SetBody(&SignInRequest{
CaptchaToken: xc.GetCaptchaToken(),
ClientID: xc.ClientID,
ClientSecret: xc.ClientSecret,
Username: username,
Password: password,
+			Provider:    SignProvider,
+			SigninToken: sessionID,
})
}, &resp)
if err != nil {
@@ -586,3 +625,48 @@ func (xc *XunLeiCommon) DeleteOfflineTasks(ctx context.Context, taskIDs []string
}
return nil
}
func (xc *XunLeiCommon) CoreLogin(username string, password string) (sessionID string, err error) {
url := XLUSER_API_BASE_URL + "/xluser.core.login/v3/login"
var resp CoreLoginResp
res, err := xc.Common.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetHeader("User-Agent", "android-ok-http-client/xl-acc-sdk/version-5.0.12.512000")
req.SetBody(&CoreLoginRequest{
ProtocolVersion: "301",
SequenceNo: "1000012",
PlatformVersion: "10",
IsCompressed: "0",
Appid: APPID,
ClientVersion: "8.31.0.9726",
PeerID: "00000000000000000000000000000000",
AppName: "ANDROID-com.xunlei.downloadprovider",
SdkVersion: "512000",
Devicesign: generateDeviceSign(xc.DeviceID, xc.PackageName),
NetWorkType: "WIFI",
ProviderName: "NONE",
DeviceModel: "M2004J7AC",
DeviceName: "Xiaomi_M2004j7ac",
OSVersion: "12",
Creditkey: xc.GetCreditKey(),
Hl: "zh-CN",
UserName: username,
PassWord: password,
VerifyKey: "",
VerifyCode: "",
IsMd5Pwd: "0",
})
}, nil)
if err != nil {
return "", err
}
if err = utils.Json.Unmarshal(res, &resp); err != nil {
return "", err
}
xc.SetCoreTokenResp(&resp)
sessionID = resp.SessionID
return sessionID, nil
}

View File

@@ -23,23 +23,25 @@ type ExpertAddition struct {
RefreshToken string `json:"refresh_token" required:"true" help:"login type is refresh_token,this is required"`
	// signing method 1
-	Algorithms string `json:"algorithms" required:"true" help:"sign type is algorithms,this is required" default:"HPxr4BVygTQVtQkIMwQH33ywbgYG5l4JoR,GzhNkZ8pOBsCY+7,v+l0ImTpG7c7/,e5ztohgVXNP,t,EbXUWyVVqQbQX39Mbjn2geok3/0WEkAVxeqhtx857++kjJiRheP8l77gO,o7dvYgbRMOpHXxCs,6MW8TD8DphmakaxCqVrfv7NReRRN7ck3KLnXBculD58MvxjFRqT+,kmo0HxCKVfmxoZswLB4bVA/dwqbVAYghSb,j,4scKJNdd7F27Hv7tbt"`
+	Algorithms string `json:"algorithms" required:"true" help:"sign type is algorithms,this is required" default:"9uJNVj/wLmdwKrJaVj/omlQ,Oz64Lp0GigmChHMf/6TNfxx7O9PyopcczMsnf,Eb+L7Ce+Ej48u,jKY0,ASr0zCl6v8W4aidjPK5KHd1Lq3t+vBFf41dqv5+fnOd,wQlozdg6r1qxh0eRmt3QgNXOvSZO6q/GXK,gmirk+ciAvIgA/cxUUCema47jr/YToixTT+Q6O,5IiCoM9B1/788ntB,P07JH0h6qoM6TSUAK2aL9T5s2QBVeY9JWvalf,+oK0AN"`
	// signing method 2
	CaptchaSign string `json:"captcha_sign" required:"true" help:"sign type is captcha_sign,this is required"`
	Timestamp string `json:"timestamp" required:"true" help:"sign type is captcha_sign,this is required"`
	// captcha
	CaptchaToken string `json:"captcha_token"`
+	// credit (trust) key
+	CreditKey string `json:"credit_key" help:"credit key,used for login"`
	// required and affects login; determined by the signing method
-	DeviceID string `json:"device_id" required:"true" default:"9aa5c268e7bcfc197a9ad88e2fb330e5"`
+	DeviceID string `json:"device_id" default:""`
ClientID string `json:"client_id" required:"true" default:"Xp6vsxz_7IYVw2BB"`
ClientSecret string `json:"client_secret" required:"true" default:"Xp6vsy4tN9toTVdMSpomVdXpRmES"`
-	ClientVersion string `json:"client_version" required:"true" default:"7.51.0.8196"`
+	ClientVersion string `json:"client_version" required:"true" default:"8.31.0.9726"`
	PackageName string `json:"package_name" required:"true" default:"com.xunlei.downloadprovider"`
	// does not affect login, but affects download speed
-	UserAgent string `json:"user_agent" required:"true" default:"ANDROID-com.xunlei.downloadprovider/7.51.0.8196 netWorkType/4G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/220200 Oauth2Client/0.9 (Linux 4_14_186-perf-gdcf98eab238b) (JAVA 0)"`
+	UserAgent string `json:"user_agent" required:"true" default:"ANDROID-com.xunlei.downloadprovider/8.31.0.9726 netWorkType/5G appid/40 deviceName/Xiaomi_M2004j7ac deviceModel/M2004J7AC OSVersion/12 protocolVersion/301 platformVersion/10 sdkVersion/512000 Oauth2Client/0.9 (Linux 4_14_186-perf-gddfs8vbb238b) (JAVA 0)"`
DownloadUserAgent string `json:"download_user_agent" required:"true" default:"Dalvik/2.1.0 (Linux; U; Android 12; M2004J7AC Build/SP1A.210812.016)"`
// prefer the video link over the download link
@@ -74,6 +76,10 @@ type Addition struct {
Username string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
CaptchaToken string `json:"captcha_token"`
+	// credit (trust) key
+	CreditKey string `json:"credit_key" help:"credit key,used for login"`
+	// login device ID
+	DeviceID string `json:"device_id" default:""`
}
// login signature, used to decide whether a fresh login is needed

View File

@@ -18,6 +18,10 @@ type ErrResp struct {
}
func (e *ErrResp) IsError() bool {
if e.ErrorMsg == "success" {
return false
}
return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ErrorDescription != ""
}
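
A quick check of the success fast-path added above, with the struct reduced to just the fields IsError inspects (a sketch, not the driver's full type):

```go
package main

import "fmt"

// trimmed-down ErrResp carrying only the fields IsError touches
type ErrResp struct {
	ErrorCode        int
	ErrorMsg         string
	ErrorDescription string
}

func (e *ErrResp) IsError() bool {
	if e.ErrorMsg == "success" {
		return false
	}
	return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ErrorDescription != ""
}

func main() {
	ok := &ErrResp{ErrorMsg: "success"}
	bad := &ErrResp{ErrorMsg: "review_panel"}
	fmt.Println(ok.IsError(), bad.IsError()) // false true
}
```
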
@@ -61,13 +65,79 @@ func (t *TokenResp) Token() string {
}
type SignInRequest struct {
CaptchaToken string `json:"captcha_token"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
Username string `json:"username"`
Password string `json:"password"`
Provider string `json:"provider"`
SigninToken string `json:"signin_token"`
}
type CoreLoginRequest struct {
ProtocolVersion string `json:"protocolVersion"`
SequenceNo string `json:"sequenceNo"`
PlatformVersion string `json:"platformVersion"`
IsCompressed string `json:"isCompressed"`
Appid string `json:"appid"`
ClientVersion string `json:"clientVersion"`
PeerID string `json:"peerID"`
AppName string `json:"appName"`
SdkVersion string `json:"sdkVersion"`
Devicesign string `json:"devicesign"`
NetWorkType string `json:"netWorkType"`
ProviderName string `json:"providerName"`
DeviceModel string `json:"deviceModel"`
DeviceName string `json:"deviceName"`
OSVersion string `json:"OSVersion"`
Creditkey string `json:"creditkey"`
Hl string `json:"hl"`
UserName string `json:"userName"`
PassWord string `json:"passWord"`
VerifyKey string `json:"verifyKey"`
VerifyCode string `json:"verifyCode"`
IsMd5Pwd string `json:"isMd5Pwd"`
}
type CoreLoginResp struct {
Account string `json:"account"`
Creditkey string `json:"creditkey"`
/* Error string `json:"error"`
ErrorCode string `json:"errorCode"`
ErrorDescription string `json:"error_description"`*/
ExpiresIn int `json:"expires_in"`
IsCompressed string `json:"isCompressed"`
IsSetPassWord string `json:"isSetPassWord"`
KeepAliveMinPeriod string `json:"keepAliveMinPeriod"`
KeepAlivePeriod string `json:"keepAlivePeriod"`
LoginKey string `json:"loginKey"`
NickName string `json:"nickName"`
PlatformVersion string `json:"platformVersion"`
ProtocolVersion string `json:"protocolVersion"`
SecureKey string `json:"secureKey"`
SequenceNo string `json:"sequenceNo"`
SessionID string `json:"sessionID"`
Timestamp string `json:"timestamp"`
UserID string `json:"userID"`
UserName string `json:"userName"`
UserNewNo string `json:"userNewNo"`
Version string `json:"version"`
/* VipList []struct {
ExpireDate string `json:"expireDate"`
IsAutoDeduct string `json:"isAutoDeduct"`
IsVip string `json:"isVip"`
IsYear string `json:"isYear"`
PayID string `json:"payId"`
PayName string `json:"payName"`
Register string `json:"register"`
Vasid string `json:"vasid"`
VasType string `json:"vasType"`
VipDayGrow string `json:"vipDayGrow"`
VipGrow string `json:"vipGrow"`
VipLevel string `json:"vipLevel"`
Icon struct {
General string `json:"general"`
Small string `json:"small"`
} `json:"icon"`
} `json:"vipList"`*/
}
/*
@@ -251,3 +321,29 @@ type Params struct {
PredictSpeed string `json:"predict_speed"`
PredictType string `json:"predict_type"`
}
// LoginReviewResp login verification response
type LoginReviewResp struct {
Creditkey string `json:"creditkey"`
Error string `json:"error"`
ErrorCode string `json:"errorCode"`
ErrorDesc string `json:"errorDesc"`
ErrorDescURL string `json:"errorDescUrl"`
ErrorIsRetry int `json:"errorIsRetry"`
ErrorDescription string `json:"error_description"`
IsCompressed string `json:"isCompressed"`
PlatformVersion string `json:"platformVersion"`
ProtocolVersion string `json:"protocolVersion"`
Reviewurl string `json:"reviewurl"`
SequenceNo string `json:"sequenceNo"`
UserID string `json:"userID"`
VerifyType string `json:"verifyType"`
}
// ReviewData verification data
type ReviewData struct {
Creditkey string `json:"creditkey"`
Reviewurl string `json:"reviewurl"`
Deviceid string `json:"deviceid"`
Devicesign string `json:"devicesign"`
}

View File

@@ -1,8 +1,10 @@
package thunder
import (
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -15,10 +17,11 @@ import (
)
const (
-	API_URL        = "https://api-pan.xunlei.com/drive/v1"
-	FILE_API_URL   = API_URL + "/files"
-	TASK_API_URL   = API_URL + "/tasks"
-	XLUSER_API_URL = "https://xluser-ssl.xunlei.com/v1"
+	API_URL             = "https://api-pan.xunlei.com/drive/v1"
+	FILE_API_URL        = API_URL + "/files"
+	TASK_API_URL        = API_URL + "/tasks"
+	XLUSER_API_BASE_URL = "https://xluser-ssl.xunlei.com"
+	XLUSER_API_URL      = XLUSER_API_BASE_URL + "/v1"
)
const (
@@ -34,6 +37,12 @@ const (
UPLOAD_TYPE_URL = "UPLOAD_TYPE_URL"
)
const (
SignProvider = "access_end_point_token"
APPID = "40"
APPKey = "34a062aaa22f906fca4fefe9fb3a3021"
)
func GetAction(method string, url string) string {
urlpath := regexp.MustCompile(`://[^/]+((/[^/\s?#]+)*)`).FindStringSubmatch(url)[1]
return method + ":" + urlpath
@@ -44,6 +53,8 @@ type Common struct {
captchaToken string
+	creditKey string
	// signature related; choose one of the two
Algorithms []string
Timestamp, CaptchaSign string
@@ -69,6 +80,13 @@ func (c *Common) GetCaptchaToken() string {
return c.captchaToken
}
func (c *Common) SetCreditKey(creditKey string) {
c.creditKey = creditKey
}
func (c *Common) GetCreditKey() string {
return c.creditKey
}
// refresh the captcha token (after login)
func (c *Common) RefreshCaptchaTokenAtLogin(action, userID string) error {
metas := map[string]string{
@@ -170,12 +188,53 @@ func (c *Common) Request(url, method string, callback base.ReqCallback, resp int
var erron ErrResp
utils.Json.Unmarshal(res.Body(), &erron)
if erron.IsError() {
+		// review_panel means SMS verification is required
+		if erron.ErrorMsg == "review_panel" {
+			return nil, c.getReviewData(res)
+		}
return nil, &erron
}
return res.Body(), nil
}
// fetch the data needed for verification
func (c *Common) getReviewData(res *resty.Response) error {
var reviewResp LoginReviewResp
var reviewData ReviewData
if err := utils.Json.Unmarshal(res.Body(), &reviewResp); err != nil {
return err
}
deviceSign := generateDeviceSign(c.DeviceID, c.PackageName)
reviewData = ReviewData{
Creditkey: reviewResp.Creditkey,
Reviewurl: reviewResp.Reviewurl + "&deviceid=" + deviceSign,
Deviceid: deviceSign,
Devicesign: deviceSign,
}
// serialize reviewData to a JSON string
reviewDataJSON, _ := json.MarshalIndent(reviewData, "", " ")
//reviewDataJSON, _ := json.Marshal(reviewData)
return fmt.Errorf(`
<div style="font-family: Arial, sans-serif; padding: 15px; border-radius: 5px; border: 1px solid #e0e0e0;>
<h3 style="color: #d9534f; margin-top: 0;">
<span style="font-size: 16px;">🔒 本次登录需要验证</span><br>
<span style="font-size: 14px; font-weight: normal; color: #666;">This login requires verification</span>
</h3>
<p style="font-size: 14px; margin-bottom: 15px;">下面是验证所需要的数据,具体使用方法请参照对应的驱动文档<br>
<span style="color: #666; font-size: 13px;">Below are the relevant verification data. For specific usage methods, please refer to the corresponding driver documentation.</span></p>
<div style="border: 1px solid #ddd; border-radius: 4px; padding: 10px; overflow-x: auto; font-family: 'Courier New', monospace; font-size: 13px;">
<pre style="margin: 0; white-space: pre-wrap;"><code>%s</code></pre>
</div>
</div>`, string(reviewDataJSON))
}
// compute the file's GCID
func getGcid(r io.Reader, size int64) (string, error) {
calcBlockSize := func(j int64) int64 {
@@ -201,3 +260,24 @@ func getGcid(r io.Reader, size int64) (string, error) {
}
return hex.EncodeToString(hash1.Sum(nil)), nil
}
func generateDeviceSign(deviceID, packageName string) string {
signatureBase := fmt.Sprintf("%s%s%s%s", deviceID, packageName, APPID, APPKey)
sha1Hash := sha1.New()
sha1Hash.Write([]byte(signatureBase))
sha1Result := sha1Hash.Sum(nil)
sha1String := hex.EncodeToString(sha1Result)
md5Hash := md5.New()
md5Hash.Write([]byte(sha1String))
md5Result := md5Hash.Sum(nil)
md5String := hex.EncodeToString(md5Result)
deviceSign := fmt.Sprintf("div101.%s%s", deviceID, md5String)
return deviceSign
}
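
generateDeviceSign chains SHA-1 over deviceID+packageName+APPID+APPKey, hex-encodes it, MD5-hashes that hex string, and prefixes the result with "div101." plus the device ID. The same steps in a compact standalone form, with the constants taken from the diff above (the sample device ID is the old default from addition.go):

```go
package main

import (
	"crypto/md5"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func deviceSign(deviceID, packageName, appID, appKey string) string {
	sha := sha1.Sum([]byte(deviceID + packageName + appID + appKey))
	md := md5.Sum([]byte(hex.EncodeToString(sha[:])))
	return "div101." + deviceID + hex.EncodeToString(md[:])
}

func main() {
	fmt.Println(deviceSign(
		"9aa5c268e7bcfc197a9ad88e2fb330e5", // sample device ID
		"com.xunlei.downloadprovider",
		"40",                               // APPID
		"34a062aaa22f906fca4fefe9fb3a3021", // APPKey
	))
}
```
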

View File

@@ -4,10 +4,15 @@ import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
streamPkg "github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
"github.com/aws/aws-sdk-go/aws"
@@ -15,9 +20,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/go-resty/resty/v2"
"io"
"net/http"
"strings"
)
type ThunderBrowser struct {
@@ -456,15 +458,10 @@ func (xc *XunLeiBrowserCommon) Remove(ctx context.Context, obj model.Obj) error
}
func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	hi := stream.GetHash()
-	gcid := hi.GetHash(hash_extend.GCID)
+	gcid := stream.GetHash().GetHash(hash_extend.GCID)
+	var err error
if len(gcid) < hash_extend.GCID.Width {
-		tFile, err := stream.CacheFullInTempFile()
-		if err != nil {
-			return err
-		}
-		gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+		_, gcid, err = streamPkg.CacheFullInTempFileAndHash(stream, hash_extend.GCID, stream.GetSize())
if err != nil {
return err
}
@@ -481,7 +478,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
}
var resp UploadTaskResponse
-	_, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
+	_, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&js)
}, &resp)

View File

@@ -3,11 +3,15 @@ package thunderx
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/utils"
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
"github.com/aws/aws-sdk-go/aws"
@@ -15,8 +19,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/go-resty/resty/v2"
"net/http"
"strings"
)
type ThunderX struct {
@@ -364,22 +366,17 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error {
}
func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
-	hi := file.GetHash()
-	gcid := hi.GetHash(hash_extend.GCID)
+	gcid := file.GetHash().GetHash(hash_extend.GCID)
+	var err error
if len(gcid) < hash_extend.GCID.Width {
-		tFile, err := file.CacheFullInTempFile()
-		if err != nil {
-			return err
-		}
-		gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
+		_, gcid, err = stream.CacheFullInTempFileAndHash(file, hash_extend.GCID, file.GetSize())
if err != nil {
return err
}
}
var resp UploadTaskResponse
-	_, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
+	_, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
r.SetContext(ctx)
r.SetBody(&base.Json{
"kind": FILE,

View File

@@ -243,7 +243,25 @@ func (d *Urls) PutURL(ctx context.Context, dstDir model.Obj, name, url string) (
}
func (d *Urls) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	return errs.UploadNotSupported
+	if !d.Writable {
+		return errs.PermissionDenied
+	}
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+	node := GetNodeFromRootByPath(d.root, dstDir.GetPath()) // parent
+	if node == nil {
+		return errs.ObjectNotFound
+	}
+	if node.isFile() {
+		return errs.NotFolder
+	}
+	file, err := parseFileLine(stream.GetName(), d.HeadSize)
+	if err != nil {
+		return err
+	}
+	node.Children = append(node.Children, file)
+	d.updateStorage()
+	return nil
}
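
The new Put never touches remote storage: it checks the writable flag, then mutates the in-memory URL tree under the driver mutex and persists the result. The guarded-insert shape, reduced to a sketch with hypothetical node/store types (not the driver's actual ones):

```go
package main

import (
	"fmt"
	"sync"
)

type node struct {
	name     string
	isFile   bool
	children []*node
}

type store struct {
	mu   sync.Mutex
	root *node
}

// addFile mirrors the pattern above: take the lock, locate the parent,
// validate it is a folder, then append the child.
func (s *store) addFile(parent *node, name string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if parent == nil {
		return fmt.Errorf("object not found")
	}
	if parent.isFile {
		return fmt.Errorf("not a folder")
	}
	parent.children = append(parent.children, &node{name: name, isFile: true})
	return nil
}

func main() {
	s := &store{root: &node{name: "/"}}
	if err := s.addFile(s.root, "example.txt"); err != nil {
		panic(err)
	}
	fmt.Println(len(s.root.children)) // 1
}
```
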
func (d *Urls) updateStorage() {

View File

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
umask ${UMASK}

Some files were not shown because too many files have changed in this diff.