Compare commits

...

70 Commits
v4.1.3 ... main

Author SHA1 Message Date
KirCute
60a489eb68 feat(archive): support non-overwrite decompress (#1701) 2025-11-25 10:27:29 +08:00
varg1714
b22e211044 feat(fs): Add skipExisting option to move and copy, merge option to copy (#1556)
* fix(fs): Add skipExisting option to move and copy.

* feat(fs): Add merge option to copy.

* feat(fs): Code smell.

* feat(fs): Code smell.
2025-11-24 14:20:24 +08:00
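Note: a minimal Go sketch of what a skip-existing decision means for a copy, using hypothetical names (copyFile, skipExisting); OpenList's actual fs implementation differs:

package fscopy

import (
	"io"
	"os"
)

// copyFile copies src to dst; with skipExisting it silently keeps an
// existing destination instead of overwriting it.
func copyFile(src, dst string, skipExisting bool) error {
	if skipExisting {
		if _, err := os.Stat(dst); err == nil {
			return nil // destination already exists: skip
		}
	}
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, in)
	return err
}

The merge option extends the same idea to directories: rather than failing when the destination folder already exists, the copy descends into it and applies the same per-file decision.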
KirCute
ca401b9af9 fix(local): assign non-CoW copy requests to the task module (#1669)
* fix(local): assign non-CoW copy requests to the task module

* fix build

* fix cross device
2025-11-24 14:14:53 +08:00
jenfonro
addce8b691 feat(baidu_netdisk): Add shard upload timeout setting (#1682)
add timeout
2025-11-24 14:14:34 +08:00
Seven
42fc841dc1 feat(strm): custom path prefixes (#1697)
fix(strm): custom path prefixes

Signed-off-by: ShenLin <773933146@qq.com>
Co-authored-by: ShenLin <773933146@qq.com>
2025-11-24 14:11:59 +08:00
varg1714
4c0916b64b fix(strm): fix the name and type issue (#1630)
* fix(strm): fix the name and type issue

* fix(strm): update version
2025-11-24 14:05:49 +08:00
VXTLS
3989d35abd fix(misskey): folderId format validation and root directory handling (#1647)
fix(misskey): Fix folderId format validation and root directory handling
2025-11-21 12:18:54 +08:00
KirCute
72e2ae1f14 feat(fs): support manually trigger objs update hook (#1620)
* feat(fs): support manually trigger objs update hook

* fix: support driver internal copy & move case

* fix

* fix: apply suggestions of Copilot
2025-11-21 12:18:20 +08:00
Seven
3e37f575d8 fix(openlist_driver): ensure UA is correctly propagated (#1679) 2025-11-21 12:13:41 +08:00
MoYan
c0d480366d fix(driver/123): initialize Platform field (#1644)
* fix(driver/123): initialize the Platform field

Signed-off-by: MoYan <1561515308@qq.com>

* Fix formatting of Platform field in Pan123

Signed-off-by: MoYan <1561515308@qq.com>

---------

Signed-off-by: MoYan <1561515308@qq.com>
2025-11-14 18:49:44 +08:00
Copilot
9de7561154 feat(upload): add optional system file filtering for uploads (#1634) 2025-11-14 14:45:39 +08:00
MadDogOwner
0866b9075f fix(link): correct link cache mode bitwise comparison (#1635)
* fix(link): correct link cache mode bitwise comparison

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* refactor(link): use explicit flag equality for link cache mode bitmask checks

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-11-13 13:52:33 +08:00
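Note: for context, the class of bug being fixed here, sketched with hypothetical constants (not the project's actual driver.LinkCache values): for a combined bitmask, mode&flag != 0 is true as soon as any bit overlaps, while explicit equality requires every bit of the flag to be set.

package main

import "fmt"

const (
	LinkCacheIP = 1 << iota // 0b01
	LinkCacheUA             // 0b10
)

func main() {
	mode := LinkCacheIP               // only the IP bit is set
	both := LinkCacheIP | LinkCacheUA // combined flag, 0b11
	fmt.Println(mode&both != 0)    // true: loose check, misleading
	fmt.Println(mode&both == both) // false: explicit equality, correct
}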
KirCute
055696f576 feat(s3): support frontend direct upload (#1631)
* feat(s3): support frontend direct upload

* feat(s3): support custom direct upload host

* fix: apply suggestions of Copilot
2025-11-13 13:22:17 +08:00
ShenLin
854415160c chore(issue templates): require logs (#1626) 2025-11-12 13:04:13 +08:00
varg1714
8f4f7d1291 feat(doubao): Add rate limiting (#1618) 2025-11-11 21:59:10 +08:00
KirCute
ee2c77acd8 fix(archive/zip): use specified encoding for non-EFS zips (#1599)
* fix(archive/zip): use specified encoding for non-EFS zips

* fix(stream): simplify head cache initialization and improve reader retrieval logic

* fix: support multipart zips (.z01)

* chore(deps): update github.com/KirCute/zip to v1.0.1

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: Pikachu Ren <40362270+PIKACHUIM@users.noreply.github.com>
2025-11-10 19:08:50 +08:00
yuyamionini
fc90ec1b53 fix(terabox): wrong return code used (#1547)
fix(terabox): rename, delete, copy operations sometimes failed

Signed-off-by: yuyamionini <46483865+yuyamionini@users.noreply.github.com>
2025-11-10 13:40:00 +08:00
jenfonro
7d78944d14 fix(baidu_netdisk): Fix Baidu Netdisk resume uploads sticking to the same upload host (#1609)
Fix Baidu Netdisk resume uploads sticking to the same upload host
2025-11-09 20:43:02 +08:00
jenfonro
f2e0fe8589 refactor(fs): implement immediate retry within task execution cycle (#1575) 2025-11-07 19:11:11 +08:00
ASLant
39dcf9bd19 feat(onedrive): support frontend direct upload (#1532)
* Add direct upload to OneDrive

* refactor

* fix: duplicate root path join

---------

Co-authored-by: KirCute <951206789@qq.com>
2025-11-06 23:22:02 +08:00
Seven
25f38df4ca fix(strm): non-specified type generates strm (#1585)
* fix(strm): non-specified type generates strm

* fix(strm): only insert to strmTrie if SaveStrmToLocal is enabled

* fix(strm): update suffix handling in convert2strmObjs function

* fix(strm): refactor generateStrm to use range reader

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-11-06 20:58:43 +08:00
j2rong4cn
a1f1f98f94 refactor(stream): simplify code (#1590)
* refactor(stream): simplify Close method and update SeekableStream to use RangeReader interface

* refactor(stream):  improve RangeRead comments for clarity
2025-11-06 20:06:48 +08:00
KirCute
affc499913 fix(189): disk-usage unmarshal failed when used capacity overflow (#1577) 2025-11-05 12:35:51 +08:00
KirCute
c7574b545c feat(github_release): support Source code (zip/tar.gz) (#1581)
* support Github Release Source code (zip/tar.gz)

* fix TarballUrl and ZipballUrl

* fix show source code by allversion

---------

Co-authored-by: nibazshab <44338441+nibazshab@users.noreply.github.com>
2025-11-05 12:30:05 +08:00
hcrgm
9e852ba12d fix(baidu_netdisk): improve upload experience (#1562)
* fix(baidu_netdisk): improve upload experience

* fix(typo): URL should be uppercase, apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: ShenLin <773933146@qq.com>

* fix(typo): URL should be uppercase, apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: ShenLin <773933146@qq.com>

* fix(baidu_netdisk): use "UploadAPI" as a fallback when using dynamic upload api

* fix(baidu_netdisk): all uploads share the same upload url cache

* fix(drivers/baidu_netdisk): defer uploadUrlMu unlock

* update driver.go to main

---------

Signed-off-by: ShenLin <773933146@qq.com>
Signed-off-by: jenfonro <799170122@qq.com>
Co-authored-by: ShenLin <773933146@qq.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: jenfonro <799170122@qq.com>
2025-11-05 12:21:32 +08:00
j2rong4cn
174eae802a perf(stream): optimize CacheFullAndWriter for better memory management (#1584)
* perf(stream): optimize CacheFullAndWriter for better memory management

* fix(stream): ensure proper seek handling in CacheFullAndWriter for improved data integrity
2025-11-05 12:16:09 +08:00
KirCute
b9f058fcc9 fix(backup-restore): add shares (#1500) 2025-11-05 12:11:20 +08:00
j2rong4cn
6de15b6310 feat(stream): enhance GetRangeReaderFromLink rate limiting (#1528)
* feat(stream): enhance GetRangeReaderFromLink rate limiting

* refactor(stream): update GetRangeReaderFromMFile to return *model.FileRangeReader

* refactor(stream): simplify context error handling in RateLimitReader, RateLimitWriter, and RateLimitFile

* refactor(net): replace custom LimitedReadCloser with readers.NewLimitedReadCloser

* fix(model): update Link.ContentLength JSON tag for correct serialization

* docs(model): add clarification to FileRangeReader usage comment
2025-11-04 23:56:09 +08:00
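Note: a rough illustration of what reader-level rate limiting involves, built on golang.org/x/time/rate; the project's RateLimitReader may differ in detail:

package ratelimit

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// limitedReader charges each read against a shared bytes-per-second budget.
type limitedReader struct {
	r       io.Reader
	ctx     context.Context
	limiter *rate.Limiter // burst must be >= the largest read size
}

func (l *limitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		// Block until n bytes of budget are available; WaitN honors ctx
		// cancellation, which is what the context-error simplification
		// in this PR is concerned with.
		if werr := l.limiter.WaitN(l.ctx, n); werr != nil {
			return n, werr
		}
	}
	return n, err
}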
jenfonro
2844797684 fix(baidu_netdisk): support resuming uploads when an error occurs (#1279)
support resuming uploads when an error occurs
2025-11-04 13:33:33 +08:00
Seven
9f4e439478 chore(strm): Built-in file types support modification (#1483) 2025-11-04 10:33:16 +08:00
jenfonro
9d09ee133d fix(google_driver): fix google link file display size (#1335)
* fix file link display size

* fix performance and field

* cn to en notes

---------

Co-authored-by: ShenLin <773933146@qq.com>
2025-11-04 09:26:20 +08:00
jenfonro
d88f0e8f3c feat(net): support proxy configuration via config file (#1359)
* support proxy

* debug

* debug2

* del debug

* add proxy configuration with env var fallback

* comments to en

* refactor(env): fallback env

---------

Co-authored-by: jyxjjj <773933146@qq.com>
2025-11-04 09:01:35 +08:00
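Note: a minimal sketch of the config-then-environment fallback; field and function names are hypothetical, not the PR's code:

package netutil

import (
	"net/http"
	"net/url"
)

// proxyFunc prefers an explicitly configured proxy URL and falls back to
// the standard HTTP_PROXY/HTTPS_PROXY/NO_PROXY environment variables.
func proxyFunc(configured string) (func(*http.Request) (*url.URL, error), error) {
	if configured == "" {
		return http.ProxyFromEnvironment, nil
	}
	u, err := url.Parse(configured)
	if err != nil {
		return nil, err
	}
	return http.ProxyURL(u), nil
}

The returned function would then be wired into an http.Transport via its Proxy field.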
ex-hentai
0857478516 feat(thunder): allow setting space (#1219)
allows access to files on remote devices via Thunder's tunneling service.
2025-11-03 10:53:38 +08:00
Seven
66d9809057 feat(strm): strm local file (#1127)
* feat(strm): strm local file

* feat: code optimization

* feat: also update when a strm-mounted path is accessed

* fix: missing check for trailing / in paths

* fix: missing check for trailing / in paths

* refactor

* refactor

* fix: close seekable-stream in `generateStrm`

* refactor: lazy create local file

* optimize path checking

---------

Co-authored-by: KirCute <kircute@foxmail.com>
2025-11-03 10:48:15 +08:00
MoYan
db8a7e8caf feat(123): allow modification of the platform header (#1542)
* feat(drivers/123): Allow modification of the platform field

* feat(drivers/123): Set login platform as web

* fix(drivers/123): update platform field help value
2025-11-03 10:02:52 +08:00
walloo
8f18e34da0 fix(alias): nil panic in ResolveLinkCacheMode (#1527)
* fix(alias): Check the driver path during initialization

* fix(alias): Don't check the driver path during initialization anymore.
2025-10-23 22:18:56 +08:00
ShenLin
525f26dc23 feat(command): add --config flag to set custom config path (#1479) 2025-10-22 19:35:34 +08:00
NewbieOrange
a0fcfa3ed2 fix(aliyundrive_open): use safe disk usage calculation (#1510) 2025-10-20 22:05:14 +08:00
ILoveScratch
15f276537c fix(share): remove share when user delete (#1493) 2025-10-19 22:48:21 +08:00
MadDogOwner
623a12050e feat(openlist): add PassIPToUpsteam to driver (#1498) 2025-10-19 22:45:40 +08:00
D@' 3z K!7
ae2d2d1021 feat(drivers): add ProtonDrive driver (#1368)
* feat(drivers): add ProtonDrive driver

- Implement complete ProtonDrive storage driver with end-to-end encryption support
- Add authentication via username/password with credential caching and reusable login
- Support all core operations: List, Link, Put, Copy, Move, Remove, Rename, MakeDir
- Include encrypted file operations with PGP key management and node passphrase handling
- Add temporary HTTP server for secure file downloads with range request support
- Support media streaming using temp server range requests
- Implement progress tracking for uploads and downloads
- Support directory operations with circular move detection
- Add proper error handling and panic recovery for external library integration
- Support buffered upload for sequential and encrypted transfers, with optimized transmission.

* Update drivers/proton_drive/util.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com>

* chore

* feat(drivers): enhance ProtonDrive temp server

- Implement separate listen and public port configuration for complex network deployments
- Add intelligent port detection with 8080 as preferred default, fallback to auto-assignment
- Support Container/NAT/VM environments through configurable external host and port mapping
- Add port availability validation with graceful fallback to listen port
- Enable users to specify external domain/IP for client connections (e.g., 192.168.1.5)
- Follow FTP server configuration patterns for network flexibility
- Maintain localhost development simplicity while supporting production deployments

* feat(proton_drive): refactor directory handling and improve link retrieval

* fix(proton_drive): add NoLinkURL configuration option

* fix(proton_drive): update file size retrieval and enforce TwoFACode requirement

* feat(proton_drive): add expiration to link response

* fix(proton_drive): handle empty RootFolderID in Init method

* fix(proton_drive): update credential handling to use email and reusable login

* fix(proton_drive): update credential handling to use reusableCredential variable

* fix(proton_drive): update DirectRename to use GetLink for source object retrieval

* fix(proton_drive): refactor uploadFile to return model.Obj and handle errors correctly

* fix(proton_drive): refactor DirectMove to use getLink for source retrieval and simplify destination handling

* fix(proton_drive): simplify Copy method by removing temporary file creation and directly using FileStream

* refactor(proton_drive): remove unused temporary server and related code

* chore

* fix(proton_drive): fix driver

- Handle fresh login if ProtonDrive rejects AccessToken or RefreshToken
- Update stored credentials

* fix(proton_drive): simplify reusable login handling in Init method

* fix(proton_drive): fix driver

- Update stored credentials, now is failing

* feat(proton_drive): improve authentication handling and remove unused variables

* fix(proton_drive): fix driver

- Update stored credentials, now is failing

* fix(proton_drive): improve authentication handling

* refactor(proton_drive): move client initialization to initClient method

* feat(proton_drive): move addrs and addrKRs

* feat(proton_drive): optimize upload threads

- Change ConcurrentBlockUploadCount to user configured upload threads number
- Comment ConcurrentFileCryptoCount, default is runtime.GOMAXPROCS(0)

---------

Signed-off-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: KirCute <951206789@qq.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: KirCute <kircute@foxmail.com>
2025-10-19 12:07:11 +08:00
jerrita
a109152a13 feat(onedrive): add ref support (#1435)
* feat(onedrive): add ref support

* fix(onedrive): remove redundant token assignment from reference

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-10-18 21:48:26 +08:00
ILoveScratch
febbcd6027 feat(cache): improve cache management (#1339)
* feat(cache): improve cache management

* feat(disk-usage): add cache

* feat(disk-usage): add refresh

* fix(disk-usage): cache with ttl

* feat(cache): implement KeyedCache and TypedCache for improved caching mechanism

* fix(copy): update object retrieval to use Get instead of GetUnwrap

* refactor(cache): simplify DirectoryCache structure and improve object management

* fix(cache): correct cache entry initialization and key deletion logic in TypedCache

* refactor(driver): remove GetObjInfo interface and simplify Link function logic
https://github.com/OpenListTeam/OpenList/pull/888/files#r2430925783

* fix(link): optimize link retrieval and caching logic

* refactor(cache): consolidate cache management and improve directory cache handling

* fix(cache): add cache control based on storage configuration in List function

* .

* refactor: replace fmt.Sprintf with strconv for integer conversions

* refactor(cache): enhance cache entry management with Expirable interface

* fix(cache): improve link reference acquisition logic to handle expiration

* refactor: replace OnlyLinkMFile with NoLinkSF in driver configurations and logic

* refactor(link): enhance link caching logic with dynamic type keys based on IP and User-Agent

* feat(drivers): add LinkCacheType to driver configurations for enhanced caching

* refactor(cache): streamline directory object management in cache operations

* refactor(cache): remove unnecessary 'dirty' field from CacheEntry structure

* refactor(cache): replace 'dirty' field with bitwise flags

* refactor(io): raise the priority of SyncClosers.AcquireReference

* refactor(link): optimize link retrieval logic and add retry

* refactor(link): add a RequireReference field to strengthen link management

* refactor(link): remove the MFile field and use RangeReader instead

* refactor: remove the unnecessary NoLinkSF field

* refactor(cache): change the directory cache's dirty-flag definition and update logic

* feat(cache): add expiration gc

---------

Co-authored-by: KirCute <951206789@qq.com>
Co-authored-by: KirCute <kircute@foxmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-10-18 21:47:18 +08:00
jenfonro
549e60136b fix(fs): fix retry task after restart (#1467)
* fix retry task after restart

* fix: initialize SrcStorage and DstStorage in tasks to prevent nil pointer dereference

* feat: implement storage load signal mechanism for improved synchronization

* fix: update StoragesLoaded logic

* refactor: reorganize storage loading logic and improve synchronization handling

---------

Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-10-18 20:52:02 +08:00
Tursom K. Ulefits
14d2b8290a feat(local): move deleted files to corresponding locations (#1281) 2025-10-17 18:01:02 +08:00
ShenLin
cdc069d8e7 feat(http3|quic): Set Alt-Svc header only for HTTPS requests (#1469) 2025-10-14 18:07:29 +08:00
zzzhr1990
fb5094f688 feat(drivers): add halalcloud_open driver (#1430)
* Add the HalalCloud Open driver, supporting the latest lightweight SDK

* Change Go version in go.mod

Downgrade Go version from 1.24.2 to 1.23.4

Signed-off-by: zzzhr1990 <zzzhr@hotmail.com>

* Apply suggestions from code review

* Removed unnecessary comments
* Downgraded the Go version to 1.23.4.
* Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: zzzhr1990 <zzzhr@hotmail.com>

* feat(halalcloud_open): support disk usage

* Set useSingleUpload to true for upload safety

Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.

Signed-off-by: zzzhr1990 <zzzhr@hotmail.com>

* Update meta.go

Change the required setting for RefreshToken: if using a personal API approach, the RefreshToken is not required.

Signed-off-by: zzzhr1990 <zzzhr@hotmail.com>

* remove debug logs

* bump halalcloud SDK version

* fix unnecessary params

* Update drivers/halalcloud_open/driver_init.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Signed-off-by: zzzhr1990 <zzzhr@hotmail.com>

* Fixed spelling errors; changed hardcoded retry parameters to constants.

* remove pointer in get link function in utils.go

---------

Signed-off-by: zzzhr1990 <zzzhr@hotmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: KirCute <951206789@qq.com>
2025-10-14 16:03:42 +08:00
KirCute
670e0bdc45 chore(frontend): optimize user operations (#1449) 2025-10-14 16:02:50 +08:00
ShenLin
89235012af feat(http3|quic): add http3|quic support (#1466)
* feat(http3|quic): add http3|quic support

* revert(ai): fix ai error

* fix(shutdown): shutdown was using close

* feat(http3|quic): add config if needs h3

* feat(http3|quic): add Alt-Svc to expose h3
2025-10-14 15:57:16 +08:00
KirCute
2bfbad2874 feat(offline_download): add 123 open (#1427) 2025-10-07 01:13:25 +08:00
KirCute
4ba7696032 feat(pikpak): support disk usage (#1426)
* feat(pikpak): support disk usage

* fix(alias): cannot list with details

* refactor: rename `NewDiskUsageFromUsedAndTotal`

* fix(disk-usage): get details of storages that are not initialized
2025-10-06 16:37:00 +08:00
KirCute
66645516e5 fix(ilanzou): wrong total capacity (#1433) 2025-10-06 16:35:10 +08:00
KirCute
eb2ff2d2ca fix(189pc/189tv): request panic when login failed (#1428) 2025-10-05 22:44:50 +08:00
NewbieOrange
4153245f2c fix(drivers): free space underflow if used larger than total space (#1407) 2025-10-04 00:41:45 +08:00
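Note: the underflow is the usual unsigned-subtraction hazard; for uint64 values, total - used wraps to an enormous number whenever used exceeds total, so the computation needs a clamp. A minimal sketch of the guard (illustrative, not the PR's exact code):

package diskusage

// freeSpace clamps instead of letting unsigned subtraction wrap around.
func freeSpace(total, used uint64) uint64 {
	if used > total {
		return 0
	}
	return total - used
}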
KirCute
6fe9af7819 fix(alias): stabilize list root result (#1401) 2025-10-01 21:05:04 +08:00
j2rong4cn
2edc446ced feat(stream): support using temporary files as a large buffer (#1399)
feat(stream): refactor StreamSectionReader to support using temporary files as a large buffer
2025-10-01 18:43:20 +08:00
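Note: a rough sketch of the spill-to-disk idea, with assumed names and threshold; the real StreamSectionReader differs:

package stream

import (
	"bytes"
	"io"
	"os"
)

// bufferSection materializes the next n bytes of r as a ReadSeeker,
// in memory for small sections and in a temporary file for large ones.
// The caller invokes the returned cleanup func when done.
func bufferSection(r io.Reader, n, memLimit int64) (io.ReadSeeker, func(), error) {
	if n <= memLimit {
		buf := make([]byte, n)
		if _, err := io.ReadFull(r, buf); err != nil {
			return nil, nil, err
		}
		return bytes.NewReader(buf), func() {}, nil
	}
	f, err := os.CreateTemp("", "section-*")
	if err != nil {
		return nil, nil, err
	}
	cleanup := func() { f.Close(); os.Remove(f.Name()) }
	if _, err := io.CopyN(f, r, n); err != nil {
		cleanup()
		return nil, nil, err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		cleanup()
		return nil, nil, err
	}
	return f, cleanup, nil
}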
j2rong4cn
c3c7983f7b perf(sync-closers): improve Close method (#1395)
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-01 18:06:10 +08:00
HG-ha
22deb4df30 fix(lanzou): auto handle acw_sc__v2 and down_ip=1 for all requests, improve secondary validation (#1394)
* Refactor upload and request handling in util.go

Refactor the request logic to avoid occasional "failed link: failed get link:" errors

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>

* Format and clean up code in util.go

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>

* go fmt util.go

---------

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>
2025-10-01 17:38:50 +08:00
j2rong4cn
da0c734aa3 fix(net): unable to pass HttpStatusCode (#1397) 2025-10-01 00:16:36 +08:00
ILoveScratch
189cebe4c9 feat(drivers): add MediaFire driver support (#1322)
* feat(drivers): add MediaFire driver support (#9319)

- Implement complete MediaFire storage driver
- Add authentication via session_token and cookie
- Support all core operations: List, Get, Link, Put, Copy, Move, Remove, Rename, MakeDir
- Include thumbnail generation for media files
- Handle MediaFire's resumable upload API with multi-unit transfers
- Add proper error handling and progress reporting

Co-authored-by: Da3zKi7 <da3zki7@duck.com>

* fix(mediafire): fix code errors in mediafire

* fix(mediafire): fix code errors in mediafire

* fix(drivers): add session renewal cron for MediaFire driver (#9321)

- Implement automatic session token renewal every 6-9 minutes
- Add validation for required SessionToken and Cookie fields in Init
- Handle session expiration by calling renewToken on validation failure
- Prevent storage failures due to MediaFire session timeouts

Fixes session closure issues that occur after server restarts or extended periods.

Co-authored-by: Da3zKi7 <da3zki7@duck.com>

* docs: restore README changes

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>

* fix

* fix

* fix: add stream upload limit

* fix

* fix: clear action token on drop and refactor header setting

* feat(drivers/mediafire): optimize file caching - support direct stream processing

- Remove forced caching to *os.File type
- Support generic model.File interface for better flexibility
- Improve upload efficiency by avoiding unnecessary file conversions
- Fix return type to use model.Object instead of model.ObjThumb

* feat(drivers/mediafire): improve global rate limiting

- Ensure all API methods properly use context for rate limiting
- Fix context parameter usage in getDirectDownloadLink, getActionToken, getFileByHash
- Maintain consistent rate limiting across all MediaFire API calls

* feat(drivers/mediafire): unify return types - remove unnecessary ObjThumb

- Change MakeDir, Rename, Copy methods to return model.Object instead of model.ObjThumb
- Remove empty Thumbnail fields where not meaningful
- Keep ObjThumb only for fileToObj (List operations) which provides actual thumbnail URLs
- Improve code consistency and reduce unnecessary wrapper objects

* refactor(drivers/mediafire): extract common error handling logic

- Add checkAPIResult helper function to reduce code duplication
- Replace repetitive MediaFire API error checks with centralized function
- Maintain specific error messages for unique cases (token, upload, search)
- Improve code maintainability and consistency

* enhance(drivers/mediafire): improve quick upload implementation

- Add null check for existingFile to prevent potential issues
- Improve error handling in quick upload - continue normal upload if search fails
- Add detailed comments explaining quick upload logic
- Optimize getExistingFileInfo with clearer fallback strategy
- Ensure upload reliability even when file search encounters issues

* refactor(drivers/mediafire): optimize request method reusability

- Extract common HTTP request logic into apiRequest method
- Reduce code duplication between getForm and postForm methods
- Maintain backward compatibility with existing method signatures
- Centralize rate limiting and header management
- Support extensible HTTP method handling

* docs(drivers/mediafire): add comprehensive English comments

- Add function-level comments for all major driver methods
- Document Init, List, Link, MakeDir, Move, Rename, Copy, Remove, Put methods
- Add comments for key utility functions including session token management
- Improve code readability and maintainability for community collaboration
- Follow Go documentation conventions with clear, concise descriptions

* perf(mediafire): optimize memory allocation and type assertion performance

- Pre-allocate slice capacity in getFiles and bitmap conversion to reduce reallocations
- Cache file type check in uploadUnits to avoid repeated type assertions
- Add uploadSingleUnitOptimized for os.File to eliminate redundant type checks
- Optimize string to int conversion with proper error handling
- Improve memory efficiency in file upload operations

* fix(mediafire): upload without cache

* feat(mediafire): add rate limiting to all API methods

- Add WaitLimit(ctx) calls to all driver methods: List, Link, MakeDir, Move, Rename, Copy, Remove, Put
- Ensure consistent rate limiting across all MediaFire API interactions
- Follow project standard pattern used by other drivers

* feat(mediafire): improve error handling consistency

- Add context parameter to all HTTP API functions for proper context propagation
- Update getForm, postForm and apiRequest to accept context parameter
- Fix rate limiting to use caller context instead of background context
- Ensure consistent error handling patterns across all API calls
- Improve cancellation and timeout support

* feat(mediafire): refactor resumableUpload to use io.ReadSeeker and improve upload handling

* fix(mediafire): release section reader

* feat: add disk usage

* feat(drivers/mediafire): support concurrent upload (#1387)

* feat(drivers): add MediaFire driver with concurrent upload support

- Implement complete MediaFire storage driver with session token authentication
- Support all core operations: List, Get, Link, Put, Copy, Move, Remove, Rename, MakeDir
- Include thumbnail generation for media files
- Handle MediaFire's resumable upload with intelligent and multi-unit transfers
- Support concurrent chunk uploads using errgroup.NewOrderedGroupWithContext, with split-file caching for large files
- Optimize memory usage with adaptive buffer sizing (10MB-100MB by default)
- Include rate limiting and retry logic for API requests
- Add proper error handling and progress reporting
- Handle MediaFire's bitmap-based resumable upload protocol

Closes PR #1322

* feat(stream): add DiscardSection method to StreamSectionReader for skipping data

* feat(mediafire): refactor resumableUpload logic for improved upload handling and error management

* fix(mediafire): stop cron job and clear action token in Drop method

* .

* fix(mediafire): optimize buffer sizing logic in uploadUnits method

* fix(docs): remove duplicate MediaFire

* fix(mediafire): revert 'optimization'; large files should not be fully cached.

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Co-authored-by: Da3zKi7 <da3zki7@duck.com>
Co-authored-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

* fix(mediafire): optimize hash calculation in uploadUnits function

* feat(drivers/mediafire): support concurrent upload  (#1366)

* feat(drivers): add MediaFire driver with concurrent upload support

- Implement complete MediaFire storage driver with session token authentication
- Support all core operations: List, Get, Link, Put, Copy, Move, Remove, Rename, MakeDir
- Include thumbnail generation for media files
- Handle MediaFire's resumable upload with intelligent and multi-unit transfers
- Support concurrent chunk uploads using errgroup.NewOrderedGroupWithContext, with split-file caching for large files
- Optimize memory usage with adaptive buffer sizing (10MB-100MB by default)
- Include rate limiting and retry logic for API requests
- Add proper error handling and progress reporting
- Handle MediaFire's bitmap-based resumable upload protocol

Closes PR #1322

* feat(stream): add DiscardSection method to StreamSectionReader for skipping data

* feat(mediafire): refactor resumableUpload logic for improved upload handling and error management

* fix(mediafire): stop cron job and clear action token in Drop method

* .

* fix(mediafire): optimize buffer sizing logic in uploadUnits method

* fix(docs): remove duplicate MediaFire

* fix(mediafire): revert 'optimization'; large files should not be fully cached.

---------

Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Signed-off-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>

---------

Signed-off-by: ILoveScratch <ilovescratch@foxmail.com>
Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
Signed-off-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com>
Co-authored-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com>
Co-authored-by: Da3zKi7 <da3zki7@duck.com>
Co-authored-by: KirCute <951206789@qq.com>
Co-authored-by: Suyunmeng <Susus0175@proton.me>
Co-authored-by: j2rong4cn <j2rong@qq.com>
Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
2025-09-30 21:55:41 +08:00
j2rong4cn
9d3da44a99 fix(chunk): ensure NumListWorkers is set during Chunk initialization (#1396) 2025-09-30 21:39:19 +08:00
j2rong4cn
8f17d35ed5 feat(chunk): add ChunkPrefix and ChunkLargeFileOnly options (#1321)
* fix(chunk): move chunk existence check to Link method

* feat(chunk): add chunk prefix configuration

* feat(chunk): add chunk_large_file_only configuration

* feat(chunk): concurrently list chunk folder

* refactor(chunk): remove unnecessary mutex for result handling in List method

---------

Co-authored-by: KirCute <kircute@foxmail.com>
2025-09-30 21:26:06 +08:00
HG-ha
89759b6e3b fix(lanzou): support acw_sc__v2 and secondary validation for download link (#1379)
* Add acw_sc__v2 validation to redirect links

In the latest Lanzou parsing, the acw_sc__v2 info must also be attached when following the final redirect to obtain the real address

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>

* Add acw_sc__v2 validation to redirect links

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>

* Add acw_sc__v2 validation to the redirected real download link

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>

* fix: CalcAcwScV2

* Add error handling for response body read

Handle error when reading response body.

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>

* Improve response handling and validation logic

Optimize redirect resource management and add handling for the secondary acw_sc__v2 human-verification check

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>

---------

Signed-off-by: HG-ha <60115106+HG-ha@users.noreply.github.com>
Co-authored-by: foxxorcat <foxxorcat@foxmail.com>
2025-09-30 17:07:30 +08:00
KirCute
a2fc38be8d perf(disk-usage): concurrently get details (#1326) 2025-09-29 23:26:56 +08:00
KirCute
e0414e7110 fix(offline_download/http): attach UA to the request (#1347)
fix(simple-http): attach UA to the request
2025-09-29 22:13:00 +08:00
KirCute
b486af0031 feat(sftp-server): support disable password login (#1357) 2025-09-29 21:46:55 +08:00
j2rong4cn
ea09ce4b8f fix(fs): improve error handling in op.Get (#1323) 2025-09-29 21:38:34 +08:00
KirCute
d465da43e3 fix(ftp-server): cannot get obj in uploading state inconsistency window (#1293)
* fix(ftp-server): cannot get obj in uploading state inconsistency window

* fix

* fix: duplicate obj when upload completed but client access does not

* fix

* feat: support stat remove and move
2025-09-23 01:00:18 +08:00
Dgs
84ed487950 feat(quark_uc_tv): add order by and direction options for file listing (#1325) 2025-09-21 20:43:32 +08:00
ILoveScratch
3c07144211 chore(announcement): change default announcement text (#1319)
* chore(announcement): change default announcement text for better clarity  

- Improve site announcement style for a more polished look
- Adjust the default value to avoid the unclear meaning of a bare repo or single link
2025-09-20 23:06:40 +08:00
230 changed files with 9089 additions and 1807 deletions

View File

@@ -13,7 +13,7 @@ body:
attributes:
label: 请确认以下事项
description: |
您必须勾选以下内容,否则您的问题可能会被直接关闭。
您必须确认、同意并勾选以下内容,否则您的问题一定会被直接关闭。
或者您可以去[讨论区](https://github.com/OpenListTeam/OpenList/discussions)。
options:
- label: |
@@ -59,6 +59,14 @@ body:
label: 问题描述(必填)
validations:
required: true
- type: textarea
id: logs
attributes:
label: 日志(必填)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
validations:
required: true
- type: textarea
id: config
attributes:
@@ -67,12 +75,6 @@ body:
请提供您的`OpenList`应用的配置文件,并截图相关存储配置。(可隐藏隐私字段)
validations:
required: true
- type: textarea
id: logs
attributes:
label: 日志(可选)
description: |
请复制粘贴错误日志,或者截图。(可隐藏隐私字段) [查看方法](https://doc.oplist.org/faq/howto#%E5%A6%82%E4%BD%95%E5%BF%AB%E9%80%9F%E5%AE%9A%E4%BD%8Dbug)
- type: textarea
id: reproduction
attributes:

View File

@@ -13,7 +13,7 @@ body:
attributes:
label: Please confirm the following
description: |
You must check all the following, otherwise your issue may be closed directly.
You must confirm, agree, and check all the following, otherwise your issue will definitely be closed directly.
Or you can go to the [discussions](https://github.com/OpenListTeam/OpenList/discussions).
options:
- label: |
@@ -59,6 +59,14 @@ body:
label: Bug Description (required)
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs (required)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
validations:
required: true
- type: textarea
id: config
attributes:
@@ -67,12 +75,6 @@ body:
Please provide your `OpenList` application's configuration file and a screenshot of the relevant storage configuration. (You may mask sensitive fields)
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs (optional)
description: |
Please copy and paste any relevant log output or screenshots. (You may mask sensitive fields) [Guide](https://doc.oplist.org/faq/howto#how-to-quickly-locate-bugs)
- type: textarea
id: reproduction
attributes:

View File

@@ -64,7 +64,9 @@ Thank you for your support and understanding of the OpenList project.
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -93,7 +95,6 @@ Thank you for your support and understanding of the OpenList project.
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] Easy to deploy and out-of-the-box
- [x] File preview (PDF, markdown, code, plain text, ...)
- [x] Image preview in gallery mode

View File

@@ -64,7 +64,9 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
- [x] [又拍云对象存储](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [分秒帧](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [百度网盘](http://pan.baidu.com)

View File

@@ -65,6 +65,7 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([中国](https://www.teambition.com), [国際](https://us.teambition.com))
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com)(個人、家族、グループ)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)
@@ -93,6 +94,7 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
- [x] [Weiyun](https://www.weiyun.com)
- [x] [MediaFire](https://www.mediafire.com)
- [x] 簡単にデプロイでき、すぐに使える
- [x] ファイルプレビュー(PDF、markdown、コード、テキストなど)
- [x] ギャラリーモードでの画像プレビュー

View File

@@ -64,7 +64,9 @@ Dank u voor uw ondersteuning en begrip
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
- [x] Teambition([China](https://www.teambition.com), [Internationaal](https://us.teambition.com))
- [x] [MediaFire](https://www.mediafire.com)
- [x] [Mediatrack](https://www.mediatrack.cn)
- [x] [ProtonDrive](https://proton.me/drive)
- [x] [139yun](https://yun.139.com) (Persoonlijk, Familie, Groep)
- [x] [YandexDisk](https://disk.yandex.com)
- [x] [BaiduNetdisk](http://pan.baidu.com)

View File

@@ -2,6 +2,7 @@ package flags
var (
DataDir string
ConfigPath string
Debug bool
NoPrefix bool
Dev bool

View File

@@ -27,7 +27,8 @@ func Execute() {
}
func init() {
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data folder")
RootCmd.PersistentFlags().StringVar(&flags.DataDir, "data", "data", "data directory (relative paths are resolved against the current working directory)")
RootCmd.PersistentFlags().StringVar(&flags.ConfigPath, "config", "", "path to config.json (relative to current working directory; defaults to [data directory]/config.json, where [data directory] is set by --data)")
RootCmd.PersistentFlags().BoolVar(&flags.Debug, "debug", false, "start with debug mode")
RootCmd.PersistentFlags().BoolVar(&flags.NoPrefix, "no-prefix", false, "disable env prefix")
RootCmd.PersistentFlags().BoolVar(&flags.Dev, "dev", false, "start with dev mode")

View File

@@ -27,6 +27,8 @@ import (
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"github.com/quic-go/quic-go/http3"
)
// ServerCmd represents the server command
@@ -63,6 +65,7 @@ the address is defined in config file`,
httpHandler = h2c.NewHandler(r, &http2.Server{})
}
var httpSrv, httpsSrv, unixSrv *http.Server
var quicSrv *http3.Server
if conf.Conf.Scheme.HttpPort != -1 {
httpBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
fmt.Printf("start HTTP server @ %s\n", httpBase)
@@ -86,6 +89,24 @@ the address is defined in config file`,
utils.Log.Fatalf("failed to start https: %s", err.Error())
}
}()
if conf.Conf.Scheme.EnableH3 {
fmt.Printf("start HTTP3 (quic) server @ %s\n", httpsBase)
utils.Log.Infof("start HTTP3 (quic) server @ %s", httpsBase)
r.Use(func(c *gin.Context) {
if c.Request.TLS != nil {
port := conf.Conf.Scheme.HttpsPort
c.Header("Alt-Svc", fmt.Sprintf("h3=\":%d\"; ma=86400", port))
}
c.Next()
})
quicSrv = &http3.Server{Addr: httpsBase, Handler: r}
go func() {
err := quicSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
utils.Log.Fatalf("failed to start http3 (quic): %s", err.Error())
}
}()
}
}
if conf.Conf.Scheme.UnixFile != "" {
fmt.Printf("start unix server @ %s\n", conf.Conf.Scheme.UnixFile)
@@ -203,6 +224,15 @@ the address is defined in config file`,
utils.Log.Fatal("HTTPS server shutdown err: ", err)
}
}()
if conf.Conf.Scheme.EnableH3 {
wg.Add(1)
go func() {
defer wg.Done()
if err := quicSrv.Shutdown(ctx); err != nil {
utils.Log.Fatal("HTTP3 (quic) server shutdown err: ", err)
}
}()
}
}
if conf.Conf.Scheme.UnixFile != "" {
wg.Add(1)

View File

@@ -245,4 +245,17 @@ func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, delete
return d.client.DeleteOfflineTasks(hashes, deleteFiles)
}
func (d *Pan115) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
info, err := d.client.GetInfo()
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: uint64(info.SpaceInfo.AllTotal.Size),
FreeSpace: uint64(info.SpaceInfo.AllRemain.Size),
},
}, nil
}
var _ driver.Driver = (*Pan115)(nil)

View File

@@ -15,10 +15,9 @@ type Addition struct {
}
var config = driver.Config{
Name: "115 Cloud",
DefaultRoot: "0",
// OnlyProxy: true,
// NoOverwriteUpload: true,
Name: "115 Cloud",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
}
func init() {

View File

@@ -131,23 +131,6 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
func (d *Open115) GetObjInfo(ctx context.Context, path string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
resp, err := d.client.GetFolderInfoByPath(ctx, path)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Fn: resp.FileName,
Fc: resp.FileCategory,
Sha1: resp.Sha1,
Pc: resp.PickCode,
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err

View File

@@ -17,8 +17,9 @@ type Addition struct {
}
var config = driver.Config{
Name: "115 Open",
DefaultRoot: "0",
Name: "115 Open",
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheUA,
}
func init() {

View File

@@ -41,7 +41,9 @@ func (d *Pan123) GetAddition() driver.Additional {
}
func (d *Pan123) Init(ctx context.Context) error {
_, err := d.Request(UserInfo, http.MethodGet, nil, nil)
_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
req.SetHeader("platform", "web")
}, nil)
return err
}
@@ -74,7 +76,6 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
"type": f.Type,
}
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
}, nil)
if err != nil {
@@ -254,4 +255,15 @@ func (d *Pan123) APIRateLimit(ctx context.Context, api string) error {
return limiter.Wait(ctx)
}
func (d *Pan123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
userInfo, err := d.getUserInfo(ctx)
if err != nil {
return nil, err
}
total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(userInfo.Data.SpaceUsed, total),
}, nil
}
var _ driver.Driver = (*Pan123)(nil)

View File

@@ -12,7 +12,8 @@ type Addition struct {
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
UploadThread int `json:"UploadThread" type:"number" default:"3" help:"the threads of upload"`
Platform string `json:"platform" type:"string" default:"web" help:"the platform header value, sent with API requests"`
}
var config = driver.Config{
@@ -27,6 +28,7 @@ func init() {
return &Pan123{
Addition: Addition{
UploadThread: 3,
Platform: "web",
},
}
})

View File

@@ -122,3 +122,14 @@ type S3PreSignedURLs struct {
PreSignedUrls map[string]string `json:"presignedUrls"`
} `json:"data"`
}
type UserInfoResp struct {
Data struct {
Uid int64 `json:"UID"`
Nickname string `json:"Nickname"`
SpaceUsed uint64 `json:"SpaceUsed"`
SpacePermanent uint64 `json:"SpacePermanent"`
SpaceTemp uint64 `json:"SpaceTemp"`
FileCount int `json:"FileCount"`
} `json:"data"`
}

View File

@@ -124,7 +124,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
if cur == chunkCount {
curSize = lastChunkSize
}
var reader *stream.SectionReader
var reader io.ReadSeeker
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {

View File

@@ -43,7 +43,7 @@ const (
S3Auth = MainApi + "/file/s3_upload_object/auth"
UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
S3Complete = MainApi + "/file/s3_complete_multipart_upload"
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
// AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)
func signPath(path string, os string, version string) (k string, v string) {
@@ -203,7 +203,7 @@ do:
"referer": "https://www.123pan.com/",
"authorization": "Bearer " + d.AccessToken,
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) openlist-client",
"platform": "web",
"platform": d.Platform,
"app-version": "3",
//"user-agent": base.UserAgent,
})
@@ -282,3 +282,14 @@ func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]
}
return res, nil
}
func (d *Pan123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
var resp UserInfoResp
_, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

View File

@@ -84,7 +84,7 @@ func (d *Open123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}, nil
}
uid, err := d.getUID()
uid, err := d.getUID(ctx)
if err != nil {
return nil, err
}
@@ -215,7 +215,7 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
userInfo, err := d.getUserInfo()
userInfo, err := d.getUserInfo(ctx)
if err != nil {
return nil, err
}
@@ -229,5 +229,15 @@ func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error)
}, nil
}
var _ driver.Driver = (*Open123)(nil)
var _ driver.PutResult = (*Open123)(nil)
func (d *Open123) OfflineDownload(ctx context.Context, url string, dir model.Obj, callback string) (int, error) {
return d.createOfflineDownloadTask(ctx, url, dir.GetID(), callback)
}
func (d *Open123) OfflineDownloadProcess(ctx context.Context, taskID int) (float64, int, error) {
return d.queryOfflineDownloadStatus(ctx, taskID)
}
var (
_ driver.Driver = (*Open123)(nil)
_ driver.PutResult = (*Open123)(nil)
)

View File

@@ -19,6 +19,7 @@ func (a *ApiInfo) Require() {
a.token <- struct{}{}
}
}
func (a *ApiInfo) Release() {
if a.qps > 0 {
time.AfterFunc(time.Second, func() {
@@ -26,13 +27,16 @@ func (a *ApiInfo) Release() {
})
}
}
func (a *ApiInfo) SetQPS(qps int) {
a.qps = qps
a.token = make(chan struct{}, qps)
}
func (a *ApiInfo) NowLen() int {
return len(a.token)
}
func InitApiInfo(url string, qps int) *ApiInfo {
return &ApiInfo{
url: url,
@@ -185,3 +189,18 @@ type UploadCompleteResp struct {
FileID int64 `json:"fileID"`
} `json:"data"`
}
type OfflineDownloadResp struct {
BaseResp
Data struct {
TaskID int `json:"taskID"`
} `json:"data"`
}
type OfflineDownloadProcessResp struct {
BaseResp
Data struct {
Process float64 `json:"process"`
Status int `json:"status"`
} `json:"data"`
}
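Note: the Require/Release pair in this file implements a rolling one-second QPS cap over a buffered channel: Require blocks once qps tokens are outstanding, and Release hands the token back a second later. A standalone sketch of the pattern, with hypothetical names:

package main

import (
	"fmt"
	"time"
)

type qpsGate struct{ token chan struct{} }

func newQPSGate(qps int) *qpsGate {
	return &qpsGate{token: make(chan struct{}, qps)}
}

// Require blocks while qps calls are already in flight within the window.
func (g *qpsGate) Require() { g.token <- struct{}{} }

// Release frees the slot one second after the call, giving a rolling cap.
func (g *qpsGate) Release() {
	time.AfterFunc(time.Second, func() { <-g.token })
}

func main() {
	gate := newQPSGate(2) // at most 2 calls per rolling second
	for i := 0; i < 5; i++ {
		gate.Require()
		fmt.Println("call", i, time.Now().Format("15:04:05.000"))
		gate.Release()
	}
}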

View File

@@ -67,7 +67,7 @@ func (d *Open123) Upload(ctx context.Context, file model.FileStreamer, createRes
partNumber := partIndex + 1 // 分片号从1开始
offset := partIndex * chunkSize
size := min(chunkSize, size-offset)
var reader *stream.SectionReader
var reader io.ReadSeeker
var rateLimitedRd io.Reader
sliceMD5 := ""
// 表单

View File

@@ -1,6 +1,7 @@
package _123_open
import (
"context"
"crypto/md5"
"encoding/json"
"errors"
@@ -18,7 +19,7 @@ import (
log "github.com/sirupsen/logrus"
)
var ( //不同情况下获取的AccessTokenQPS限制不同 如下模块化易于拓展
var ( // 不同情况下获取的AccessTokenQPS限制不同 如下模块化易于拓展
Api = "https://open-api.123pan.com"
AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1)
@@ -33,6 +34,9 @@ var ( //不同情况下获取的AccessTokenQPS限制不同 如下模块化易于
Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
OfflineDownload = InitApiInfo(Api+"/api/v1/offline/download", 1)
OfflineDownloadProcess = InitApiInfo(Api+"/api/v1/offline/download/process", 5)
)
func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@@ -82,7 +86,6 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall
return nil, errors.New(baseResp.Message)
}
}
}
func (d *Open123) flushAccessToken() error {
@@ -148,21 +151,23 @@ func (d *Open123) SignURL(originURL, privateKey string, uid uint64, validDuratio
return objURL.String(), nil
}
func (d *Open123) getUserInfo() (*UserInfoResp, error) {
func (d *Open123) getUserInfo(ctx context.Context) (*UserInfoResp, error) {
var resp UserInfoResp
if _, err := d.Request(UserInfo, http.MethodGet, nil, &resp); err != nil {
if _, err := d.Request(UserInfo, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp); err != nil {
return nil, err
}
return &resp, nil
}
func (d *Open123) getUID() (uint64, error) {
func (d *Open123) getUID(ctx context.Context) (uint64, error) {
if d.UID != 0 {
return d.UID, nil
}
resp, err := d.getUserInfo()
resp, err := d.getUserInfo(ctx)
if err != nil {
return 0, err
}
@@ -184,7 +189,6 @@ func (d *Open123) getFiles(parentFileId int64, limit int, lastFileId int64) (*Fi
"searchData": "",
})
}, &resp)
if err != nil {
return nil, err
}
@@ -276,3 +280,34 @@ func (d *Open123) trash(fileId int64) error {
return nil
}
func (d *Open123) createOfflineDownloadTask(ctx context.Context, url string, dirID, callback string) (taskID int, err error) {
body := base.Json{
"url": url,
"dirID": dirID,
}
if len(callback) > 0 {
body["callBackUrl"] = callback
}
var resp OfflineDownloadResp
_, err = d.Request(OfflineDownload, http.MethodPost, func(req *resty.Request) {
req.SetBody(body)
}, &resp)
if err != nil {
return 0, err
}
return resp.Data.TaskID, nil
}
func (d *Open123) queryOfflineDownloadStatus(ctx context.Context, taskID int) (process float64, status int, err error) {
var resp OfflineDownloadProcessResp
_, err = d.Request(OfflineDownloadProcess, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"taskID": strconv.Itoa(taskID),
})
}, &resp)
if err != nil {
return .0, 0, err
}
return resp.Data.Process, resp.Data.Status, nil
}

View File

@@ -54,7 +54,8 @@ func (d *Yun139) Init(ctx context.Context) error {
"userInfo": base.Json{
"userType": 1,
"accountType": 1,
"accountName": d.Account},
"accountName": d.Account,
},
"modAddrType": 1,
}, &resp)
if err != nil {
@@ -732,7 +733,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
"manualRename": 2,
"operation": 0,
"path": path.Join(dstDir.GetPath(), dstDir.GetID()),
"seqNo": random.String(32), //序列号不能为空
"seqNo": random.String(32), // 序列号不能为空
"totalSize": reportSize,
"uploadContentList": []base.Json{{
"contentName": stream.GetName(),
@@ -834,4 +835,48 @@ func (d *Yun139) Other(ctx context.Context, args model.OtherArgs) (interface{},
}
}
func (d *Yun139) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
if d.UserDomainID == "" {
return nil, errs.NotImplement
}
var total, free uint64
if d.isFamily() {
diskInfo, err := d.getFamilyDiskInfo(ctx)
if err != nil {
return nil, err
}
totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
if err != nil {
return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
}
usedMb, err := strconv.ParseUint(diskInfo.Data.UsedSize, 10, 64)
if err != nil {
return nil, fmt.Errorf("failed convert used size into integer: %+v", err)
}
total = totalMb * 1024 * 1024
free = total - (usedMb * 1024 * 1024)
} else {
diskInfo, err := d.getPersonalDiskInfo(ctx)
if err != nil {
return nil, err
}
totalMb, err := strconv.ParseUint(diskInfo.Data.DiskSize, 10, 64)
if err != nil {
return nil, fmt.Errorf("failed convert disk size into integer: %+v", err)
}
freeMb, err := strconv.ParseUint(diskInfo.Data.FreeDiskSize, 10, 64)
if err != nil {
return nil, fmt.Errorf("failed convert free size into integer: %+v", err)
}
total = totalMb * 1024 * 1024
free = freeMb * 1024 * 1024
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}
var _ driver.Driver = (*Yun139)(nil)

View File

@@ -11,6 +11,7 @@ type Addition struct {
driver.RootID
Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
CloudID string `json:"cloud_id"`
UserDomainID string `json:"user_domain_id" help:"ud_id in Cookie, fill in to show disk usage"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
UseLargeThumbnail bool `json:"use_large_thumbnail" type:"bool" default:"false" help:"Enable to use large thumbnail for images"`

View File

@@ -312,3 +312,20 @@ type RefreshTokenResp struct {
AccessToken string `xml:"accessToken"`
Desc string `xml:"desc"`
}
type PersonalDiskInfoResp struct {
BaseResp
Data struct {
FreeDiskSize string `json:"freeDiskSize"`
DiskSize string `json:"diskSize"`
IsInfinitePicStorage *bool `json:"isInfinitePicStorage"`
} `json:"data"`
}
type FamilyDiskInfoResp struct {
BaseResp
Data struct {
UsedSize string `json:"usedSize"`
DiskSize string `json:"diskSize"`
} `json:"data"`
}

View File

@@ -107,8 +107,7 @@ func (d *Yun139) refreshToken() error {
return nil
}
func (d *Yun139) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
url := "https://yun.139.com" + pathname
func (d *Yun139) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
randStr := random.String(16)
ts := time.Now().Format("2006-01-02 15:04:05")
@@ -219,7 +218,7 @@ func (d *Yun139) requestRoute(data interface{}, resp interface{}) ([]byte, error
}
func (d *Yun139) post(pathname string, data interface{}, resp interface{}) ([]byte, error) {
return d.request(pathname, http.MethodPost, func(req *resty.Request) {
return d.request("https://yun.139.com"+pathname, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
}, resp)
}
@@ -268,7 +267,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
},
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
//Thumbnail: content.BigthumbnailURL,
// Thumbnail: content.BigthumbnailURL,
}
files = append(files, &f)
}
@@ -335,7 +334,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
Path: path, // 文件所在目录的Path
},
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
//Thumbnail: content.BigthumbnailURL,
// Thumbnail: content.BigthumbnailURL,
}
files = append(files, &f)
}
@@ -390,7 +389,7 @@ func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) {
Path: path, // 文件所在目录的Path
},
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
//Thumbnail: content.BigthumbnailURL,
// Thumbnail: content.BigthumbnailURL,
}
files = append(files, &f)
}
@@ -418,6 +417,7 @@ func (d *Yun139) getLink(contentId string) (string, error) {
}
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
}
func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
data := d.newJson(base.Json{
"contentID": contentId,
@@ -510,6 +510,7 @@ func (d *Yun139) personalRequest(pathname string, method string, callback base.R
}
return res.Body(), nil
}
func (d *Yun139) personalPost(pathname string, data interface{}, resp interface{}) ([]byte, error) {
return d.personalRequest(pathname, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
@@ -545,7 +546,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
}
nextPageCursor = resp.Data.NextPageCursor
for _, item := range resp.Data.Items {
var isFolder = (item.Type == "folder")
isFolder := (item.Type == "folder")
var f model.Obj
if isFolder {
f = &model.Object{
@@ -557,7 +558,7 @@ func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
IsFolder: isFolder,
}
} else {
var Thumbnails = item.Thumbnails
Thumbnails := item.Thumbnails
var ThumbnailUrl string
if d.UseLargeThumbnail {
for _, thumb := range Thumbnails {
@@ -600,7 +601,7 @@ func (d *Yun139) personalGetLink(fileId string) (string, error) {
if err != nil {
return "", err
}
var cdnUrl = jsoniter.Get(res, "data", "cdnUrl").ToString()
cdnUrl := jsoniter.Get(res, "data", "cdnUrl").ToString()
if cdnUrl != "" {
return cdnUrl, nil
} else {
@@ -614,12 +615,14 @@ func (d *Yun139) getAuthorization() string {
}
return d.Authorization
}
func (d *Yun139) getAccount() string {
if d.ref != nil {
return d.ref.getAccount()
}
return d.Account
}
func (d *Yun139) getPersonalCloudHost() string {
if d.ref != nil {
return d.ref.getPersonalCloudHost()
@@ -670,3 +673,33 @@ func (d *Yun139) uploadPersonalParts(ctx context.Context, partInfos []PartInfo,
}
return nil
}
func (d *Yun139) getPersonalDiskInfo(ctx context.Context) (*PersonalDiskInfoResp, error) {
data := map[string]interface{}{
"userDomainId": d.UserDomainID,
}
var resp PersonalDiskInfoResp
_, err := d.request("https://user-njs.yun.139.com/user/disk/getPersonalDiskInfo", http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Yun139) getFamilyDiskInfo(ctx context.Context) (*FamilyDiskInfoResp, error) {
data := map[string]interface{}{
"userDomainId": d.UserDomainID,
}
var resp FamilyDiskInfoResp
_, err := d.request("https://user-njs.yun.139.com/user/disk/getFamilyDiskInfo", http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
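The disk-info hunks above and the capacity hunks below all funnel raw used/total byte counts through driver.DiskUsageFromUsedAndTotal. A minimal self-contained sketch of what such a helper plausibly does (an assumption; the real implementation lives in internal/driver), including a guard against unsigned underflow when used exceeds total:

package main

import "fmt"

// DiskUsage mirrors the shape the drivers use (TotalSpace/FreeSpace in bytes);
// the real type lives in internal/model.
type DiskUsage struct {
	TotalSpace uint64
	FreeSpace  uint64
}

// diskUsageFromUsedAndTotal sketches the helper: the removed AliyundriveOpen
// code built DiskUsage{TotalSpace: total, FreeSpace: total - used} by hand,
// and a shared helper can also clamp the subtraction.
func diskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
	var free uint64
	if total > used {
		free = total - used
	}
	return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
	fmt.Printf("%+v\n", diskUsageFromUsedAndTotal(30, 100)) // {TotalSpace:100 FreeSpace:70}
}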

View File

@@ -194,4 +194,14 @@ func (d *Cloud189) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
return d.newUpload(ctx, dstDir, stream, up)
}
func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
capacityInfo, err := d.getCapacityInfo(ctx)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(capacityInfo.CloudCapacityInfo.UsedSize, capacityInfo.CloudCapacityInfo.TotalSize),
}, nil
}
var _ driver.Driver = (*Cloud189)(nil)

View File

@@ -66,3 +66,21 @@ type DownResp struct {
ResMessage string `json:"res_message"`
FileDownloadUrl string `json:"downloadUrl"`
}
type CapacityResp struct {
ResCode int `json:"res_code"`
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`
TotalSize uint64 `json:"totalSize"`
}
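For reference, a tiny standalone decode of the getUserSizeInfo.action payload into a trimmed copy of the struct above; the JSON values are made up for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// capacityResp is a reduced CapacityResp showing how the response fields map
// onto the struct tags defined above.
type capacityResp struct {
	ResCode           int    `json:"res_code"`
	ResMessage        string `json:"res_message"`
	CloudCapacityInfo struct {
		TotalSize uint64 `json:"totalSize"`
		UsedSize  uint64 `json:"usedSize"`
	} `json:"cloudCapacityInfo"`
}

func main() {
	sample := []byte(`{"res_code":0,"res_message":"ok","cloudCapacityInfo":{"totalSize":107374182400,"usedSize":1073741824}}`)
	var r capacityResp
	if err := json.Unmarshal(sample, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.CloudCapacityInfo.UsedSize, "/", r.CloudCapacityInfo.TotalSize)
}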

View File

@@ -157,7 +157,7 @@ func (d *Cloud189) request(url string, method string, callback base.ReqCallback,
if err != nil {
return nil, err
}
//log.Debug(res.String())
// log.Debug(res.String())
if e.ErrorCode != "" {
if e.ErrorCode == "InvalidSessionKey" {
err = d.newLogin()
@@ -186,8 +186,8 @@ func (d *Cloud189) getFiles(fileId string) ([]model.Obj, error) {
"mediaType": "0",
"folderId": fileId,
"iconOption": "5",
"orderBy": "lastOpTime", //account.OrderBy
"descending": "true", //account.OrderDirection
"orderBy": "lastOpTime", // account.OrderBy
"descending": "true", // account.OrderDirection
})
}, &resp)
if err != nil {
@@ -311,7 +311,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
}
d.sessionKey = sessionKey
const DEFAULT int64 = 10485760
var count = int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
count := int64(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
res, err := d.uploadRequest("/person/initMultiUpload", map[string]string{
"parentFolderId": dstDir.GetID(),
@@ -340,10 +340,10 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
if DEFAULT < byteSize {
byteSize = DEFAULT
}
//log.Debugf("%d,%d", byteSize, finish)
// log.Debugf("%d,%d", byteSize, finish)
byteData := make([]byte, byteSize)
n, err := io.ReadFull(file, byteData)
//log.Debug(err, n)
// log.Debug(err, n)
if err != nil {
return err
}
@@ -395,3 +395,14 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
}, nil)
return err
}
func (d *Cloud189) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
var resp CapacityResp
_, err := d.request("https://cloud.189.cn/api/portal/getUserSizeInfo.action", http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

View File

@@ -69,7 +69,7 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
// avoid logging in repeatedly
if !y.isLogin() || y.Addition.AccessToken == "" {
if err = y.login(); err != nil {
return
return err
}
}
@@ -83,7 +83,7 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) {
y.cron = cron.NewCron(time.Minute * 5)
y.cron.Do(y.keepAlive)
return
return err
}
func (y *Cloud189TV) Drop(ctx context.Context) error {
@@ -244,7 +244,6 @@ func (y *Cloud189TV) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
})
if err != nil {
return err
}
@@ -278,5 +277,22 @@ func (y *Cloud189TV) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
}
return y.OldUpload(ctx, dstDir, stream, up, isFamily, overwrite)
}
func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
capacityInfo, err := y.getCapacityInfo(ctx)
if err != nil {
return nil, err
}
var total, used uint64
if y.isFamily() {
total = capacityInfo.FamilyCapacityInfo.TotalSize
used = capacityInfo.FamilyCapacityInfo.UsedSize
} else {
total = capacityInfo.CloudCapacityInfo.TotalSize
used = capacityInfo.CloudCapacityInfo.UsedSize
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
}, nil
}

View File

@@ -316,3 +316,21 @@ type BatchTaskConflictTaskInfoResp struct {
TaskInfos []BatchTaskInfo
TaskType int `json:"taskType"`
}
type CapacityResp struct {
ResCode int `json:"res_code"`
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`
TotalSize uint64 `json:"totalSize"`
}

View File

@@ -70,6 +70,9 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para
}
func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) {
if y.tokenInfo == nil {
return nil, fmt.Errorf("login failed")
}
req := y.client.R().SetQueryParams(clientSuffix())
if params != nil {
@@ -173,6 +176,7 @@ func (y *Cloud189TV) put(ctx context.Context, url string, headers map[string]str
}
return body, nil
}
func (y *Cloud189TV) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
fullUrl := ApiUrl
if isFamily {
@@ -238,9 +242,8 @@ func (y *Cloud189TV) login() (err error) {
req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/getQrCodeUUID.action",
http.MethodGet))
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/getQrCodeUUID.action")
if err != nil {
return
return err
}
if erron.HasError() {
return &erron
@@ -280,7 +283,7 @@ func (y *Cloud189TV) login() (err error) {
req.SetQueryParam("uuid", y.TempUuid)
_, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action")
if err != nil {
return
return err
}
if erron.HasError() {
return &erron
@@ -300,7 +303,7 @@ func (y *Cloud189TV) login() (err error) {
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
if err != nil {
return
return err
}
if erron.HasError() {
@@ -309,7 +312,7 @@ func (y *Cloud189TV) login() (err error) {
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return
return err
}
// refreshSession tries to refresh the session using the existing AccessToken
@@ -324,7 +327,7 @@ func (y *Cloud189TV) refreshSession() (err error) {
reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken)
_, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action")
if err != nil {
return
return err
}
if erron.HasError() {
@@ -371,7 +374,7 @@ func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
// legacy upload; the family cloud does not support overwrite
func (y *Cloud189TV) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
fileMd5 := file.GetHash().GetHash(utils.MD5)
var tempFile = file.GetFile()
tempFile := file.GetFile()
var err error
if len(fileMd5) != utils.MD5.Width {
tempFile, fileMd5, err = stream.CacheFullAndHash(file, &up, utils.MD5)
@@ -474,7 +477,6 @@ func (y *Cloud189TV) OldUploadCreate(ctx context.Context, parentID string, fileM
})
}
}, &uploadInfo, isFamily)
if err != nil {
return nil, err
}
@@ -628,3 +630,15 @@ func (y *Cloud189TV) WaitBatchTask(aType string, taskID string, t time.Duration)
time.Sleep(t)
}
}
func (y *Cloud189TV) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
fullUrl := ApiUrl + "/portal/getUserSizeInfo.action"
var resp CapacityResp
_, err := y.get(fullUrl, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

View File

@@ -90,11 +90,11 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
if y.Addition.RefreshToken != "" {
y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken}
if err = y.refreshToken(); err != nil {
return
return err
}
} else {
if err = y.login(); err != nil {
return
return err
}
}
@@ -124,7 +124,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
}
})
return
return err
}
func (d *Cloud189PC) InitReference(storage driver.Driver) error {
@@ -305,7 +305,6 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
})
if err != nil {
return err
}
@@ -411,3 +410,21 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
return y.StreamUpload(ctx, dstDir, stream, up, isFamily, overwrite)
}
}
func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
capacityInfo, err := y.getCapacityInfo(ctx)
if err != nil {
return nil, err
}
var total, used uint64
if y.isFamily() {
total = capacityInfo.FamilyCapacityInfo.TotalSize
used = capacityInfo.FamilyCapacityInfo.UsedSize
} else {
total = capacityInfo.CloudCapacityInfo.TotalSize
used = capacityInfo.CloudCapacityInfo.UsedSize
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
}, nil
}

View File

@@ -409,3 +409,21 @@ func (p Params) Encode() string {
}
return buf.String()
}
type CapacityResp struct {
ResCode int `json:"res_code"`
ResMessage string `json:"res_message"`
Account string `json:"account"`
CloudCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
MailUsedSize uint64 `json:"mail189UsedSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"cloudCapacityInfo"`
FamilyCapacityInfo struct {
FreeSize int64 `json:"freeSize"`
TotalSize uint64 `json:"totalSize"`
UsedSize uint64 `json:"usedSize"`
} `json:"familyCapacityInfo"`
TotalSize uint64 `json:"totalSize"`
}

View File

@@ -90,6 +90,9 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
}
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
if y.getTokenInfo() == nil {
return nil, fmt.Errorf("login failed")
}
req := y.getClient().R().SetQueryParams(clientSuffix())
// set params
@@ -189,6 +192,7 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
}
return body, nil
}
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
res := make([]model.Obj, 0, 100)
for pageNum := 1; ; pageNum++ {
@@ -342,7 +346,7 @@ func (y *Cloud189PC) loginByPassword() (err error) {
SetQueryParam("redirectURL", loginresp.ToUrl).
Post(API_URL + "/getSessionForPC.action")
if err != nil {
return
return err
}
if erron.HasError() {
@@ -350,12 +354,12 @@ func (y *Cloud189PC) loginByPassword() (err error) {
}
if tokenInfo.ResCode != 0 {
err = fmt.Errorf(tokenInfo.ResMessage)
return
return err
}
y.Addition.RefreshToken = tokenInfo.RefreshToken
y.tokenInfo = &tokenInfo
op.MustSaveDriverStorage(y)
return
return err
}
func (y *Cloud189PC) loginByQRCode() error {
@@ -447,7 +451,6 @@ func (y *Cloud189PC) genQRCode(text string) error {
// Create the HTML page
qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID)
return fmt.Errorf("need verify: \n%s", qrPage)
}
func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) {
@@ -616,7 +619,7 @@ func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
if y.ref != nil {
return y.ref.refreshTokenWithRetry(retryCount)
}
// limit the retry count to avoid infinite recursion
if retryCount >= 3 {
if y.Addition.RefreshToken != "" {
@@ -625,7 +628,7 @@ func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) {
}
return errors.New("refresh token failed after maximum retries")
}
var erron RespErr
var tokenInfo AppSessionResp
_, err = y.client.R().
@@ -700,7 +703,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
params.Set("familyId", y.FamilyID)
fullUrl += "/family"
} else {
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
// params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
fullUrl += "/person"
}
@@ -752,7 +755,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
partSize = lastPartSize
}
partInfo := ""
var reader *stream.SectionReader
var reader io.ReadSeeker
var rateLimitedRd io.Reader
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
@@ -876,7 +879,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
lastSliceSize = sliceSize
}
//step.1 compute the required info first
// step.1 compute the required info first
byteSize := sliceSize
fileMd5 := utils.MD5.NewFunc()
sliceMd5 := utils.MD5.NewFunc()
@@ -927,14 +930,14 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
if isFamily {
fullUrl += "/family"
} else {
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
// params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
fullUrl += "/person"
}
// try to resume saved progress
uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.getTokenInfo().SessionKey, fileMd5Hex)
if !ok {
//step.2 pre-upload
// step.2 pre-upload
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()),
@@ -1163,7 +1166,6 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
})
}
}, &uploadInfo, isFamily)
if err != nil {
return nil, err
}
@@ -1473,3 +1475,15 @@ func (y *Cloud189PC) getClient() *resty.Client {
}
return y.client
}
func (y *Cloud189PC) getCapacityInfo(ctx context.Context) (*CapacityResp, error) {
fullUrl := API_URL + "/portal/getUserSizeInfo.action"
var resp CapacityResp
_, err := y.get(fullUrl, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

View File

@@ -23,6 +23,7 @@ import (
type Alias struct {
model.Storage
Addition
rootOrder []string
pathMap map[string][]string
autoFlatten bool
oneKey string
@@ -40,13 +41,18 @@ func (d *Alias) Init(ctx context.Context) error {
if d.Paths == "" {
return errors.New("paths is required")
}
paths := strings.Split(d.Paths, "\n")
d.rootOrder = make([]string, 0, len(paths))
d.pathMap = make(map[string][]string)
for _, path := range strings.Split(d.Paths, "\n") {
for _, path := range paths {
path = strings.TrimSpace(path)
if path == "" {
continue
}
k, v := getPair(path)
if _, ok := d.pathMap[k]; !ok {
d.rootOrder = append(d.rootOrder, k)
}
d.pathMap[k] = append(d.pathMap[k], v)
}
if len(d.pathMap) == 1 {
@@ -62,6 +68,7 @@ func (d *Alias) Init(ctx context.Context) error {
}
func (d *Alias) Drop(ctx context.Context) error {
d.rootOrder = nil
d.pathMap = nil
return nil
}
@@ -123,7 +130,7 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
if utils.PathEqual(path, "/") && !d.autoFlatten {
return d.listRoot(), nil
return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil
}
root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root]
@@ -131,27 +138,35 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
return nil, errs.ObjectNotFound
}
var objs []model.Obj
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
for _, dst := range dsts {
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), fsArgs)
tmp, err := fs.List(ctx, stdpath.Join(dst, sub), &fs.ListArgs{
NoLog: true,
Refresh: args.Refresh,
WithStorageDetails: args.WithStorageDetails && d.DetailsPassThrough,
})
if err == nil {
tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) {
thumb, ok := model.GetThumb(obj)
objRes := model.Object{
Name: obj.GetName(),
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
}
if !ok {
return &objRes, nil
if thumb, ok := model.GetThumb(obj); ok {
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
}
return &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}, nil
if details, ok := model.GetStorageDetails(obj); ok {
return &model.ObjStorageDetails{
Obj: &objRes,
StorageDetailsWithName: *details,
}, nil
}
return &objRes, nil
})
}
if err == nil {
@@ -196,9 +211,6 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if resultLink.ContentLength == 0 {
resultLink.ContentLength = fi.GetSize()
}
if resultLink.MFile != nil {
return &resultLink, nil
}
if d.DownloadConcurrency > 0 {
resultLink.Concurrency = d.DownloadConcurrency
}
@@ -250,7 +262,7 @@ func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string
}
return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name dirs cannot make sub-dir")
}
return err
@@ -261,14 +273,14 @@ func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name files cannot be moved")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name dirs cannot be moved to")
}
if err != nil {
@@ -296,7 +308,7 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
}
return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name files cannot be Rename")
}
return err
@@ -307,14 +319,14 @@ func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name files cannot be copied")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name dirs cannot be copied to")
}
if err != nil {
@@ -348,7 +360,7 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
}
return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name files cannot be Delete")
}
return err
@@ -392,7 +404,7 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer,
return err
}
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name dirs cannot be Put")
}
return err
@@ -409,7 +421,7 @@ func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string)
}
return err
}
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name files cannot offline download")
}
return err
@@ -482,14 +494,14 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name files cannot be decompressed")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
if errs.IsNotImplementError(err) {
return errors.New("same-name dirs cannot be decompressed to")
}
if err != nil {
@@ -512,4 +524,25 @@ func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj,
}
}
func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode {
root, sub := d.getRootAndPath(path)
dsts, ok := d.pathMap[root]
if !ok {
return 0
}
for _, dst := range dsts {
storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub))
if err != nil {
continue
}
mode := storage.Config().LinkCacheMode
if mode == -1 {
return storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(actualPath)
} else {
return mode
}
}
return 0
}
var _ driver.Driver = (*Alias)(nil)

View File

@@ -16,6 +16,7 @@ type Addition struct {
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
Writable bool `json:"writable" type:"bool" default:"false"`
ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"`
DetailsPassThrough bool `json:"details_pass_through" type:"bool" default:"false"`
}
var config = driver.Config{
@@ -25,6 +26,7 @@ var config = driver.Config{
NoUpload: false,
DefaultRoot: "/",
ProxyRangeOption: true,
LinkCacheMode: driver.LinkCacheAuto,
}
func init() {

View File

@@ -2,8 +2,10 @@ package alias
import (
"context"
"errors"
stdpath "path"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -11,24 +13,71 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/server/common"
log "github.com/sirupsen/logrus"
)
func (d *Alias) listRoot() []model.Obj {
type detailWithIndex struct {
idx int
val *model.StorageDetails
}
func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
var objs []model.Obj
for k := range d.pathMap {
detailsChan := make(chan detailWithIndex, len(d.pathMap))
workerCount := 0
for _, k := range d.rootOrder {
obj := model.Object{
Name: k,
IsFolder: true,
Modified: d.Modified,
}
idx := len(objs)
objs = append(objs, &obj)
v := d.pathMap[k]
if !withDetails || len(v) != 1 {
continue
}
remoteDriver, err := op.GetStorageByMountPath(v[0])
if err != nil {
continue
}
_, ok := remoteDriver.(driver.WithDetails)
if !ok {
continue
}
objs[idx] = &model.ObjStorageDetails{
Obj: objs[idx],
StorageDetailsWithName: model.StorageDetailsWithName{
StorageDetails: nil,
DriverName: remoteDriver.Config().Name,
},
}
workerCount++
go func(dri driver.Driver, i int) {
details, e := op.GetStorageDetails(ctx, dri, refresh)
if e != nil {
if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, e)
}
}
detailsChan <- detailWithIndex{idx: i, val: details}
}(remoteDriver, idx)
}
for workerCount > 0 {
select {
case r := <-detailsChan:
objs[r.idx].(*model.ObjStorageDetails).StorageDetails = r.val
workerCount--
case <-time.After(time.Second):
workerCount = 0
}
}
return objs
}
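The new listRoot gathers per-root storage details with a fan-out/fan-in: each worker reports its result tagged with a slot index on a buffered channel, and the collector abandons stragglers after one second so a slow backend cannot stall the root listing. A reduced runnable sketch of that pattern (names and delays invented for illustration):

package main

import (
	"fmt"
	"time"
)

type res struct {
	idx int
	val string
}

func main() {
	results := make([]string, 3)
	ch := make(chan res, len(results)) // buffered, so late workers never block
	for i := range results {
		go func(i int) {
			d := time.Duration(i) * 100 * time.Millisecond
			if i == 2 {
				d = 3 * time.Second // simulate a stalled backend
			}
			time.Sleep(d)
			ch <- res{idx: i, val: fmt.Sprintf("detail-%d", i)}
		}(i)
	}
	for pending := len(results); pending > 0; {
		select {
		case r := <-ch:
			results[r.idx] = r.val
			pending--
		case <-time.After(time.Second):
			pending = 0 // give up on stragglers; their slots simply stay empty
		}
	}
	fmt.Println(results) // [detail-0 detail-1 ]
}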
// do others that are not defined in the Driver interface
func getPair(path string) (string, string) {
//path = strings.TrimSpace(path)
// path = strings.TrimSpace(path)
if strings.Contains(path, ":") {
pair := strings.SplitN(path, ":", 2)
if !strings.Contains(pair[0], "/") {

View File

@@ -45,7 +45,7 @@ func (d *AliDrive) GetAddition() driver.Additional {
func (d *AliDrive) Init(ctx context.Context) error {
// TODO login / refresh token
//op.MustSaveDriverStorage(d)
// op.MustSaveDriverStorage(d)
err := d.refreshToken()
if err != nil {
return err
@@ -171,7 +171,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
Mimetype: streamer.GetMimetype(),
}
const DEFAULT int64 = 10485760
var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
count := int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
partInfoList := make([]base.Json, 0, count)
for i := 1; i <= count; i++ {
@@ -327,6 +327,20 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
return fmt.Errorf("%+v", resp2)
}
func (d *AliDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
res, err, _ := d.request("https://api.aliyundrive.com/adrive/v1/user/driveCapacityDetails", http.MethodPost, func(req *resty.Request) {
req.SetContext(ctx)
}, nil)
if err != nil {
return nil, err
}
used := utils.Json.Get(res, "drive_used_size").ToUint64()
total := utils.Json.Get(res, "drive_total_size").ToUint64()
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
}, nil
}
func (d *AliDrive) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
var resp base.Json
var url string

View File

@@ -299,10 +299,7 @@ func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails
total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64()
used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64()
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: total - used,
},
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
}, nil
}

View File

@@ -36,12 +36,14 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_drive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/google_photo"
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud"
_ "github.com/OpenListTeam/OpenList/v4/drivers/halalcloud_open"
_ "github.com/OpenListTeam/OpenList/v4/drivers/ilanzou"
_ "github.com/OpenListTeam/OpenList/v4/drivers/ipfs_api"
_ "github.com/OpenListTeam/OpenList/v4/drivers/kodbox"
_ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou"
_ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/local"
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediafire"
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack"
_ "github.com/OpenListTeam/OpenList/v4/drivers/mega"
_ "github.com/OpenListTeam/OpenList/v4/drivers/misskey"
@@ -54,6 +56,7 @@ import (
_ "github.com/OpenListTeam/OpenList/v4/drivers/openlist_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak"
_ "github.com/OpenListTeam/OpenList/v4/drivers/pikpak_share"
_ "github.com/OpenListTeam/OpenList/v4/drivers/proton_drive"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_open"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc"
_ "github.com/OpenListTeam/OpenList/v4/drivers/quark_uc_tv"

View File

@@ -5,11 +5,15 @@ import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
stdpath "path"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -18,8 +22,10 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/singleflight"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@@ -29,8 +35,20 @@ type BaiduNetdisk struct {
uploadThread int
vipType int // membership type: 0 regular user (4G/4M), 1 member (10G/16M), 2 super member (20G/32M)
upClient *resty.Client // 上传文件使用的http客户端
uploadUrlG singleflight.Group[string]
uploadUrlMu sync.RWMutex
uploadUrlCache map[string]uploadURLCacheEntry
}
type uploadURLCacheEntry struct {
url string
updateTime time.Time
}
var ErrUploadIDExpired = errors.New("uploadid expired")
func (d *BaiduNetdisk) Config() driver.Config {
return config
}
@@ -40,19 +58,32 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
}
func (d *BaiduNetdisk) Init(ctx context.Context) error {
timeout := DEFAULT_UPLOAD_SLICE_TIMEOUT
if d.UploadSliceTimeout > 0 {
timeout = time.Second * time.Duration(d.UploadSliceTimeout)
}
d.upClient = base.NewRestyClient().
SetTimeout(timeout).
SetRetryCount(UPLOAD_RETRY_COUNT).
SetRetryWaitTime(UPLOAD_RETRY_WAIT_TIME).
SetRetryMaxWaitTime(UPLOAD_RETRY_MAX_WAIT_TIME)
d.uploadUrlCache = make(map[string]uploadURLCacheEntry)
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, "3"
if d.uploadThread < 1 {
d.uploadThread, d.UploadThread = 1, "1"
} else if d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 32, "32"
}
if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
d.UploadAPI = "https://d.pcs.baidu.com"
d.UploadAPI = UPLOAD_FALLBACK_API
}
res, err := d.get("/xpan/nas", map[string]string{
"method": "uinfo",
}, nil)
log.Debugf("[baidu] get uinfo: %s", string(res))
log.Debugf("[baidu_netdisk] get uinfo: %s", string(res))
if err != nil {
return err
}
@@ -179,6 +210,11 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
// **Note**: as of 2024/04/20 the Baidu Netdisk API always returns the current time rather than the file time.
// The cloud actually stores the file time, so the time is overridden here to keep the cache consistent with the cloud.
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// Baidu Netdisk does not allow uploading empty files
if stream.GetSize() < 1 {
return nil, ErrBaiduEmptyFilesNotAllowed
}
// rapid upload
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
return newObj, nil
@@ -212,9 +248,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
lastBlockSize = sliceSize
}
//cal md5 for first 256k data
// cal md5 for first 256k data
const SliceSize int64 = 256 * utils.KB
// cal md5
blockList := make([]string, 0, count)
byteSize := sliceSize
fileMd5H := md5.New()
@@ -244,7 +279,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
if tmpF != nil {
if written != streamSize {
return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", written, streamSize)
return nil, errs.NewErr(err, "CreateTempFile failed, size mismatch: %d != %d ", written, streamSize)
}
_, err = tmpF.Seek(0, io.SeekStart)
if err != nil {
@@ -258,78 +293,112 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
mtime := stream.ModTime().Unix()
ctime := stream.CreateTime().Unix()
// step.1 pre-upload
// try to fetch previous progress
// step.1 try to read saved progress
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
params := map[string]string{
"method": "precreate",
}
form := map[string]string{
"path": path,
"size": strconv.FormatInt(streamSize, 10),
"isdir": "0",
"autoinit": "1",
"rtype": "3",
"block_list": blockListStr,
"content-md5": contentMd5,
"slice-md5": sliceMd5,
}
joinTime(form, ctime, mtime)
log.Debugf("[baidu_netdisk] precreate data: %s", form)
_, err = d.postForm("/xpan/file", params, form, &precreateResp)
// no saved progress, run pre-upload
precreateResp, err = d.precreate(ctx, path, streamSize, blockListStr, contentMd5, sliceMd5, ctime, mtime)
if err != nil {
return nil, err
}
log.Debugf("%+v", precreateResp)
if precreateResp.ReturnType == 2 {
//rapid upload, since got md5 match from baidu server
// rapid upload, since got md5 match from baidu server
// fix times; see the **Note** in the Put method comment for the reason
precreateResp.File.Ctime = ctime
precreateResp.File.Mtime = mtime
return fileToObj(precreateResp.File), nil
}
}
// step.2 upload slices
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
ensureUploadURL := func() {
if precreateResp.UploadURL != "" {
return
}
i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
"type": "tmpfile",
"path": path,
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
err := d.uploadSlice(ctx, params, stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(cache, offset, byteSize)))
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
precreateResp.BlockList[i] = -1
return nil
})
precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
}
if err = threadG.Wait(); err != nil {
// if the user actively cancelled, save the upload progress
ensureUploadURL()
// step.2 upload slices
uploadLoop:
for attempt := 0; attempt < 2; attempt++ {
// fetch the upload domain
if precreateResp.UploadURL == "" {
ensureUploadURL()
}
uploadUrl := precreateResp.UploadURL
// upload slices concurrently
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
cacheReaderAt, okReaderAt := cache.(io.ReaderAt)
if !okReaderAt {
return nil, fmt.Errorf("cache object must implement io.ReaderAt interface for upload operations")
}
totalParts := len(precreateResp.BlockList)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) || partseq < 0 {
continue
}
i, partseq := i, partseq
offset, size := int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
size = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
"type": "tmpfile",
"path": path,
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
section := io.NewSectionReader(cacheReaderAt, offset, size)
err := d.uploadSlice(ctx, uploadUrl, params, stream.GetName(), driver.NewLimitedUploadStream(ctx, section))
if err != nil {
return err
}
precreateResp.BlockList[i] = -1
// +1 because this goroutine has not exited yet; that gives the true success count
success := threadG.Success() + 1
progress := float64(success) * 100 / float64(totalParts)
up(progress)
return nil
})
}
err = threadG.Wait()
if err == nil {
break uploadLoop
}
// save progress (saved on every error)
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
if errors.Is(err, context.Canceled) {
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
return nil, err
}
if errors.Is(err, ErrUploadIDExpired) {
log.Warn("[baidu_netdisk] uploadid expired, will restart from scratch")
d.clearUploadUrlCache(precreateResp.Uploadid)
// precreate again; every slice must be re-uploaded
newPre, err2 := d.precreate(ctx, path, streamSize, blockListStr, "", "", ctime, mtime)
if err2 != nil {
return nil, err2
}
if newPre.ReturnType == 2 {
return fileToObj(newPre.File), nil
}
precreateResp = newPre
precreateResp.UploadURL = ""
ensureUploadURL()
// overwrite the old progress
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
continue uploadLoop
}
return nil, err
}
@@ -343,33 +412,82 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
// fix times; see the **Note** in the Put method comment for the reason
newFile.Ctime = ctime
newFile.Mtime = mtime
// upload succeeded; clean up the progress record
base.SaveUploadProgress(d, nil, d.AccessToken, contentMd5)
d.clearUploadUrlCache(precreateResp.Uploadid)
return fileToObj(newFile), nil
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
res, err := base.RestyClient.R().
// precreate performs the pre-upload step, supporting both first upload and retry after uploadid expiry
func (d *BaiduNetdisk) precreate(ctx context.Context, path string, streamSize int64, blockListStr, contentMd5, sliceMd5 string, ctime, mtime int64) (*PrecreateResp, error) {
params := map[string]string{"method": "precreate"}
form := map[string]string{
"path": path,
"size": strconv.FormatInt(streamSize, 10),
"isdir": "0",
"autoinit": "1",
"rtype": "3",
"block_list": blockListStr,
}
// content-md5 and slice-md5 are only included on the first upload
if contentMd5 != "" && sliceMd5 != "" {
form["content-md5"] = contentMd5
form["slice-md5"] = sliceMd5
}
joinTime(form, ctime, mtime)
var precreateResp PrecreateResp
_, err := d.postForm("/xpan/file", params, form, &precreateResp)
if err != nil {
return nil, err
}
// fix times; see the **Note** in the Put method comment for the reason
if precreateResp.ReturnType == 2 {
precreateResp.File.Ctime = ctime
precreateResp.File.Mtime = mtime
}
return &precreateResp, nil
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, uploadUrl string, params map[string]string, fileName string, file io.Reader) error {
res, err := d.upClient.R().
SetContext(ctx).
SetQueryParams(params).
SetFileReader("file", fileName, file).
Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
Post(uploadUrl + "/rest/2.0/pcs/superfile2")
if err != nil {
return err
}
log.Debugln(res.RawResponse.Status + res.String())
if res.StatusCode() != http.StatusOK {
return errs.NewErr(errs.StreamIncomplete, "baidu upload failed, status=%d, body=%s", res.StatusCode(), res.String())
}
errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
errNo := utils.Json.Get(res.Body(), "errno").ToInt()
respStr := res.String()
lower := strings.ToLower(respStr)
// unified uploadid expiry detection
if strings.Contains(lower, "uploadid") &&
(strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
return ErrUploadIDExpired
}
if errCode != 0 || errNo != 0 {
return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", res.String())
}
return nil
}
func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
du, err := d.quota()
du, err := d.quota(ctx)
if err != nil {
return nil, err
}
return &model.StorageDetails{DiskUsage: *du}, nil
return &model.StorageDetails{DiskUsage: du}, nil
}
var _ driver.Driver = (*BaiduNetdisk)(nil)
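The upload loop above hinges on a sentinel error: uploadSlice maps the server's uploadid-expiry responses onto ErrUploadIDExpired, and Put recognizes it via errors.Is, re-runs precreate, and retries from scratch. A condensed sketch of that control flow (the failing step is simulated; names are stand-ins):

package main

import (
	"errors"
	"fmt"
)

var errExpired = errors.New("uploadid expired") // mirrors ErrUploadIDExpired

// uploadAll stands in for the slice-upload phase; the first pass pretends the
// server invalidated the uploadid.
func uploadAll(restarted bool) error {
	if !restarted {
		return errExpired
	}
	return nil
}

func main() {
	restarted := false
	for attempt := 0; attempt < 2; attempt++ {
		err := uploadAll(restarted)
		if err == nil {
			fmt.Println("upload complete on attempt", attempt+1)
			return
		}
		if errors.Is(err, errExpired) {
			fmt.Println("uploadid expired, re-running precreate and retrying")
			restarted = true // in the real code: new precreate, reset block list and URL
			continue
		}
		fmt.Println("fatal:", err) // any other error aborts
		return
	}
}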

View File

@@ -3,6 +3,7 @@ package baidu_netdisk
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"time"
)
type Addition struct {
@@ -18,12 +19,23 @@ type Addition struct {
AccessToken string
RefreshToken string `json:"refresh_token" required:"true"`
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadSliceTimeout int `json:"upload_timeout" type:"number" default:"60" help:"per-slice upload timeout in seconds"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
UseDynamicUploadAPI bool `json:"use_dynamic_upload_api" default:"true" help:"dynamically get upload api domain, when enabled, the 'Upload API' setting will be used as a fallback if failed to get"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
}
const (
UPLOAD_FALLBACK_API = "https://d.pcs.baidu.com" // fallback upload endpoint
UPLOAD_URL_EXPIRE_TIME = time.Minute * 60 // lifetime of a fetched upload URL
DEFAULT_UPLOAD_SLICE_TIMEOUT = time.Second * 60 // default timeout for one slice upload request
UPLOAD_RETRY_COUNT = 3
UPLOAD_RETRY_WAIT_TIME = time.Second * 1
UPLOAD_RETRY_MAX_WAIT_TIME = time.Second * 5
)
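These constants feed the dedicated upload client built in Init. A standalone sketch of that wiring, with resty.New() in place of base.NewRestyClient() so it runs on its own:

package main

import (
	"fmt"
	"time"

	"github.com/go-resty/resty/v2"
)

func main() {
	// mirrors the upClient setup: per-slice timeout plus bounded backoff retries
	client := resty.New().
		SetTimeout(60 * time.Second). // DEFAULT_UPLOAD_SLICE_TIMEOUT
		SetRetryCount(3).             // UPLOAD_RETRY_COUNT
		SetRetryWaitTime(time.Second). // UPLOAD_RETRY_WAIT_TIME
		SetRetryMaxWaitTime(5 * time.Second) // UPLOAD_RETRY_MAX_WAIT_TIME
	fmt.Println("retries:", client.RetryCount)
}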
var config = driver.Config{
Name: "BaiduNetdisk",
DefaultRoot: "/",

View File

@@ -1,6 +1,7 @@
package baidu_netdisk
import (
"errors"
"path"
"strconv"
"time"
@@ -9,6 +10,10 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
var (
ErrBaiduEmptyFilesNotAllowed = errors.New("empty files are not allowed by baidu netdisk")
)
type TokenErrResp struct {
ErrorDescription string `json:"error_description"`
Error string `json:"error"`
@@ -188,6 +193,32 @@ type PrecreateResp struct {
// return_type=2
File File `json:"info"`
UploadURL string `json:"-"` // 保存断点续传对应的上传域名
}
type UploadServerResp struct {
BakServer []any `json:"bak_server"`
BakServers []struct {
Server string `json:"server"`
} `json:"bak_servers"`
ClientIP string `json:"client_ip"`
ErrorCode int `json:"error_code"`
ErrorMsg string `json:"error_msg"`
Expire int `json:"expire"`
Host string `json:"host"`
Newno string `json:"newno"`
QuicServer []any `json:"quic_server"`
QuicServers []struct {
Server string `json:"server"`
} `json:"quic_servers"`
RequestID int64 `json:"request_id"`
Server []any `json:"server"`
ServerTime int `json:"server_time"`
Servers []struct {
Server string `json:"server"`
} `json:"servers"`
Sl int `json:"sl"`
}
type QuotaResp struct {

View File

@@ -1,6 +1,7 @@
package baidu_netdisk
import (
"context"
"encoding/hex"
"errors"
"fmt"
@@ -11,6 +12,7 @@ import (
"unicode"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
@@ -113,7 +115,7 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
errno := utils.Json.Get(res.Body(), "errno").ToInt()
if errno != 0 {
if utils.SliceContains([]int{111, -6}, errno) {
log.Info("refreshing baidu_netdisk token.")
log.Info("[baidu_netdisk] refreshing baidu_netdisk token.")
err2 := d.refreshToken()
if err2 != nil {
return retry.Unrecoverable(err2)
@@ -207,7 +209,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
if err != nil {
return nil, err
}
//if res.StatusCode() == 302 {
// if res.StatusCode() == 302 {
u = res.Header().Get("location")
//}
@@ -324,10 +326,10 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
// fixed at 4MB for non-members
if d.vipType == 0 {
if d.CustomUploadPartSize != 0 {
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
log.Warnf("[baidu_netdisk] CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
}
if filesize > MaxSliceNum*DefaultSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
}
return DefaultSliceSize
@@ -335,17 +337,17 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
if d.CustomUploadPartSize != 0 {
if d.CustomUploadPartSize < DefaultSliceSize {
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
return DefaultSliceSize
}
if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
return VipSliceSize
}
if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
log.Warnf("[baidu_netdisk] CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
return SVipSliceSize
}
@@ -375,22 +377,112 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
}
if filesize > MaxSliceNum*maxSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
log.Warnf("[baidu_netdisk] File size(%d) is too large, may cause upload failure", filesize)
}
return maxSliceSize
}
func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) {
func (d *BaiduNetdisk) quota(ctx context.Context) (model.DiskUsage, error) {
var resp QuotaResp
_, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp)
_, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
return model.DiskUsage{}, err
}
return &model.DiskUsage{
TotalSpace: resp.Total,
FreeSpace: resp.Total - resp.Used,
}, nil
return driver.DiskUsageFromUsedAndTotal(resp.Used, resp.Total), nil
}
// getUploadUrl fetches the upload domain from the open platform; concurrent requests are merged and the result is reused for the lifetime of the uploadid.
// If fetching fails, the Upload API setting is returned instead.
func (d *BaiduNetdisk) getUploadUrl(path, uploadId string) string {
if !d.UseDynamicUploadAPI || uploadId == "" {
return d.UploadAPI
}
getCachedUrlFunc := func() (string, bool) {
d.uploadUrlMu.RLock()
defer d.uploadUrlMu.RUnlock()
if entry, ok := d.uploadUrlCache[uploadId]; ok {
return entry.url, true
}
return "", false
}
// check the URL cache
if uploadUrl, ok := getCachedUrlFunc(); ok {
return uploadUrl
}
uploadUrlGetFunc := func() (string, error) {
// double-check the cache
if uploadUrl, ok := getCachedUrlFunc(); ok {
return uploadUrl, nil
}
uploadUrl, err := d.requestForUploadUrl(path, uploadId)
if err != nil {
return "", err
}
d.uploadUrlMu.Lock()
d.uploadUrlCache[uploadId] = uploadURLCacheEntry{
url: uploadUrl,
updateTime: time.Now(),
}
d.uploadUrlMu.Unlock()
return uploadUrl, nil
}
uploadUrl, err, _ := d.uploadUrlG.Do(uploadId, uploadUrlGetFunc)
if err != nil {
fallback := d.UploadAPI
log.Warnf("[baidu_netdisk] get upload URL failed (%v), will use fallback URL: %s", err, fallback)
return fallback
}
return uploadUrl
}
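getUploadUrl layers three pieces: an RWMutex-guarded cache checked first, singleflight to collapse concurrent misses, and a second cache check inside the flight. A stripped-down sketch using golang.org/x/sync/singleflight in place of the repo's generic pkg/singleflight (the URL value stands in for requestForUploadUrl):

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var (
	mu    sync.RWMutex
	cache = map[string]string{}
	g     singleflight.Group
)

func cached(key string) (string, bool) {
	mu.RLock()
	defer mu.RUnlock()
	v, ok := cache[key]
	return v, ok
}

func get(key string) (string, error) {
	if v, ok := cached(key); ok { // fast path
		return v, nil
	}
	v, err, _ := g.Do(key, func() (interface{}, error) {
		if v, ok := cached(key); ok { // another flight may have filled it
			return v, nil
		}
		u := "https://example-upload-host" // stand-in for the locateupload call
		mu.Lock()
		cache[key] = u
		mu.Unlock()
		return u, nil
	})
	if err != nil {
		return "", err
	}
	return v.(string), nil
}

func main() {
	u, _ := get("uploadid-1")
	fmt.Println(u)
}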
func (d *BaiduNetdisk) clearUploadUrlCache(uploadId string) {
if uploadId == "" {
return
}
d.uploadUrlMu.Lock()
if _, ok := d.uploadUrlCache[uploadId]; ok {
delete(d.uploadUrlCache, uploadId)
}
d.uploadUrlMu.Unlock()
}
// requestForUploadUrl requests the upload URL.
// In testing this endpoint needs no auth (passing method and upload_version suffices), but it is called per the documented spec anyway.
// https://pan.baidu.com/union/doc/Mlvw5hfnr
func (d *BaiduNetdisk) requestForUploadUrl(path, uploadId string) (string, error) {
params := map[string]string{
"method": "locateupload",
"appid": "250528",
"path": path,
"uploadid": uploadId,
"upload_version": "2.0",
}
apiUrl := "https://d.pcs.baidu.com/rest/2.0/pcs/file"
var resp UploadServerResp
_, err := d.request(apiUrl, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(params)
}, &resp)
if err != nil {
return "", err
}
// should be an address starting with https
var uploadUrl string
if len(resp.Servers) > 0 {
uploadUrl = resp.Servers[0].Server
} else if len(resp.BakServers) > 0 {
uploadUrl = resp.BakServers[0].Server
}
if uploadUrl == "" {
return "", errors.New("upload URL is empty")
}
return uploadUrl, nil
}
// func encodeURIComponent(str string) string {

View File

@@ -18,8 +18,9 @@ type Addition struct {
}
var config = driver.Config{
Name: "BaiduPhoto",
LocalSort: true,
Name: "BaiduPhoto",
LocalSort: true,
LinkCacheMode: driver.LinkCacheUA,
}
func init() {

View File

@@ -25,6 +25,7 @@ func InitClient() {
}),
).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
NoRedirectClient.SetHeader("user-agent", UserAgent)
net.SetRestyProxyIfConfigured(NoRedirectClient)
RestyClient = NewRestyClient()
HttpClient = net.NewHttpClient()
@@ -37,5 +38,7 @@ func NewRestyClient() *resty.Client {
SetRetryResetReaders(true).
SetTimeout(DefaultTimeout).
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
net.SetRestyProxyIfConfigured(client)
return client
}

View File

@@ -10,6 +10,7 @@ import (
"mime/multipart"
"net/http"
"net/url"
"strconv"
"strings"
"time"
@@ -239,7 +240,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
if err != nil {
return err
}
err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
err = writer.WriteField("puid", strconv.Itoa(resp.Msg.Puid))
if err != nil {
fmt.Println("Error writing param2 to request body:", err)
return err
@@ -260,7 +261,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
return err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
resps, err := http.DefaultClient.Do(req)
if err != nil {
return err

View File

@@ -258,7 +258,7 @@ type UploadDoneParam struct {
func fileToObj(f File) *model.Object {
if len(f.Content.FolderName) > 0 {
return &model.Object{
ID: fmt.Sprintf("%d", f.ID),
ID: strconv.Itoa(f.ID),
Name: f.Content.FolderName,
Size: 0,
Modified: time.UnixMilli(f.Inserttime),

View File

@@ -9,6 +9,7 @@ import (
"fmt"
"mime/multipart"
"net/http"
"strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -172,7 +173,7 @@ func (d *ChaoXing) Login() (string, error) {
return "", err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
req.Header.Set("Content-Length", strconv.Itoa(body.Len()))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err

View File

@@ -17,9 +17,11 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/avast/retry-go"
)
type Chunk struct {
@@ -39,6 +41,9 @@ func (d *Chunk) Init(ctx context.Context) error {
if d.PartSize <= 0 {
return errors.New("part size must be positive")
}
if len(d.ChunkPrefix) <= 0 {
return errors.New("chunk folder prefix must not be empty")
}
d.RemotePath = utils.FixAndCleanPath(d.RemotePath)
return nil
}
@@ -72,13 +77,13 @@ func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
}
remoteActualDir, name := stdpath.Split(remoteActualPath)
chunkName := "[openlist_chunk]" + name
chunkName := d.ChunkPrefix + name
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{})
if err != nil {
return nil, err
}
var totalSize int64 = 0
// chunk 0 must exist
// chunk 0 defaults to -1 to support empty files
chunkSizes := []int64{-1}
h := make(map[*utils.HashType]string)
var first model.Obj
@@ -115,21 +120,6 @@ func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) {
chunkSizes[idx] = o.GetSize()
}
}
// check that chunk 0 is not -1, to support empty files
// if there is more than one chunk, the last one can never be 0
// so only the middle chunks are checked for 0
for i, l := 0, len(chunkSizes)-2; ; i++ {
if i == 0 {
if chunkSizes[i] == -1 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
} else if chunkSizes[i] == 0 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
if i >= l {
break
}
}
reqDir, _ := stdpath.Split(path)
objRes := chunkObject{
Object: model.Object{
@@ -161,67 +151,76 @@ func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
return nil, err
}
result := make([]model.Obj, 0, len(remoteObjs))
listG, listCtx := errgroup.NewGroupWithContext(ctx, d.NumListWorkers, retry.Attempts(3))
for _, obj := range remoteObjs {
if utils.IsCanceled(listCtx) {
break
}
rawName := obj.GetName()
if obj.IsDir() {
if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok {
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
ReqPath: stdpath.Join(args.ReqPath, rawName),
Refresh: args.Refresh,
})
if err != nil {
return nil, err
}
totalSize := int64(0)
h := make(map[*utils.HashType]string)
first := obj
for _, o := range chunkObjs {
if o.IsDir() {
continue
if name, ok := strings.CutPrefix(rawName, d.ChunkPrefix); ok {
resultIdx := len(result)
result = append(result, nil)
listG.Go(func(ctx context.Context) error {
chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{
ReqPath: stdpath.Join(args.ReqPath, rawName),
Refresh: args.Refresh,
})
if err != nil {
return err
}
if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
hn, value, ok := strings.Cut(after, "_")
if ok {
ht, ok := utils.GetHashByName(hn)
if ok {
h[ht] = value
}
totalSize := int64(0)
h := make(map[*utils.HashType]string)
first := obj
for _, o := range chunkObjs {
if o.IsDir() {
continue
}
if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok {
hn, value, ok := strings.Cut(after, "_")
if ok {
ht, ok := utils.GetHashByName(hn)
if ok {
h[ht] = value
}
continue
}
}
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
if err != nil {
continue
}
if idx == 0 {
first = o
}
totalSize += o.GetSize()
}
idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt))
if err != nil {
continue
objRes := model.Object{
Name: name,
Size: totalSize,
Modified: first.ModTime(),
Ctime: first.CreateTime(),
}
if idx == 0 {
first = o
if len(h) > 0 {
objRes.HashInfo = utils.NewHashInfoByMap(h)
}
totalSize += o.GetSize()
}
objRes := model.Object{
Name: name,
Size: totalSize,
Modified: first.ModTime(),
Ctime: first.CreateTime(),
}
if len(h) > 0 {
objRes.HashInfo = utils.NewHashInfoByMap(h)
}
if !d.Thumbnail {
result = append(result, &objRes)
} else {
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
thumb := fmt.Sprintf("%s/d%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(thumbPath, true),
sign.Sign(thumbPath))
result = append(result, &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
})
}
if !d.Thumbnail {
result[resultIdx] = &objRes
} else {
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
thumb := fmt.Sprintf("%s/d%s?sign=%s",
common.GetApiUrl(ctx),
utils.EncodePath(thumbPath, true),
sign.Sign(thumbPath))
result[resultIdx] = &model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}
}
return nil
})
continue
}
}
@@ -248,6 +247,9 @@ func (d *Chunk) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
})
}
}
if err = listG.Wait(); err != nil {
return nil, err
}
return result, nil
}
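The parallel List keeps output order deterministic by reserving a nil slot (and capturing its index) on the listing goroutine before spawning the worker that fills it. A minimal sketch of the trick, with golang.org/x/sync/errgroup standing in for the repo's pkg/errgroup and its retry options:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, _ := errgroup.WithContext(context.Background())
	g.SetLimit(5) // cf. NumListWorkers
	var result []*string
	for _, name := range []string{"a", "b", "c"} {
		name := name
		idx := len(result)
		result = append(result, nil) // reserve the slot before going async
		g.Go(func() error {
			v := "listed:" + name // stand-in for the op.List + aggregation work
			result[idx] = &v      // each worker writes only its own slot
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
	for _, r := range result {
		fmt.Println(*r) // order matches the input, regardless of completion order
	}
}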
@@ -267,6 +269,21 @@ func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
resultLink.SyncClosers = utils.NewSyncClosers(l)
return &resultLink, nil
}
// check that chunk 0 is not -1, to support empty files
// if there is more than one chunk, the last one can never be 0
// so only the middle chunks are checked for 0
for i, l := 0, len(chunkFile.chunkSizes)-2; ; i++ {
if i == 0 {
if chunkFile.chunkSizes[i] == -1 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
} else if chunkFile.chunkSizes[i] == 0 {
return nil, fmt.Errorf("chunk part[%d] are missing", i)
}
if i >= l {
break
}
}
fileSize := chunkFile.GetSize()
mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
start := httpRange.Start
@@ -383,7 +400,7 @@ func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if _, ok := srcObj.(*chunkObject); ok {
newName = "[openlist_chunk]" + newName
newName = d.ChunkPrefix + newName
}
return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName)
}
@@ -404,14 +421,14 @@ func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStream
if err != nil {
return err
}
if d.Thumbnail && dstDir.GetName() == ".thumbnails" {
if (d.Thumbnail && dstDir.GetName() == ".thumbnails") || (d.ChunkLargeFileOnly && file.GetSize() <= d.PartSize) {
return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up)
}
upReader := &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName())
dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), d.ChunkPrefix+file.GetName())
if d.StoreHash {
for ht, value := range file.GetHash().All() {
_ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{
@@ -472,11 +489,7 @@ func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
if err != nil {
return nil, errs.NotImplement
}
wd, ok := remoteStorage.(driver.WithDetails)
if !ok {
return nil, errs.NotImplement
}
remoteDetails, err := wd.GetDetails(ctx)
remoteDetails, err := op.GetStorageDetails(ctx, remoteStorage)
if err != nil {
return nil, err
}

View File

@@ -6,10 +6,13 @@ import (
)
type Addition struct {
RemotePath string `json:"remote_path" required:"true"`
PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
CustomExt string `json:"custom_ext" type:"string"`
StoreHash bool `json:"store_hash" type:"bool" default:"true"`
RemotePath string `json:"remote_path" required:"true"`
PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"`
ChunkLargeFileOnly bool `json:"chunk_large_file_only" default:"false" help:"chunk only if file size > part_size"`
ChunkPrefix string `json:"chunk_prefix" type:"string" default:"[openlist_chunk]" help:"the prefix of chunk folder"`
CustomExt string `json:"custom_ext" type:"string"`
StoreHash bool `json:"store_hash" type:"bool" default:"true"`
NumListWorkers int `json:"num_list_workers" required:"true" type:"number" default:"5"`
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
@@ -26,6 +29,11 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Chunk{}
return &Chunk{
Addition: Addition{
ChunkPrefix: "[openlist_chunk]",
NumListWorkers: 5,
},
}
})
}

View File

@@ -342,15 +342,14 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode
func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
// TODO return storage details (total space, free space, etc.)
var r CapacityResp
err := d.request(http.MethodGet, "/user/capacity", nil, &r)
err := d.request(http.MethodGet, "/user/capacity", func(req *resty.Request) {
req.SetContext(ctx)
}, &r)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: r.Total,
FreeSpace: r.Total - r.Used,
},
DiskUsage: driver.DiskUsageFromUsedAndTotal(r.Used, r.Total),
}, nil
}
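driver.DiskUsageFromUsedAndTotal replaces the hand-rolled DiskUsage construction here and in several drivers below. A minimal sketch of what the helper presumably does (an assumption — the actual code lives in the driver package):
func DiskUsageFromUsedAndTotal(used, total uint64) model.DiskUsage {
	free := uint64(0)
	if total > used { // guard against underflow when used exceeds the reported total
		free = total - used
	}
	return model.DiskUsage{TotalSpace: total, FreeSpace: free}
}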

View File

@@ -44,7 +44,7 @@ func (d *Crypt) GetAddition() driver.Additional {
}
func (d *Crypt) Init(ctx context.Context) error {
//obfuscate credentials if it's updated or just created
// obfuscate credentials if it's updated or just created
err := d.updateObfusParm(&d.Password)
if err != nil {
return fmt.Errorf("failed to obfuscate password: %w", err)
@@ -63,7 +63,7 @@ func (d *Crypt) Init(ctx context.Context) error {
op.MustSaveDriverStorage(d)
//need remote storage exist
// need remote storage exist
storage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
if err != nil {
return fmt.Errorf("can't find remote storage: %w", err)
@@ -109,8 +109,8 @@ func (d *Crypt) Drop(ctx context.Context) error {
func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
//return d.list(ctx, d.RemotePath, path)
//remoteFull
// return d.list(ctx, d.RemotePath, path)
// remoteFull
objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: args.Refresh})
// the obj must implement the model.SetPath interface
@@ -124,7 +124,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
if obj.IsDir() {
name, err := d.cipher.DecryptDirName(obj.GetName())
if err != nil {
//filter illegal files
// filter illegal files
continue
}
if !d.ShowHidden && strings.HasPrefix(name, ".") {
@@ -143,12 +143,12 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
thumb, ok := model.GetThumb(obj)
size, err := d.cipher.DecryptedSize(obj.GetSize())
if err != nil {
//filter illegal files
// filter illegal files
continue
}
name, err := d.cipher.DecryptFileName(obj.GetName())
if err != nil {
//filter illegal files
// filter illegal files
continue
}
if !d.ShowHidden && strings.HasPrefix(name, ".") {
@@ -202,7 +202,7 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
if err != nil {
if errs.IsObjectNotFound(err) && secondTry {
//try the opposite
// try the opposite
remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder)
remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
if err2 != nil {
@@ -240,7 +240,7 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
IsFolder: remoteObj.IsDir(),
}
return obj, nil
//return nil, errs.ObjectNotFound
// return nil, errs.ObjectNotFound
}
// https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37
@@ -317,7 +317,8 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
return readSeeker, nil
}),
SyncClosers: utils.NewSyncClosers(remoteLink),
SyncClosers: utils.NewSyncClosers(remoteLink),
RequireReference: remoteLink.RequireReference,
}, nil
}
@@ -366,7 +367,6 @@ func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
}
func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
@@ -412,11 +412,7 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
}
func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
wd, ok := d.remoteStorage.(driver.WithDetails)
if !ok {
return nil, errs.NotImplement
}
remoteDetails, err := wd.GetDetails(ctx)
remoteDetails, err := op.GetStorageDetails(ctx, d.remoteStorage)
if err != nil {
return nil, err
}
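This driver and the Chunk driver above now delegate to op.GetStorageDetails instead of asserting driver.WithDetails themselves. A minimal sketch of what that helper presumably centralizes (an assumption — the real implementation may also add caching):
func GetStorageDetails(ctx context.Context, storage driver.Driver) (*model.StorageDetails, error) {
	wd, ok := storage.(driver.WithDetails)
	if !ok {
		return nil, errs.NotImplement
	}
	return wd.GetDetails(ctx)
}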

View File

@@ -15,6 +15,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
"golang.org/x/time/rate"
)
type Doubao struct {
@@ -23,6 +24,7 @@ type Doubao struct {
*UploadToken
UserId string
uploadThread int
limiter *rate.Limiter
}
func (d *Doubao) Config() driver.Config {
@@ -61,6 +63,17 @@ func (d *Doubao) Init(ctx context.Context) error {
d.UploadToken = uploadToken
}
if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
}
return nil
}
func (d *Doubao) WaitLimit(ctx context.Context) error {
if d.limiter != nil {
return d.limiter.Wait(ctx)
}
return nil
}
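WaitLimit gates every API call through golang.org/x/time/rate. A self-contained sketch of the limiter behavior with the default LimitRate of 2 (values are illustrative):
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	limiter := rate.NewLimiter(rate.Limit(2), 1) // 2 requests per second, burst of 1
	start := time.Now()
	for i := 0; i < 5; i++ {
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("wait:", err)
			return
		}
		// requests after the first are spaced roughly 500ms apart
		fmt.Printf("request %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
}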
@@ -69,6 +82,10 @@ func (d *Doubao) Drop(ctx context.Context) error {
}
func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var files []model.Obj
fileList, err := d.getFiles(dir.GetID(), "")
if err != nil {
@@ -95,6 +112,10 @@ func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
}
func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
var downloadUrl string
if u, ok := file.(*Object); ok {
@@ -160,6 +181,10 @@ func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r UploadNodeResp
_, err := d.request("/samantha/aispace/upload_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -177,6 +202,10 @@ func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
}
func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r UploadNodeResp
_, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -191,6 +220,10 @@ func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r BaseResp
_, err := d.request("/samantha/aispace/rename_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -207,6 +240,10 @@ func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
}
func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
var r BaseResp
_, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
@@ -215,6 +252,10 @@ func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
// Determine the data type from the MIME type
mimetype := file.GetMimetype()
dataType := FileDataType

View File

@@ -10,9 +10,10 @@ type Addition struct {
// driver.RootPath
driver.RootID
// define other
Cookie string `json:"cookie" type:"text"`
UploadThread string `json:"upload_thread" default:"3"`
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
Cookie string `json:"cookie" type:"text"`
UploadThread string `json:"upload_thread" default:"3"`
DownloadApi string `json:"download_api" type:"select" options:"get_file_url,get_download_info" default:"get_file_url"`
LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
}
var config = driver.Config{
@@ -23,6 +24,10 @@ var config = driver.Config{
func init() {
op.RegisterDriver(func() driver.Driver {
return &Doubao{}
return &Doubao{
Addition: Addition{
LimitRate: 2,
},
}
})
}

View File

@@ -486,7 +486,7 @@ func (d *Doubao) Upload(ctx context.Context, config *UploadConfig, dstDir model.
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", file.GetSize())},
"Content-Length": {strconv.FormatInt(file.GetSize(), 10)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)
@@ -577,7 +577,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
if partIndex == totalParts-1 {
size = fileSize - offset
}
var reader *stream.SectionReader
var reader io.ReadSeeker
var rateLimitedRd io.Reader
crc32Value := ""
threadG.GoWithLifecycle(errgroup.Lifecycle{
@@ -612,7 +612,7 @@ func (d *Doubao) UploadByMultipart(ctx context.Context, config *UploadConfig, fi
"Authorization": {storeInfo.Auth},
"Content-Type": {"application/octet-stream"},
"Content-Crc32": {crc32Value},
"Content-Length": {fmt.Sprintf("%d", size)},
"Content-Length": {strconv.FormatInt(size, 10)},
"Content-Disposition": {fmt.Sprintf("attachment; filename=%s", url.QueryEscape(storeInfo.StoreURI))},
}
res, err := base.HttpClient.Do(req)

View File

@@ -16,9 +16,10 @@ type Addition struct {
}
var config = driver.Config{
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
Name: "FebBox",
NoUpload: true,
DefaultRoot: "0",
LinkCacheMode: driver.LinkCacheIP,
}
func init() {

View File

@@ -113,9 +113,7 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
}
return &model.Link{
RangeReader: &model.FileRangeReader{
RangeReaderIF: stream.RateLimitRangeReaderFunc(resultRangeReader),
},
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
SyncClosers: utils.NewSyncClosers(utils.CloseFunc(conn.Quit)),
}, nil
}

View File

@@ -31,11 +31,11 @@ type Addition struct {
}
var config = driver.Config{
Name: "FTP",
LocalSort: true,
OnlyLinkMFile: false,
DefaultRoot: "/",
NoLinkURL: true,
Name: "FTP",
LocalSort: true,
OnlyProxy: true,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {

View File

@@ -51,6 +51,9 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
if d.Addition.ShowReadme {
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
if d.Addition.ShowSourceCode {
files = append(files, point.GetSourceCode()...)
}
} else if strings.HasPrefix(point.Point, path) { // parent directory of the repository directory
nextDir := GetNextDir(point.Point, path)
if nextDir == "" {
@@ -117,6 +120,10 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
}
files = append(files, point.GetReleaseByTagName(tagName)...)
if d.Addition.ShowSourceCode {
files = append(files, point.GetSourceCodeByTagName(tagName)...)
}
}
}
}

View File

@@ -10,6 +10,7 @@ type Addition struct {
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"OpenListTeam/OpenList" help:"structure:[path:]org/repo"`
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README, LICENSE files"`
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
ShowSourceCode bool `json:"show_source_code" type:"bool" default:"false" help:"show Source code (zip/tar.gz)"`
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
}

View File

@@ -143,6 +143,60 @@ func (m *MountPoint) GetAllVersionSize() int64 {
return size
}
func (m *MountPoint) GetSourceCode() []File {
files := make([]File, 0)
// The file size cannot be obtained, so it is set to 1 here
files = append(files, File{
Path: m.Point + "/" + "Source code (zip)",
FileName: "Source code (zip)",
Size: 1,
Type: "file",
UpdateAt: m.Release.CreatedAt,
CreateAt: m.Release.CreatedAt,
Url: m.Release.ZipballUrl,
})
files = append(files, File{
Path: m.Point + "/" + "Source code (tar.gz)",
FileName: "Source code (tar.gz)",
Size: 1,
Type: "file",
UpdateAt: m.Release.CreatedAt,
CreateAt: m.Release.CreatedAt,
Url: m.Release.TarballUrl,
})
return files
}
func (m *MountPoint) GetSourceCodeByTagName(tagName string) []File {
for _, item := range *m.Releases {
if item.TagName == tagName {
files := make([]File, 0)
files = append(files, File{
Path: m.Point + "/" + "Source code (zip)",
FileName: "Source code (zip)",
Size: 1,
Type: "file",
UpdateAt: item.CreatedAt,
CreateAt: item.CreatedAt,
Url: item.ZipballUrl,
})
files = append(files, File{
Path: m.Point + "/" + "Source code (tar.gz)",
FileName: "Source code (tar.gz)",
Size: 1,
Type: "file",
UpdateAt: item.CreatedAt,
CreateAt: item.CreatedAt,
Url: item.TarballUrl,
})
return files
}
}
return nil
}
func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
if m.OtherFile == nil || refresh {
resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")

View File

@@ -167,4 +167,30 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
return err
}
func (d *GoogleDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
if d.DisableDiskUsage {
return nil, errs.NotImplement
}
about, err := d.getAbout(ctx)
if err != nil {
return nil, err
}
var total, used uint64
if about.StorageQuota.Limit == nil {
total = 0
} else {
total, err = strconv.ParseUint(*about.StorageQuota.Limit, 10, 64)
if err != nil {
return nil, err
}
}
used, err = strconv.ParseUint(about.StorageQuota.Usage, 10, 64)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
}, nil
}
var _ driver.Driver = (*GoogleDrive)(nil)

View File

@@ -7,14 +7,15 @@ import (
type Addition struct {
driver.RootID
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/googleui/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/googleui/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5" help:"chunk size while uploading (unit: MB)"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
}
var config = driver.Config{

View File

@@ -78,3 +78,12 @@ type Error struct {
Message string `json:"message"`
} `json:"error"`
}
type AboutResp struct {
StorageQuota struct {
Limit *string `json:"limit"`
Usage string `json:"usage"`
UsageInDrive string `json:"usageInDrive"`
UsageInDriveTrash string `json:"usageInDriveTrash"`
}
}
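A self-contained sketch of how AboutResp decodes the Drive about response (the numbers are hypothetical; accounts with unlimited storage omit "limit", leaving Limit nil — the case GetDetails maps to total = 0):
package main

import (
	"encoding/json"
	"fmt"
)

type AboutResp struct {
	StorageQuota struct {
		Limit *string `json:"limit"`
		Usage string  `json:"usage"`
	} `json:"storageQuota"`
}

func main() {
	const sample = `{"storageQuota":{"limit":"16106127360","usage":"4294967296"}}`
	var r AboutResp
	if err := json.Unmarshal([]byte(sample), &r); err != nil {
		panic(err)
	}
	fmt.Println(*r.StorageQuota.Limit, r.StorageQuota.Usage) // 16106127360 4294967296
}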

View File

@@ -27,17 +27,25 @@ import (
// do others that not defined in Driver interface
// Google Drive API field constants
const (
// File list query fields
FilesListFields = "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken"
// Single file query fields
FileInfoFields = "id,name,mimeType,size,md5Checksum,sha1Checksum,sha256Checksum"
)
type googleDriveServiceAccount struct {
//Type string `json:"type"`
//ProjectID string `json:"project_id"`
//PrivateKeyID string `json:"private_key_id"`
// Type string `json:"type"`
// ProjectID string `json:"project_id"`
// PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
ClientEMail string `json:"client_email"`
//ClientID string `json:"client_id"`
//AuthURI string `json:"auth_uri"`
// ClientID string `json:"client_id"`
// AuthURI string `json:"auth_uri"`
TokenURI string `json:"token_uri"`
//AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url"`
//ClientX509CertURL string `json:"client_x509_cert_url"`
// AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url"`
// ClientX509CertURL string `json:"client_x509_cert_url"`
}
func (d *GoogleDrive) refreshToken() error {
@@ -235,7 +243,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
}
query := map[string]string{
"orderBy": orderBy,
"fields": "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
"fields": FilesListFields,
"pageSize": "1000",
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
//"includeItemsFromAllDrives": "true",
@@ -249,13 +257,84 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
return nil, err
}
pageToken = resp.NextPageToken
// Process shortcuts in batches; API calls are made only for file shortcuts
shortcutTargetIds := make([]string, 0)
shortcutIndices := make([]int, 0)
// Collect target IDs of all file shortcuts (skip folder shortcuts)
for i := range resp.Files {
if resp.Files[i].MimeType == "application/vnd.google-apps.shortcut" &&
resp.Files[i].ShortcutDetails.TargetId != "" &&
resp.Files[i].ShortcutDetails.TargetMimeType != "application/vnd.google-apps.folder" {
shortcutTargetIds = append(shortcutTargetIds, resp.Files[i].ShortcutDetails.TargetId)
shortcutIndices = append(shortcutIndices, i)
}
}
// Batch get target file info (only for file shortcuts)
if len(shortcutTargetIds) > 0 {
targetFiles := d.batchGetTargetFilesInfo(shortcutTargetIds)
// Update shortcut file info
for j, targetId := range shortcutTargetIds {
if targetFile, exists := targetFiles[targetId]; exists {
fileIndex := shortcutIndices[j]
if targetFile.Size != "" {
resp.Files[fileIndex].Size = targetFile.Size
}
if targetFile.MD5Checksum != "" {
resp.Files[fileIndex].MD5Checksum = targetFile.MD5Checksum
}
if targetFile.SHA1Checksum != "" {
resp.Files[fileIndex].SHA1Checksum = targetFile.SHA1Checksum
}
if targetFile.SHA256Checksum != "" {
resp.Files[fileIndex].SHA256Checksum = targetFile.SHA256Checksum
}
}
}
}
res = append(res, resp.Files...)
}
return res, nil
}
// getTargetFileInfo gets target file details for shortcuts
func (d *GoogleDrive) getTargetFileInfo(targetId string) (File, error) {
var targetFile File
url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s", targetId)
query := map[string]string{
"fields": FileInfoFields,
}
_, err := d.request(url, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &targetFile)
if err != nil {
return File{}, err
}
return targetFile, nil
}
// batchGetTargetFilesInfo fetches info for the target files sequentially, avoiding concurrency complexity
func (d *GoogleDrive) batchGetTargetFilesInfo(targetIds []string) map[string]File {
if len(targetIds) == 0 {
return make(map[string]File)
}
result := make(map[string]File)
// Sequential processing to avoid concurrency complexity
for _, targetId := range targetIds {
file, err := d.getTargetFileInfo(targetId)
if err == nil {
result[targetId] = file
}
}
return result
}
func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer, url string, up driver.UpdateProgress) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
defaultChunkSize := d.ChunkSize * 1024 * 1024
ss, err := stream.NewStreamSectionReader(file, int(defaultChunkSize), &up)
if err != nil {
return err
@@ -315,3 +394,18 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, file model.FileStreamer,
}
return nil
}
func (d *GoogleDrive) getAbout(ctx context.Context) (*AboutResp, error) {
query := map[string]string{
"fields": "storageQuota",
}
var resp AboutResp
_, err := d.request("https://www.googleapis.com/drive/v3/about", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
req.SetContext(ctx)
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}

View File

@@ -0,0 +1,111 @@
package halalcloudopen
import (
"sync"
"time"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
)
var (
slicePostErrorRetryInterval = time.Second * 120
retryTimes = 5
)
type halalCommon struct {
// *AuthService // login info
UserInfo *sdkUser.User // user info
refreshTokenFunc func(token string) error
// serv *AuthService
configs sync.Map
}
func (m *halalCommon) GetAccessToken() (string, error) {
value, exists := m.configs.Load("access_token")
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// GetRefreshToken implements ConfigStore.
func (m *halalCommon) GetRefreshToken() (string, error) {
value, exists := m.configs.Load("refresh_token")
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// SetAccessToken implements ConfigStore.
func (m *halalCommon) SetAccessToken(token string) error {
m.configs.Store("access_token", token)
return nil
}
// SetRefreshToken implements ConfigStore.
func (m *halalCommon) SetRefreshToken(token string) error {
m.configs.Store("refresh_token", token)
if m.refreshTokenFunc != nil {
return m.refreshTokenFunc(token)
}
return nil
}
// SetToken implements ConfigStore.
func (m *halalCommon) SetToken(accessToken string, refreshToken string, expiresIn int64) error {
m.configs.Store("access_token", accessToken)
m.configs.Store("refresh_token", refreshToken)
m.configs.Store("expires_in", expiresIn)
if m.refreshTokenFunc != nil {
return m.refreshTokenFunc(refreshToken)
}
return nil
}
// ClearConfigs implements ConfigStore.
func (m *halalCommon) ClearConfigs() error {
m.configs = sync.Map{} // reset the map
return nil
}
// DeleteConfig implements ConfigStore.
func (m *halalCommon) DeleteConfig(key string) error {
_, exists := m.configs.Load(key)
if !exists {
return nil // nothing to do if absent
}
m.configs.Delete(key) // delete the config entry
return nil
}
// GetConfig implements ConfigStore.
func (m *halalCommon) GetConfig(key string) (string, error) {
value, exists := m.configs.Load(key)
if !exists {
return "", nil // 如果不存在,返回空字符串
}
return value.(string), nil // 返回配置项的值
}
// ListConfigs implements ConfigStore.
func (m *halalCommon) ListConfigs() (map[string]string, error) {
configs := make(map[string]string)
m.configs.Range(func(key, value interface{}) bool {
configs[key.(string)] = value.(string) // add each entry to the map
return true // keep iterating
})
return configs, nil // return all config entries
}
// SetConfig implements ConfigStore.
func (m *halalCommon) SetConfig(key string, value string) error {
m.configs.Store(key, value) // set or update the entry via Store
return nil // nil on success
}
func NewHalalCommon() *halalCommon {
return &halalCommon{
configs: sync.Map{},
}
}
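A brief usage sketch of the ConfigStore implementation above (token values are hypothetical):
common := NewHalalCommon()
_ = common.SetToken("access-token", "refresh-token", 3600)
at, _ := common.GetAccessToken()  // "access-token"
rt, _ := common.GetRefreshToken() // "refresh-token"; SetRefreshToken also invokes refreshTokenFunc when one is set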

View File

@@ -0,0 +1,29 @@
package halalcloudopen
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkClient "github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
type HalalCloudOpen struct {
*halalCommon
model.Storage
Addition
sdkClient *sdkClient.Client
sdkUserFileService *sdkUserFile.UserFileService
sdkUserService *sdkUser.UserService
uploadThread int
}
func (d *HalalCloudOpen) Config() driver.Config {
return config
}
func (d *HalalCloudOpen) GetAddition() driver.Additional {
return &d.Addition
}
var _ driver.Driver = (*HalalCloudOpen)(nil)

View File

@@ -0,0 +1,131 @@
package halalcloudopen
import (
"context"
"strconv"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkModel "github.com/halalcloud/golang-sdk-lite/halalcloud/model"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
func (d *HalalCloudOpen) getFiles(ctx context.Context, dir model.Obj) ([]model.Obj, error) {
files := make([]model.Obj, 0)
limit := int64(100)
token := ""
for {
result, err := d.sdkUserFileService.List(ctx, &sdkUserFile.FileListRequest{
Parent: &sdkUserFile.File{Path: dir.GetPath()},
ListInfo: &sdkModel.ScanListRequest{
Limit: strconv.FormatInt(limit, 10),
Token: token,
},
})
if err != nil {
return nil, err
}
for i := 0; len(result.Files) > i; i++ {
files = append(files, NewObjFile(result.Files[i]))
}
if result.ListInfo == nil || result.ListInfo.Token == "" {
break
}
token = result.ListInfo.Token
}
return files, nil
}
func (d *HalalCloudOpen) makeDir(ctx context.Context, dir model.Obj, name string) (model.Obj, error) {
_, err := d.sdkUserFileService.Create(ctx, &sdkUserFile.File{
Path: dir.GetPath(),
Name: name,
})
return nil, err
}
func (d *HalalCloudOpen) move(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
oldDir := obj.GetPath()
newDir := dir.GetPath()
_, err := d.sdkUserFileService.Move(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Path: oldDir,
},
},
Dest: &sdkUserFile.File{
Path: newDir,
},
})
return nil, err
}
func (d *HalalCloudOpen) rename(ctx context.Context, obj model.Obj, name string) (model.Obj, error) {
_, err := d.sdkUserFileService.Rename(ctx, &sdkUserFile.File{
Path: obj.GetPath(),
Name: name,
})
return nil, err
}
func (d *HalalCloudOpen) copy(ctx context.Context, obj model.Obj, dir model.Obj) (model.Obj, error) {
id := obj.GetID()
sourcePath := obj.GetPath()
if len(id) > 0 {
sourcePath = ""
}
destID := dir.GetID()
destPath := dir.GetPath()
if len(destID) > 0 {
destPath = ""
}
dest := &sdkUserFile.File{
Path: destPath,
Identity: destID,
}
_, err := d.sdkUserFileService.Copy(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Path: sourcePath,
Identity: id,
},
},
Dest: dest,
})
return nil, err
}
func (d *HalalCloudOpen) remove(ctx context.Context, obj model.Obj) error {
id := obj.GetID()
_, err := d.sdkUserFileService.Delete(ctx, &sdkUserFile.BatchOperationRequest{
Source: []*sdkUserFile.File{
{
Identity: id,
Path: obj.GetPath(),
},
},
})
return err
}
func (d *HalalCloudOpen) details(ctx context.Context) (*model.StorageDetails, error) {
ret, err := d.sdkUserService.GetStatisticsAndQuota(ctx)
if err != nil {
return nil, err
}
total := uint64(ret.DiskStatisticsQuota.BytesQuota)
free := uint64(ret.DiskStatisticsQuota.BytesFree)
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: free,
},
}, nil
}

View File

@@ -0,0 +1,108 @@
package halalcloudopen
import (
"context"
"crypto/sha1"
"io"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/rclone/rclone/lib/readers"
)
func (d *HalalCloudOpen) getLink(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if args.Redirect {
// return nil, model.ErrUnsupported
fid := file.GetID()
fpath := file.GetPath()
if fid != "" {
fpath = ""
}
fi, err := d.sdkUserFileService.GetDirectDownloadAddress(ctx, &sdkUserFile.DirectDownloadRequest{
Identity: fid,
Path: fpath,
})
if err != nil {
return nil, err
}
expireAt := fi.ExpireAt
duration := time.Until(time.UnixMilli(expireAt))
return &model.Link{
URL: fi.DownloadAddress,
Expiration: &duration,
}, nil
}
result, err := d.sdkUserFileService.ParseFileSlice(ctx, &sdkUserFile.File{
Identity: file.GetID(),
Path: file.GetPath(),
})
if err != nil {
return nil, err
}
fileAddrs := []*sdkUserFile.SliceDownloadInfo{}
var addressDuration int64
nodesNumber := len(result.RawNodes)
nodesIndex := nodesNumber - 1
startIndex, endIndex := 0, nodesIndex
for nodesIndex >= 0 {
if nodesIndex >= 200 {
endIndex = 200
} else {
endIndex = nodesNumber
}
for ; endIndex <= nodesNumber; endIndex += 200 {
if endIndex == 0 {
endIndex = 1
}
sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
Identity: result.RawNodes[startIndex:endIndex],
Version: 1,
})
if err != nil {
return nil, err
}
addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
startIndex = endIndex
nodesIndex -= 200
}
}
size, _ := strconv.ParseInt(result.FileSize, 10, 64)
chunks := getChunkSizes(result.Sizes)
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if httpRange.Length < 0 || httpRange.Start+httpRange.Length >= size {
length = size - httpRange.Start
}
oo := &openObject{
ctx: ctx,
d: fileAddrs,
chunk: []byte{},
chunks: chunks,
skip: httpRange.Start,
sha: result.Sha1,
shaTemp: sha1.New(),
}
return readers.NewLimitedReadCloser(oo, length), nil
}
var duration time.Duration
if addressDuration != 0 {
duration = time.Until(time.UnixMilli(addressDuration))
} else {
duration = time.Until(time.Now().Add(time.Hour))
}
return &model.Link{
RangeReader: stream.RateLimitRangeReaderFunc(resultRangeReader),
Expiration: &duration,
}, nil
}
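The slice-address loop above batches RawNodes into groups of at most 200 per GetSliceDownloadAddress call; its intent, written plainly (a hypothetical simplification of the nested loop, not the driver's code):
for start := 0; start < len(result.RawNodes); start += 200 {
	end := min(start+200, len(result.RawNodes))
	sliceAddress, err := d.sdkUserFileService.GetSliceDownloadAddress(ctx, &sdkUserFile.SliceDownloadAddressRequest{
		Identity: result.RawNodes[start:end],
		Version:  1,
	})
	if err != nil {
		return nil, err
	}
	addressDuration, _ = strconv.ParseInt(sliceAddress.ExpireAt, 10, 64)
	fileAddrs = append(fileAddrs, sliceAddress.Addresses...)
}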

View File

@@ -0,0 +1,50 @@
package halalcloudopen
import (
"context"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/halalcloud/golang-sdk-lite/halalcloud/apiclient"
sdkUser "github.com/halalcloud/golang-sdk-lite/halalcloud/services/user"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
func (d *HalalCloudOpen) Init(ctx context.Context) error {
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, 3
}
if d.halalCommon == nil {
d.halalCommon = &halalCommon{
UserInfo: &sdkUser.User{},
refreshTokenFunc: func(token string) error {
d.Addition.RefreshToken = token
op.MustSaveDriverStorage(d)
return nil
},
}
}
if d.Addition.RefreshToken != "" {
d.halalCommon.SetRefreshToken(d.Addition.RefreshToken)
}
timeout := d.Addition.TimeOut
if timeout <= 0 {
timeout = 60
}
host := d.Addition.Host
if host == "" {
host = "openapi.2dland.cn"
}
client := apiclient.NewClient(nil, host, d.Addition.ClientID, d.Addition.ClientSecret, d.halalCommon, apiclient.WithTimeout(time.Second*time.Duration(timeout)))
d.sdkClient = client
d.sdkUserFileService = sdkUserFile.NewUserFileService(client)
d.sdkUserService = sdkUser.NewUserService(client)
userInfo, err := d.sdkUserService.Get(ctx, &sdkUser.User{})
if err != nil {
return err
}
d.halalCommon.UserInfo = userInfo
// Fetching the user info succeeded, which already validates the RefreshToken, so no further check is needed
return nil
}

View File

@@ -0,0 +1,48 @@
package halalcloudopen
import (
"context"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
)
func (d *HalalCloudOpen) Drop(ctx context.Context) error {
return nil
}
func (d *HalalCloudOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
return d.getFiles(ctx, dir)
}
func (d *HalalCloudOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return d.getLink(ctx, file, args)
}
func (d *HalalCloudOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
return d.makeDir(ctx, parentDir, dirName)
}
func (d *HalalCloudOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.move(ctx, srcObj, dstDir)
}
func (d *HalalCloudOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
return d.rename(ctx, srcObj, newName)
}
func (d *HalalCloudOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
return d.copy(ctx, srcObj, dstDir)
}
func (d *HalalCloudOpen) Remove(ctx context.Context, obj model.Obj) error {
return d.remove(ctx, obj)
}
func (d *HalalCloudOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.put(ctx, dstDir, stream, up)
}
func (d *HalalCloudOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
return d.details(ctx)
}

View File

@@ -0,0 +1,258 @@
package halalcloudopen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/ipfs/go-cid"
)
func (d *HalalCloudOpen) put(ctx context.Context, dstDir model.Obj, fileStream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
newPath := path.Join(dstDir.GetPath(), fileStream.GetName())
uploadTask, err := d.sdkUserFileService.CreateUploadTask(ctx, &sdkUserFile.File{
Path: newPath,
Size: fileStream.GetSize(),
})
if err != nil {
return nil, err
}
if uploadTask.Created {
return nil, nil
}
slicesList := make([]string, 0)
codec := uint64(0x55)
if uploadTask.BlockCodec > 0 {
codec = uint64(uploadTask.BlockCodec)
}
blockHashType := uploadTask.BlockHashType
mhType := uint64(0x12)
if blockHashType > 0 {
mhType = uint64(blockHashType)
}
prefix := cid.Prefix{
Codec: codec,
MhLength: -1,
MhType: mhType,
Version: 1,
}
blockSize := uploadTask.BlockSize
useSingleUpload := true
//
if fileStream.GetSize() <= int64(blockSize) || d.uploadThread <= 1 {
useSingleUpload = true
}
// Not sure whether FileStream supports concurrent read and write operations, so currently using single-threaded upload to ensure safety.
// read file
if useSingleUpload {
bufferSize := int(blockSize)
buffer := make([]byte, bufferSize)
reader := driver.NewLimitedUploadStream(ctx, fileStream)
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
// fileStream.Seek(0, os.SEEK_SET)
for {
n, err := teeReader.Read(buffer)
if n > 0 {
data := buffer[:n]
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
if err != nil {
return nil, err
}
slicesList = append(slicesList, uploadCid.String())
}
if err == io.EOF || n == 0 {
break
}
}
} else {
// TODO: implement multipart upload, currently using single-threaded upload to ensure safety.
bufferSize := int(blockSize)
buffer := make([]byte, bufferSize)
reader := driver.NewLimitedUploadStream(ctx, fileStream)
teeReader := io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up))
for {
n, err := teeReader.Read(buffer)
if n > 0 {
data := buffer[:n]
uploadCid, err := postFileSlice(ctx, data, uploadTask.Task, uploadTask.UploadAddress, prefix, retryTimes)
if err != nil {
return nil, err
}
slicesList = append(slicesList, uploadCid.String())
}
if err == io.EOF || n == 0 {
break
}
}
}
newFile, err := makeFile(ctx, slicesList, uploadTask.Task, uploadTask.UploadAddress, retryTimes)
if err != nil {
return nil, err
}
return NewObjFile(newFile), nil
}
func makeFile(ctx context.Context, fileSlice []string, taskID string, uploadAddress string, retry int) (*sdkUserFile.File, error) {
var lastError error = nil
for range retry {
newFile, err := doMakeFile(fileSlice, taskID, uploadAddress)
if err == nil {
return newFile, nil
}
if ctx.Err() != nil {
return nil, err
}
if strings.Contains(err.Error(), "not found") {
return nil, err
}
lastError = err
time.Sleep(slicePostErrorRetryInterval)
}
return nil, fmt.Errorf("mk file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doMakeFile(fileSlice []string, taskID string, uploadAddress string) (*sdkUserFile.File, error) {
accessUrl := uploadAddress + "/" + taskID
getTimeOut := time.Minute * 2
u, err := url.Parse(accessUrl)
if err != nil {
return nil, err
}
n, _ := json.Marshal(fileSlice)
httpRequest := http.Request{
Method: http.MethodPost,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
"Content-Type": {"application/json"},
//"Content-Length": {strconv.Itoa(len(n))},
},
Body: io.NopCloser(bytes.NewReader(n)),
}
httpClient := http.Client{
Timeout: getTimeOut,
}
httpResponse, err := httpClient.Do(&httpRequest)
if err != nil {
return nil, err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
b, _ := io.ReadAll(httpResponse.Body)
message := string(b)
return nil, fmt.Errorf("mk file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
}
b, _ := io.ReadAll(httpResponse.Body)
var result *sdkUserFile.File
err = json.Unmarshal(b, &result)
if err != nil {
return nil, err
}
return result, nil
}
func postFileSlice(ctx context.Context, fileSlice []byte, taskID string, uploadAddress string, prefix cid.Prefix, retry int) (cid.Cid, error) {
var lastError error = nil
for range retry {
newCid, err := doPostFileSlice(fileSlice, taskID, uploadAddress, prefix)
if err == nil {
return newCid, nil
}
if ctx.Err() != nil {
return cid.Undef, err
}
time.Sleep(slicePostErrorRetryInterval)
lastError = err
}
return cid.Undef, fmt.Errorf("upload file slice failed after %d times, error: %s", retry, lastError.Error())
}
func doPostFileSlice(fileSlice []byte, taskID string, uploadAddress string, prefix cid.Prefix) (cid.Cid, error) {
// 1. sum file slice
newCid, err := prefix.Sum(fileSlice)
if err != nil {
return cid.Undef, err
}
// 2. post file slice
sliceCidString := newCid.String()
// /{taskID}/{sliceID}
accessUrl := uploadAddress + "/" + taskID + "/" + sliceCidString
getTimeOut := time.Second * 30
// get {accessUrl} in {getTimeOut}
u, err := url.Parse(accessUrl)
if err != nil {
return cid.Undef, err
}
// header: accept: application/json
// header: content-type: application/octet-stream
// header: content-length: {fileSlice.length}
// header: x-content-cid: {sliceCidString}
// header: x-task-id: {taskID}
httpRequest := http.Request{
Method: http.MethodGet,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
},
}
httpClient := http.Client{
Timeout: getTimeOut,
}
httpResponse, err := httpClient.Do(&httpRequest)
if err != nil {
return cid.Undef, err
}
if httpResponse.StatusCode != http.StatusOK {
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d", httpResponse.StatusCode)
}
var result bool
b, err := io.ReadAll(httpResponse.Body)
if err != nil {
return cid.Undef, err
}
err = json.Unmarshal(b, &result)
if err != nil {
return cid.Undef, err
}
if result {
return newCid, nil
}
httpRequest = http.Request{
Method: http.MethodPost,
URL: u,
Header: map[string][]string{
"Accept": {"application/json"},
"Content-Type": {"application/octet-stream"},
// "Content-Length": {strconv.Itoa(len(fileSlice))},
},
Body: io.NopCloser(bytes.NewReader(fileSlice)),
}
httpResponse, err = httpClient.Do(&httpRequest)
if err != nil {
return cid.Undef, err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK && httpResponse.StatusCode != http.StatusCreated {
b, _ := io.ReadAll(httpResponse.Body)
message := string(b)
return cid.Undef, fmt.Errorf("upload file slice failed, status code: %d, message: %s", httpResponse.StatusCode, message)
}
//
return newCid, nil
}
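The cid.Prefix built in put and consumed by doPostFileSlice is a standard go-cid construction. A self-contained sketch of how a slice ID is derived (the input bytes are illustrative):
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

func main() {
	// CIDv1, raw codec (0x55), sha2-256 multihash (0x12) — the defaults used above
	prefix := cid.Prefix{Version: 1, Codec: 0x55, MhType: 0x12, MhLength: -1}
	c, err := prefix.Sum([]byte("example slice data"))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.String()) // becomes the {sliceID} in POST /{taskID}/{sliceID}
}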

View File

@@ -0,0 +1,32 @@
package halalcloudopen
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootPath
// define other
RefreshToken string `json:"refresh_token" required:"false" help:"If using a personal API approach, the RefreshToken is not required."`
UploadThread int `json:"upload_thread" type:"number" default:"3" help:"1 <= thread <= 32"`
ClientID string `json:"client_id" required:"true" default:""`
ClientSecret string `json:"client_secret" required:"true" default:""`
Host string `json:"host" required:"false" default:"openapi.2dland.cn"`
TimeOut int `json:"timeout" type:"number" default:"60" help:"timeout in seconds"`
}
var config = driver.Config{
Name: "HalalCloudOpen",
OnlyProxy: false,
DefaultRoot: "/",
NoLinkURL: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &HalalCloudOpen{}
})
}

View File

@@ -0,0 +1,60 @@
package halalcloudopen
import (
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
)
type ObjFile struct {
sdkFile *sdkUserFile.File
fileSize int64
modTime time.Time
createTime time.Time
}
func NewObjFile(f *sdkUserFile.File) model.Obj {
ofile := &ObjFile{sdkFile: f}
ofile.fileSize = f.Size
modTimeTs := f.UpdateTs
ofile.modTime = time.UnixMilli(modTimeTs)
createTimeTs := f.CreateTs
ofile.createTime = time.UnixMilli(createTimeTs)
return ofile
}
func (f *ObjFile) GetSize() int64 {
return f.fileSize
}
func (f *ObjFile) GetName() string {
return f.sdkFile.Name
}
func (f *ObjFile) ModTime() time.Time {
return f.modTime
}
func (f *ObjFile) IsDir() bool {
return f.sdkFile.Dir
}
func (f *ObjFile) GetHash() utils.HashInfo {
return utils.HashInfo{
// TODO: support more hash types
}
}
func (f *ObjFile) GetID() string {
return f.sdkFile.Identity
}
func (f *ObjFile) GetPath() string {
return f.sdkFile.Path
}
func (f *ObjFile) CreateTime() time.Time {
return f.createTime
}

View File

@@ -0,0 +1,185 @@
package halalcloudopen
import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"net/http"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
sdkUserFile "github.com/halalcloud/golang-sdk-lite/halalcloud/services/userfile"
"github.com/ipfs/go-cid"
)
// get the next chunk
func (oo *openObject) getChunk(_ context.Context) (err error) {
if oo.id >= len(oo.chunks) {
return io.EOF
}
var chunk []byte
err = utils.Retry(3, time.Second, func() (err error) {
chunk, err = getRawFiles(oo.d[oo.id])
return err
})
if err != nil {
return err
}
oo.id++
oo.chunk = chunk
return nil
}
// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return 0, fmt.Errorf("read on closed file")
}
// Skip data at the start if requested
for oo.skip > 0 {
//size := 1024 * 1024
_, size, err := oo.ChunkLocation(oo.id)
if err != nil {
return 0, err
}
if oo.skip < int64(size) {
break
}
oo.id++
oo.skip -= int64(size)
}
if len(oo.chunk) == 0 {
err = oo.getChunk(oo.ctx)
if err != nil {
return 0, err
}
if oo.skip > 0 {
oo.chunk = (oo.chunk)[oo.skip:]
oo.skip = 0
}
}
n = copy(p, oo.chunk)
oo.shaTemp.Write(p[:n])
oo.chunk = (oo.chunk)[n:]
return n, nil
}
// Close closed the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
oo.mu.Lock()
defer oo.mu.Unlock()
if oo.closed {
return nil
}
// verify the SHA1 checksum
if string(oo.shaTemp.Sum(nil)) != oo.sha {
return fmt.Errorf("failed to finish download: SHA mismatch")
}
oo.closed = true
return nil
}
func GetMD5Hash(text string) string {
tHash := md5.Sum([]byte(text))
return hex.EncodeToString(tHash[:])
}
type chunkSize struct {
position int64
size int
}
type openObject struct {
ctx context.Context
mu sync.Mutex
d []*sdkUserFile.SliceDownloadInfo
id int
skip int64
chunk []byte
chunks []chunkSize
closed bool
sha string
shaTemp hash.Hash
}
func getChunkSizes(sliceSize []*sdkUserFile.SliceSize) (chunks []chunkSize) {
chunks = make([]chunkSize, 0)
for _, s := range sliceSize {
// special-case the last slice
endIndex := s.EndIndex
startIndex := s.StartIndex
if endIndex == 0 {
endIndex = startIndex
}
for j := startIndex; j <= endIndex; j++ {
size := s.Size
chunks = append(chunks, chunkSize{position: j, size: int(size)})
}
}
return chunks
}
func (oo *openObject) ChunkLocation(id int) (position int64, size int, err error) {
if id < 0 || id >= len(oo.chunks) {
return 0, 0, errors.New("invalid arguments")
}
return (oo.chunks)[id].position, (oo.chunks)[id].size, nil
}
func getRawFiles(addr *sdkUserFile.SliceDownloadInfo) ([]byte, error) {
if addr == nil {
return nil, errors.New("addr is nil")
}
client := http.Client{
Timeout: 60 * time.Second, // 60-second timeout
}
resp, err := client.Get(addr.DownloadAddress)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("bad status: %s, body: %s", resp.Status, body)
}
if addr.Encrypt > 0 {
cd := uint8(addr.Encrypt)
for idx := 0; idx < len(body); idx++ {
body[idx] = body[idx] ^ cd
}
}
storeType := addr.StoreType
if storeType != 10 {
sourceCid, err := cid.Decode(addr.Identity)
if err != nil {
return nil, err
}
checkCid, err := sourceCid.Prefix().Sum(body)
if err != nil {
return nil, err
}
if !checkCid.Equals(sourceCid) {
return nil, fmt.Errorf("bad cid: %s, body: %s", checkCid.String(), body)
}
}
return body, nil
}

View File

@@ -97,13 +97,13 @@ func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs)
}
obj := model.Object{
ID: strconv.FormatInt(f.FileId, 10),
//Path: "",
// Path: "",
Name: f.FileName,
Size: f.FileSize * 1024,
Modified: updTime,
Ctime: updTime,
IsFolder: false,
//HashInfo: utils.HashInfo{},
// HashInfo: utils.HashInfo{},
}
if f.FileType == 2 {
obj.IsFolder = true
@@ -185,13 +185,13 @@ func (d *ILanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
}
return &model.Object{
ID: utils.Json.Get(res, "list", 0, "id").ToString(),
//Path: "",
// Path: "",
Name: dirName,
Size: 0,
Modified: time.Now(),
Ctime: time.Now(),
IsFolder: true,
//HashInfo: utils.HashInfo{},
// HashInfo: utils.HashInfo{},
}, nil
}
@@ -239,7 +239,7 @@ func (d *ILanZou) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
return &model.Object{
ID: srcObj.GetID(),
//Path: "",
// Path: "",
Name: newName,
Size: srcObj.GetSize(),
Modified: time.Now(),
@@ -392,7 +392,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
}
return &model.Object{
ID: strconv.FormatInt(file.FileId, 10),
//Path: ,
// Path: ,
Name: file.FileName,
Size: s.GetSize(),
Modified: s.ModTime(),
@@ -402,6 +402,22 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
}, nil
}
func (d *ILanZou) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
res, err := d.proved("/user/account/map", http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
})
if err != nil {
return nil, err
}
totalSize := utils.Json.Get(res, "map", "totalSize").ToUint64() * 1024
rewardSize := utils.Json.Get(res, "map", "rewardSize").ToUint64() * 1024
total := totalSize + rewardSize
used := utils.Json.Get(res, "map", "usedSize").ToUint64() * 1024
return &model.StorageDetails{
DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
}, nil
}
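The account map reports sizes in KB, hence the *1024 scaling above. A quick worked example with hypothetical values:
totalSize := uint64(1048576) // KB, hypothetical
rewardSize := uint64(524288) // KB, hypothetical
total := (totalSize + rewardSize) * 1024
fmt.Println(total) // 1610612736 bytes, i.e. 1.5 GiB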
//func (d *ILanZou) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

View File

@@ -1,7 +1,8 @@
package lanzou
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"net/http"
"regexp"
@@ -9,8 +10,6 @@ import (
"strings"
"time"
"unicode"
log "github.com/sirupsen/logrus"
)
const DAY time.Duration = 84600000000000
@@ -122,20 +121,26 @@ var findAcwScV2Reg = regexp.MustCompile(`arg1='([0-9A-Z]+)'`)
// When the page is accessed too frequently (or in some other cases), an obfuscated page is returned first; it computes an acw_sc__v2 value, injects it, and revisits the page before the normal page is served
// If the page is JS-obfuscated, decode it, compute acw_sc__v2, and add it to the cookie
func CalcAcwScV2(html string) (string, error) {
log.Debugln("acw_sc__v2", html)
acwScV2s := findAcwScV2Reg.FindStringSubmatch(html)
if len(acwScV2s) != 2 {
return "", fmt.Errorf("无法匹配acw_sc__v2")
func CalcAcwScV2(htmlContent string) (string, error) {
matches := findAcwScV2Reg.FindStringSubmatch(htmlContent)
if len(matches) != 2 {
return "", errors.New("无法匹配到 arg1 参数")
}
return HexXor(Unbox(acwScV2s[1]), "3000176000856006061501533003690027800375"), nil
arg1 := matches[1]
mask := "3000176000856006061501533003690027800375"
result, err := hexXor(unbox(arg1), mask)
if err != nil {
return "", fmt.Errorf("hexXor 操作失败: %w", err)
}
return result, nil
}
func Unbox(hex string) string {
func unbox(hex string) string {
var box = []int{6, 28, 34, 31, 33, 18, 30, 23, 9, 8, 19, 38, 17, 24, 0, 5, 32, 21, 10, 22, 25, 14, 15, 3, 16, 27, 13, 35, 2, 29, 11, 26, 4, 36, 1, 39, 37, 7, 20, 12}
var newBox = make([]byte, len(hex))
for i := 0; i < len(box); i++ {
j := box[i]
for i, j := range box {
if len(newBox) > j {
newBox[j] = hex[i]
}
@@ -143,14 +148,21 @@ func Unbox(hex string) string {
return string(newBox)
}
func HexXor(hex1, hex2 string) string {
out := bytes.NewBuffer(make([]byte, len(hex1)))
for i := 0; i < len(hex1) && i < len(hex2); i += 2 {
v1, _ := strconv.ParseInt(hex1[i:i+2], 16, 64)
v2, _ := strconv.ParseInt(hex2[i:i+2], 16, 64)
out.WriteString(strconv.FormatInt(v1^v2, 16))
func hexXor(hex1, hex2 string) (string, error) {
bytes1, err := hex.DecodeString(hex1)
if err != nil {
return "", fmt.Errorf("解码 hex1 失败: %w", err)
}
return out.String()
bytes2, err := hex.DecodeString(hex2)
if err != nil {
return "", fmt.Errorf("解码 hex2 失败: %w", err)
}
minLength := min(len(bytes2), len(bytes1))
resultBytes := make([]byte, minLength)
for i := range minLength {
resultBytes[i] = bytes1[i] ^ bytes2[i]
}
return hex.EncodeToString(resultBytes), nil
}
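A usage sketch of the rewritten helpers — the arg1 value below is illustrative, not a real challenge token:
htmlContent := `<script>var arg1='3000176000856006061501533003690027800375';</script>`
v, err := CalcAcwScV2(htmlContent)
if err != nil {
	// the page did not contain a parsable arg1
}
_ = v // sent back as the acw_sc__v2 cookie when the request is retried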
var findDataReg = regexp.MustCompile(`data[:\s]+({[^}]+})`) // 查找json

View File

@@ -3,6 +3,7 @@ package lanzou
import (
"errors"
"fmt"
"io"
"net/http"
"regexp"
"runtime"
@@ -94,36 +95,66 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
}
}
// Fix: every request now automatically handles acw_sc__v2 validation and down_ip=1
func (d *LanZou) request(url string, method string, callback base.ReqCallback, up bool) ([]byte, error) {
var req *resty.Request
if up {
once.Do(func() {
upClient = base.NewRestyClient().SetTimeout(120 * time.Second)
var vs string
for retry := 0; retry < 3; retry++ {
if up {
once.Do(func() {
upClient = base.NewRestyClient().SetTimeout(120 * time.Second)
})
req = upClient.R()
} else {
req = base.RestyClient.R()
}
req.SetHeaders(map[string]string{
"Referer": "https://pc.woozooo.com",
"User-Agent": d.UserAgent,
})
req = upClient.R()
} else {
req = base.RestyClient.R()
}
req.SetHeaders(map[string]string{
"Referer": "https://pc.woozooo.com",
"User-Agent": d.UserAgent,
})
// direct download links require down_ip=1
if strings.Contains(url, "/file/") {
cookie := d.Cookie
if cookie != "" {
cookie += "; "
}
cookie += "down_ip=1"
if vs != "" {
cookie += "; acw_sc__v2=" + vs
}
req.SetHeader("cookie", cookie)
} else if d.Cookie != "" {
cookie := d.Cookie
if vs != "" {
cookie += "; acw_sc__v2=" + vs
}
req.SetHeader("cookie", cookie)
} else if vs != "" {
req.SetHeader("cookie", "acw_sc__v2="+vs)
}
if d.Cookie != "" {
req.SetHeader("cookie", d.Cookie)
}
if callback != nil {
callback(req)
}
if callback != nil {
callback(req)
res, err := req.Execute(method, url)
if err != nil {
return nil, err
}
bodyStr := res.String()
log.Debugf("lanzou request: url=>%s ,stats=>%d ,body => %s\n", res.Request.URL, res.StatusCode(), bodyStr)
if strings.Contains(bodyStr, "acw_sc__v2") {
vs, err = CalcAcwScV2(bodyStr)
if err != nil {
return nil, err
}
continue
}
return res.Body(), err
}
res, err := req.Execute(method, url)
if err != nil {
return nil, err
}
log.Debugf("lanzou request: url=>%s ,stats=>%d ,body => %s\n", res.Request.URL, res.StatusCode(), res.String())
return res.Body(), err
return nil, errors.New("acw_sc__v2 validation error")
}
func (d *LanZou) Login() ([]*http.Cookie, error) {
@@ -430,27 +461,91 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
file.Time = timeFindReg.FindString(sharePageData)
// follow the redirect to obtain the real link
res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
}).Get(downloadUrl)
var (
res *resty.Response
err error
)
var vs string
var bodyStr string
for i := 0; i < 3; i++ {
res, err = base.NoRedirectClient.R().SetHeaders(map[string]string{
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
"Referer": baseUrl,
}).SetDoNotParseResponse(true).
SetCookie(&http.Cookie{
Name: "acw_sc__v2",
Value: vs,
}).SetHeader("cookie", "down_ip=1").Get(downloadUrl)
if err != nil {
return nil, err
}
if res.StatusCode() == 302 {
if res.RawBody() != nil {
res.RawBody().Close()
}
break
}
bodyBytes, err := io.ReadAll(res.RawBody())
if res.RawBody() != nil {
res.RawBody().Close()
}
if err != nil {
return nil, fmt.Errorf("读取响应体失败: %w", err)
}
bodyStr = string(bodyBytes)
if strings.Contains(bodyStr, "acw_sc__v2") {
if vs, err = CalcAcwScV2(bodyStr); err != nil {
log.Errorf("lanzou: err => acw_sc__v2 validation error ,data => %s\n", bodyStr)
return nil, err
}
continue
}
break
}
if err != nil {
return nil, err
}
file.Url = res.Header().Get("location")
// trigger validation
rPageData := res.String()
// the second validation step may itself trigger acw_sc__v2, so handle that case as well
if res.StatusCode() != 302 {
param, err = htmlJsonToMap(rPageData)
param, err = htmlJsonToMap(bodyStr)
if err != nil {
return nil, err
}
param["el"] = "2"
time.Sleep(time.Second * 2)
// pass the validation to obtain the direct link
data, err := d.post(fmt.Sprint(baseUrl, "/ajax.php"), func(req *resty.Request) { req.SetFormData(param) }, nil)
// pass the validation to obtain the direct link
var data []byte
for i := 0; i < 3; i++ {
data, err = d.post(fmt.Sprint(baseUrl, "/ajax.php"), func(req *resty.Request) {
req.SetFormData(param)
req.SetHeader("cookie", "down_ip=1")
if vs != "" {
req.SetCookie(&http.Cookie{
Name: "acw_sc__v2",
Value: vs,
})
}
}, nil)
if err != nil {
return nil, err
}
ajaxBodyStr := string(data)
if strings.Contains(ajaxBodyStr, "acw_sc__v2") {
if vs, err = CalcAcwScV2(ajaxBodyStr); err != nil {
log.Errorf("lanzou: err => acw_sc__v2 validation error ,data => %s\n", ajaxBodyStr)
return nil, err
}
time.Sleep(time.Second * 2)
continue
}
break
}
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,16 @@
//go:build !windows && !plan9 && !netbsd && !aix && !illumos && !solaris && !js
package local
import (
"os"
"path/filepath"
"syscall"
)
func copyNamedPipe(dstPath string, mode os.FileMode, dirMode os.FileMode) error {
if err := os.MkdirAll(filepath.Dir(dstPath), dirMode); err != nil {
return err
}
return syscall.Mkfifo(dstPath, uint32(mode))
}

View File

@@ -0,0 +1,9 @@
//go:build windows || plan9 || netbsd || aix || illumos || solaris || js
package local
import "os"
func copyNamedPipe(_ string, _, _ os.FileMode) error {
return nil
}

View File

@@ -23,7 +23,6 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/OpenListTeam/times"
cp "github.com/otiai10/copy"
log "github.com/sirupsen/logrus"
_ "golang.org/x/image/webp"
)
@@ -235,6 +234,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
fullPath := file.GetPath()
link := &model.Link{}
var MFile model.File
if args.Type == "thumb" && utils.Ext(file.GetName()) != "svg" {
var buf *bytes.Buffer
var thumbPath *string
@@ -261,9 +261,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
link.ContentLength = int64(stat.Size())
link.MFile = open
MFile = open
} else {
link.MFile = bytes.NewReader(buf.Bytes())
MFile = bytes.NewReader(buf.Bytes())
link.ContentLength = int64(buf.Len())
}
} else {
@@ -272,13 +272,11 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
link.ContentLength = file.GetSize()
link.MFile = open
}
link.AddIfCloser(link.MFile)
if !d.Config().OnlyLinkMFile {
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, link.MFile)
link.MFile = nil
MFile = open
}
link.SyncClosers.AddIfCloser(MFile)
link.RangeReader = stream.GetRangeReaderFromMFile(link.ContentLength, MFile)
link.RequireReference = link.SyncClosers.Length() > 0
return link, nil
}
@@ -298,16 +296,9 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
err := os.Rename(srcPath, dstPath)
if err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
// cross-device move: copy first, then delete
if err := d.Copy(ctx, srcObj, dstDir); err != nil {
return err
}
// after a successful copy, delete the source file/folder directly
if srcObj.IsDir() {
return os.RemoveAll(srcObj.GetPath())
}
return os.Remove(srcObj.GetPath())
if isCrossDeviceError(err) {
// cross-device move: hand off to a move task
return errs.NotImplement
}
if err == nil {
srcParent := filepath.Dir(srcPath)
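isCrossDeviceError is referenced here but not shown in this hunk; a plausible implementation (an assumption, not necessarily the repository's exact code) checks for EXDEV, which os.Rename reports via *os.LinkError:
import (
	"errors"
	"syscall"
)

func isCrossDeviceError(err error) bool {
	// errors.Is unwraps the *os.LinkError that os.Rename returns
	return err != nil && errors.Is(err, syscall.EXDEV)
}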
@@ -348,15 +339,14 @@ func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
if utils.IsSubPath(srcPath, dstPath) {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
// Copy using otiai10/copy to perform more secure & efficient copy
err := cp.Copy(srcPath, dstPath, cp.Options{
Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
PreserveTimes: true,
PreserveOwner: true,
})
info, err := os.Lstat(srcPath)
if err != nil {
return err
}
// copying a regular file returns errs.NotImplement, so it is converted to a copy task
if err = d.tryCopy(srcPath, dstPath, info); err != nil {
return err
}
if d.directoryMap.Has(filepath.Dir(dstPath)) {
d.directoryMap.UpdateDirSize(filepath.Dir(dstPath))
@@ -375,18 +365,26 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
err = os.Remove(obj.GetPath())
}
} else {
if !utils.Exists(d.RecycleBinPath) {
err = os.MkdirAll(d.RecycleBinPath, 0o755)
objPath := obj.GetPath()
objName := obj.GetName()
var relPath string
relPath, err = filepath.Rel(d.GetRootPath(), filepath.Dir(objPath))
if err != nil {
return err
}
recycleBinPath := filepath.Join(d.RecycleBinPath, relPath)
if !utils.Exists(recycleBinPath) {
err = os.MkdirAll(recycleBinPath, 0o755)
if err != nil {
return err
}
}
dstPath := filepath.Join(d.RecycleBinPath, obj.GetName())
dstPath := filepath.Join(recycleBinPath, objName)
if utils.Exists(dstPath) {
dstPath = filepath.Join(d.RecycleBinPath, obj.GetName()+"_"+time.Now().Format("20060102150405"))
dstPath = filepath.Join(recycleBinPath, objName+"_"+time.Now().Format("20060102150405"))
}
err = os.Rename(obj.GetPath(), dstPath)
err = os.Rename(objPath, dstPath)
}
if err != nil {
return err

View File

@@ -18,12 +18,12 @@ type Addition struct {
}
var config = driver.Config{
Name: "Local",
OnlyLinkMFile: false,
LocalSort: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
Name: "Local",
LocalSort: true,
OnlyProxy: true,
NoCache: true,
DefaultRoot: "/",
NoLinkURL: true,
}
func init() {

View File

@@ -3,6 +3,7 @@ package local
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
@@ -14,7 +15,9 @@ import (
"strings"
"sync"
"github.com/KarpelesLab/reflink"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/disintegration/imaging"
@@ -148,7 +151,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
return nil, nil, err
}
if d.ThumbCacheFolder != "" {
err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0666)
err = os.WriteFile(filepath.Join(d.ThumbCacheFolder, thumbName), buf.Bytes(), 0o666)
if err != nil {
return nil, nil, err
}
@@ -405,3 +408,79 @@ func (m *DirectoryMap) DeleteDirNode(dirname string) error {
return nil
}
func (d *Local) tryCopy(srcPath, dstPath string, info os.FileInfo) error {
if info.Mode()&os.ModeDevice != 0 {
return errors.New("cannot copy a device")
} else if info.Mode()&os.ModeSymlink != 0 {
return d.copySymlink(srcPath, dstPath)
} else if info.Mode()&os.ModeNamedPipe != 0 {
return copyNamedPipe(dstPath, info.Mode(), os.FileMode(d.mkdirPerm))
} else if info.IsDir() {
return d.recurAndTryCopy(srcPath, dstPath)
} else {
return tryReflinkCopy(srcPath, dstPath)
}
}
func (d *Local) copySymlink(srcPath, dstPath string) error {
linkOrig, err := os.Readlink(srcPath)
if err != nil {
return err
}
dstDir := filepath.Dir(dstPath)
if !filepath.IsAbs(linkOrig) {
srcDir := filepath.Dir(srcPath)
rel, err := filepath.Rel(dstDir, srcDir)
if err != nil {
rel, err = filepath.Abs(srcDir)
}
if err != nil {
return err
}
linkOrig = filepath.Clean(filepath.Join(rel, linkOrig))
}
err = os.MkdirAll(dstDir, os.FileMode(d.mkdirPerm))
if err != nil {
return err
}
return os.Symlink(linkOrig, dstPath)
}
func (d *Local) recurAndTryCopy(srcPath, dstPath string) error {
err := os.MkdirAll(dstPath, os.FileMode(d.mkdirPerm))
if err != nil {
return err
}
files, err := readDir(srcPath)
if err != nil {
return err
}
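// First pass: copy regular files and other non-directory entries.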
for _, f := range files {
if !f.IsDir() {
sp := filepath.Join(srcPath, f.Name())
dp := filepath.Join(dstPath, f.Name())
if err = d.tryCopy(sp, dp, f); err != nil {
return err
}
}
}
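// Second pass: recurse into subdirectories.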
for _, f := range files {
if f.IsDir() {
sp := filepath.Join(srcPath, f.Name())
dp := filepath.Join(dstPath, f.Name())
if err = d.recurAndTryCopy(sp, dp); err != nil {
return err
}
}
}
return nil
}
func tryReflinkCopy(srcPath, dstPath string) error {
err := reflink.Always(srcPath, dstPath)
if errors.Is(err, reflink.ErrReflinkUnsupported) || errors.Is(err, reflink.ErrReflinkFailed) || isCrossDeviceError(err) {
return errs.NotImplement
}
return err
}
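
tryReflinkCopy collapses every "reflink is not possible here" condition into errs.NotImplement so callers can degrade to a task-based byte copy. A standalone sketch of the same pattern, assuming placeholder paths:

package main

import (
	"errors"
	"fmt"

	"github.com/KarpelesLab/reflink"
)

func main() {
	// reflink.Always attempts a copy-on-write clone of src into dst.
	err := reflink.Always("/tmp/src.bin", "/tmp/dst.bin")
	switch {
	case err == nil:
		fmt.Println("cloned without copying any data")
	case errors.Is(err, reflink.ErrReflinkUnsupported), errors.Is(err, reflink.ErrReflinkFailed):
		fmt.Println("reflink unavailable here; fall back to a regular copy")
	default:
		fmt.Println("copy failed:", err)
	}
}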

View File

@@ -3,11 +3,13 @@
package local
import (
"errors"
"io/fs"
"strings"
"syscall"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"golang.org/x/sys/unix"
)
func isHidden(f fs.FileInfo, _ string) bool {
@@ -27,3 +29,7 @@ func getDiskUsage(path string) (model.DiskUsage, error) {
FreeSpace: free,
}, nil
}
func isCrossDeviceError(err error) bool {
return errors.Is(err, unix.EXDEV)
}

View File

@@ -49,3 +49,7 @@ func getDiskUsage(path string) (model.DiskUsage, error) {
FreeSpace: freeBytes,
}, nil
}
func isCrossDeviceError(err error) bool {
return errors.Is(err, windows.ERROR_NOT_SAME_DEVICE)
}

drivers/mediafire/driver.go Normal file (431 lines)
View File

@@ -0,0 +1,431 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21
Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/
import (
"context"
"fmt"
"math/rand"
"net/http"
"strconv"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"golang.org/x/time/rate"
)
type Mediafire struct {
model.Storage
Addition
cron *cron.Cron
actionToken string
limiter *rate.Limiter
appBase string
apiBase string
hostBase string
maxRetries int
secChUa string
secChUaPlatform string
userAgent string
}
func (d *Mediafire) Config() driver.Config {
return config
}
func (d *Mediafire) GetAddition() driver.Additional {
return &d.Addition
}
// Init initializes the MediaFire driver with session token and cookie validation
func (d *Mediafire) Init(ctx context.Context) error {
if d.SessionToken == "" {
return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
}
if d.Cookie == "" {
return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
}
// Setup rate limiter if rate limit is configured
if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
}
// Validate and refresh session token if needed
if _, err := d.getSessionToken(ctx); err != nil {
d.renewToken(ctx)
// Avoid the ~10-minute token expiry by renewing every 6-9 minutes
num := rand.Intn(4) + 6
d.cron = cron.NewCron(time.Minute * time.Duration(num))
d.cron.Do(func() {
// Hacky but reliable way to refresh the session token
d.renewToken(ctx)
})
}
return nil
}
// Drop cleans up driver resources
func (d *Mediafire) Drop(ctx context.Context) error {
// Clear cached resources
d.actionToken = ""
if d.cron != nil {
d.cron.Stop()
d.cron = nil
}
return nil
}
// List retrieves files and folders from the specified directory
func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files, err := d.getFiles(ctx, dir.GetID())
if err != nil {
return nil, err
}
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
return d.fileToObj(src), nil
})
}
// Link generates a direct download link for the specified file
func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
if err != nil {
return nil, err
}
res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Head(downloadUrl)
if err != nil {
return nil, err
}
defer func() {
_ = res.RawBody().Close()
}()
if res.StatusCode() == 302 {
downloadUrl = res.Header().Get("location")
}
return &model.Link{
URL: downloadUrl,
Header: http.Header{
"Origin": []string{d.appBase},
"Referer": []string{d.appBase + "/"},
"sec-ch-ua": []string{d.secChUa},
"sec-ch-ua-platform": []string{d.secChUaPlatform},
"User-Agent": []string{d.userAgent},
},
}, nil
}
// MakeDir creates a new folder in the specified parent directory
func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
data := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"parent_key": parentDir.GetID(),
"foldername": dirName,
}
var resp MediafireFolderCreateResponse
_, err := d.postForm(ctx, "/folder/create.php", data, &resp)
if err != nil {
return nil, err
}
if err := checkAPIResult(resp.Response.Result); err != nil {
return nil, err
}
created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)
return &model.Object{
ID: resp.Response.FolderKey,
Name: resp.Response.Name,
Size: 0,
Modified: created,
Ctime: created,
IsFolder: true,
}, nil
}
// Move relocates a file or folder to a different parent directory
func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var data map[string]string
var endpoint string
if srcObj.IsDir() {
endpoint = "/folder/move.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key_src": srcObj.GetID(),
"folder_key_dst": dstDir.GetID(),
}
} else {
endpoint = "/file/move.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": srcObj.GetID(),
"folder_key": dstDir.GetID(),
}
}
var resp MediafireMoveResponse
_, err := d.postForm(ctx, endpoint, data, &resp)
if err != nil {
return nil, err
}
if err := checkAPIResult(resp.Response.Result); err != nil {
return nil, err
}
return srcObj, nil
}
// Rename changes the name of a file or folder
func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var data map[string]string
var endpoint string
if srcObj.IsDir() {
endpoint = "/folder/update.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key": srcObj.GetID(),
"foldername": newName,
}
} else {
endpoint = "/file/update.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": srcObj.GetID(),
"filename": newName,
}
}
var resp MediafireRenameResponse
_, err := d.postForm(ctx, endpoint, data, &resp)
if err != nil {
return nil, err
}
if err := checkAPIResult(resp.Response.Result); err != nil {
return nil, err
}
return &model.Object{
ID: srcObj.GetID(),
Name: newName,
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
Ctime: srcObj.CreateTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
// Copy creates a duplicate of a file or folder in the specified destination directory
func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var data map[string]string
var endpoint string
if srcObj.IsDir() {
endpoint = "/folder/copy.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key_src": srcObj.GetID(),
"folder_key_dst": dstDir.GetID(),
}
} else {
endpoint = "/file/copy.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": srcObj.GetID(),
"folder_key": dstDir.GetID(),
}
}
var resp MediafireCopyResponse
_, err := d.postForm(ctx, endpoint, data, &resp)
if err != nil {
return nil, err
}
if err := checkAPIResult(resp.Response.Result); err != nil {
return nil, err
}
var newID string
if srcObj.IsDir() {
if len(resp.Response.NewFolderKeys) > 0 {
newID = resp.Response.NewFolderKeys[0]
}
} else {
if len(resp.Response.NewQuickKeys) > 0 {
newID = resp.Response.NewQuickKeys[0]
}
}
return &model.Object{
ID: newID,
Name: srcObj.GetName(),
Size: srcObj.GetSize(),
Modified: srcObj.ModTime(),
Ctime: srcObj.CreateTime(),
IsFolder: srcObj.IsDir(),
}, nil
}
// Remove deletes a file or folder permanently
func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
var data map[string]string
var endpoint string
if obj.IsDir() {
endpoint = "/folder/delete.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key": obj.GetID(),
}
} else {
endpoint = "/file/delete.php"
data = map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"quick_key": obj.GetID(),
}
}
var resp MediafireRemoveResponse
_, err := d.postForm(ctx, endpoint, data, &resp)
if err != nil {
return err
}
return checkAPIResult(resp.Response.Result)
}
// Put uploads a file to the specified directory with support for resumable upload and quick upload
func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
fileHash := file.GetHash().GetHash(utils.SHA256)
var err error
// Try to use existing hash first, cache only if necessary
if len(fileHash) != utils.SHA256.Width {
_, fileHash, err = stream.CacheFullAndHash(file, &up, utils.SHA256)
if err != nil {
return nil, err
}
}
checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
if err != nil {
return nil, err
}
if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
up(100.0)
existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
if err == nil && existingFile != nil {
// File exists, return existing file info
return &model.Object{
ID: existingFile.GetID(),
Name: file.GetName(),
Size: file.GetSize(),
}, nil
}
// If getExistingFileInfo fails, fall back to the normal upload path
// so the upload doesn't break because of search issues
}
var pollKey string
if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {
pollKey, err = d.uploadUnits(ctx, file, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
if err != nil {
return nil, err
}
} else {
pollKey = checkResp.Response.ResumableUpload.UploadKey
up(100.0)
}
pollResp, err := d.pollUpload(ctx, pollKey)
if err != nil {
return nil, err
}
return &model.Object{
ID: pollResp.Response.Doupload.QuickKey,
Name: file.GetName(),
Size: file.GetSize(),
}, nil
}
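
For orientation, a condensed sketch of the three outcomes Put distinguishes, using the MediafireCheckResponse fields defined in types.go (a hypothetical helper summarizing the logic, not the driver's literal control flow):

func describeUploadOutcome(check *MediafireCheckResponse) string {
	r := check.Response
	switch {
	case r.HashExists == "yes" && r.InAccount == "yes":
		return "instant: the account already holds this content, reuse it"
	case r.ResumableUpload.AllUnitsReady != "yes":
		return "resumable: upload the units the bitmap marks missing, then poll"
	default:
		return "staged: every unit is already server-side, poll immediately"
	}
}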
func (d *Mediafire) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
data := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
}
var resp MediafireUserInfoResponse
_, err := d.postForm(ctx, "/user/get_info.php", data, &resp)
if err != nil {
return nil, err
}
used, err := strconv.ParseUint(resp.Response.UserInfo.UsedStorageSize, 10, 64)
if err != nil {
return nil, err
}
total, err := strconv.ParseUint(resp.Response.UserInfo.StorageLimit, 10, 64)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: total,
FreeSpace: total - used,
},
}, nil
}
var _ driver.Driver = (*Mediafire)(nil)

drivers/mediafire/meta.go Normal file (61 lines)
View File

@@ -0,0 +1,61 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21
Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/
import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/op"
)
type Addition struct {
driver.RootPath
//driver.RootID
SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
Cookie string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"100"`
UploadThreads int `json:"upload_threads" type:"number" default:"3" help:"concurrent upload threads"`
LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
}
var config = driver.Config{
Name: "MediaFire",
LocalSort: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Mediafire{
appBase: "https://app.mediafire.com",
apiBase: "https://www.mediafire.com/api/1.5",
hostBase: "https://www.mediafire.com",
maxRetries: 3,
secChUa: "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
secChUaPlatform: "Windows",
userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
}
})
}

drivers/mediafire/types.go Normal file (246 lines)
View File

@@ -0,0 +1,246 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
*/
type MediafireRenewTokenResponse struct {
Response struct {
Action string `json:"action"`
SessionToken string `json:"session_token"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireResponse struct {
Response struct {
Action string `json:"action"`
FolderContent struct {
ChunkSize string `json:"chunk_size"`
ContentType string `json:"content_type"`
ChunkNumber string `json:"chunk_number"`
FolderKey string `json:"folderkey"`
Folders []MediafireFolder `json:"folders,omitempty"`
Files []MediafireFile `json:"files,omitempty"`
MoreChunks string `json:"more_chunks"`
} `json:"folder_content"`
Result string `json:"result"`
} `json:"response"`
}
type MediafireFolder struct {
FolderKey string `json:"folderkey"`
Name string `json:"name"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
}
type MediafireFile struct {
QuickKey string `json:"quickkey"`
Filename string `json:"filename"`
Size string `json:"size"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
MimeType string `json:"mimetype"`
}
type File struct {
ID string
Name string
Size int64
CreatedUTC string
IsFolder bool
}
type FolderContentResponse struct {
Folders []MediafireFolder
Files []MediafireFile
MoreChunks bool
}
type MediafireLinksResponse struct {
Response struct {
Action string `json:"action"`
Links []struct {
QuickKey string `json:"quickkey"`
View string `json:"view"`
NormalDownload string `json:"normal_download"`
OneTime struct {
Download string `json:"download"`
View string `json:"view"`
} `json:"one_time"`
} `json:"links"`
OneTimeKeyRequestCount string `json:"one_time_key_request_count"`
OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireDirectDownloadResponse struct {
Response struct {
Action string `json:"action"`
Links []struct {
QuickKey string `json:"quickkey"`
DirectDownload string `json:"direct_download"`
} `json:"links"`
DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireFolderCreateResponse struct {
Response struct {
Action string `json:"action"`
FolderKey string `json:"folder_key"`
UploadKey string `json:"upload_key"`
ParentFolderKey string `json:"parent_folderkey"`
Name string `json:"name"`
Description string `json:"description"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
Privacy string `json:"privacy"`
FileCount string `json:"file_count"`
FolderCount string `json:"folder_count"`
Revision string `json:"revision"`
DropboxEnabled string `json:"dropbox_enabled"`
Flag string `json:"flag"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireMoveResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
NewNames []string `json:"new_names"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireRenameResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireCopyResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
NewQuickKeys []string `json:"new_quickkeys,omitempty"`
NewFolderKeys []string `json:"new_folderkeys,omitempty"`
SkippedCount string `json:"skipped_count,omitempty"`
OtherCount string `json:"other_count,omitempty"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireRemoveResponse struct {
Response struct {
Action string `json:"action"`
Asynchronous string `json:"asynchronous,omitempty"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
NewDeviceRevision int `json:"new_device_revision"`
} `json:"response"`
}
type MediafireCheckResponse struct {
Response struct {
Action string `json:"action"`
HashExists string `json:"hash_exists"`
InAccount string `json:"in_account"`
InFolder string `json:"in_folder"`
FileExists string `json:"file_exists"`
ResumableUpload struct {
AllUnitsReady string `json:"all_units_ready"`
NumberOfUnits string `json:"number_of_units"`
UnitSize string `json:"unit_size"`
Bitmap struct {
Count string `json:"count"`
Words []string `json:"words"`
} `json:"bitmap"`
UploadKey string `json:"upload_key"`
} `json:"resumable_upload"`
AvailableSpace string `json:"available_space"`
UsedStorageSize string `json:"used_storage_size"`
StorageLimit string `json:"storage_limit"`
StorageLimitExceeded string `json:"storage_limit_exceeded"`
UploadURL struct {
Simple string `json:"simple"`
SimpleFallback string `json:"simple_fallback"`
Resumable string `json:"resumable"`
ResumableFallback string `json:"resumable_fallback"`
} `json:"upload_url"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireActionTokenResponse struct {
Response struct {
Action string `json:"action"`
ActionToken string `json:"action_token"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafirePollResponse struct {
Response struct {
Action string `json:"action"`
Doupload struct {
Result string `json:"result"`
Status string `json:"status"`
Description string `json:"description"`
QuickKey string `json:"quickkey"`
Hash string `json:"hash"`
Filename string `json:"filename"`
Size string `json:"size"`
Created string `json:"created"`
CreatedUTC string `json:"created_utc"`
Revision string `json:"revision"`
} `json:"doupload"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireFileSearchResponse struct {
Response struct {
Action string `json:"action"`
FileInfo []File `json:"file_info"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}
type MediafireUserInfoResponse struct {
Response struct {
Action string `json:"action"`
UserInfo struct {
Email string `json:"email"`
DisplayName string `json:"display_name"`
UsedStorageSize string `json:"used_storage_size"`
StorageLimit string `json:"storage_limit"`
} `json:"user_info"`
Result string `json:"result"`
CurrentAPIVersion string `json:"current_api_version"`
} `json:"response"`
}

drivers/mediafire/util.go Normal file (729 lines)
View File

@@ -0,0 +1,729 @@
package mediafire
/*
Package mediafire
Author: Da3zKi7<da3zki7@duck.com>
Date: 2025-09-11
D@' 3z K!7 - The King Of Cracking
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
Date: 2025-09-21
Date: 2025-09-26
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
*/
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
// checkAPIResult validates MediaFire API response result and returns error if not successful
func checkAPIResult(result string) error {
if result != "Success" {
return fmt.Errorf("MediaFire API error: %s", result)
}
return nil
}
// getSessionToken retrieves and validates session token from MediaFire
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
if d.limiter != nil {
if err := d.limiter.Wait(ctx); err != nil {
return "", fmt.Errorf("rate limit wait failed: %w", err)
}
}
tokenURL := d.hostBase + "/application/get_session_token.php"
req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
if err != nil {
return "", err
}
req.Header.Set("Accept", "*/*")
req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
req.Header.Set("Accept-Language", "en-US,en;q=0.9")
req.Header.Set("Content-Length", "0")
req.Header.Set("Cookie", d.Cookie)
req.Header.Set("DNT", "1")
req.Header.Set("Origin", d.hostBase)
req.Header.Set("Priority", "u=1, i")
req.Header.Set("Referer", (d.hostBase + "/"))
req.Header.Set("Sec-Ch-Ua", d.secChUa)
req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
req.Header.Set("Sec-Fetch-Dest", "empty")
req.Header.Set("Sec-Fetch-Mode", "cors")
req.Header.Set("Sec-Fetch-Site", "same-site")
req.Header.Set("User-Agent", d.userAgent)
// req.Header.Set("Connection", "keep-alive")
resp, err := base.HttpClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
// fmt.Printf("getSessionToken :: Raw response: %s\n", string(body))
// fmt.Printf("getSessionToken :: Parsed response: %+v\n", resp)
var tokenResp struct {
Response struct {
SessionToken string `json:"session_token"`
} `json:"response"`
}
if resp.StatusCode == 200 {
if err := json.Unmarshal(body, &tokenResp); err != nil {
return "", err
}
if tokenResp.Response.SessionToken == "" {
return "", fmt.Errorf("empty session token received")
}
cookieMap := make(map[string]string)
for _, cookie := range resp.Cookies() {
cookieMap[cookie.Name] = cookie.Value
}
if len(cookieMap) > 0 {
var cookies []string
for name, value := range cookieMap {
cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
}
d.Cookie = strings.Join(cookies, "; ")
op.MustSaveDriverStorage(d)
// fmt.Printf("getSessionToken :: Captured cookies: %s\n", d.Cookie)
}
} else {
return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
}
d.SessionToken = tokenResp.Response.SessionToken
// fmt.Printf("Init :: Obtain Session Token %v", d.SessionToken)
op.MustSaveDriverStorage(d)
return d.SessionToken, nil
}
// renewToken refreshes the current session token when expired
func (d *Mediafire) renewToken(ctx context.Context) error {
query := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
}
var resp MediafireRenewTokenResponse
_, err := d.postForm(ctx, "/user/renew_session_token.php", query, &resp)
if err != nil {
return fmt.Errorf("failed to renew token: %w", err)
}
// fmt.Printf("getInfo :: Raw response: %s\n", string(body))
// fmt.Printf("getInfo :: Parsed response: %+v\n", resp)
if resp.Response.Result != "Success" {
return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
}
d.SessionToken = resp.Response.SessionToken
// fmt.Printf("Init :: Renew Session Token: %s", resp.Response.Result)
op.MustSaveDriverStorage(d)
return nil
}
func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
// Pre-allocate slice with reasonable capacity to reduce memory allocations
files := make([]File, 0, d.ChunkSize*2) // Estimate: ChunkSize for files + folders
hasMore := true
chunkNumber := 1
for hasMore {
resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
if err != nil {
return nil, err
}
// Process folders and files in single loop to improve cache locality
totalItems := len(resp.Folders) + len(resp.Files)
if cap(files)-len(files) < totalItems {
// Grow slice if needed
newFiles := make([]File, len(files), len(files)+totalItems+int(d.ChunkSize))
copy(newFiles, files)
files = newFiles
}
for _, folder := range resp.Folders {
files = append(files, File{
ID: folder.FolderKey,
Name: folder.Name,
Size: 0,
CreatedUTC: folder.CreatedUTC,
IsFolder: true,
})
}
for _, file := range resp.Files {
size, _ := strconv.ParseInt(file.Size, 10, 64)
files = append(files, File{
ID: file.QuickKey,
Name: file.Filename,
Size: size,
CreatedUTC: file.CreatedUTC,
IsFolder: false,
})
}
hasMore = resp.MoreChunks
chunkNumber++
}
return files, nil
}
func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
if err != nil {
return nil, err
}
filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
if err != nil {
return nil, err
}
return &FolderContentResponse{
Folders: foldersResp.Response.FolderContent.Folders,
Files: filesResp.Response.FolderContent.Files,
MoreChunks: foldersResp.Response.FolderContent.MoreChunks == "yes" || filesResp.Response.FolderContent.MoreChunks == "yes",
}, nil
}
func (d *Mediafire) getFolderContentByType(ctx context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
data := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"folder_key": folderKey,
"content_type": contentType,
"chunk": strconv.Itoa(chunkNumber),
"chunk_size": strconv.FormatInt(d.ChunkSize, 10),
"details": "yes",
"order_direction": d.OrderDirection,
"order_by": d.OrderBy,
"filter": "",
}
var resp MediafireResponse
_, err := d.postForm(ctx, "/folder/get_content.php", data, &resp)
if err != nil {
return nil, err
}
if err := checkAPIResult(resp.Response.Result); err != nil {
return nil, err
}
return &resp, nil
}
// fileToObj converts MediaFire file data to model.ObjThumb with thumbnail support
func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)
var thumbnailURL string
if !f.IsFolder && f.ID != "" {
thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
}
return &model.ObjThumb{
Object: model.Object{
ID: f.ID,
// Path: "",
Name: f.Name,
Size: f.Size,
Modified: created,
Ctime: created,
IsFolder: f.IsFolder,
},
Thumbnail: model.Thumbnail{
Thumbnail: thumbnailURL,
},
}
}
func (d *Mediafire) setCommonHeaders(req *resty.Request) {
req.SetHeaders(map[string]string{
"Cookie": d.Cookie,
"User-Agent": d.userAgent,
"Origin": d.appBase,
"Referer": d.appBase + "/",
})
}
// apiRequest performs HTTP request to MediaFire API with rate limiting and common headers
func (d *Mediafire) apiRequest(ctx context.Context, method, endpoint string, queryParams, formData map[string]string, resp interface{}) ([]byte, error) {
if d.limiter != nil {
if err := d.limiter.Wait(ctx); err != nil {
return nil, fmt.Errorf("rate limit wait failed: %w", err)
}
}
req := base.RestyClient.R()
req.SetContext(ctx)
d.setCommonHeaders(req)
// Set query parameters for GET requests
if queryParams != nil {
req.SetQueryParams(queryParams)
}
// Set form data for POST requests
if formData != nil {
req.SetFormData(formData)
req.SetHeader("Content-Type", "application/x-www-form-urlencoded")
}
// Set response object if provided
if resp != nil {
req.SetResult(resp)
}
var res *resty.Response
var err error
// Execute request based on method
switch method {
case "GET":
res, err = req.Get(d.apiBase + endpoint)
case "POST":
res, err = req.Post(d.apiBase + endpoint)
default:
return nil, fmt.Errorf("unsupported HTTP method: %s", method)
}
if err != nil {
return nil, err
}
return res.Body(), nil
}
func (d *Mediafire) getForm(ctx context.Context, endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
return d.apiRequest(ctx, "GET", endpoint, query, nil, resp)
}
func (d *Mediafire) postForm(ctx context.Context, endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
return d.apiRequest(ctx, "POST", endpoint, nil, data, resp)
}
func (d *Mediafire) getDirectDownloadLink(ctx context.Context, fileID string) (string, error) {
data := map[string]string{
"session_token": d.SessionToken,
"quick_key": fileID,
"link_type": "direct_download",
"response_format": "json",
}
var resp MediafireDirectDownloadResponse
_, err := d.getForm(ctx, "/file/get_links.php", data, &resp)
if err != nil {
return "", err
}
if err := checkAPIResult(resp.Response.Result); err != nil {
return "", err
}
if len(resp.Response.Links) == 0 {
return "", fmt.Errorf("no download links found")
}
return resp.Response.Links[0].DirectDownload, nil
}
func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
actionToken, err := d.getActionToken(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get action token: %w", err)
}
query := map[string]string{
"session_token": actionToken, /* d.SessionToken */
"filename": filename,
"size": strconv.FormatInt(filesize, 10),
"hash": filehash,
"folder_key": folderKey,
"resumable": "yes",
"response_format": "json",
}
var resp MediafireCheckResponse
_, err = d.postForm(ctx, "/upload/check.php", query, &resp)
if err != nil {
return nil, err
}
// fmt.Printf("uploadCheck :: Raw response: %s\n", string(body))
// fmt.Printf("uploadCheck :: Parsed response: %+v\n", resp)
// fmt.Printf("uploadCheck :: ResumableUpload section: %+v\n", resp.Response.ResumableUpload)
// fmt.Printf("uploadCheck :: Upload key specifically: '%s'\n", resp.Response.ResumableUpload.UploadKey)
if err := checkAPIResult(resp.Response.Result); err != nil {
return nil, err
}
return &resp, nil
}
func (d *Mediafire) uploadUnits(ctx context.Context, file model.FileStreamer, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
uploadKey := checkResp.Response.ResumableUpload.UploadKey
stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
intWords := make([]int, 0, len(stringWords))
for _, word := range stringWords {
if intWord, err := strconv.Atoi(word); err == nil {
intWords = append(intWords, intWord)
}
}
// Intelligent buffer sizing for large files
bufferSize := int(unitSize)
fileSize := file.GetSize()
// Split in chunks
if fileSize > d.ChunkSize*1024*1024 {
// Large, use ChunkSize (default = 100MB)
bufferSize = min(int(fileSize), int(d.ChunkSize)*1024*1024)
} else if fileSize > 10*1024*1024 {
// Medium, use full file size for concurrent access
bufferSize = int(fileSize)
}
// Create stream section reader for efficient chunking
ss, err := stream.NewStreamSectionReader(file, bufferSize, &up)
if err != nil {
return "", err
}
// Use the smaller of MediaFire's reported unit count and the configured thread count,
// letting the resumable-upload response cap the custom value
// For big files this usually resolves to d.UploadThreads rather than MediaFire's suggestion (e.g. 5 threads)
thread := min(numUnits, d.UploadThreads)
// Create ordered group for sequential upload processing with retry logic
threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
var finalUploadKey string
var keyMutex sync.Mutex
fileSize = file.GetSize()
for unitID := range numUnits {
if utils.IsCanceled(uploadCtx) {
break
}
start := int64(unitID) * unitSize
size := unitSize
if start+size > fileSize {
size = fileSize - start
}
var reader io.ReadSeeker
var rateLimitedRd io.Reader
var unitHash string
// Use lifecycle pattern for proper resource management
threadG.GoWithLifecycle(errgroup.Lifecycle{
Before: func(ctx context.Context) error {
// Skip already uploaded units
if d.isUnitUploaded(intWords, unitID) {
return ss.DiscardSection(start, size)
}
var err error
reader, err = ss.GetSectionReader(start, size)
if err != nil {
return err
}
rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
return nil
},
Do: func(ctx context.Context) error {
if reader == nil {
return nil // Skip if reader is not initialized (already uploaded)
}
if unitHash == "" {
reader.Seek(0, io.SeekStart)
var err error
unitHash, err = utils.HashReader(utils.SHA256, reader)
if err != nil {
return err
}
}
reader.Seek(0, io.SeekStart)
// Perform upload
actionToken, err := d.getActionToken(ctx)
if err != nil {
return err
}
if d.limiter != nil {
if err := d.limiter.Wait(ctx); err != nil {
return fmt.Errorf("rate limit wait failed: %w", err)
}
}
url := d.apiBase + "/upload/resumable.php"
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, rateLimitedRd)
if err != nil {
return err
}
q := req.URL.Query()
q.Add("folder_key", folderKey)
q.Add("response_format", "json")
q.Add("session_token", actionToken)
q.Add("key", uploadKey)
req.URL.RawQuery = q.Encode()
req.Header.Set("x-filehash", fileHash)
req.Header.Set("x-filesize", strconv.FormatInt(fileSize, 10))
req.Header.Set("x-unit-id", strconv.Itoa(unitID))
req.Header.Set("x-unit-size", strconv.FormatInt(size, 10))
req.Header.Set("x-unit-hash", unitHash)
req.Header.Set("x-filename", filename)
req.Header.Set("Content-Type", "application/octet-stream")
req.ContentLength = size
/* fmt.Printf("Debug resumable upload request:\n")
fmt.Printf(" URL: %s\n", req.URL.String())
fmt.Printf(" Headers: %+v\n", req.Header)
fmt.Printf(" Unit ID: %d\n", unitID)
fmt.Printf(" Unit Size: %d\n", len(unitData))
fmt.Printf(" Upload Key: %s\n", uploadKey)
fmt.Printf(" Action Token: %s\n", actionToken) */
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("failed to read response body: %v", err)
}
// fmt.Printf("MediaFire resumable upload response (status %d): %s\n", res.StatusCode, string(body))
var uploadResp struct {
Response struct {
Doupload struct {
Key string `json:"key"`
} `json:"doupload"`
Result string `json:"result"`
} `json:"response"`
}
if err := json.Unmarshal(body, &uploadResp); err != nil {
return fmt.Errorf("failed to parse response: %v", err)
}
if res.StatusCode != 200 {
return fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
}
// Thread-safe update of final upload key
keyMutex.Lock()
finalUploadKey = uploadResp.Response.Doupload.Key
keyMutex.Unlock()
return nil
},
After: func(err error) {
up(float64(threadG.Success()) * 100 / float64(numUnits))
if reader != nil {
// Cleanup resources
ss.FreeSectionReader(reader)
}
},
})
}
if err := threadG.Wait(); err != nil {
return "", err
}
return finalUploadKey, nil
}
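
The buffer-sizing branch near the top of uploadUnits picks between three regimes; restated as a self-contained helper (hypothetical name; sizes in bytes, chunkSizeMB is d.ChunkSize with its default of 100):

func pickBufferSize(fileSize, unitSize, chunkSizeMB int64) int64 {
	switch {
	case fileSize > chunkSizeMB*1024*1024:
		// Large file: cap the buffer at ChunkSize worth of bytes.
		return min(fileSize, chunkSizeMB*1024*1024)
	case fileSize > 10*1024*1024:
		// Medium file: buffer the whole file for concurrent section reads.
		return fileSize
	default:
		// Small file: one resumable-upload unit is enough.
		return unitSize
	}
}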
/*func (d *Mediafire) uploadSingleUnit(ctx context.Context, file model.FileStreamer, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string, fileSize int64) (string, error) {
start := int64(unitID) * unitSize
size := unitSize
if start+size > fileSize {
size = fileSize - start
}
unitData := make([]byte, size)
_, err := file.Read(unitData)
if err != nil {
return "", err
}
return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
}*/
func (d *Mediafire) getActionToken(ctx context.Context) (string, error) {
if d.actionToken != "" {
return d.actionToken, nil
}
data := map[string]string{
"type": "upload",
"lifespan": "1440",
"response_format": "json",
"session_token": d.SessionToken,
}
var resp MediafireActionTokenResponse
_, err := d.postForm(ctx, "/user/get_action_token.php", data, &resp)
if err != nil {
return "", err
}
if resp.Response.Result != "Success" {
return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
}
return resp.Response.ActionToken, nil
}
func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
actionToken, err := d.getActionToken(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get action token: %w", err)
}
// fmt.Printf("Debug Key: %+v\n", key)
query := map[string]string{
"key": key,
"response_format": "json",
"session_token": actionToken, /* d.SessionToken */
}
var resp MediafirePollResponse
_, err = d.postForm(ctx, "/upload/poll_upload.php", query, &resp)
if err != nil {
return nil, err
}
// fmt.Printf("pollUpload :: Raw response: %s\n", string(body))
// fmt.Printf("pollUpload :: Parsed response: %+v\n", resp)
// fmt.Printf("pollUpload :: Debug Result: %+v\n", resp.Response.Result)
if err := checkAPIResult(resp.Response.Result); err != nil {
return nil, err
}
return &resp, nil
}
func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
wordIndex := unitID / 16
bitIndex := unitID % 16
if wordIndex >= len(words) {
return false
}
return (words[wordIndex]>>bitIndex)&1 == 1
}
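
The bitmap words returned by upload/check pack sixteen unit flags per word, least-significant bit first, which is exactly what the arithmetic above decodes. A standalone example:

package main

import "fmt"

func main() {
	// First word 0b101 (= 5): units 0 and 2 already uploaded, unit 1 missing.
	words := []int{0b101}
	for unit := 0; unit < 3; unit++ {
		done := (words[unit/16]>>(unit%16))&1 == 1
		fmt.Printf("unit %d uploaded: %v\n", unit, done)
	}
}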
func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
// First try to find by hash directly (most efficient)
if fileInfo, err := d.getFileByHash(ctx, fileHash); err == nil && fileInfo != nil {
return fileInfo, nil
}
// If hash search fails, search in the target folder
// This is a fallback method in case the file exists but hash search doesn't work
files, err := d.getFiles(ctx, folderKey)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Name == filename && !file.IsFolder {
return d.fileToObj(file), nil
}
}
return nil, fmt.Errorf("existing file not found")
}
func (d *Mediafire) getFileByHash(ctx context.Context, hash string) (*model.ObjThumb, error) {
query := map[string]string{
"session_token": d.SessionToken,
"response_format": "json",
"hash": hash,
}
var resp MediafireFileSearchResponse
_, err := d.postForm(ctx, "/file/get_info.php", query, &resp)
if err != nil {
return nil, err
}
if resp.Response.Result != "Success" {
return nil, fmt.Errorf("MediaFire file search failed: %s", resp.Response.Result)
}
if len(resp.Response.FileInfo) == 0 {
return nil, fmt.Errorf("file not found by hash")
}
file := resp.Response.FileInfo[0]
return d.fileToObj(file), nil
}

View File

@@ -57,18 +57,22 @@ func setBody(body interface{}) base.ReqCallback {
}
func handleFolderId(dir model.Obj) interface{} {
if dir.GetID() == "" {
return nil
if isRootFolder(dir) {
return nil // Root folder doesn't need folderId
}
return dir.GetID()
}
func isRootFolder(dir model.Obj) bool {
return dir.GetID() == ""
}
// API layer methods
func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
var files []MFile
var body map[string]string
if dir.GetPath() != "/" {
if !isRootFolder(dir) {
body = map[string]string{"folderId": dir.GetID()}
} else {
body = map[string]string{}
@@ -85,7 +89,7 @@ func (d *Misskey) getFiles(dir model.Obj) ([]model.Obj, error) {
func (d *Misskey) getFolders(dir model.Obj) ([]model.Obj, error) {
var folders []MFolder
var body map[string]string
if dir.GetPath() != "/" {
if !isRootFolder(dir) {
body = map[string]string{"folderId": dir.GetID()}
} else {
body = map[string]string{}
@@ -197,16 +201,24 @@ func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileSt
Reader: stream,
UpdateProgress: up,
})
// Build form data, only add folderId if not root folder
formData := map[string]string{
"name": stream.GetName(),
"comment": "",
"isSensitive": "false",
"force": "false",
}
folderId := handleFolderId(dstDir)
if folderId != nil {
formData["folderId"] = folderId.(string)
}
req := base.RestyClient.R().
SetContext(ctx).
SetFileReader("file", stream.GetName(), reader).
SetFormData(map[string]string{
"folderId": handleFolderId(dstDir).(string),
"name": stream.GetName(),
"comment": "",
"isSensitive": "false",
"force": "false",
}).
SetFormData(formData).
SetResult(&file).
SetAuthToken(d.AccessToken)

View File

@@ -22,6 +22,7 @@ type Onedrive struct {
AccessToken string
root *Object
mutex sync.Mutex
ref *Onedrive
}
func (d *Onedrive) Config() driver.Config {
@@ -36,10 +37,22 @@ func (d *Onedrive) Init(ctx context.Context) error {
if d.ChunkSize < 1 {
d.ChunkSize = 5
}
if d.ref != nil {
return nil
}
return d.refreshToken()
}
func (d *Onedrive) InitReference(refStorage driver.Driver) error {
if ref, ok := refStorage.(*Onedrive); ok {
d.ref = ref
return nil
}
return errs.NotSupport
}
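
InitReference lets one OneDrive storage borrow another's credentials: Init skips its own token refresh when ref is set, and Request (further down) forwards to the referenced driver. A hypothetical wiring sketch:

// shareAuth is an assumed helper, not part of the driver.
func shareAuth(primary, secondary *Onedrive) error {
	// Returns errs.NotSupport if the referenced storage is not an *Onedrive.
	if err := secondary.InitReference(primary); err != nil {
		return err
	}
	// From here on, secondary.Request(...) delegates to primary.Request(...).
	return nil
}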
func (d *Onedrive) Drop(ctx context.Context) error {
d.ref = nil
return nil
}
@@ -207,4 +220,35 @@ func (d *Onedrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
return err
}
func (d *Onedrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
if d.DisableDiskUsage {
return nil, errs.NotImplement
}
drive, err := d.getDrive(ctx)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: drive.Quota.Total,
FreeSpace: drive.Quota.Remaining,
},
}, nil
}
func (d *Onedrive) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
// GetDirectUploadInfo returns the direct upload info for OneDrive
func (d *Onedrive) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
}
var _ driver.Driver = (*Onedrive)(nil)

View File

@@ -7,17 +7,19 @@ import (
type Addition struct {
driver.RootPath
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
IsSharepoint bool `json:"is_sharepoint"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
IsSharepoint bool `json:"is_sharepoint"`
UseOnlineAPI bool `json:"use_online_api" default:"true"`
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
RefreshToken string `json:"refresh_token" required:"true"`
SiteId string `json:"site_id"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
}
var config = driver.Config{

View File

@@ -89,3 +89,15 @@ type FileSystemInfoFacet struct {
CreatedDateTime time.Time `json:"createdDateTime,omitempty"` // The UTC date and time the file was created on a client.
LastModifiedDateTime time.Time `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client.
}
type DriveResp struct {
ID string `json:"id"`
DriveType string `json:"driveType"`
Quota struct {
Deleted uint64 `json:"deleted"`
Remaining uint64 `json:"remaining"`
State string `json:"state"`
Total uint64 `json:"total"`
Used uint64 `json:"used"`
} `json:"quota"`
}

View File

@@ -133,7 +133,10 @@ func (d *Onedrive) _refreshToken() error {
return nil
}
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *Onedrive) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
if d.ref != nil {
return d.ref.Request(url, method, callback, resp)
}
req := base.RestyClient.R()
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if callback != nil {
@@ -149,7 +152,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback,
return nil, err
}
if e.Error.Code != "" {
if e.Error.Code == "InvalidAuthenticationToken" {
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
err = d.refreshToken()
if err != nil {
return nil, err
@@ -295,3 +298,48 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
}
return nil
}
func (d *Onedrive) getDrive(ctx context.Context) (*DriveResp, error) {
var api string
host, _ := onedriveHostMap[d.Region]
if d.IsSharepoint {
api = fmt.Sprintf("%s/v1.0/sites/%s/drive", host.Api, d.SiteId)
} else {
api = fmt.Sprintf("%s/v1.0/me/drive", host.Api)
}
var resp DriveResp
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp, true)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *Onedrive) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
// Create upload session
url := d.GetMetaUrl(false, path) + "/createUploadSession"
metadata := map[string]any{
"item": map[string]any{
"@microsoft.graph.conflictBehavior": "rename",
},
}
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
if err != nil {
return nil, err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
if uploadUrl == "" {
return nil, fmt.Errorf("failed to get upload URL from response")
}
return &model.HttpDirectUploadInfo{
UploadURL: uploadUrl,
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
Method: "PUT",
}, nil
}
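
A client consuming the returned HttpDirectUploadInfo would PUT each chunk straight to uploadUrl; Microsoft Graph upload sessions identify the chunk via a Content-Range header. A hypothetical client-side sketch (imports assumed: bytes, fmt, net/http):

func putChunk(info *model.HttpDirectUploadInfo, chunk []byte, start, total int64) error {
	req, err := http.NewRequest(info.Method, info.UploadURL, bytes.NewReader(chunk))
	if err != nil {
		return err
	}
	end := start + int64(len(chunk)) - 1
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, total))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("chunk upload failed: %s", resp.Status)
	}
	return nil
}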

View File

@@ -206,4 +206,34 @@ func (d *OnedriveAPP) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
return err
}
func (d *OnedriveAPP) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
if d.DisableDiskUsage {
return nil, errs.NotImplement
}
drive, err := d.getDrive(ctx)
if err != nil {
return nil, err
}
return &model.StorageDetails{
DiskUsage: model.DiskUsage{
TotalSpace: drive.Quota.Total,
FreeSpace: drive.Quota.Remaining,
},
}, nil
}
func (d *OnedriveAPP) GetDirectUploadTools() []string {
if !d.EnableDirectUpload {
return nil
}
return []string{"HttpDirect"}
}
func (d *OnedriveAPP) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
if !d.EnableDirectUpload {
return nil, errs.NotImplement
}
return d.getDirectUploadInfo(ctx, path.Join(dstDir.GetPath(), fileName))
}
var _ driver.Driver = (*OnedriveAPP)(nil)

View File

@@ -7,13 +7,15 @@ import (
type Addition struct {
driver.RootPath
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
TenantID string `json:"tenant_id"`
Email string `json:"email"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
ClientID string `json:"client_id" required:"true"`
ClientSecret string `json:"client_secret" required:"true"`
TenantID string `json:"tenant_id"`
Email string `json:"email"`
ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
}
var config = driver.Config{

View File

@@ -72,3 +72,15 @@ type Files struct {
Value []File `json:"value"`
NextLink string `json:"@odata.nextLink"`
}
type DriveResp struct {
ID string `json:"id"`
DriveType string `json:"driveType"`
Quota struct {
Deleted uint64 `json:"deleted"`
Remaining uint64 `json:"remaining"`
State string `json:"state"`
Total uint64 `json:"total"`
Used uint64 `json:"used"`
} `json:"quota"`
}

View File

@@ -88,7 +88,7 @@ func (d *OnedriveAPP) _accessToken() error {
return nil
}
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
if callback != nil {
@@ -104,7 +104,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallba
return nil, err
}
if e.Error.Code != "" {
if e.Error.Code == "InvalidAuthenticationToken" {
if e.Error.Code == "InvalidAuthenticationToken" && !utils.IsBool(noRetry...) {
err = d.accessToken()
if err != nil {
return nil, err
@@ -209,3 +209,43 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
}
return nil
}
func (d *OnedriveAPP) getDrive(ctx context.Context) (*DriveResp, error) {
host, _ := onedriveHostMap[d.Region]
api := fmt.Sprintf("%s/v1.0/users/%s/drive", host.Api, d.Email)
var resp DriveResp
_, err := d.Request(api, http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, &resp, true)
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *OnedriveAPP) getDirectUploadInfo(ctx context.Context, path string) (*model.HttpDirectUploadInfo, error) {
// Create upload session
url := d.GetMetaUrl(false, path) + "/createUploadSession"
metadata := map[string]any{
"item": map[string]any{
"@microsoft.graph.conflictBehavior": "rename",
},
}
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
if err != nil {
return nil, err
}
uploadUrl := jsoniter.Get(res, "uploadUrl").ToString()
if uploadUrl == "" {
return nil, fmt.Errorf("failed to get upload URL from response")
}
return &model.HttpDirectUploadInfo{
UploadURL: uploadUrl,
ChunkSize: d.ChunkSize * 1024 * 1024, // Convert MB to bytes
Method: "PUT",
}, nil
}

Some files were not shown because too many files have changed in this diff.