mirror of
https://github.com/OpenListTeam/OpenList.git
synced 2025-11-25 11:29:29 +08:00
feat(drivers): add MediaFire driver support (#1322)
* feat(drivers): add MediaFire driver support (#9319) - Implement complete MediaFire storage driver - Add authentication via session_token and cookie - Support all core operations: List, Get, Link, Put, Copy, Move, Remove, Rename, MakeDir - Include thumbnail generation for media files - Handle MediaFire's resumable upload API with multi-unit transfers - Add proper error handling and progress reporting Co-authored-by: Da3zKi7 <da3zki7@duck.com> * fix(mediafire): fix code errors in mediafire * fix(mediafire): fix code errors in mediafire * fix(drivers): add session renewal cron for MediaFire driver (#9321) - Implement automatic session token renewal every 6-9 minutes - Add validation for required SessionToken and Cookie fields in Init - Handle session expiration by calling renewToken on validation failure - Prevent storage failures due to MediaFire session timeouts Fixes session closure issues that occur after server restarts or extended periods. Co-authored-by: Da3zKi7 <da3zki7@duck.com> * docs: restore README changes Signed-off-by: ILoveScratch <ilovescratch@foxmail.com> * fix * fix * fix: add stream upload limit * fix * fix: clear action token on drop and refactor header setting * feat(drivers/mediafire): optimize file caching - support direct stream processing - Remove forced caching to *os.File type - Support generic model.File interface for better flexibility - Improve upload efficiency by avoiding unnecessary file conversions - Fix return type to use model.Object instead of model.ObjThumb * feat(drivers/mediafire): improve global rate limiting - Ensure all API methods properly use context for rate limiting - Fix context parameter usage in getDirectDownloadLink, getActionToken, getFileByHash - Maintain consistent rate limiting across all MediaFire API calls * feat(drivers/mediafire): unify return types - remove unnecessary ObjThumb - Change MakeDir, Rename, Copy methods to return model.Object instead of model.ObjThumb - Remove empty Thumbnail fields where not 
meaningful - Keep ObjThumb only for fileToObj (List operations) which provides actual thumbnail URLs - Improve code consistency and reduce unnecessary wrapper objects * refactor(drivers/mediafire): extract common error handling logic - Add checkAPIResult helper function to reduce code duplication - Replace repetitive MediaFire API error checks with centralized function - Maintain specific error messages for unique cases (token, upload, search) - Improve code maintainability and consistency * enhance(drivers/mediafire): improve quick upload implementation - Add null check for existingFile to prevent potential issues - Improve error handling in quick upload - continue normal upload if search fails - Add detailed comments explaining quick upload logic - Optimize getExistingFileInfo with clearer fallback strategy - Ensure upload reliability even when file search encounters issues * refactor(drivers/mediafire): optimize request method reusability - Extract common HTTP request logic into apiRequest method - Reduce code duplication between getForm and postForm methods - Maintain backward compatibility with existing method signatures - Centralize rate limiting and header management - Support extensible HTTP method handling * docs(drivers/mediafire): add comprehensive English comments - Add function-level comments for all major driver methods - Document Init, List, Link, MakeDir, Move, Rename, Copy, Remove, Put methods - Add comments for key utility functions including session token management - Improve code readability and maintainability for community collaboration - Follow Go documentation conventions with clear, concise descriptions * perf(mediafire): optimize memory allocation and type assertion performance - Pre-allocate slice capacity in getFiles and bitmap conversion to reduce reallocations - Cache file type check in uploadUnits to avoid repeated type assertions - Add uploadSingleUnitOptimized for os.File to eliminate redundant type checks - Optimize string to int 
conversion with proper error handling - Improve memory efficiency in file upload operations * fix(mediafire): upload without cache * feat(mediafire): add rate limiting to all API methods - Add WaitLimit(ctx) calls to all driver methods: List, Link, MakeDir, Move, Rename, Copy, Remove, Put - Ensure consistent rate limiting across all MediaFire API interactions - Follow project standard pattern used by other drivers * feat(mediafire): improve error handling consistency - Add context parameter to all HTTP API functions for proper context propagation - Update getForm, postForm and apiRequest to accept context parameter - Fix rate limiting to use caller context instead of background context - Ensure consistent error handling patterns across all API calls - Improve cancellation and timeout support * feat(mediafire): refactor resumableUpload to use io.ReadSeeker and improve upload handling * fix(mediafire): release section reader * feat: add disk usage * feat(drivers/mediafire): support concurrent upload (#1387) * feat(drivers): add MediaFire driver with concurrent upload support - Implement complete MediaFire storage driver with session token authentication - Support all core operations: List, Get, Link, Put, Copy, Move, Remove, Rename, MakeDir - Include thumbnail generation for media files - Handle MediaFire's resumable upload with intelligent and multi-unit transfers - Support concurrent chunk uploads using errgroup.NewOrderedGroupWithContext, using splitted file caching for large files - Optimize memory usage with adaptive buffer sizing (10MB-100MB (default)) - Include rate limiting and retry logic for API requests - Add proper error handling and progress reporting - Handle MediaFire's bitmap-based resumable upload protocol Closes PR #1322 * feat(stream): add DiscardSection method to StreamSectionReader for skipping data * feat(mediafire): refactor resumableUpload logic for improved upload handling and error management * fix(mediafire): stop cron job and clear action 
token in Drop method * . * fix(mediafire): optimize buffer sizing logic in uploadUnits method * fix(docs): remove duplicate MediaFire * fix(mediafire): revert 'optimization', large files should not be fully chached. --------- Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Co-authored-by: Da3zKi7 <da3zki7@duck.com> Co-authored-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com> Co-authored-by: j2rong4cn <j2rong@qq.com> Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> * fix(mediafire): optimize hash calculation in uploadUnits function * feat(drivers/mediafire): support concurrent upload (#1366) * feat(drivers): add MediaFire driver with concurrent upload support - Implement complete MediaFire storage driver with session token authentication - Support all core operations: List, Get, Link, Put, Copy, Move, Remove, Rename, MakeDir - Include thumbnail generation for media files - Handle MediaFire's resumable upload with intelligent and multi-unit transfers - Support concurrent chunk uploads using errgroup.NewOrderedGroupWithContext, using splitted file caching for large files - Optimize memory usage with adaptive buffer sizing (10MB-100MB (default)) - Include rate limiting and retry logic for API requests - Add proper error handling and progress reporting - Handle MediaFire's bitmap-based resumable upload protocol Closes PR #1322 * feat(stream): add DiscardSection method to StreamSectionReader for skipping data * feat(mediafire): refactor resumableUpload logic for improved upload handling and error management * fix(mediafire): stop cron job and clear action token in Drop method * . * fix(mediafire): optimize buffer sizing logic in uploadUnits method * fix(docs): remove duplicate MediaFire * fix(mediafire): revert 'optimization', large files should not be fully chached. 
--------- Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Signed-off-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com> Co-authored-by: j2rong4cn <j2rong@qq.com> Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> --------- Signed-off-by: ILoveScratch <ilovescratch@foxmail.com> Signed-off-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com> Signed-off-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com> Co-authored-by: D@' 3z K!7 <99719341+Da3zKi7@users.noreply.github.com> Co-authored-by: Da3zKi7 <da3zki7@duck.com> Co-authored-by: KirCute <951206789@qq.com> Co-authored-by: Suyunmeng <Susus0175@proton.me> Co-authored-by: j2rong4cn <j2rong@qq.com> Co-authored-by: j2rong4cn <36783515+j2rong4cn@users.noreply.github.com>
This commit is contained in:
@@ -65,6 +65,7 @@ Thank you for your support and understanding of the OpenList project.
|
||||
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
||||
- [x] Teambition([China](https://www.teambition.com), [International](https://us.teambition.com))
|
||||
- [x] [Mediatrack](https://www.mediatrack.cn)
|
||||
- [x] [MediaFire](https://www.mediafire.com)
|
||||
- [x] [139yun](https://yun.139.com) (Personal, Family, Group)
|
||||
- [x] [YandexDisk](https://disk.yandex.com)
|
||||
- [x] [BaiduNetdisk](http://pan.baidu.com)
|
||||
@@ -93,7 +94,6 @@ Thank you for your support and understanding of the OpenList project.
|
||||
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
|
||||
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
|
||||
- [x] [Weiyun](https://www.weiyun.com)
|
||||
|
||||
- [x] Easy to deploy and out-of-the-box
|
||||
- [x] File preview (PDF, markdown, code, plain text, ...)
|
||||
- [x] Image preview in gallery mode
|
||||
|
||||
@@ -65,6 +65,7 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3
|
||||
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
||||
- [x] Teambition([中国](https://www.teambition.com), [国际](https://us.teambition.com))
|
||||
- [x] [分秒帧](https://www.mediatrack.cn)
|
||||
- [x] [MediaFire](https://www.mediafire.com)
|
||||
- [x] [和彩云](https://yun.139.com)(个人、家庭、群组)
|
||||
- [x] [YandexDisk](https://disk.yandex.com)
|
||||
- [x] [百度网盘](http://pan.baidu.com)
|
||||
|
||||
@@ -93,6 +93,7 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい
|
||||
- [x] [OpenList](https://github.com/OpenListTeam/OpenList)
|
||||
- [x] [Teldrive](https://github.com/tgdrive/teldrive)
|
||||
- [x] [Weiyun](https://www.weiyun.com)
|
||||
- [x] [MediaFire](https://www.mediafire.com)
|
||||
- [x] 簡単にデプロイでき、すぐに使える
|
||||
- [x] ファイルプレビュー(PDF、markdown、コード、テキストなど)
|
||||
- [x] ギャラリーモードでの画像プレビュー
|
||||
|
||||
@@ -64,6 +64,7 @@ Dank u voor uw ondersteuning en begrip
|
||||
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
||||
- [x] [WebDAV](https://en.wikipedia.org/wiki/WebDAV)
|
||||
- [x] Teambition([China](https://www.teambition.com), [Internationaal](https://us.teambition.com))
|
||||
- [x] [MediaFire](https://www.mediafire.com)
|
||||
- [x] [Mediatrack](https://www.mediatrack.cn)
|
||||
- [x] [139yun](https://yun.139.com) (Persoonlijk, Familie, Groep)
|
||||
- [x] [YandexDisk](https://disk.yandex.com)
|
||||
|
||||
@@ -42,6 +42,7 @@ import (
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/lanzou"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/lenovonas_share"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/local"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediafire"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/mediatrack"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/mega"
|
||||
_ "github.com/OpenListTeam/OpenList/v4/drivers/misskey"
|
||||
|
||||
431
drivers/mediafire/driver.go
Normal file
431
drivers/mediafire/driver.go
Normal file
@@ -0,0 +1,431 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
|
||||
Date: 2025-09-21
|
||||
|
||||
Date: 2025-09-26
|
||||
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/cron"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// Mediafire implements the MediaFire storage driver.
type Mediafire struct {
	model.Storage
	Addition

	cron *cron.Cron // periodic session-token renewal job; may be nil (see Init/Drop)

	actionToken string        // cached MediaFire action token; cleared in Drop
	limiter     *rate.Limiter // global API rate limiter; nil when LimitRate <= 0

	appBase    string // web-app origin, used for Origin/Referer headers in Link
	apiBase    string // REST API base URL
	hostBase   string // main site base URL
	maxRetries int    // retry budget for API requests

	secChUa         string // sec-ch-ua header value sent with download requests
	secChUaPlatform string // sec-ch-ua-platform header value
	userAgent       string // User-Agent header value
}
|
||||
|
||||
// Config returns the static driver configuration for MediaFire.
func (d *Mediafire) Config() driver.Config {
	return config
}
|
||||
|
||||
// GetAddition exposes the user-configurable driver options.
func (d *Mediafire) GetAddition() driver.Additional {
	return &d.Addition
}
|
||||
|
||||
// Init initializes the MediaFire driver with session token and cookie validation
//
// It rejects configurations missing SessionToken or Cookie, sets up the
// optional API rate limiter, and validates the session token. On validation
// failure it renews the token and starts a cron job that re-renews it every
// 6-9 minutes to stay ahead of MediaFire's ~10 minute token expiry.
func (d *Mediafire) Init(ctx context.Context) error {
	if d.SessionToken == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing sessionToken")
	}

	if d.Cookie == "" {
		return fmt.Errorf("Init :: [MediaFire] {critical} missing Cookie")
	}
	// Setup rate limiter if rate limit is configured
	if d.LimitRate > 0 {
		d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
	}
	// Validate and refresh session token if needed
	if _, err := d.getSessionToken(ctx); err != nil {

		// NOTE(review): renewToken's result is discarded; if renewal also
		// fails, Init still returns nil — confirm this is intentional.
		d.renewToken(ctx)

		// Avoids 10 mins token expiry (6- 9)
		num := rand.Intn(4) + 6

		// NOTE(review): the renewal cron is only started on this failure
		// path, and the closure captures Init's ctx, which may be cancelled
		// after Init returns — verify cron renewals still work afterwards.
		d.cron = cron.NewCron(time.Minute * time.Duration(num))
		d.cron.Do(func() {
			// Crazy, but working way to refresh session token
			d.renewToken(ctx)
		})

	}

	return nil
}
|
||||
|
||||
// Drop cleans up driver resources
|
||||
func (d *Mediafire) Drop(ctx context.Context) error {
|
||||
// Clear cached resources
|
||||
d.actionToken = ""
|
||||
if d.cron != nil {
|
||||
d.cron.Stop()
|
||||
d.cron = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// List retrieves files and folders from the specified directory
|
||||
func (d *Mediafire) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
files, err := d.getFiles(ctx, dir.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
|
||||
return d.fileToObj(src), nil
|
||||
})
|
||||
}
|
||||
|
||||
// Link generates a direct download link for the specified file
|
||||
func (d *Mediafire) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
downloadUrl, err := d.getDirectDownloadLink(ctx, file.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := base.NoRedirectClient.R().SetDoNotParseResponse(true).SetContext(ctx).Head(downloadUrl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
_ = res.RawBody().Close()
|
||||
}()
|
||||
|
||||
if res.StatusCode() == 302 {
|
||||
downloadUrl = res.Header().Get("location")
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: downloadUrl,
|
||||
Header: http.Header{
|
||||
"Origin": []string{d.appBase},
|
||||
"Referer": []string{d.appBase + "/"},
|
||||
"sec-ch-ua": []string{d.secChUa},
|
||||
"sec-ch-ua-platform": []string{d.secChUaPlatform},
|
||||
"User-Agent": []string{d.userAgent},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MakeDir creates a new folder in the specified parent directory
|
||||
func (d *Mediafire) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"parent_key": parentDir.GetID(),
|
||||
"foldername": dirName,
|
||||
}
|
||||
|
||||
var resp MediafireFolderCreateResponse
|
||||
_, err := d.postForm(ctx, "/folder/create.php", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
created, _ := time.Parse("2006-01-02T15:04:05Z", resp.Response.CreatedUTC)
|
||||
|
||||
return &model.Object{
|
||||
ID: resp.Response.FolderKey,
|
||||
Name: resp.Response.Name,
|
||||
Size: 0,
|
||||
Modified: created,
|
||||
Ctime: created,
|
||||
IsFolder: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Move relocates a file or folder to a different parent directory
|
||||
func (d *Mediafire) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if srcObj.IsDir() {
|
||||
|
||||
endpoint = "/folder/move.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key_src": srcObj.GetID(),
|
||||
"folder_key_dst": dstDir.GetID(),
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/move.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": srcObj.GetID(),
|
||||
"folder_key": dstDir.GetID(),
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireMoveResponse
|
||||
_, err := d.postForm(ctx, endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return srcObj, nil
|
||||
}
|
||||
|
||||
// Rename changes the name of a file or folder
|
||||
func (d *Mediafire) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if srcObj.IsDir() {
|
||||
|
||||
endpoint = "/folder/update.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key": srcObj.GetID(),
|
||||
"foldername": newName,
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/update.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": srcObj.GetID(),
|
||||
"filename": newName,
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireRenameResponse
|
||||
_, err := d.postForm(ctx, endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &model.Object{
|
||||
ID: srcObj.GetID(),
|
||||
Name: newName,
|
||||
Size: srcObj.GetSize(),
|
||||
Modified: srcObj.ModTime(),
|
||||
Ctime: srcObj.CreateTime(),
|
||||
IsFolder: srcObj.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Copy creates a duplicate of a file or folder in the specified destination directory
|
||||
func (d *Mediafire) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if srcObj.IsDir() {
|
||||
|
||||
endpoint = "/folder/copy.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key_src": srcObj.GetID(),
|
||||
"folder_key_dst": dstDir.GetID(),
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/copy.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": srcObj.GetID(),
|
||||
"folder_key": dstDir.GetID(),
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireCopyResponse
|
||||
_, err := d.postForm(ctx, endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var newID string
|
||||
if srcObj.IsDir() {
|
||||
if len(resp.Response.NewFolderKeys) > 0 {
|
||||
newID = resp.Response.NewFolderKeys[0]
|
||||
}
|
||||
} else {
|
||||
if len(resp.Response.NewQuickKeys) > 0 {
|
||||
newID = resp.Response.NewQuickKeys[0]
|
||||
}
|
||||
}
|
||||
|
||||
return &model.Object{
|
||||
ID: newID,
|
||||
Name: srcObj.GetName(),
|
||||
Size: srcObj.GetSize(),
|
||||
Modified: srcObj.ModTime(),
|
||||
Ctime: srcObj.CreateTime(),
|
||||
IsFolder: srcObj.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Remove deletes a file or folder permanently
|
||||
func (d *Mediafire) Remove(ctx context.Context, obj model.Obj) error {
|
||||
var data map[string]string
|
||||
var endpoint string
|
||||
|
||||
if obj.IsDir() {
|
||||
|
||||
endpoint = "/folder/delete.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key": obj.GetID(),
|
||||
}
|
||||
} else {
|
||||
|
||||
endpoint = "/file/delete.php"
|
||||
data = map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"quick_key": obj.GetID(),
|
||||
}
|
||||
}
|
||||
|
||||
var resp MediafireRemoveResponse
|
||||
_, err := d.postForm(ctx, endpoint, data, &resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return checkAPIResult(resp.Response.Result)
|
||||
}
|
||||
|
||||
// Put uploads a file to the specified directory with support for resumable upload and quick upload
//
// Flow: (1) reuse the caller-provided SHA-256 hash or compute it by caching
// the stream; (2) ask the upload-check endpoint whether the content is
// already known; (3) if the file already exists in the account, resolve and
// return the existing entry (quick upload); (4) otherwise upload any missing
// units and poll until the server returns the final quick key.
func (d *Mediafire) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	fileHash := file.GetHash().GetHash(utils.SHA256)
	var err error

	// Try to use existing hash first, cache only if necessary
	if len(fileHash) != utils.SHA256.Width {
		_, fileHash, err = stream.CacheFullAndHash(file, &up, utils.SHA256)
		if err != nil {
			return nil, err
		}
	}

	checkResp, err := d.uploadCheck(ctx, file.GetName(), file.GetSize(), fileHash, dstDir.GetID())
	if err != nil {
		return nil, err
	}

	// Quick upload: the server already holds this content in the account.
	if checkResp.Response.HashExists == "yes" && checkResp.Response.InAccount == "yes" {
		up(100.0)
		existingFile, err := d.getExistingFileInfo(ctx, fileHash, file.GetName(), dstDir.GetID())
		if err == nil && existingFile != nil {
			// File exists, return existing file info
			return &model.Object{
				ID:   existingFile.GetID(),
				Name: file.GetName(),
				Size: file.GetSize(),
			}, nil
		}
		// If getExistingFileInfo fails, log and continue with normal upload
		// This ensures upload doesn't fail due to search issues
	}

	var pollKey string

	if checkResp.Response.ResumableUpload.AllUnitsReady != "yes" {
		// Some units are missing server-side: upload them (resumable path).
		pollKey, err = d.uploadUnits(ctx, file, checkResp, file.GetName(), fileHash, dstDir.GetID(), up)
		if err != nil {
			return nil, err
		}
	} else {
		// All units already present; reuse the existing upload key.
		pollKey = checkResp.Response.ResumableUpload.UploadKey
		up(100.0)
	}

	// Poll until the server has assembled the file and assigned a quick key.
	pollResp, err := d.pollUpload(ctx, pollKey)
	if err != nil {
		return nil, err
	}

	return &model.Object{
		ID:   pollResp.Response.Doupload.QuickKey,
		Name: file.GetName(),
		Size: file.GetSize(),
	}, nil
}
|
||||
|
||||
func (d *Mediafire) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
}
|
||||
var resp MediafireUserInfoResponse
|
||||
_, err := d.postForm(ctx, "/user/get_info.php", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
used, err := strconv.ParseUint(resp.Response.UserInfo.UsedStorageSize, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
total, err := strconv.ParseUint(resp.Response.UserInfo.StorageLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.StorageDetails{
|
||||
DiskUsage: model.DiskUsage{
|
||||
TotalSpace: total,
|
||||
FreeSpace: total - used,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*Mediafire)(nil)
|
||||
62
drivers/mediafire/meta.go
Normal file
62
drivers/mediafire/meta.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
|
||||
Date: 2025-09-21
|
||||
|
||||
Date: 2025-09-26
|
||||
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
|
||||
*/
|
||||
|
||||
import (
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
)
|
||||
|
||||
// Addition holds the user-configurable options for the MediaFire driver.
type Addition struct {
	driver.RootPath
	//driver.RootID

	// SessionToken authenticates API requests; Cookie is sent for navigation.
	SessionToken string `json:"session_token" required:"true" type:"string" help:"Required for MediaFire API"`
	Cookie       string `json:"cookie" required:"true" type:"string" help:"Required for navigation"`

	OrderBy        string  `json:"order_by" type:"select" options:"name,time,size" default:"name"`
	OrderDirection string  `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	ChunkSize      int64   `json:"chunk_size" type:"number" default:"100"`
	UploadThreads  int     `json:"upload_threads" type:"number" default:"3" help:"concurrent upload threads"`
	LimitRate      float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
}
|
||||
|
||||
// config is the static registration metadata for the MediaFire driver.
var config = driver.Config{
	Name:              "MediaFire",
	LocalSort:         false,
	OnlyLinkMFile:     false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: true,
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &Mediafire{
|
||||
appBase: "https://app.mediafire.com",
|
||||
apiBase: "https://www.mediafire.com/api/1.5",
|
||||
hostBase: "https://www.mediafire.com",
|
||||
maxRetries: 3,
|
||||
secChUa: "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"139\", \"Google Chrome\";v=\"139\"",
|
||||
secChUaPlatform: "Windows",
|
||||
userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36",
|
||||
}
|
||||
})
|
||||
}
|
||||
246
drivers/mediafire/types.go
Normal file
246
drivers/mediafire/types.go
Normal file
@@ -0,0 +1,246 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
*/
|
||||
|
||||
// MediafireRenewTokenResponse is the payload of the session-token renewal API.
type MediafireRenewTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		SessionToken      string `json:"session_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireResponse is the folder-content listing payload. Numeric fields
// (chunk size/number, more_chunks flag) are returned by the API as strings.
type MediafireResponse struct {
	Response struct {
		Action        string `json:"action"`
		FolderContent struct {
			ChunkSize   string            `json:"chunk_size"`
			ContentType string            `json:"content_type"`
			ChunkNumber string            `json:"chunk_number"`
			FolderKey   string            `json:"folderkey"`
			Folders     []MediafireFolder `json:"folders,omitempty"`
			Files       []MediafireFile   `json:"files,omitempty"`
			MoreChunks  string            `json:"more_chunks"`
		} `json:"folder_content"`
		Result string `json:"result"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireFolder is a single folder entry in a folder-content listing.
type MediafireFolder struct {
	FolderKey  string `json:"folderkey"`
	Name       string `json:"name"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
}

// MediafireFile is a single file entry in a folder-content listing; Size is
// a decimal string as returned by the API.
type MediafireFile struct {
	QuickKey   string `json:"quickkey"`
	Filename   string `json:"filename"`
	Size       string `json:"size"`
	Created    string `json:"created"`
	CreatedUTC string `json:"created_utc"`
	MimeType   string `json:"mimetype"`
}
|
||||
|
||||
// File is the driver's normalized view of a MediaFire entry (file or folder).
type File struct {
	ID         string
	Name       string
	Size       int64
	CreatedUTC string
	IsFolder   bool
}

// FolderContentResponse aggregates one listing chunk; MoreChunks signals that
// further pages are available.
type FolderContentResponse struct {
	Folders    []MediafireFolder
	Files      []MediafireFile
	MoreChunks bool
}
|
||||
|
||||
// MediafireLinksResponse is the payload of the link-listing API, including
// per-file view/download URLs and one-time link variants.
type MediafireLinksResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			View           string `json:"view"`
			NormalDownload string `json:"normal_download"`
			OneTime        struct {
				Download string `json:"download"`
				View     string `json:"view"`
			} `json:"one_time"`
		} `json:"links"`
		OneTimeKeyRequestCount    string `json:"one_time_key_request_count"`
		OneTimeKeyRequestMaxCount string `json:"one_time_key_request_max_count"`
		Result                    string `json:"result"`
		CurrentAPIVersion         string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireDirectDownloadResponse is the payload of the direct-download link
// API used by Link.
type MediafireDirectDownloadResponse struct {
	Response struct {
		Action string `json:"action"`
		Links  []struct {
			QuickKey       string `json:"quickkey"`
			DirectDownload string `json:"direct_download"`
		} `json:"links"`
		DirectDownloadFreeBandwidth string `json:"direct_download_free_bandwidth"`
		Result                      string `json:"result"`
		CurrentAPIVersion           string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireFolderCreateResponse is the payload of /folder/create.php; count
// and revision fields are returned as strings.
type MediafireFolderCreateResponse struct {
	Response struct {
		Action            string `json:"action"`
		FolderKey         string `json:"folder_key"`
		UploadKey         string `json:"upload_key"`
		ParentFolderKey   string `json:"parent_folderkey"`
		Name              string `json:"name"`
		Description       string `json:"description"`
		Created           string `json:"created"`
		CreatedUTC        string `json:"created_utc"`
		Privacy           string `json:"privacy"`
		FileCount         string `json:"file_count"`
		FolderCount       string `json:"folder_count"`
		Revision          string `json:"revision"`
		DropboxEnabled    string `json:"dropbox_enabled"`
		Flag              string `json:"flag"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireMoveResponse is the payload of the folder/file move endpoints.
type MediafireMoveResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewNames          []string `json:"new_names"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}

// MediafireRenameResponse is the payload of the folder/file update (rename)
// endpoints.
type MediafireRenameResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireCopyResponse models the file/copy.php and folder/copy.php API
// responses. NewQuickKeys is populated for file copies, NewFolderKeys for
// folder copies.
type MediafireCopyResponse struct {
	Response struct {
		Action            string   `json:"action"`
		Asynchronous      string   `json:"asynchronous,omitempty"`
		NewQuickKeys      []string `json:"new_quickkeys,omitempty"`
		NewFolderKeys     []string `json:"new_folderkeys,omitempty"`
		SkippedCount      string   `json:"skipped_count,omitempty"`
		OtherCount        string   `json:"other_count,omitempty"`
		Result            string   `json:"result"`
		CurrentAPIVersion string   `json:"current_api_version"`
		NewDeviceRevision int      `json:"new_device_revision"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireRemoveResponse models the delete/trash API response.
type MediafireRemoveResponse struct {
	Response struct {
		Action            string `json:"action"`
		Asynchronous      string `json:"asynchronous,omitempty"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
		NewDeviceRevision int    `json:"new_device_revision"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireCheckResponse models the upload/check.php API response. It reports
// whether the hash/file is already known to MediaFire (instant upload) and,
// for resumable uploads, the unit size/count plus a bitmap of units the
// server has already received.
type MediafireCheckResponse struct {
	Response struct {
		Action     string `json:"action"`
		HashExists string `json:"hash_exists"`
		InAccount  string `json:"in_account"`
		InFolder   string `json:"in_folder"`
		FileExists string `json:"file_exists"`
		// ResumableUpload describes the chunked-upload session state.
		ResumableUpload struct {
			AllUnitsReady string `json:"all_units_ready"`
			NumberOfUnits string `json:"number_of_units"`
			UnitSize      string `json:"unit_size"`
			// Bitmap marks already-uploaded units; each word carries 16 unit
			// bits (see isUnitUploaded). Words arrive as decimal strings.
			Bitmap struct {
				Count string   `json:"count"`
				Words []string `json:"words"`
			} `json:"bitmap"`
			UploadKey string `json:"upload_key"`
		} `json:"resumable_upload"`
		AvailableSpace       string `json:"available_space"`
		UsedStorageSize      string `json:"used_storage_size"`
		StorageLimit         string `json:"storage_limit"`
		StorageLimitExceeded string `json:"storage_limit_exceeded"`
		// UploadURL lists the endpoints offered for this upload session.
		UploadURL struct {
			Simple            string `json:"simple"`
			SimpleFallback    string `json:"simple_fallback"`
			Resumable         string `json:"resumable"`
			ResumableFallback string `json:"resumable_fallback"`
		} `json:"upload_url"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
// MediafireActionTokenResponse models the user/get_action_token.php API
// response; ActionToken is a short-lived token used for upload operations.
type MediafireActionTokenResponse struct {
	Response struct {
		Action            string `json:"action"`
		ActionToken       string `json:"action_token"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
|
||||
// MediafirePollResponse models the upload/poll_upload.php API response used
// to track the server-side processing state of a finished upload.
type MediafirePollResponse struct {
	Response struct {
		Action string `json:"action"`
		// Doupload carries the per-upload status record.
		Doupload struct {
			Result      string `json:"result"`
			Status      string `json:"status"`
			Description string `json:"description"`
			QuickKey    string `json:"quickkey"`
			Hash        string `json:"hash"`
			Filename    string `json:"filename"`
			Size        string `json:"size"`
			Created     string `json:"created"`
			CreatedUTC  string `json:"created_utc"`
			Revision    string `json:"revision"`
		} `json:"doupload"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
|
||||
// MediafireFileSearchResponse models the file/get_info.php API response
// (used here for hash-based lookup); FileInfo holds the matching entries.
type MediafireFileSearchResponse struct {
	Response struct {
		Action            string `json:"action"`
		FileInfo          []File `json:"file_info"`
		Result            string `json:"result"`
		CurrentAPIVersion string `json:"current_api_version"`
	} `json:"response"`
}
|
||||
|
||||
type MediafireUserInfoResponse struct {
|
||||
Response struct {
|
||||
Action string `json:"action"`
|
||||
UserInfo struct {
|
||||
Email string `json:"string"`
|
||||
DisplayName string `json:"display_name"`
|
||||
UsedStorageSize string `json:"used_storage_size"`
|
||||
StorageLimit string `json:"storage_limit"`
|
||||
} `json:"user_info"`
|
||||
Result string `json:"result"`
|
||||
CurrentAPIVersion string `json:"current_api_version"`
|
||||
} `json:"response"`
|
||||
}
|
||||
729
drivers/mediafire/util.go
Normal file
729
drivers/mediafire/util.go
Normal file
@@ -0,0 +1,729 @@
|
||||
package mediafire
|
||||
|
||||
/*
|
||||
Package mediafire
|
||||
Author: Da3zKi7<da3zki7@duck.com>
|
||||
Date: 2025-09-11
|
||||
|
||||
D@' 3z K!7 - The King Of Cracking
|
||||
|
||||
Modifications by ILoveScratch2<ilovescratch@foxmail.com>
|
||||
Date: 2025-09-21
|
||||
|
||||
Date: 2025-09-26
|
||||
Final opts by @Suyunjing @j2rong4cn @KirCute @Da3zKi7
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OpenListTeam/OpenList/v4/drivers/base"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/driver"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/model"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/op"
|
||||
"github.com/OpenListTeam/OpenList/v4/internal/stream"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
|
||||
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
// checkAPIResult returns nil when a MediaFire API call reported "Success",
// and a descriptive error for any other result string.
func checkAPIResult(result string) error {
	if result == "Success" {
		return nil
	}
	return fmt.Errorf("MediaFire API error: %s", result)
}
|
||||
|
||||
// getSessionToken fetches a fresh session token from MediaFire's web
// application endpoint using the stored browser Cookie. On success it also
// captures any Set-Cookie values into d.Cookie, stores the token on the
// driver, persists the driver storage, and returns the new token.
func (d *Mediafire) getSessionToken(ctx context.Context) (string, error) {
	// Honor the driver-wide rate limit before issuing the request.
	if d.limiter != nil {
		if err := d.limiter.Wait(ctx); err != nil {
			return "", fmt.Errorf("rate limit wait failed: %w", err)
		}
	}

	tokenURL := d.hostBase + "/application/get_session_token.php"

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, tokenURL, nil)
	if err != nil {
		return "", err
	}

	// This is the web app's endpoint (not the public API), so it expects a
	// realistic browser fingerprint; the Sec-* / UA values come from config.
	req.Header.Set("Accept", "*/*")
	req.Header.Set("Accept-Encoding", "gzip, deflate, br, zstd")
	req.Header.Set("Accept-Language", "en-US,en;q=0.9")
	req.Header.Set("Content-Length", "0")
	req.Header.Set("Cookie", d.Cookie)
	req.Header.Set("DNT", "1")
	req.Header.Set("Origin", d.hostBase)
	req.Header.Set("Priority", "u=1, i")
	req.Header.Set("Referer", (d.hostBase + "/"))
	req.Header.Set("Sec-Ch-Ua", d.secChUa)
	req.Header.Set("Sec-Ch-Ua-Mobile", "?0")
	req.Header.Set("Sec-Ch-Ua-Platform", d.secChUaPlatform)
	req.Header.Set("Sec-Fetch-Dest", "empty")
	req.Header.Set("Sec-Fetch-Mode", "cors")
	req.Header.Set("Sec-Fetch-Site", "same-site")
	req.Header.Set("User-Agent", d.userAgent)
	// req.Header.Set("Connection", "keep-alive")

	resp, err := base.HttpClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	// Only the session_token field of the response is needed here.
	var tokenResp struct {
		Response struct {
			SessionToken string `json:"session_token"`
		} `json:"response"`
	}

	if resp.StatusCode == 200 {
		if err := json.Unmarshal(body, &tokenResp); err != nil {
			return "", err
		}

		if tokenResp.Response.SessionToken == "" {
			return "", fmt.Errorf("empty session token received")
		}

		// Capture refreshed cookies so later requests stay authenticated.
		cookieMap := make(map[string]string)
		for _, cookie := range resp.Cookies() {
			cookieMap[cookie.Name] = cookie.Value
		}

		if len(cookieMap) > 0 {

			// NOTE(review): map iteration order is random, so the rebuilt
			// Cookie header ordering is nondeterministic; servers generally
			// accept any order, but saved state will differ between runs.
			var cookies []string
			for name, value := range cookieMap {
				cookies = append(cookies, fmt.Sprintf("%s=%s", name, value))
			}
			d.Cookie = strings.Join(cookies, "; ")
			op.MustSaveDriverStorage(d)
		}

	} else {
		return "", fmt.Errorf("getSessionToken :: failed to get session token, status code: %d", resp.StatusCode)
	}

	d.SessionToken = tokenResp.Response.SessionToken

	// Persist the new token so it survives restarts.
	op.MustSaveDriverStorage(d)

	return d.SessionToken, nil
}
|
||||
|
||||
// renewToken refreshes the current session token when expired
|
||||
func (d *Mediafire) renewToken(ctx context.Context) error {
|
||||
query := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
}
|
||||
|
||||
var resp MediafireRenewTokenResponse
|
||||
_, err := d.postForm(ctx, "/user/renew_session_token.php", query, &resp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to renew token: %w", err)
|
||||
}
|
||||
|
||||
// fmt.Printf("getInfo :: Raw response: %s\n", string(body))
|
||||
// fmt.Printf("getInfo :: Parsed response: %+v\n", resp)
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return fmt.Errorf("MediaFire token renewal failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
d.SessionToken = resp.Response.SessionToken
|
||||
|
||||
// fmt.Printf("Init :: Renew Session Token: %s", resp.Response.Result)
|
||||
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFiles(ctx context.Context, folderKey string) ([]File, error) {
|
||||
// Pre-allocate slice with reasonable capacity to reduce memory allocations
|
||||
files := make([]File, 0, d.ChunkSize*2) // Estimate: ChunkSize for files + folders
|
||||
hasMore := true
|
||||
chunkNumber := 1
|
||||
|
||||
for hasMore {
|
||||
resp, err := d.getFolderContent(ctx, folderKey, chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Process folders and files in single loop to improve cache locality
|
||||
totalItems := len(resp.Folders) + len(resp.Files)
|
||||
if cap(files)-len(files) < totalItems {
|
||||
// Grow slice if needed
|
||||
newFiles := make([]File, len(files), len(files)+totalItems+int(d.ChunkSize))
|
||||
copy(newFiles, files)
|
||||
files = newFiles
|
||||
}
|
||||
|
||||
for _, folder := range resp.Folders {
|
||||
files = append(files, File{
|
||||
ID: folder.FolderKey,
|
||||
Name: folder.Name,
|
||||
Size: 0,
|
||||
CreatedUTC: folder.CreatedUTC,
|
||||
IsFolder: true,
|
||||
})
|
||||
}
|
||||
|
||||
for _, file := range resp.Files {
|
||||
size, _ := strconv.ParseInt(file.Size, 10, 64)
|
||||
files = append(files, File{
|
||||
ID: file.QuickKey,
|
||||
Name: file.Filename,
|
||||
Size: size,
|
||||
CreatedUTC: file.CreatedUTC,
|
||||
IsFolder: false,
|
||||
})
|
||||
}
|
||||
|
||||
hasMore = resp.MoreChunks
|
||||
chunkNumber++
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFolderContent(ctx context.Context, folderKey string, chunkNumber int) (*FolderContentResponse, error) {
|
||||
foldersResp, err := d.getFolderContentByType(ctx, folderKey, "folders", chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filesResp, err := d.getFolderContentByType(ctx, folderKey, "files", chunkNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &FolderContentResponse{
|
||||
Folders: foldersResp.Response.FolderContent.Folders,
|
||||
Files: filesResp.Response.FolderContent.Files,
|
||||
MoreChunks: foldersResp.Response.FolderContent.MoreChunks == "yes" || filesResp.Response.FolderContent.MoreChunks == "yes",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFolderContentByType(ctx context.Context, folderKey, contentType string, chunkNumber int) (*MediafireResponse, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"folder_key": folderKey,
|
||||
"content_type": contentType,
|
||||
"chunk": strconv.Itoa(chunkNumber),
|
||||
"chunk_size": strconv.FormatInt(d.ChunkSize, 10),
|
||||
"details": "yes",
|
||||
"order_direction": d.OrderDirection,
|
||||
"order_by": d.OrderBy,
|
||||
"filter": "",
|
||||
}
|
||||
|
||||
var resp MediafireResponse
|
||||
_, err := d.postForm(ctx, "/folder/get_content.php", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// fileToObj converts MediaFire file data to model.ObjThumb with thumbnail support
|
||||
func (d *Mediafire) fileToObj(f File) *model.ObjThumb {
|
||||
created, _ := time.Parse("2006-01-02T15:04:05Z", f.CreatedUTC)
|
||||
|
||||
var thumbnailURL string
|
||||
if !f.IsFolder && f.ID != "" {
|
||||
thumbnailURL = d.hostBase + "/convkey/acaa/" + f.ID + "3g.jpg"
|
||||
}
|
||||
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: f.ID,
|
||||
// Path: "",
|
||||
Name: f.Name,
|
||||
Size: f.Size,
|
||||
Modified: created,
|
||||
Ctime: created,
|
||||
IsFolder: f.IsFolder,
|
||||
},
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: thumbnailURL,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Mediafire) setCommonHeaders(req *resty.Request) {
|
||||
req.SetHeaders(map[string]string{
|
||||
"Cookie": d.Cookie,
|
||||
"User-Agent": d.userAgent,
|
||||
"Origin": d.appBase,
|
||||
"Referer": d.appBase + "/",
|
||||
})
|
||||
}
|
||||
|
||||
// apiRequest performs HTTP request to MediaFire API with rate limiting and common headers
|
||||
func (d *Mediafire) apiRequest(ctx context.Context, method, endpoint string, queryParams, formData map[string]string, resp interface{}) ([]byte, error) {
|
||||
if d.limiter != nil {
|
||||
if err := d.limiter.Wait(ctx); err != nil {
|
||||
return nil, fmt.Errorf("rate limit wait failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
req := base.RestyClient.R()
|
||||
req.SetContext(ctx)
|
||||
d.setCommonHeaders(req)
|
||||
|
||||
// Set query parameters for GET requests
|
||||
if queryParams != nil {
|
||||
req.SetQueryParams(queryParams)
|
||||
}
|
||||
|
||||
// Set form data for POST requests
|
||||
if formData != nil {
|
||||
req.SetFormData(formData)
|
||||
req.SetHeader("Content-Type", "application/x-www-form-urlencoded")
|
||||
}
|
||||
|
||||
// Set response object if provided
|
||||
if resp != nil {
|
||||
req.SetResult(resp)
|
||||
}
|
||||
|
||||
var res *resty.Response
|
||||
var err error
|
||||
|
||||
// Execute request based on method
|
||||
switch method {
|
||||
case "GET":
|
||||
res, err = req.Get(d.apiBase + endpoint)
|
||||
case "POST":
|
||||
res, err = req.Post(d.apiBase + endpoint)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported HTTP method: %s", method)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res.Body(), nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) getForm(ctx context.Context, endpoint string, query map[string]string, resp interface{}) ([]byte, error) {
|
||||
return d.apiRequest(ctx, "GET", endpoint, query, nil, resp)
|
||||
}
|
||||
|
||||
func (d *Mediafire) postForm(ctx context.Context, endpoint string, data map[string]string, resp interface{}) ([]byte, error) {
|
||||
return d.apiRequest(ctx, "POST", endpoint, nil, data, resp)
|
||||
}
|
||||
|
||||
func (d *Mediafire) getDirectDownloadLink(ctx context.Context, fileID string) (string, error) {
|
||||
data := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"quick_key": fileID,
|
||||
"link_type": "direct_download",
|
||||
"response_format": "json",
|
||||
}
|
||||
|
||||
var resp MediafireDirectDownloadResponse
|
||||
_, err := d.getForm(ctx, "/file/get_links.php", data, &resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(resp.Response.Links) == 0 {
|
||||
return "", fmt.Errorf("no download links found")
|
||||
}
|
||||
|
||||
return resp.Response.Links[0].DirectDownload, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) uploadCheck(ctx context.Context, filename string, filesize int64, filehash, folderKey string) (*MediafireCheckResponse, error) {
|
||||
actionToken, err := d.getActionToken(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get action token: %w", err)
|
||||
}
|
||||
|
||||
query := map[string]string{
|
||||
"session_token": actionToken, /* d.SessionToken */
|
||||
"filename": filename,
|
||||
"size": strconv.FormatInt(filesize, 10),
|
||||
"hash": filehash,
|
||||
"folder_key": folderKey,
|
||||
"resumable": "yes",
|
||||
"response_format": "json",
|
||||
}
|
||||
|
||||
var resp MediafireCheckResponse
|
||||
_, err = d.postForm(ctx, "/upload/check.php", query, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// fmt.Printf("uploadCheck :: Raw response: %s\n", string(body))
|
||||
// fmt.Printf("uploadCheck :: Parsed response: %+v\n", resp)
|
||||
|
||||
// fmt.Printf("uploadCheck :: ResumableUpload section: %+v\n", resp.Response.ResumableUpload)
|
||||
// fmt.Printf("uploadCheck :: Upload key specifically: '%s'\n", resp.Response.ResumableUpload.UploadKey)
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// uploadUnits uploads the file in unit-sized chunks through MediaFire's
// resumable upload API. Units the server already holds (per the bitmap in
// checkResp) are skipped, and up to d.UploadThreads units are uploaded
// concurrently with per-unit retries. It returns the upload key reported by
// the server, for use with pollUpload.
func (d *Mediafire) uploadUnits(ctx context.Context, file model.FileStreamer, checkResp *MediafireCheckResponse, filename, fileHash, folderKey string, up driver.UpdateProgress) (string, error) {
	// Unit geometry and upload key come from the upload/check response; the
	// values are decimal strings, so parse errors silently yield zero values.
	unitSize, _ := strconv.ParseInt(checkResp.Response.ResumableUpload.UnitSize, 10, 64)
	numUnits, _ := strconv.Atoi(checkResp.Response.ResumableUpload.NumberOfUnits)
	uploadKey := checkResp.Response.ResumableUpload.UploadKey

	// Parse the already-uploaded bitmap words; unparsable words are dropped.
	stringWords := checkResp.Response.ResumableUpload.Bitmap.Words
	intWords := make([]int, 0, len(stringWords))
	for _, word := range stringWords {
		if intWord, err := strconv.Atoi(word); err == nil {
			intWords = append(intWords, intWord)
		}
	}

	// Intelligent buffer sizing for large files.
	bufferSize := int(unitSize)
	fileSize := file.GetSize()

	// Split in chunks
	if fileSize > d.ChunkSize*1024*1024 {

		// Large file: cap the buffer at ChunkSize MiB (default = 100MB).
		bufferSize = min(int(fileSize), int(d.ChunkSize)*1024*1024)
	} else if fileSize > 10*1024*1024 {
		// Medium file: buffer the whole file for concurrent section access.
		bufferSize = int(fileSize)
	}

	// The section reader hands out independent per-unit readers over the stream.
	ss, err := stream.NewStreamSectionReader(file, bufferSize, &up)
	if err != nil {
		return "", err
	}

	// Cal minimal parallel upload threads, allows MediaFire resumable upload to rule it over custom value
	// If file is big, likely will respect d.UploadThreads instead of MediaFire's suggestion i.e. 5 threads
	thread := min(numUnits, d.UploadThreads)

	// Ordered group: sections must be claimed in file order for non-seekable
	// streams; each unit retries up to 3 times with exponential backoff.
	threadG, uploadCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))

	var finalUploadKey string
	var keyMutex sync.Mutex

	fileSize = file.GetSize()
	for unitID := range numUnits {
		if utils.IsCanceled(uploadCtx) {
			break
		}

		start := int64(unitID) * unitSize
		size := unitSize
		// The final unit may be shorter than unitSize.
		if start+size > fileSize {
			size = fileSize - start
		}

		// Captured by the lifecycle closures below; one set per unit.
		var reader *stream.SectionReader
		var rateLimitedRd io.Reader
		var unitHash string

		// Use lifecycle pattern for proper resource management:
		// Before claims the section (in order), Do uploads with retries,
		// After reports progress and frees the section buffer.
		threadG.GoWithLifecycle(errgroup.Lifecycle{
			Before: func(ctx context.Context) error {
				// Skip units the server already has, but still consume the
				// bytes so the stream position stays in sync.
				if d.isUnitUploaded(intWords, unitID) {
					return ss.DiscardSection(start, size)
				}

				var err error
				reader, err = ss.GetSectionReader(start, size)
				if err != nil {
					return err
				}
				rateLimitedRd = driver.NewLimitedUploadStream(ctx, reader)
				return nil
			},
			Do: func(ctx context.Context) error {
				if reader == nil {
					return nil // Skip if reader is not initialized (already uploaded)
				}

				// Hash the unit once; the cached hash is reused on retries.
				if unitHash == "" {
					reader.Seek(0, io.SeekStart)
					var err error
					unitHash, err = utils.HashReader(utils.SHA256, reader)
					if err != nil {
						return err
					}
				}
				// Rewind so the upload body starts at the unit's beginning.
				reader.Seek(0, io.SeekStart)

				// Perform upload

				actionToken, err := d.getActionToken(ctx)
				if err != nil {
					return err
				}
				if d.limiter != nil {
					if err := d.limiter.Wait(ctx); err != nil {
						return fmt.Errorf("rate limit wait failed: %w", err)
					}
				}

				url := d.apiBase + "/upload/resumable.php"
				req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, rateLimitedRd)
				if err != nil {
					return err
				}

				q := req.URL.Query()
				q.Add("folder_key", folderKey)
				q.Add("response_format", "json")
				q.Add("session_token", actionToken)
				q.Add("key", uploadKey)
				req.URL.RawQuery = q.Encode()

				// Per-unit metadata travels in x-* headers; the body is the
				// raw unit bytes.
				req.Header.Set("x-filehash", fileHash)
				req.Header.Set("x-filesize", strconv.FormatInt(fileSize, 10))
				req.Header.Set("x-unit-id", strconv.Itoa(unitID))
				req.Header.Set("x-unit-size", strconv.FormatInt(size, 10))
				req.Header.Set("x-unit-hash", unitHash)
				req.Header.Set("x-filename", filename)
				req.Header.Set("Content-Type", "application/octet-stream")
				req.ContentLength = size

				res, err := base.HttpClient.Do(req)
				if err != nil {
					return err
				}
				defer res.Body.Close()

				body, err := io.ReadAll(res.Body)
				if err != nil {
					return fmt.Errorf("failed to read response body: %v", err)
				}

				// Only the doupload key and result are needed from the reply.
				var uploadResp struct {
					Response struct {
						Doupload struct {
							Key string `json:"key"`
						} `json:"doupload"`
						Result string `json:"result"`
					} `json:"response"`
				}

				if err := json.Unmarshal(body, &uploadResp); err != nil {
					return fmt.Errorf("failed to parse response: %v", err)
				}

				if res.StatusCode != 200 {
					return fmt.Errorf("resumable upload failed with status %d", res.StatusCode)
				}

				// Thread-safe update of final upload key.
				// NOTE(review): every unit overwrites this value, so the one
				// kept is from whichever unit finishes last — presumably any
				// unit's key works for poll_upload; confirm against the API.
				keyMutex.Lock()
				finalUploadKey = uploadResp.Response.Doupload.Key
				keyMutex.Unlock()

				return nil
			},
			After: func(err error) {
				// Progress is reported as percentage of completed units.
				up(float64(threadG.Success()) * 100 / float64(numUnits))
				if reader != nil {
					// Cleanup resources
					ss.FreeSectionReader(reader)
				}
			},
		})
	}

	if err := threadG.Wait(); err != nil {
		return "", err
	}

	return finalUploadKey, nil
}
|
||||
|
||||
/*func (d *Mediafire) uploadSingleUnit(ctx context.Context, file model.FileStreamer, unitID int, unitSize int64, fileHash, filename, uploadKey, folderKey string, fileSize int64) (string, error) {
|
||||
start := int64(unitID) * unitSize
|
||||
size := unitSize
|
||||
|
||||
if start+size > fileSize {
|
||||
size = fileSize - start
|
||||
}
|
||||
|
||||
unitData := make([]byte, size)
|
||||
_, err := file.Read(unitData)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return d.resumableUpload(ctx, folderKey, uploadKey, unitData, unitID, fileHash, filename, fileSize)
|
||||
}*/
|
||||
|
||||
func (d *Mediafire) getActionToken(ctx context.Context) (string, error) {
|
||||
if d.actionToken != "" {
|
||||
return d.actionToken, nil
|
||||
}
|
||||
|
||||
data := map[string]string{
|
||||
"type": "upload",
|
||||
"lifespan": "1440",
|
||||
"response_format": "json",
|
||||
"session_token": d.SessionToken,
|
||||
}
|
||||
|
||||
var resp MediafireActionTokenResponse
|
||||
_, err := d.postForm(ctx, "/user/get_action_token.php", data, &resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return "", fmt.Errorf("MediaFire action token failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
return resp.Response.ActionToken, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) pollUpload(ctx context.Context, key string) (*MediafirePollResponse, error) {
|
||||
actionToken, err := d.getActionToken(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get action token: %w", err)
|
||||
}
|
||||
|
||||
// fmt.Printf("Debug Key: %+v\n", key)
|
||||
|
||||
query := map[string]string{
|
||||
"key": key,
|
||||
"response_format": "json",
|
||||
"session_token": actionToken, /* d.SessionToken */
|
||||
}
|
||||
|
||||
var resp MediafirePollResponse
|
||||
_, err = d.postForm(ctx, "/upload/poll_upload.php", query, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// fmt.Printf("pollUpload :: Raw response: %s\n", string(body))
|
||||
// fmt.Printf("pollUpload :: Parsed response: %+v\n", resp)
|
||||
|
||||
// fmt.Printf("pollUpload :: Debug Result: %+v\n", resp.Response.Result)
|
||||
|
||||
if err := checkAPIResult(resp.Response.Result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (d *Mediafire) isUnitUploaded(words []int, unitID int) bool {
|
||||
wordIndex := unitID / 16
|
||||
bitIndex := unitID % 16
|
||||
if wordIndex >= len(words) {
|
||||
return false
|
||||
}
|
||||
return (words[wordIndex]>>bitIndex)&1 == 1
|
||||
}
|
||||
|
||||
func (d *Mediafire) getExistingFileInfo(ctx context.Context, fileHash, filename, folderKey string) (*model.ObjThumb, error) {
|
||||
// First try to find by hash directly (most efficient)
|
||||
if fileInfo, err := d.getFileByHash(ctx, fileHash); err == nil && fileInfo != nil {
|
||||
return fileInfo, nil
|
||||
}
|
||||
|
||||
// If hash search fails, search in the target folder
|
||||
// This is a fallback method in case the file exists but hash search doesn't work
|
||||
files, err := d.getFiles(ctx, folderKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if file.Name == filename && !file.IsFolder {
|
||||
return d.fileToObj(file), nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("existing file not found")
|
||||
}
|
||||
|
||||
func (d *Mediafire) getFileByHash(ctx context.Context, hash string) (*model.ObjThumb, error) {
|
||||
query := map[string]string{
|
||||
"session_token": d.SessionToken,
|
||||
"response_format": "json",
|
||||
"hash": hash,
|
||||
}
|
||||
|
||||
var resp MediafireFileSearchResponse
|
||||
_, err := d.postForm(ctx, "/file/get_info.php", query, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Response.Result != "Success" {
|
||||
return nil, fmt.Errorf("MediaFire file search failed: %s", resp.Response.Result)
|
||||
}
|
||||
|
||||
if len(resp.Response.FileInfo) == 0 {
|
||||
return nil, fmt.Errorf("file not found by hash")
|
||||
}
|
||||
|
||||
file := resp.Response.FileInfo[0]
|
||||
return d.fileToObj(file), nil
|
||||
}
|
||||
@@ -200,6 +200,21 @@ func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *mode
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
// DiscardSection advances the reader past length bytes starting at off
// without handing out a section. For a non-cached stream (GetFile() == nil)
// the request must be sequential (off equal to the current offset) and the
// bytes are drained from the underlying stream; for a cached file only the
// bookkeeping offset is advanced.
// Not thread-safe.
func (ss *StreamSectionReader) DiscardSection(off int64, length int64) error {
	if ss.file.GetFile() == nil {
		// Non-seekable stream: can only skip forward from the current offset.
		if off != ss.off {
			return fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
		}
		_, err := utils.CopyWithBufferN(io.Discard, ss.file, length)
		if err != nil {
			return fmt.Errorf("failed to skip data: (expect =%d) %w", length, err)
		}
	}
	ss.off += length
	return nil
}
|
||||
|
||||
// 线程不安全
|
||||
func (ss *StreamSectionReader) GetSectionReader(off, length int64) (*SectionReader, error) {
|
||||
var cache io.ReaderAt = ss.file.GetFile()
|
||||
|
||||
Reference in New Issue
Block a user