package stream

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/errs"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/net"
	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
	"github.com/OpenListTeam/OpenList/v4/pkg/pool"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
	"github.com/rclone/rclone/lib/mmap"
	log "github.com/sirupsen/logrus"
)
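// RangeReaderFunc adapts an ordinary function to the model.RangeReaderIF
// interface, in the same spirit as http.HandlerFunc.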
type RangeReaderFunc func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)

func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
	return f(ctx, httpRange)
}
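// GetRangeReaderFromLink builds a model.RangeReaderIF for a link. A link may
// carry a driver-native RangeReader, a plain URL, or both; Concurrency and
// PartSize hints on the link switch the result to a multi-part downloader.
//
// A minimal usage sketch (ctx, size, and link are assumed to be in scope):
//
//	rr, err := GetRangeReaderFromLink(size, link)
//	if err != nil {
//		return err
//	}
//	rc, err := rr.RangeRead(ctx, http_range.Range{Start: 0, Length: 1024})
//	if err != nil {
//		return err
//	}
//	defer rc.Close()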
func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
	if link.RangeReader != nil {
		if link.Concurrency < 1 && link.PartSize < 1 {
			return link.RangeReader, nil
		}
		down := net.NewDownloader(func(d *net.Downloader) {
			d.Concurrency = link.Concurrency
			d.PartSize = link.PartSize
			d.HttpClient = net.GetRangeReaderHttpRequestFunc(link.RangeReader)
		})
		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			return down.Download(ctx, &net.HttpRequestParams{
				Range: httpRange,
				Size:  size,
			})
		}
		// A RangeReader can only be rate-limited inside the driver itself.
		return RangeReaderFunc(rangeReader), nil
	}

	if len(link.URL) == 0 {
		return nil, errors.New("invalid link: must have at least one of URL or RangeReader")
	}

	if link.Concurrency > 0 || link.PartSize > 0 {
		down := net.NewDownloader(func(d *net.Downloader) {
			d.Concurrency = link.Concurrency
			d.PartSize = link.PartSize
			d.HttpClient = func(ctx context.Context, params *net.HttpRequestParams) (*http.Response, error) {
				if ServerDownloadLimit == nil {
					return net.DefaultHttpRequestFunc(ctx, params)
				}
				resp, err := net.DefaultHttpRequestFunc(ctx, params)
				if err == nil && resp.Body != nil {
					resp.Body = &RateLimitReader{
						Ctx:     ctx,
						Reader:  resp.Body,
						Limiter: ServerDownloadLimit,
					}
				}
				return resp, err
			}
		})
		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
			header := net.ProcessHeader(requestHeader, link.Header)
			return down.Download(ctx, &net.HttpRequestParams{
				Range:     httpRange,
				Size:      size,
				URL:       link.URL,
				HeaderRef: header,
			})
		}
		return RangeReaderFunc(rangeReader), nil
	}

	rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
		if httpRange.Length < 0 || httpRange.Start+httpRange.Length > size {
			httpRange.Length = size - httpRange.Start
		}
		requestHeader, _ := ctx.Value(conf.RequestHeaderKey).(http.Header)
		header := net.ProcessHeader(requestHeader, link.Header)
		header = http_range.ApplyRangeToHttpHeader(httpRange, header)

		response, err := net.RequestHttp(ctx, "GET", header, link.URL)
		if err != nil {
			if _, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
				return nil, err
			}
			return nil, fmt.Errorf("http request failure: %w", err)
		}
		if ServerDownloadLimit != nil {
			response.Body = &RateLimitReader{
				Ctx:     ctx,
				Reader:  response.Body,
				Limiter: ServerDownloadLimit,
			}
		}
		if httpRange.Start == 0 && httpRange.Length == size ||
			response.StatusCode == http.StatusPartialContent ||
			checkContentRange(&response.Header, httpRange.Start) {
			return response.Body, nil
		} else if response.StatusCode == http.StatusOK {
			log.Warnf("remote http server does not support range requests, expect low performance!")
			readCloser, err := net.GetRangedHttpReader(response.Body, httpRange.Start, httpRange.Length)
			if err != nil {
				return nil, err
			}
			return readCloser, nil
		}
		return response.Body, nil
	}
	return RangeReaderFunc(rangeReader), nil
}
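// GetRangeReaderFromMFile serves ranges directly from an in-memory or on-disk
// model.File by wrapping it in an io.SectionReader; out-of-bounds requests are
// clamped to the file size.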
func GetRangeReaderFromMFile(size int64, file model.File) *model.FileRangeReader {
	return &model.FileRangeReader{
		RangeReaderIF: RangeReaderFunc(func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			length := httpRange.Length
			if length < 0 || httpRange.Start+length > size {
				length = size - httpRange.Start
			}
			return &model.FileCloser{File: io.NewSectionReader(file, httpRange.Start, length)}, nil
		}),
	}
}
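// checkContentRange reports whether the Content-Range response header starts
// at the requested offset.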
// 139 Cloud does not properly return the 206 status code, so work around it here.
func checkContentRange(header *http.Header, offset int64) bool {
	start, _, err := http_range.ParseContentRange(header.Get("Content-Range"))
	if err != nil {
		log.Warnf("failed to parse Content-Range, will ignore, err=%s", err)
	}
	return start == offset
}
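// ReaderWithCtx cancels reads once its context is done, turning a plain
// io.Reader into one that honors request cancellation.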
type ReaderWithCtx struct {
	io.Reader
	Ctx context.Context
}

func (r *ReaderWithCtx) Read(p []byte) (n int, err error) {
	if utils.IsCanceled(r.Ctx) {
		return 0, r.Ctx.Err()
	}
	return r.Reader.Read(p)
}

func (r *ReaderWithCtx) Close() error {
	if c, ok := r.Reader.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
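// CacheFullAndHash caches the entire stream while feeding it through the given
// hash function in a single pass, returning the cached file and the hex digest.
//
// A minimal usage sketch, assuming the utils.MD5 hash type and a hypothetical
// stream `fs` and progress pointer `up`:
//
//	file, sum, err := CacheFullAndHash(fs, up, utils.MD5)
//	if err != nil {
//		return err
//	}
//	_ = file // fully cached copy, usable as an io.ReaderAt
//	_ = sum  // hex-encoded MD5 of the whole stream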
func CacheFullAndHash(stream model.FileStreamer, up *model.UpdateProgress, hashType *utils.HashType, hashParams ...any) (model.File, string, error) {
	h := hashType.NewFunc(hashParams...)
	tmpF, err := stream.CacheFullAndWriter(up, h)
	if err != nil {
		return nil, "", err
	}
	return tmpF, hex.EncodeToString(h.Sum(nil)), nil
}
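// StreamSectionReaderIF slices a forward-only stream into sections that can be
// read (and re-read) independently. Sections must be requested in order, and
// every reader obtained from GetSectionReader must be handed back through
// FreeSectionReader once it is no longer needed.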
type StreamSectionReaderIF interface {
	// Not safe for concurrent use.
	GetSectionReader(off, length int64) (io.ReadSeeker, error)
	FreeSectionReader(sr io.ReadSeeker)
	// Not safe for concurrent use.
	DiscardSection(off int64, length int64) error
}
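// NewStreamSectionReader picks a sectioning strategy for the stream: an
// already cached file is sliced in place; a stream whose buffer demand exceeds
// conf.MaxBufferLimit is spooled through a pre-sized temp file; anything else
// is buffered in memory, mmap-backed once maxBufferSize reaches
// conf.MmapThreshold.
//
// A minimal usage sketch (hypothetical `fs`, `chunkSize`, and progress
// pointer `up`):
//
//	ss, err := NewStreamSectionReader(fs, chunkSize, up)
//	if err != nil {
//		return err
//	}
//	for off := int64(0); off < fs.GetSize(); off += int64(chunkSize) {
//		length := min(int64(chunkSize), fs.GetSize()-off)
//		sr, err := ss.GetSectionReader(off, length)
//		if err != nil {
//			return err
//		}
//		// ... upload or hash the section ...
//		ss.FreeSectionReader(sr)
//	}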
func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *model.UpdateProgress) (StreamSectionReaderIF, error) {
	if file.GetFile() != nil {
		return &cachedSectionReader{file.GetFile()}, nil
	}

	maxBufferSize = min(maxBufferSize, int(file.GetSize()))
	if maxBufferSize > conf.MaxBufferLimit {
		f, err := os.CreateTemp(conf.Conf.TempDir, "file-*")
		if err != nil {
			return nil, err
		}

		if f.Truncate((file.GetSize()+int64(maxBufferSize-1))/int64(maxBufferSize)*int64(maxBufferSize)) != nil {
			// fall back to caching the full stream
			_, _ = f.Close(), os.Remove(f.Name())
			cache, err := file.CacheFullAndWriter(up, nil)
			if err != nil {
				return nil, err
			}
			return &cachedSectionReader{cache}, nil
		}

		ss := &fileSectionReader{Reader: file, temp: f}
		ss.bufPool = &pool.Pool[*offsetWriterWithBase]{
			New: func() *offsetWriterWithBase {
				base := ss.fileOff
				ss.fileOff += int64(maxBufferSize)
				return &offsetWriterWithBase{io.NewOffsetWriter(ss.temp, base), base}
			},
		}
		file.Add(utils.CloseFunc(func() error {
			ss.bufPool.Reset()
			return errors.Join(ss.temp.Close(), os.Remove(ss.temp.Name()))
		}))
		return ss, nil
	}

	ss := &directSectionReader{file: file}
	if conf.MmapThreshold > 0 && maxBufferSize >= conf.MmapThreshold {
		ss.bufPool = &pool.Pool[[]byte]{
			New: func() []byte {
				buf, err := mmap.Alloc(maxBufferSize)
				if err == nil {
					ss.file.Add(utils.CloseFunc(func() error {
						return mmap.Free(buf)
					}))
				} else {
					buf = make([]byte, maxBufferSize)
				}
				return buf
			},
		}
	} else {
		ss.bufPool = &pool.Pool[[]byte]{
			New: func() []byte {
				return make([]byte, maxBufferSize)
			},
		}
	}

	file.Add(utils.CloseFunc(func() error {
		ss.bufPool.Reset()
		return nil
	}))
	return ss, nil
}
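// cachedSectionReader serves sections straight from an already materialized
// io.ReaderAt; nothing needs to be discarded or freed.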
type cachedSectionReader struct {
	cache io.ReaderAt
}

func (*cachedSectionReader) DiscardSection(off int64, length int64) error {
	return nil
}

func (s *cachedSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
	return io.NewSectionReader(s.cache, off, length), nil
}

func (*cachedSectionReader) FreeSectionReader(sr io.ReadSeeker) {}
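// fileSectionReader spools an unseekable stream into a pre-sized temp file,
// handing out one temp-file region per section. offsetWriterWithBase remembers
// where in the temp file each pooled writer begins.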
type fileSectionReader struct {
	io.Reader
	off     int64
	temp    *os.File
	fileOff int64
	bufPool *pool.Pool[*offsetWriterWithBase]
}

type offsetWriterWithBase struct {
	*io.OffsetWriter
	base int64
}
// Not safe for concurrent use.
func (ss *fileSectionReader) DiscardSection(off int64, length int64) error {
	if off != ss.off {
		return fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
	}
	_, err := utils.CopyWithBufferN(io.Discard, ss.Reader, length)
	if err != nil {
		return fmt.Errorf("failed to skip data (expected %d bytes): %w", length, err)
	}
	ss.off += length
	return nil
}
type fileBufferSectionReader struct {
	io.ReadSeeker
	fileBuf *offsetWriterWithBase
}
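// GetSectionReader copies the next `length` bytes of the stream into a pooled
// temp-file region and returns a seekable reader over that region.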
func (ss *fileSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
	if off != ss.off {
		return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
	}
	fileBuf := ss.bufPool.Get()
	_, _ = fileBuf.Seek(0, io.SeekStart)
	n, err := utils.CopyWithBufferN(fileBuf, ss.Reader, length)
	if err != nil {
		// return the region to the pool so a failed read does not leak it
		ss.bufPool.Put(fileBuf)
		return nil, fmt.Errorf("failed to read all data (expected=%d, actual=%d): %w", length, n, err)
	}
	ss.off += length
	return &fileBufferSectionReader{io.NewSectionReader(ss.temp, fileBuf.base, length), fileBuf}, nil
}
func (ss *fileSectionReader) FreeSectionReader(rs io.ReadSeeker) {
	if sr, ok := rs.(*fileBufferSectionReader); ok {
		ss.bufPool.Put(sr.fileBuf)
		sr.fileBuf = nil
		sr.ReadSeeker = nil
	}
}
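// directSectionReader buffers each section of the stream in memory, using
// pooled (optionally mmap-backed) byte slices.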
type directSectionReader struct {
	file    model.FileStreamer
	off     int64
	bufPool *pool.Pool[[]byte]
}
// Not safe for concurrent use.
func (ss *directSectionReader) DiscardSection(off int64, length int64) error {
	if off != ss.off {
		return fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
	}
	_, err := utils.CopyWithBufferN(io.Discard, ss.file, length)
	if err != nil {
		return fmt.Errorf("failed to skip data (expected %d bytes): %w", length, err)
	}
	ss.off += length
	return nil
}
type bufferSectionReader struct {
	io.ReadSeeker
	buf []byte
}
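// GetSectionReader reads the next `length` bytes into a pooled buffer and
// returns an in-memory reader over it.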
// Not safe for concurrent use.
func (ss *directSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
	if off != ss.off {
		return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.off)
	}
	tempBuf := ss.bufPool.Get()
	buf := tempBuf[:length]
	n, err := io.ReadFull(ss.file, buf)
	if int64(n) != length {
		// return the buffer to the pool so a failed read does not leak it
		ss.bufPool.Put(tempBuf)
		return nil, fmt.Errorf("failed to read all data (expected=%d, actual=%d): %w", length, n, err)
	}
	ss.off += int64(n)
	return &bufferSectionReader{bytes.NewReader(buf), buf}, nil
}
func (ss *directSectionReader) FreeSectionReader(rs io.ReadSeeker) {
	if sr, ok := rs.(*bufferSectionReader); ok {
		ss.bufPool.Put(sr.buf[0:cap(sr.buf)])
		sr.buf = nil
		sr.ReadSeeker = nil
	}
}