Mirror of https://github.com/OpenListTeam/OpenList.git
synced 2025-11-25 03:15:19 +08:00
* feat(cache): improve cache management
* feat(disk-usage): add cache
* feat(disk-usage): add refresh
* fix(disk-usage): cache with ttl
* feat(cache): implement KeyedCache and TypedCache for improved caching mechanism
* fix(copy): update object retrieval to use Get instead of GetUnwrap
* refactor(cache): simplify DirectoryCache structure and improve object management
* fix(cache): correct cache entry initialization and key deletion logic in TypedCache
* refactor(driver): remove GetObjInfo interface and simplify Link function logic https://github.com/OpenListTeam/OpenList/pull/888/files#r2430925783
* fix(link): optimize link retrieval and caching logic
* refactor(cache): consolidate cache management and improve directory cache handling
* fix(cache): add cache control based on storage configuration in List function
* .
* refactor: replace fmt.Sprintf with strconv for integer conversions
* refactor(cache): enhance cache entry management with Expirable interface
* fix(cache): improve link reference acquisition logic to handle expiration
* refactor: replace OnlyLinkMFile with NoLinkSF in driver configurations and logic
* refactor(link): enhance link caching logic with dynamic type keys based on IP and User-Agent
* feat(drivers): add LinkCacheType to driver configurations for enhanced caching
* refactor(cache): streamline directory object management in cache operations
* refactor(cache): remove unnecessary 'dirty' field from CacheEntry structure
* refactor(cache): replace 'dirty' field with bitwise flags
* refactor(io): raise the priority of SyncClosers.AcquireReference
* refactor(link): optimize link retrieval logic, add re…
* refactor(link): add RequireReference field to strengthen link management
* refactor(link): remove the MFile field in favor of RangeReader
* refactor: remove the unnecessary NoLinkSF field
* refactor(cache): revise the directory cache's dirty-flag definition and update logic
* feat(cache): add expiration gc

---------

Co-authored-by: KirCute <951206789@qq.com>
Co-authored-by: KirCute <kircute@foxmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
312 lines
6.6 KiB
Go
package utils

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"sync"
	"sync/atomic"
	"time"

	log "github.com/sirupsen/logrus"
)
// Some syntactic sugar inspired by Tomas Senart's video;
// it allows inlining the Reader interface.
type readerFunc func(p []byte) (n int, err error)

func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }

// CopyWithCtx has a slightly modified copy signature:
// - a context has been added in order to propagate cancellation
// - the number of bytes written is not returned, as it is not useful here
func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, progress func(percentage float64)) error {
	// Copy calls the Reader and Writer interfaces multiple times in order
	// to copy by chunk (avoiding loading the whole file in memory).
	// The cancellation check is inserted before each read, as that is the
	// earliest possible point in the call chain.
	var finish int64 = 0
	s := size / 100
	_, err := CopyWithBuffer(out, readerFunc(func(p []byte) (int, error) {
		// golang non-blocking channel: https://gobyexample.com/non-blocking-channel-operations
		select {
		// if the context has been canceled
		case <-ctx.Done():
			// stop the process and propagate the "context canceled" error
			return 0, ctx.Err()
		default:
			// otherwise just run the default io.Reader implementation
			n, err := in.Read(p)
			if s > 0 && (err == nil || err == io.EOF) {
				finish += int64(n)
				progress(float64(finish) / float64(s))
			}
			return n, err
		}
	}))
	return err
}
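// Illustrative usage sketch (not part of the original file): copying an
// in-memory payload with cancellation and a progress callback. The timeout,
// payload, and the exampleCopyWithCtx name are assumptions for illustration.
func exampleCopyWithCtx() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	src := bytes.NewReader(make([]byte, 1024*1024))
	size := int64(src.Len())
	var dst bytes.Buffer
	return CopyWithCtx(ctx, &dst, src, size, func(percentage float64) {
		log.Debugf("copied %.1f%%", percentage) // invoked after every chunk read
	})
}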
type limitWriter struct {
	w     io.Writer
	limit int64
}

func (l *limitWriter) Write(p []byte) (n int, err error) {
	lp := len(p)
	if l.limit > 0 {
		if int64(lp) > l.limit {
			p = p[:l.limit]
		}
		l.limit -= int64(len(p))
		_, err = l.w.Write(p)
	}
	// Bytes beyond the limit are silently discarded, but the full input length
	// is reported so upstream copies do not fail with io.ErrShortWrite.
	return lp, err
}

// LimitWriter returns a writer that writes at most limit bytes to w and
// silently discards the rest.
func LimitWriter(w io.Writer, limit int64) io.Writer {
	return &limitWriter{w: w, limit: limit}
}
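// Illustrative usage sketch (not part of the original file): capping output at
// 16 bytes. The writer still reports the full input length as written, which is
// what keeps wrapping copies from failing with io.ErrShortWrite.
func exampleLimitWriter() {
	var buf bytes.Buffer
	w := LimitWriter(&buf, 16)
	n, _ := w.Write([]byte("0123456789abcdefOVERFLOW"))
	log.Debugf("reported=%d kept=%q", n, buf.String()) // reported=24 kept="0123456789abcdef"
}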
type ReadCloser struct {
	io.Reader
	io.Closer
}

type CloseFunc func() error

func (c CloseFunc) Close() error {
	return c()
}

func NewReadCloser(reader io.Reader, close CloseFunc) io.ReadCloser {
	return ReadCloser{
		Reader: reader,
		Closer: close,
	}
}

func NewLimitReadCloser(reader io.Reader, close CloseFunc, limit int64) io.ReadCloser {
	return NewReadCloser(io.LimitReader(reader, limit), close)
}
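// Illustrative usage sketch (not part of the original file): attaching a
// custom cleanup to a plain reader and capping it at 5 bytes; the function
// name and payload are made up for the example.
func exampleLimitReadCloser() ([]byte, error) {
	rc := NewLimitReadCloser(bytes.NewReader([]byte("hello, world")), func() error {
		log.Debug("cleanup ran")
		return nil
	}, 5)
	defer rc.Close()
	return io.ReadAll(rc) // yields "hello"
}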
// MultiReadable buffers bytes read from a non-seekable reader so that the
// stream can be replayed from the start via Reset; seekable readers are
// rewound directly instead of being buffered.
type MultiReadable struct {
	originReader io.Reader
	reader       io.Reader
	cache        *bytes.Buffer
}

func NewMultiReadable(reader io.Reader) *MultiReadable {
	return &MultiReadable{
		originReader: reader,
		reader:       reader,
	}
}

func (mr *MultiReadable) Read(p []byte) (int, error) {
	n, err := mr.reader.Read(p)
	if _, ok := mr.reader.(io.Seeker); !ok && n > 0 {
		if mr.cache == nil {
			mr.cache = &bytes.Buffer{}
		}
		mr.cache.Write(p[:n])
	}
	return n, err
}

func (mr *MultiReadable) Reset() error {
	if seeker, ok := mr.reader.(io.Seeker); ok {
		_, err := seeker.Seek(0, io.SeekStart)
		return err
	}
	if mr.cache != nil && mr.cache.Len() > 0 {
		mr.reader = io.MultiReader(mr.cache, mr.reader)
		mr.cache = nil
	}
	return nil
}

func (mr *MultiReadable) Close() error {
	if closer, ok := mr.originReader.(io.Closer); ok {
		return closer.Close()
	}
	return nil
}
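// Illustrative usage sketch (not part of the original file): sniff a header
// from a non-seekable stream, then Reset to replay the consumed bytes. With a
// seekable reader, Reset would simply seek back to the start instead.
func exampleMultiReadable() error {
	// io.LimitReader hides the Seeker implementation, forcing the cache path.
	mr := NewMultiReadable(io.LimitReader(bytes.NewReader([]byte("abcdef")), 6))
	header := make([]byte, 3)
	if _, err := io.ReadFull(mr, header); err != nil { // "abc" is cached as it is read
		return err
	}
	if err := mr.Reset(); err != nil { // the cache is prepended to the remaining stream
		return err
	}
	full, err := io.ReadAll(mr) // reads "abcdef" from the start again
	log.Debugf("header=%q full=%q", header, full)
	return err
}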
// Retry runs f up to attempts times, sleeping between tries with exponential
// backoff (the sleep doubles after each failure).
func Retry(attempts int, sleep time.Duration, f func() error) (err error) {
	for i := 0; i < attempts; i++ {
		if i > 0 {
			log.Println("retrying after error:", err)
			time.Sleep(sleep)
			sleep *= 2
		}
		err = f()
		if err == nil {
			return nil
		}
	}
	return fmt.Errorf("after %d attempts, last error: %w", attempts, err)
}
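// Illustrative usage sketch (not part of the original file): three attempts
// with backoff sleeps of 100ms then 200ms; the transient failure is a
// stand-in for a real flaky operation.
func exampleRetry() error {
	attempt := 0
	return Retry(3, 100*time.Millisecond, func() error {
		attempt++
		if attempt < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
}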
type ClosersIF interface {
	io.Closer
	Add(closer io.Closer)
	AddIfCloser(a any)
}

type Closers []io.Closer

func (c *Closers) Close() error {
	var errs []error
	for _, closer := range *c {
		if closer != nil {
			errs = append(errs, closer.Close())
		}
	}
	clear(*c)
	*c = (*c)[:0]
	return errors.Join(errs...)
}

func (c *Closers) Add(closer io.Closer) {
	if closer != nil {
		*c = append(*c, closer)
	}
}

func (c *Closers) AddIfCloser(a any) {
	if closer, ok := a.(io.Closer); ok {
		*c = append(*c, closer)
	}
}

var _ ClosersIF = (*Closers)(nil)

func NewClosers(c ...io.Closer) Closers {
	return Closers(c)
}
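// Illustrative usage sketch (not part of the original file): aggregate the
// closers of layered resources and release them together; Close joins all
// errors instead of stopping at the first one.
func exampleClosers() error {
	cs := NewClosers()
	cs.Add(io.NopCloser(bytes.NewReader(nil)))
	cs.AddIfCloser(bytes.NewReader(nil)) // not an io.Closer, silently ignored
	return cs.Close()
}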
// SyncClosers is a reference-counted Closers: each Close releases one
// reference, and the underlying closers only run once the last reference
// is gone.
type SyncClosers struct {
	closers []io.Closer
	ref     int32
}

// AcquireReference registers another user of the closers.
// It returns false if the SyncClosers has already been closed.
func (c *SyncClosers) AcquireReference() bool {
	ref := atomic.AddInt32(&c.ref, 1)
	if ref > 0 {
		// log.Debugf("AcquireReference %p: %d", c, ref)
		return true
	}
	atomic.StoreInt32(&c.ref, closersClosed)
	return false
}

const closersClosed = math.MinInt32
func (c *SyncClosers) Close() error {
	for {
		ref := atomic.LoadInt32(&c.ref)
		if ref < 0 {
			return nil
		}
		if ref > 1 {
			if atomic.CompareAndSwapInt32(&c.ref, ref, ref-1) {
				// log.Debugf("ReleaseReference %p: %d", c, ref)
				return nil
			}
		} else if atomic.CompareAndSwapInt32(&c.ref, ref, closersClosed) {
			break
		}
	}

	// log.Debugf("FinalClose %p", c)
	var errs []error
	for _, closer := range c.closers {
		if closer != nil {
			errs = append(errs, closer.Close())
		}
	}
	clear(c.closers)
	c.closers = nil
	return errors.Join(errs...)
}

func (c *SyncClosers) Add(closer io.Closer) {
	if closer != nil {
		if atomic.LoadInt32(&c.ref) < 0 {
			panic("Not reusable")
		}
		c.closers = append(c.closers, closer)
	}
}

func (c *SyncClosers) AddIfCloser(a any) {
	if closer, ok := a.(io.Closer); ok {
		if atomic.LoadInt32(&c.ref) < 0 {
			panic("Not reusable")
		}
		c.closers = append(c.closers, closer)
	}
}

var _ ClosersIF = (*SyncClosers)(nil)

// Expired implements the cache.Expirable interface.
func (c *SyncClosers) Expired() bool {
	return atomic.LoadInt32(&c.ref) < 0
}

func (c *SyncClosers) Length() int {
	return len(c.closers)
}

func NewSyncClosers(c ...io.Closer) SyncClosers {
	return SyncClosers{closers: c}
}
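// Illustrative usage sketch (not part of the original file): two consumers
// share one SyncClosers, e.g. a cached link handed out twice. Each successful
// AcquireReference is paired with a Close, and only the last Close actually
// closes the underlying resources.
func exampleSyncClosers() error {
	sc := NewSyncClosers(io.NopCloser(bytes.NewReader(nil)))
	if !sc.AcquireReference() {
		return errors.New("already closed")
	}
	if !sc.AcquireReference() {
		return errors.New("already closed")
	}
	_ = sc.Close()    // drops one reference; resources stay open
	return sc.Close() // last reference gone; closers actually run
}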
type Ordered interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 |
		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
		~float32 | ~float64 |
		~string
}

func Min[T Ordered](a, b T) T {
	if a < b {
		return a
	}
	return b
}

func Max[T Ordered](a, b T) T {
	if a < b {
		return b
	}
	return a
}
var IoBuffPool = &sync.Pool{
	New: func() interface{} {
		return make([]byte, 32*1024*2) // twice the 32 KiB default buffer size used by io.Copy
	},
}

func CopyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
	buff := IoBuffPool.Get().([]byte)
	defer IoBuffPool.Put(buff)
	return io.CopyBuffer(dst, src, buff)
}

func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
	written, err = CopyWithBuffer(dst, io.LimitReader(src, n))
	if written == n {
		return n, nil
	}
	if written < n && err == nil {
		// src stopped early; must have been EOF.
		err = io.EOF
	}
	return
}
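// Illustrative usage sketch (not part of the original file): request 5 bytes
// from a 3-byte source; like io.CopyN, the short read surfaces io.EOF.
func exampleCopyWithBufferN() {
	var dst bytes.Buffer
	n, err := CopyWithBufferN(&dst, bytes.NewReader([]byte("abc")), 5)
	log.Debugf("written=%d err=%v", n, err) // written=3 err=io.EOF
}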