Compare commits

...

2 Commits

Author SHA1 Message Date
MadDogOwner
0866b9075f fix(link): correct link cache mode bitwise comparison (#1635)
* fix(link): correct link cache mode bitwise comparison

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

* refactor(link): use explicit flag equality for link cache mode bitmask checks

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>

---------

Signed-off-by: MadDogOwner <xiaoran@xrgzs.top>
2025-11-13 13:52:33 +08:00
KirCute
055696f576 feat(s3): support frontend direct upload (#1631)
* feat(s3): support frontend direct upload

* feat(s3): support custom direct upload host

* fix: apply suggestions of Copilot
2025-11-13 13:22:17 +08:00
9 changed files with 142 additions and 65 deletions

View File

@@ -5,7 +5,6 @@ import (
     "errors"
     stdpath "path"
     "strings"
-    "sync"
     "time"

     "github.com/OpenListTeam/OpenList/v4/internal/driver"
@@ -17,9 +16,15 @@ import (
     log "github.com/sirupsen/logrus"
 )

+type detailWithIndex struct {
+    idx int
+    val *model.StorageDetails
+}
+
 func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
     var objs []model.Obj
-    var wg sync.WaitGroup
+    detailsChan := make(chan detailWithIndex, len(d.pathMap))
+    workerCount := 0
     for _, k := range d.rootOrder {
         obj := model.Object{
             Name: k,
@@ -47,22 +52,26 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model.Obj {
                 DriverName: remoteDriver.Config().Name,
             },
         }
-        wg.Add(1)
-        go func() {
-            defer wg.Done()
-            c, cancel := context.WithTimeout(ctx, time.Second)
-            defer cancel()
-            details, e := op.GetStorageDetails(c, remoteDriver, refresh)
+        workerCount++
+        go func(dri driver.Driver, i int) {
+            details, e := op.GetStorageDetails(ctx, dri, refresh)
             if e != nil {
                 if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
-                    log.Errorf("failed get %s storage details: %+v", remoteDriver.GetStorage().MountPath, e)
+                    log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, e)
                 }
                 return
             }
-            objs[idx].(*model.ObjStorageDetails).StorageDetails = details
-        }()
+            detailsChan <- detailWithIndex{idx: i, val: details}
+        }(remoteDriver, idx)
     }
-    wg.Wait()
+    for workerCount > 0 {
+        select {
+        case r := <-detailsChan:
+            objs[r.idx].(*model.ObjStorageDetails).StorageDetails = r.val
+            workerCount--
+        case <-time.After(time.Second):
+            workerCount = 0
+        }
+    }
     return objs
 }
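Aside on the pattern above: the removed sync.WaitGroup waited for every per-storage lookup, so one hung driver stalled the whole root listing; the new code fans results into a buffered channel keyed by index and stops draining once nothing arrives for a second. A minimal, self-contained sketch of that shape in plain Go, with a placeholder fetch standing in for op.GetStorageDetails:

package main

import (
    "fmt"
    "math/rand"
    "time"
)

type result struct {
    idx int
    val string
}

// fetch stands in for a per-storage lookup of unpredictable duration.
func fetch(i int) string {
    time.Sleep(time.Duration(rand.Intn(300)) * time.Millisecond)
    return fmt.Sprintf("details-%d", i)
}

func main() {
    n := 5
    out := make([]string, n)
    // Buffered to n so goroutines that finish late can still send and exit.
    results := make(chan result, n)
    workerCount := 0
    for i := 0; i < n; i++ {
        workerCount++
        go func(i int) {
            results <- result{idx: i, val: fetch(i)}
        }(i)
    }
    // Drain until every worker reported or no result arrived for a second.
    for workerCount > 0 {
        select {
        case r := <-results:
            out[r.idx] = r.val
            workerCount--
        case <-time.After(time.Second):
            workerCount = 0 // give up on stragglers; their slots stay empty
        }
    }
    fmt.Println(out)
}

The buffered channel is what keeps abandoned goroutines from leaking: they can always complete their send even after the drain loop has given up.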

View File

@@ -7,19 +7,19 @@ import (
 type Addition struct {
     driver.RootPath
-    Region string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
-    IsSharepoint bool `json:"is_sharepoint"`
-    UseOnlineAPI bool `json:"use_online_api" default:"true"`
-    APIAddress string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
-    ClientID string `json:"client_id"`
-    ClientSecret string `json:"client_secret"`
-    RedirectUri string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
-    RefreshToken string `json:"refresh_token" required:"true"`
-    SiteId string `json:"site_id"`
-    ChunkSize int64 `json:"chunk_size" type:"number" default:"5"`
-    CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
-    DisableDiskUsage bool `json:"disable_disk_usage" default:"false"`
-    EnableDirectUpload bool `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
+    Region             string `json:"region" type:"select" required:"true" options:"global,cn,us,de" default:"global"`
+    IsSharepoint       bool   `json:"is_sharepoint"`
+    UseOnlineAPI       bool   `json:"use_online_api" default:"true"`
+    APIAddress         string `json:"api_url_address" default:"https://api.oplist.org/onedrive/renewapi"`
+    ClientID           string `json:"client_id"`
+    ClientSecret       string `json:"client_secret"`
+    RedirectUri        string `json:"redirect_uri" required:"true" default:"https://api.oplist.org/onedrive/callback"`
+    RefreshToken       string `json:"refresh_token" required:"true"`
+    SiteId             string `json:"site_id"`
+    ChunkSize          int64  `json:"chunk_size" type:"number" default:"5"`
+    CustomHost         string `json:"custom_host" help:"Custom host for onedrive download link"`
+    DisableDiskUsage   bool   `json:"disable_disk_usage" default:"false"`
+    EnableDirectUpload bool   `json:"enable_direct_upload" default:"false" help:"Enable direct upload from client to OneDrive"`
 }

 var config = driver.Config{

View File

@@ -217,11 +217,10 @@ func (d *QuarkOrUC) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
     if err != nil {
         return nil, err
     }
+    used := memberInfo.Data.UseCapacity
+    total := memberInfo.Data.TotalCapacity
     return &model.StorageDetails{
-        DiskUsage: model.DiskUsage{
-            TotalSpace: memberInfo.Data.TotalCapacity,
-            FreeSpace:  memberInfo.Data.TotalCapacity - memberInfo.Data.UseCapacity,
-        },
+        DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total),
     }, nil
 }
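The hunk above replaces the hand-built model.DiskUsage literal with driver.DiskUsageFromUsedAndTotal(used, total). The helper's body is not part of this diff; a plausible standalone equivalent of what it computes, using a local stand-in for model.DiskUsage, would be:

package main

import "fmt"

// DiskUsage mirrors the two fields visible in the removed lines;
// the real model.DiskUsage may carry more.
type DiskUsage struct {
    TotalSpace uint64
    FreeSpace  uint64
}

// diskUsageFromUsedAndTotal is a guess at the helper's behaviour:
// keep the total, derive free space, and avoid underflow if used > total.
func diskUsageFromUsedAndTotal(used, total uint64) DiskUsage {
    var free uint64
    if total > used {
        free = total - used
    }
    return DiskUsage{TotalSpace: total, FreeSpace: free}
}

func main() {
    fmt.Printf("%+v\n", diskUsageFromUsedAndTotal(30, 100)) // {TotalSpace:100 FreeSpace:70}
}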

View File

@@ -10,6 +10,7 @@ import (
     "time"

     "github.com/OpenListTeam/OpenList/v4/internal/driver"
+    "github.com/OpenListTeam/OpenList/v4/internal/errs"
     "github.com/OpenListTeam/OpenList/v4/internal/model"
     "github.com/OpenListTeam/OpenList/v4/internal/stream"
     "github.com/OpenListTeam/OpenList/v4/pkg/cron"
@@ -24,9 +25,10 @@ import (
 type S3 struct {
     model.Storage
     Addition
-    Session    *session.Session
-    client     *s3.S3
-    linkClient *s3.S3
+    Session            *session.Session
+    client             *s3.S3
+    linkClient         *s3.S3
+    directUploadClient *s3.S3

     config driver.Config
     cron   *cron.Cron
@@ -52,16 +54,18 @@ func (d *S3) Init(ctx context.Context) error {
             if err != nil {
                 log.Errorln("Doge init session error:", err)
             }
-            d.client = d.getClient(false)
-            d.linkClient = d.getClient(true)
+            d.client = d.getClient(ClientTypeNormal)
+            d.linkClient = d.getClient(ClientTypeLink)
+            d.directUploadClient = d.getClient(ClientTypeDirectUpload)
         })
     }

     err := d.initSession()
     if err != nil {
         return err
     }
-    d.client = d.getClient(false)
-    d.linkClient = d.getClient(true)
+    d.client = d.getClient(ClientTypeNormal)
+    d.linkClient = d.getClient(ClientTypeLink)
+    d.directUploadClient = d.getClient(ClientTypeDirectUpload)
     return nil
 }
@@ -210,4 +214,33 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
     return err
 }

+func (d *S3) GetDirectUploadTools() []string {
+    if !d.EnableDirectUpload {
+        return nil
+    }
+    return []string{"HttpDirect"}
+}
+
+func (d *S3) GetDirectUploadInfo(ctx context.Context, _ string, dstDir model.Obj, fileName string, _ int64) (any, error) {
+    if !d.EnableDirectUpload {
+        return nil, errs.NotImplement
+    }
+    path := getKey(stdpath.Join(dstDir.GetPath(), fileName), false)
+    req, _ := d.directUploadClient.PutObjectRequest(&s3.PutObjectInput{
+        Bucket: &d.Bucket,
+        Key:    &path,
+    })
+    if req == nil {
+        return nil, fmt.Errorf("failed to create PutObject request")
+    }
+    link, err := req.Presign(time.Hour * time.Duration(d.SignURLExpire))
+    if err != nil {
+        return nil, err
+    }
+    return &model.HttpDirectUploadInfo{
+        UploadURL: link,
+        Method:    "PUT",
+    }, nil
+}
+
 var _ driver.Driver = (*S3)(nil)
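GetDirectUploadInfo hands the frontend a presigned PUT URL so the file bypasses the server and goes straight to the bucket. Below is a sketch of the client side of that exchange; UploadURL and Method mirror model.HttpDirectUploadInfo as used above, while the URL value and file path are placeholders:

package main

import (
    "fmt"
    "net/http"
    "os"
)

// directUploadInfo mirrors the two fields returned by GetDirectUploadInfo.
type directUploadInfo struct {
    UploadURL string
    Method    string // "PUT" for the presigned S3 request
}

// uploadDirect streams a local file straight to the presigned URL.
func uploadDirect(info directUploadInfo, filePath string) error {
    f, err := os.Open(filePath)
    if err != nil {
        return err
    }
    defer f.Close()

    st, err := f.Stat()
    if err != nil {
        return err
    }

    req, err := http.NewRequest(info.Method, info.UploadURL, f)
    if err != nil {
        return err
    }
    // Presigned S3 PUTs generally expect an explicit Content-Length.
    req.ContentLength = st.Size()

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("direct upload failed: %s", resp.Status)
    }
    return nil
}

func main() {
    // Placeholder URL; in practice it comes from the server endpoint backed
    // by GetDirectUploadInfo, and expires after SignURLExpire hours.
    info := directUploadInfo{UploadURL: "https://bucket.s3.example.com/key?X-Amz-Signature=...", Method: "PUT"}
    if err := uploadDirect(info, "example.bin"); err != nil {
        fmt.Println(err)
    }
}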

View File

@@ -21,6 +21,8 @@ type Addition struct {
     ListObjectVersion        string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"`
     RemoveBucket             bool   `json:"remove_bucket" help:"Remove bucket name from path when using custom host."`
     AddFilenameToDisposition bool   `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."`
+    EnableDirectUpload       bool   `json:"enable_direct_upload" default:"false"`
+    DirectUploadHost         string `json:"direct_upload_host" required:"false"`
 }

 func init() {

View File

@@ -41,9 +41,15 @@ func (d *S3) initSession() error {
     return err
 }

-func (d *S3) getClient(link bool) *s3.S3 {
+const (
+    ClientTypeNormal = iota
+    ClientTypeLink
+    ClientTypeDirectUpload
+)
+
+func (d *S3) getClient(clientType int) *s3.S3 {
     client := s3.New(d.Session)
-    if link && d.CustomHost != "" {
+    if clientType == ClientTypeLink && d.CustomHost != "" {
         client.Handlers.Build.PushBack(func(r *request.Request) {
             if r.HTTPRequest.Method != http.MethodGet {
                 return
@@ -58,6 +64,20 @@ func (d *S3) getClient(link bool) *s3.S3 {
             }
         })
     }
+    if clientType == ClientTypeDirectUpload && d.DirectUploadHost != "" {
+        client.Handlers.Build.PushBack(func(r *request.Request) {
+            if r.HTTPRequest.Method != http.MethodPut {
+                return
+            }
+            split := strings.SplitN(d.DirectUploadHost, "://", 2)
+            if utils.SliceContains([]string{"http", "https"}, split[0]) {
+                r.HTTPRequest.URL.Scheme = split[0]
+                r.HTTPRequest.URL.Host = split[1]
+            } else {
+                r.HTTPRequest.URL.Host = d.DirectUploadHost
+            }
+        })
+    }
     return client
 }
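The new build handler only touches PUT requests and reads DirectUploadHost either as scheme://host (http or https) or as a bare host that keeps the original scheme. A standalone sketch of that parsing rule applied to a plain net/url.URL, with utils.SliceContains replaced by a direct comparison:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// applyDirectUploadHost mimics the handler above: an explicit http/https
// prefix overrides scheme and host, anything else overrides the host only.
func applyDirectUploadHost(u *url.URL, directUploadHost string) {
    split := strings.SplitN(directUploadHost, "://", 2)
    if len(split) == 2 && (split[0] == "http" || split[0] == "https") {
        u.Scheme = split[0]
        u.Host = split[1]
        return
    }
    u.Host = directUploadHost
}

func main() {
    for _, host := range []string{"upload.example.com", "http://upload.example.com:8080"} {
        u, _ := url.Parse("https://bucket.s3.amazonaws.com/key?X-Amz-Signature=abc")
        applyDirectUploadHost(u, host)
        fmt.Println(u.String())
    }
}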

View File

@@ -173,10 +173,10 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
         mode = storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(path)
     }
     typeKey := args.Type
-    if mode&driver.LinkCacheIP == 1 {
+    if mode&driver.LinkCacheIP == driver.LinkCacheIP {
         typeKey += "/" + args.IP
     }
-    if mode&driver.LinkCacheUA == 1 {
+    if mode&driver.LinkCacheUA == driver.LinkCacheUA {
         typeKey += "/" + args.Header.Get("User-Agent")
     }
     key := Key(storage, path)
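This is the one-line bug behind the first commit: mode&flag evaluates to the flag's own value when the bit is set, so comparing the result to 1 only works for a flag whose value happens to be 1; for any higher bit (2, 4, ...) the old check was always false and that component never made it into the cache key. Comparing against the flag itself, as the new lines do, or against zero with !=, tests the bit correctly. A small illustration with stand-in constants, since the numeric values of driver.LinkCacheIP and driver.LinkCacheUA are not shown in this diff:

package main

import "fmt"

// Stand-in flag values; the real driver constants may differ.
const (
    linkCacheIP = 1 << 0 // 1
    linkCacheUA = 1 << 1 // 2
)

func main() {
    mode := linkCacheUA // only the UA bit is set

    fmt.Println(mode&linkCacheUA == 1)           // false: 2 == 1, the old check misses the set bit
    fmt.Println(mode&linkCacheUA == linkCacheUA) // true: the corrected check
    fmt.Println(mode&linkCacheUA != 0)           // true: the equivalent non-zero test
}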

View File

@@ -358,16 +358,21 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
                 DriverName: d.Config().Name,
             },
         }
-        timeoutCtx, cancel := context.WithTimeout(ctx, time.Second)
-        defer cancel()
-        details, err := GetStorageDetails(timeoutCtx, d, refresh)
-        if err != nil {
-            if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
-                log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err)
-            }
-            return ret
-        }
-        ret.StorageDetails = details
+        resultChan := make(chan *model.StorageDetails, 1)
+        go func(dri driver.Driver) {
+            details, err := GetStorageDetails(ctx, dri, refresh)
+            if err != nil {
+                if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
+                    log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, err)
+                }
+            }
+            resultChan <- details
+        }(d)
+        select {
+        case r := <-resultChan:
+            ret.StorageDetails = r
+        case <-time.After(time.Second):
+        }
         return ret
     })
 }
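Same idea as in the alias driver, but for a single result: instead of passing GetStorageDetails a one-second context deadline, the lookup now runs in a goroutine with the caller's context and the response waits at most one second for it via select. A minimal sketch of that timeout guard:

package main

import (
    "fmt"
    "time"
)

// fetchDetails stands in for a storage-details lookup of unknown duration.
func fetchDetails() string {
    time.Sleep(1500 * time.Millisecond)
    return "details"
}

func main() {
    // Buffered so the goroutine can deliver (and exit) even after we give up.
    resultChan := make(chan string, 1)
    go func() { resultChan <- fetchDetails() }()

    var details string
    select {
    case r := <-resultChan:
        details = r
    case <-time.After(time.Second):
        // Timed out: respond without details instead of blocking.
    }
    fmt.Printf("details=%q\n", details)
}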

View File

@@ -4,7 +4,6 @@ import (
     "context"
     "errors"
     "strconv"
-    "sync"
     "time"

     "github.com/OpenListTeam/OpenList/v4/internal/conf"
@@ -24,9 +23,15 @@ type StorageResp struct {
     MountDetails *model.StorageDetails `json:"mount_details,omitempty"`
 }

-func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp {
+type detailWithIndex struct {
+    idx int
+    val *model.StorageDetails
+}
+
+func makeStorageResp(ctx *gin.Context, storages []model.Storage) []*StorageResp {
     ret := make([]*StorageResp, len(storages))
-    var wg sync.WaitGroup
+    detailsChan := make(chan detailWithIndex, len(storages))
+    workerCount := 0
     for i, s := range storages {
         ret[i] = &StorageResp{
             Storage: s,
@@ -43,22 +48,26 @@ func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp {
         if !ok {
             continue
         }
-        wg.Add(1)
-        go func() {
-            defer wg.Done()
-            ctx, cancel := context.WithTimeout(c, time.Second*3)
-            defer cancel()
-            details, err := op.GetStorageDetails(ctx, d)
-            if err != nil {
-                if !errors.Is(err, errs.NotImplement) && !errors.Is(err, errs.StorageNotInit) {
-                    log.Errorf("failed get %s details: %+v", s.MountPath, err)
+        workerCount++
+        go func(dri driver.Driver, idx int) {
+            details, e := op.GetStorageDetails(ctx, dri)
+            if e != nil {
+                if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) {
+                    log.Errorf("failed get %s details: %+v", dri.GetStorage().MountPath, e)
                 }
                 return
             }
-            ret[i].MountDetails = details
-        }()
+            detailsChan <- detailWithIndex{idx: idx, val: details}
+        }(d, i)
     }
-    wg.Wait()
+    for workerCount > 0 {
+        select {
+        case r := <-detailsChan:
+            ret[r.idx].MountDetails = r.val
+            workerCount--
+        case <-time.After(time.Second * 3):
+            workerCount = 0
+        }
+    }
     return ret
 }