Mirror of https://github.com/AlistGo/alist.git (synced 2025-11-25 03:15:10 +08:00)
* feat(s3): Add support for S3 object storage classes

  Introduces a new `storage_class` configuration option for S3 providers. Users can now specify the desired storage class (e.g., Standard, GLACIER, DEEP_ARCHIVE) for objects uploaded to S3-compatible services such as AWS S3 and Tencent COS. The input storage class string is normalized to match the AWS SDK constants, so common aliases are accepted. If an unknown storage class is provided, it is used as a raw value and a warning is logged. This gives finer control over storage costs and data access patterns.

* feat(storage): Support displaying file storage classes

  Adds storage class information to file metadata and API responses, so users can see a file's storage tier (e.g., S3 Standard, Glacier). Implementation details (a rough sketch of the new types follows this message):
  - Introduces the `StorageClassProvider` interface and the `ObjWrapStorageClass` structure to uniformly handle and communicate object storage class information.
  - Updates file metadata structures (e.g., `ArchiveObj`, `FileInfo`, `RespFile`) to include a `StorageClass` field.
  - Modifies the relevant API response functions (e.g., `GetFileInfo`, `GetFileList`) to populate and return storage classes.
  - Retrieves object storage classes from the underlying storage systems (e.g., S3) and wraps the listed objects accordingly.

* feat(driver/s3): Add the `Other` interface and implement it in the S3 driver

  A new `driver.Other` interface is defined in `other.go`, and the S3 driver now implements it, extending its functionality (a dispatch sketch appears after the source listing below).

* feat(s3): Add S3 object archive and thaw task management

  This commit introduces comprehensive support for S3 object archive and thaw operations, managed asynchronously through a new task system.
  - **S3 transition task system**:
    - Adds a new `S3Transition` task configuration, including workers, max retries, and persistence options.
    - Initializes `S3TransitionTaskManager` to handle asynchronous S3 archive/thaw requests.
    - Registers dedicated API routes for monitoring S3 transition tasks.
  - **Integrate S3 archive/thaw with the Other API**:
    - Modifies the `Other` API handler to intercept the `archive` and `thaw` methods for S3 storage drivers.
    - Dispatches these operations as `S3TransitionTask` instances to the task manager for background processing.
    - Returns a task ID to the client for tracking the status of the dispatched operation.
  - **Refactor the `other` package for improved API consistency**:
    - Exports previously internal structs such as `archiveRequest`, `thawRequest`, `objectDescriptor`, `archiveResponse`, `thawResponse`, and `restoreStatus` by making their names public.
    - Makes helper functions such as `decodeOtherArgs`, `normalizeStorageClass`, and `normalizeRestoreTier` public.
    - Introduces new constants for the various S3 `Other` API methods.
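The message above names the new types, while this page only shows the S3 driver file. As a rough sketch of how the `storage_class` option and the listing-side wrapper might fit together: only `StorageClass`, `StorageClassProvider`, and `ObjWrapStorageClass` are taken from the commit message; the JSON tag, method name, and package placement are assumptions, not the committed code.

```go
// Sketch only: inferred from the commit message, not copied from the repository.
package model

// StorageClassProvider is assumed to let an object report its storage tier.
type StorageClassProvider interface {
	GetStorageClass() string
}

// ObjWrapStorageClass is assumed to pair an existing Obj with the storage
// class reported by the backend (e.g. "STANDARD", "GLACIER").
type ObjWrapStorageClass struct {
	Obj
	StorageClass string
}

func (o *ObjWrapStorageClass) GetStorageClass() string {
	return o.StorageClass
}
```

On the driver side, the `d.StorageClass` reference in `resolveStorageClass` below implies that the embedded `Addition` struct carries a `StorageClass string` field, presumably tagged `json:"storage_class"`, although that file is not shown here.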
221 lines · 6.0 KiB · Go
package s3

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/url"
	stdpath "path"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/pkg/cron"
	"github.com/alist-org/alist/v3/server/common"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	log "github.com/sirupsen/logrus"
)

type S3 struct {
	model.Storage
	Addition
	Session    *session.Session
	client     *s3.S3
	linkClient *s3.S3

	config driver.Config
	cron   *cron.Cron
}

var storageClassLookup = map[string]string{
	"standard":            s3.ObjectStorageClassStandard,
	"reduced_redundancy":  s3.ObjectStorageClassReducedRedundancy,
	"glacier":             s3.ObjectStorageClassGlacier,
	"standard_ia":         s3.ObjectStorageClassStandardIa,
	"onezone_ia":          s3.ObjectStorageClassOnezoneIa,
	"intelligent_tiering": s3.ObjectStorageClassIntelligentTiering,
	"deep_archive":        s3.ObjectStorageClassDeepArchive,
	"outposts":            s3.ObjectStorageClassOutposts,
	"glacier_ir":          s3.ObjectStorageClassGlacierIr,
	"snow":                s3.ObjectStorageClassSnow,
	"express_onezone":     s3.ObjectStorageClassExpressOnezone,
}

func (d *S3) resolveStorageClass() *string {
	value := strings.TrimSpace(d.StorageClass)
	if value == "" {
		return nil
	}
	normalized := strings.ToLower(strings.ReplaceAll(value, "-", "_"))
	if v, ok := storageClassLookup[normalized]; ok {
		return aws.String(v)
	}
	log.Warnf("s3: unknown storage class %q, using raw value", d.StorageClass)
	return aws.String(value)
}
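
// For example, "STANDARD", "standard", and "Standard" all resolve to
// s3.ObjectStorageClassStandard; "Glacier-IR" and "glacier_ir" resolve to
// s3.ObjectStorageClassGlacierIr; an unrecognized value such as "COLD" is
// passed to the backend verbatim after the warning is logged.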

func (d *S3) Config() driver.Config {
	return d.config
}

func (d *S3) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *S3) Init(ctx context.Context) error {
	if d.Region == "" {
		d.Region = "alist"
	}
	if d.config.Name == "Doge" {
		// DogeCloud issues temporary credentials that are valid for 2 hours,
		// so refresh the session every 118 minutes.
		d.cron = cron.NewCron(time.Minute * 118)
		d.cron.Do(func() {
			err := d.initSession()
			if err != nil {
				log.Errorln("Doge init session error:", err)
			}
			d.client = d.getClient(false)
			d.linkClient = d.getClient(true)
		})
	}
	err := d.initSession()
	if err != nil {
		return err
	}
	d.client = d.getClient(false)
	d.linkClient = d.getClient(true)
	return nil
}

func (d *S3) Drop(ctx context.Context) error {
	if d.cron != nil {
		d.cron.Stop()
	}
	return nil
}

func (d *S3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if d.ListObjectVersion == "v2" {
		return d.listV2(dir.GetPath(), args)
	}
	return d.listV1(dir.GetPath(), args)
}

func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	path := getKey(file.GetPath(), false)
	filename := stdpath.Base(path)
	disposition := fmt.Sprintf(`attachment; filename*=UTF-8''%s`, url.PathEscape(filename))
	if d.AddFilenameToDisposition {
		disposition = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename))
	}
	input := &s3.GetObjectInput{
		Bucket: &d.Bucket,
		Key:    &path,
		//ResponseContentDisposition: &disposition,
	}
	if d.CustomHost == "" {
		input.ResponseContentDisposition = &disposition
	}
	req, _ := d.linkClient.GetObjectRequest(input)
	var link model.Link
	var err error
	if d.CustomHost != "" {
		// A custom host is configured: presign the URL or build it without signing.
		if d.EnableCustomHostPresign {
			link.URL, err = req.Presign(time.Hour * time.Duration(d.SignURLExpire))
		} else {
			err = req.Build()
			link.URL = req.HTTPRequest.URL.String()
		}
		if d.RemoveBucket {
			link.URL = strings.Replace(link.URL, "/"+d.Bucket, "", 1)
		}
	} else {
		// No custom host: proxy through alist with a signed request, or hand out
		// a presigned URL directly.
		if common.ShouldProxy(d, filename) {
			err = req.Sign()
			link.URL = req.HTTPRequest.URL.String()
			link.Header = req.HTTPRequest.Header
		} else {
			link.URL, err = req.Presign(time.Hour * time.Duration(d.SignURLExpire))
		}
	}
	if err != nil {
		return nil, err
	}
	return &link, nil
}

func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	return d.Put(ctx, &model.Object{
		Path: stdpath.Join(parentDir.GetPath(), dirName),
	}, &stream.FileStream{
		Obj: &model.Object{
			Name:     getPlaceholderName(d.Placeholder),
			Modified: time.Now(),
		},
		Reader:   io.NopCloser(bytes.NewReader([]byte{})),
		Mimetype: "application/octet-stream",
	}, func(float64) {})
}

func (d *S3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	err := d.Copy(ctx, srcObj, dstDir)
	if err != nil {
		return err
	}
	return d.Remove(ctx, srcObj)
}

func (d *S3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	err := d.copy(ctx, srcObj.GetPath(), stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName), srcObj.IsDir())
	if err != nil {
		return err
	}
	return d.Remove(ctx, srcObj)
}

func (d *S3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	return d.copy(ctx, srcObj.GetPath(), stdpath.Join(dstDir.GetPath(), srcObj.GetName()), srcObj.IsDir())
}

func (d *S3) Remove(ctx context.Context, obj model.Obj) error {
	if obj.IsDir() {
		return d.removeDir(ctx, obj.GetPath())
	}
	return d.removeFile(obj.GetPath())
}

func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
	uploader := s3manager.NewUploader(d.Session)
	if s.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
		// Enlarge the part size so the upload stays within the SDK's part-count limit.
		uploader.PartSize = s.GetSize() / (s3manager.MaxUploadParts - 1)
	}
	key := getKey(stdpath.Join(dstDir.GetPath(), s.GetName()), false)
	contentType := s.GetMimetype()
	log.Debugln("key:", key)
	input := &s3manager.UploadInput{
		Bucket: &d.Bucket,
		Key:    &key,
		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
			Reader:         s,
			UpdateProgress: up,
		}),
		ContentType: &contentType,
	}
	// Apply the configured storage class, if any.
	if storageClass := d.resolveStorageClass(); storageClass != nil {
		input.StorageClass = storageClass
	}
	_, err := uploader.UploadWithContext(ctx, input)
	return err
}

var (
	_ driver.Driver = (*S3)(nil)
	_ driver.Other  = (*S3)(nil)
)
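
The `_ driver.Other = (*S3)(nil)` assertion only compiles because the driver's `Other` method is implemented in `other.go`, which is not part of this listing. Below is a minimal sketch of the contract and of how a thaw request might be issued against it, assuming `driver.Other` uses the usual `model.OtherArgs` shape (`Obj`, `Method`, `Data`); the payload field names are guesses, while the `archive`/`thaw` method names come from the commit message.

```go
// Sketch only: the real interface lives in the driver package and, per the
// commit message, the server dispatches these calls as S3Transition tasks.
package s3example

import (
	"context"

	"github.com/alist-org/alist/v3/internal/model"
)

// Other mirrors what the driver.Other contract is assumed to look like.
type Other interface {
	Other(ctx context.Context, args model.OtherArgs) (interface{}, error)
}

// requestThaw asks an Other-capable driver to restore an archived object.
func requestThaw(ctx context.Context, d Other, obj model.Obj) (interface{}, error) {
	return d.Other(ctx, model.OtherArgs{
		Obj:    obj,
		Method: "thaw",
		Data: map[string]any{
			"restore_tier": "Standard", // hypothetical payload fields
			"days":         1,
		},
	})
}
```

Per the commit message, the server does not wait on such a call: the `Other` API handler intercepts `archive`/`thaw` for S3 drivers, queues an `S3TransitionTask`, and returns a task ID the client can poll.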