OpenList/server/common/proxy.go
ILoveScratch febbcd6027 feat(cache): improve cache management (#1339)
* feat(cache): improve cache management

* feat(disk-usage): add cache

* feat(disk-usage): add refresh

* fix(disk-usage): cache with ttl

* feat(cache): implement KeyedCache and TypedCache for improved caching mechanism

* fix(copy): update object retrieval to use Get instead of GetUnwrap

* refactor(cache): simplify DirectoryCache structure and improve object management

* fix(cache): correct cache entry initialization and key deletion logic in TypedCache

* refactor(driver): remove GetObjInfo interface and simplify Link function logic
https://github.com/OpenListTeam/OpenList/pull/888/files#r2430925783

* fix(link): optimize link retrieval and caching logic

* refactor(cache): consolidate cache management and improve directory cache handling

* fix(cache): add cache control based on storage configuration in List function

* .

* refactor: replace fmt.Sprintf with strconv for integer conversions

* refactor(cache): enhance cache entry management with Expirable interface

* fix(cache): improve link reference acquisition logic to handle expiration

* refactor: replace OnlyLinkMFile with NoLinkSF in driver configurations and logic

* refactor(link): enhance link caching logic with dynamic type keys based on IP and User-Agent

* feat(drivers): add LinkCacheType to driver configurations for enhanced caching

* refactor(cache): streamline directory object management in cache operations

* refactor(cache): remove unnecessary 'dirty' field from CacheEntry structure

* refactor(cache): replace 'dirty' field with bitwise flags

* refactor(io): raise the priority of SyncClosers.AcquireReference

* refactor(link): optimize link retrieval logic, add re…

* refactor(link): add a RequireReference field to strengthen link management

* refactor(link): remove the MFile field in favor of RangeReader

* refactor: remove the unnecessary NoLinkSF field

* refactor(cache): revise the directory cache's dirty-flag definition and update logic

* feat(cache): add expiration gc

---------

Co-authored-by: KirCute <951206789@qq.com>
Co-authored-by: KirCute <kircute@foxmail.com>
Co-authored-by: j2rong4cn <j2rong@qq.com>
2025-10-18 21:47:18 +08:00


package common

import (
	"context"
	"fmt"
	"io"
	"maps"
	"net/http"
	"strings"

	"github.com/OpenListTeam/OpenList/v4/internal/conf"
	"github.com/OpenListTeam/OpenList/v4/internal/model"
	"github.com/OpenListTeam/OpenList/v4/internal/net"
	"github.com/OpenListTeam/OpenList/v4/internal/sign"
	"github.com/OpenListTeam/OpenList/v4/internal/stream"
	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
)
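// Proxy serves file to the HTTP client using one of three paths: a RangeReader
// built from the link when concurrency or a part size is requested, the link's
// own RangeReader when present, or a transparent proxy of the upstream URL.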
func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
	// if link.MFile != nil {
	// 	attachHeader(w, file, link)
	// 	http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
	// 	return nil
	// }
	if link.Concurrency > 0 || link.PartSize > 0 {
		attachHeader(w, file, link)
		size := link.ContentLength
		if size <= 0 {
			size = file.GetSize()
		}
		rrf, _ := stream.GetRangeReaderFromLink(size, link)
		if link.RangeReader == nil {
			r = r.WithContext(context.WithValue(r.Context(), conf.RequestHeaderKey, r.Header))
		}
		return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), size, &model.RangeReadCloser{
			RangeReader: rrf,
		})
	}
	if link.RangeReader != nil {
		attachHeader(w, file, link)
		size := link.ContentLength
		if size <= 0 {
			size = file.GetSize()
		}
		return net.ServeHTTP(w, r, file.GetName(), file.ModTime(), size, &model.RangeReadCloser{
			RangeReader: link.RangeReader,
		})
	}
	// transparent proxy
	header := net.ProcessHeader(r.Header, link.Header)
	res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	maps.Copy(w.Header(), res.Header)
	w.WriteHeader(res.StatusCode)
	if r.Method == http.MethodHead {
		return nil
	}
	_, err = utils.CopyWithBuffer(w, &stream.RateLimitReader{
		Reader:  res.Body,
		Limiter: stream.ServerDownloadLimit,
		Ctx:     r.Context(),
	})
	return err
}
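// attachHeader sets the Content-Disposition, Etag and Content-Type response
// headers for file, preferring a Content-Type carried in the link's own headers.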
func attachHeader(w http.ResponseWriter, file model.Obj, link *model.Link) {
	fileName := file.GetName()
	w.Header().Set("Content-Disposition", utils.GenerateContentDisposition(fileName))
	size := link.ContentLength
	if size <= 0 {
		size = file.GetSize()
	}
	w.Header().Set("Etag", GetEtag(file, size))
	contentType := link.Header.Get("Content-Type")
	if len(contentType) > 0 {
		w.Header().Set("Content-Type", contentType)
	} else {
		w.Header().Set("Content-Type", utils.GetMimeType(fileName))
	}
}
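// GetEtag derives an ETag for file: the lexicographically largest exported hash
// when one exists, otherwise an nginx-style "modtime-size" hex fallback.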
func GetEtag(file model.Obj, size int64) string {
hash := ""
for _, v := range file.GetHash().Export() {
if v > hash {
hash = v
}
}
if len(hash) > 0 {
return fmt.Sprintf(`"%s"`, hash)
}
// 参考nginx
return fmt.Sprintf(`"%x-%x"`, file.ModTime().Unix(), size)
}
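// ProxyRange wraps a plain URL link in a RangeReader built via
// stream.GetRangeReaderFromLink, unless the link already has a RangeReader or
// its URL points back at this API; on failure the original link is returned.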
func ProxyRange(ctx context.Context, link *model.Link, size int64) *model.Link {
	if link.RangeReader == nil && !strings.HasPrefix(link.URL, GetApiUrl(ctx)+"/") {
		if link.ContentLength > 0 {
			size = link.ContentLength
		}
		rrf, err := stream.GetRangeReaderFromLink(size, link)
		if err == nil {
			return &model.Link{
				RangeReader:   rrf,
				ContentLength: size,
			}
		}
	}
	return link
}
type InterceptResponseWriter struct {
	http.ResponseWriter
	io.Writer
}

func (iw *InterceptResponseWriter) Write(p []byte) (int, error) {
	return iw.Writer.Write(p)
}
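// WrittenResponseWriter tracks whether any body bytes have been written, so
// callers can tell if the response has already started.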
type WrittenResponseWriter struct {
	http.ResponseWriter
	written bool
}

func (ww *WrittenResponseWriter) Write(p []byte) (int, error) {
	n, err := ww.ResponseWriter.Write(p)
	if !ww.written && n > 0 {
		ww.written = true
	}
	return n, err
}

func (ww *WrittenResponseWriter) IsWritten() bool {
	return ww.written
}
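// GenerateDownProxyURL builds the download-proxy URL for reqPath from the first
// line of the storage's DownProxyURL, appending a sign query unless proxy
// signing is disabled for that storage.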
func GenerateDownProxyURL(storage *model.Storage, reqPath string) string {
	if storage.DownProxyURL == "" {
		return ""
	}
	query := ""
	if !storage.DisableProxySign {
		query = "?sign=" + sign.Sign(reqPath)
	}
	return fmt.Sprintf("%s%s%s",
		strings.Split(storage.DownProxyURL, "\n")[0],
		utils.EncodePath(reqPath, true),
		query,
	)
}