Compare commits
9 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 6919358eab | |
| | 1187f05c77 | |
| | f6f93c86c8 | |
| | 30e804709f | |
| | 56bb1ddc12 | |
| | 9c65cdb156 | |
| | ae013f9a3b | |
| | d94b53c395 | |
| | 847931ed43 | |
@@ -48,7 +48,6 @@ changelog:
release:
  name_template: "{{ .ProjectName }}-{{ .Version }}"
  footer: "Copyright (c) 2025 s1d3sw1ped"

gitea_urls:
  api: https://git.s1d3sw1ped.com/api/v1
.vscode/launch.json (vendored): 8 lines changed
@@ -23,6 +23,8 @@
            "lru",
            "--log-level",
            "debug",
            // "--upstream",
            // "http://192.168.2.5:80",
        ],
    },
    {
@@ -40,6 +42,8 @@
            "hybrid",
            "--log-level",
            "debug",
            // "--upstream",
            // "http://192.168.2.5:80",
        ],
    },
    {
@@ -52,9 +56,11 @@
            "--memory",
            "1G",
            "--memory-gc",
            "lfu",
            "lru",
            "--log-level",
            "debug",
            // "--upstream",
            // "http://192.168.2.5:80",
        ],
    }
]
@@ -2,6 +2,7 @@
package steamcache

import (
	"bufio"
	"context"
	"crypto/sha1"
	"encoding/hex"
@@ -24,6 +25,8 @@ import (
	"sync"
	"time"

	"bytes"

	"github.com/docker/go-units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -146,6 +149,19 @@ func verifyResponseHash(resp *http.Response, bodyData []byte, expectedHash strin
	return strings.EqualFold(actualHash, expectedHash)
}

var hopByHopHeaders = map[string]struct{}{
	"Connection":          {},
	"Keep-Alive":          {},
	"Proxy-Authenticate":  {},
	"Proxy-Authorization": {},
	"TE":                  {},
	"Trailer":             {},
	"Transfer-Encoding":   {},
	"Upgrade":             {},
	"Date":                {},
	"Server":              {},
}

type SteamCache struct {
	address  string
	upstream string
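The `hopByHopHeaders` set added above is applied later in this diff on both the HIT and MISS paths. A minimal, self-contained sketch of that filtering pattern (the `copyHeaders` helper here is illustrative and not part of the repository):

```go
package main

import (
	"fmt"
	"net/http"
)

// Subset of the hop-by-hop set from the diff, enough to show the pattern.
var hopByHopHeaders = map[string]struct{}{
	"Connection":        {},
	"Keep-Alive":        {},
	"Transfer-Encoding": {},
	"Upgrade":           {},
}

// copyHeaders copies src into dst, skipping hop-by-hop headers.
// Keys are canonicalized so "connection" and "Connection" both match.
func copyHeaders(dst, src http.Header) {
	for k, vv := range src {
		if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
			continue
		}
		for _, v := range vv {
			dst.Add(k, v)
		}
	}
}

func main() {
	src := http.Header{}
	src.Set("Content-Type", "application/octet-stream")
	src.Set("Connection", "keep-alive")

	dst := http.Header{}
	copyHeaders(dst, src)
	fmt.Println(dst) // Connection is filtered out, Content-Type survives
}
```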
@@ -222,21 +238,23 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
	}

	transport := &http.Transport{
		MaxIdleConns:        100,
		MaxIdleConnsPerHost: 10,
		IdleConnTimeout:     90 * time.Second,
		MaxIdleConns:        200,               // Increased from 100
		MaxIdleConnsPerHost: 50,                // Increased from 10
		IdleConnTimeout:     120 * time.Second, // Increased from 90s
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout:   10 * time.Second,
		ResponseHeaderTimeout: 10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		TLSHandshakeTimeout:   15 * time.Second, // Increased from 10s
		ResponseHeaderTimeout: 30 * time.Second, // Increased from 10s
		ExpectContinueTimeout: 5 * time.Second,  // Increased from 1s
		DisableCompression:    true,             // Steam doesn't use compression
		ForceAttemptHTTP2:     true,             // Enable HTTP/2 if available
	}

	client := &http.Client{
		Transport: transport,
		Timeout:   60 * time.Second,
		Timeout:   120 * time.Second, // Increased from 60s
	}

	sc := &SteamCache{
@@ -249,10 +267,12 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
		diskgc: dgc,
		client: client,
		server: &http.Server{
			Addr:         address,
			ReadTimeout:  5 * time.Second,
			WriteTimeout: 10 * time.Second,
			IdleTimeout:  120 * time.Second,
			Addr:              address,
			ReadTimeout:       30 * time.Second,  // Increased
			WriteTimeout:      60 * time.Second,  // Increased
			IdleTimeout:       120 * time.Second, // Good for keep-alive
			ReadHeaderTimeout: 10 * time.Second,  // New, for header attacks
			MaxHeaderBytes:    1 << 20,           // 1MB, optional
		},
	}
@@ -266,7 +286,8 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,

	if d != nil {
		if d.Size() > d.Capacity() {
			gc.LRUGC(d, uint(d.Size()-d.Capacity()))
			gcHandler := gc.GetGCAlgorithm(gc.GCAlgorithm(diskGC))
			gcHandler(d, uint(d.Size()-d.Capacity()))
		}
	}
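`gc.GetGCAlgorithm` is referenced above but its body is not part of this changeset. A hedged sketch of how such a name-to-handler lookup could look, using only the `GCAlgorithm`/`GCHandlerFunc` names and the GC functions that do appear in this diff; the switch cases and the LRU fallback are assumptions:

```go
package gc

// Hypothetical sketch, not taken from the diff: map a configured algorithm
// name (e.g. "lru", "hybrid" as seen in launch.json) to one of the handlers
// defined in vfs/gc/gc.go.
func GetGCAlgorithm(name GCAlgorithm) GCHandlerFunc {
	switch name {
	case "lfu":
		return LFUGC
	case "fifo":
		return FIFOGC
	case "largest":
		return LargestGC
	case "smallest":
		return SmallestGC
	case "hybrid":
		return HybridGC
	default:
		return LRUGC // assumed default: least-recently-used
	}
}
```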
@@ -277,7 +298,7 @@ func (sc *SteamCache) Run() {
	if sc.upstream != "" {
		resp, err := sc.client.Get(sc.upstream)
		if err != nil || resp.StatusCode != http.StatusOK {
			logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
			logger.Logger.Error().Err(err).Int("status_code", resp.StatusCode).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
			os.Exit(1)
		}
		resp.Body.Close()
@@ -310,11 +331,6 @@ func (sc *SteamCache) Shutdown() {
}

func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/metrics" {
		promhttp.Handler().ServeHTTP(w, r)
		return
	}

	if r.Method != http.MethodGet {
		requestsTotal.WithLabelValues(r.Method, "405").Inc()
		logger.Logger.Warn().Str("method", r.Method).Msg("Only GET method is supported")
@@ -322,6 +338,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		return
	}

	if r.URL.Path == "/" {
		w.WriteHeader(http.StatusOK) // this is used by steamcache2's upstream verification at startup
		return
	}

	if r.URL.String() == "/lancache-heartbeat" {
		w.Header().Add("X-LanCache-Processed-By", "SteamCache2")
		w.WriteHeader(http.StatusNoContent)
@@ -329,6 +350,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		return
	}

	if r.URL.Path == "/metrics" {
		promhttp.Handler().ServeHTTP(w, r)
		return
	}

	if strings.HasPrefix(r.URL.String(), "/depot/") {
		// trim the query parameters from the URL path
		// this is necessary because the cache key should not include query parameters
@@ -347,25 +373,41 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {

	w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too

	reader, err := sc.vfs.Open(cacheKey)
	cachePath := cacheKey // You may want to add a .http or .cache extension for clarity

	// Try to serve from cache
	file, err := sc.vfs.Open(cachePath)
	if err == nil {
		defer reader.Close()
		w.Header().Add("X-LanCache-Status", "HIT")

		io.Copy(w, reader)

		logger.Logger.Info().
			Str("key", cacheKey).
			Str("host", r.Host).
			Str("status", "HIT").
			Dur("duration", time.Since(tstart)).
			Msg("request")

		requestsTotal.WithLabelValues(r.Method, "200").Inc()
		cacheStatusTotal.WithLabelValues("HIT").Inc()
		responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())

		return
		defer file.Close()
		buf := bufio.NewReader(file)
		resp, err := http.ReadResponse(buf, nil)
		if err == nil {
			// Remove hop-by-hop and server-specific headers
			for k, vv := range resp.Header {
				if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
					continue
				}
				for _, v := range vv {
					w.Header().Add(k, v)
				}
			}
			// Add our own headers
			w.Header().Set("X-LanCache-Status", "HIT")
			w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
			w.WriteHeader(resp.StatusCode)
			io.Copy(w, resp.Body)
			resp.Body.Close()
			logger.Logger.Info().
				Str("key", cacheKey).
				Str("host", r.Host).
				Str("status", "HIT").
				Dur("duration", time.Since(tstart)).
				Msg("request")
			requestsTotal.WithLabelValues(r.Method, "200").Inc()
			cacheStatusTotal.WithLabelValues("HIT").Inc()
			responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
			return
		}
	}

	var req *http.Request
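The rewritten HIT path above stores and replays complete HTTP responses instead of raw bodies. A small standalone illustration (not part of the repository) of the standard-library round-trip it relies on, `(*http.Response).Write` to persist and `http.ReadResponse` to restore:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Build a response as the MISS path would have received it from upstream.
	orig := &http.Response{
		StatusCode:    http.StatusOK,
		Proto:         "HTTP/1.1",
		ProtoMajor:    1,
		ProtoMinor:    1,
		Header:        http.Header{"Content-Type": {"application/octet-stream"}},
		Body:          io.NopCloser(bytes.NewReader([]byte("chunk-data"))),
		ContentLength: int64(len("chunk-data")),
	}

	// "Cache" it: Response.Write emits the status line, headers and body.
	var cached bytes.Buffer
	if err := orig.Write(&cached); err != nil {
		panic(err)
	}

	// "Serve" it later: read the stored bytes back into an *http.Response.
	resp, err := http.ReadResponse(bufio.NewReader(&cached), nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, resp.Header.Get("Content-Type"), string(body))
}
```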
@@ -438,8 +480,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	}
	defer resp.Body.Close()

	size := resp.ContentLength

	// Read the entire response body into memory for hash verification
	bodyData, err := io.ReadAll(resp.Body)
	if err != nil {
@@ -453,18 +493,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	filename := filepath.Base(cacheKey)
	expectedHash, hasHash := extractHashFromFilename(filename)

	// Debug logging for manifest files
	if strings.Contains(cacheKey, "manifest") {
		logger.Logger.Debug().
			Str("key", cacheKey).
			Str("filename", filename).
			Bool("hasHash", hasHash).
			Str("expectedHash", expectedHash).
			Int64("content_length_header", resp.ContentLength).
			Int("actual_content_length", len(bodyData)).
			Msg("Manifest file hash verification debug")
	}

	// Hash verification using Steam's X-Content-Sha header and content length verification
	hashVerified := true
	if hasHash {
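For reference, a self-contained sketch of the kind of check `verifyResponseHash` appears to perform: given the `crypto/sha1` and `encoding/hex` imports and the `strings.EqualFold` comparison shown earlier in this diff, the body is presumably hashed with SHA-1 and compared case-insensitively against the hex digest taken from the filename. The exact helper logic is not in this changeset, so treat this as an assumption:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"strings"
)

// verifyBody mimics the presumed check: SHA-1 the downloaded bytes and compare
// the hex digest case-insensitively with the hash extracted from the filename.
func verifyBody(bodyData []byte, expectedHash string) bool {
	sum := sha1.Sum(bodyData)
	actualHash := hex.EncodeToString(sum[:])
	return strings.EqualFold(actualHash, expectedHash)
}

func main() {
	body := []byte("example chunk")
	sum := sha1.Sum(body)
	expected := strings.ToUpper(hex.EncodeToString(sum[:])) // uppercase, as a filename might carry it

	fmt.Println(verifyBody(body, expected)) // true
}
```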
@@ -502,15 +530,28 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	}

	// Write to response (always serve the file)
	w.Header().Add("X-LanCache-Status", "MISS")
	// Remove hop-by-hop and server-specific headers
	for k, vv := range resp.Header {
		if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
			continue
		}
		for _, v := range vv {
			w.Header().Add(k, v)
		}
	}
	// Add our own headers
	w.Header().Set("X-LanCache-Status", "MISS")
	w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
	w.Write(bodyData)

	// Only cache the file if hash verification passed (or no hash was present)
	if hashVerified {
		writer, _ := sc.vfs.Create(cacheKey, size)
		writer, _ := sc.vfs.Create(cachePath, int64(0)) // size is not known in advance
		if writer != nil {
			defer writer.Close()
			writer.Write(bodyData)
			// Write the full HTTP response to cache
			resp.Body = io.NopCloser(bytes.NewReader(bodyData)) // Reset body for writing
			resp.Write(writer)
		}
	} else {
		logger.Logger.Warn().
@@ -522,7 +563,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		Str("key", cacheKey).
		Str("host", r.Host).
		Str("status", "MISS").
		Bool("hash_verified", hasHash).
		Dur("duration", time.Since(tstart)).
		Msg("request")
@@ -242,10 +242,12 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {

	// Check again after lock
	d.mu.Lock()
	var accessCount int64 = 0
	if fi, exists := d.info[key]; exists {
		d.size -= fi.Size()
		d.LRU.Remove(key)
		delete(d.info, key)
		accessCount = fi.AccessCount // preserve access count if overwriting
		path := filepath.Join(d.root, key)
		os.Remove(path) // Ignore error, as file might not exist or other issues
	}
@@ -274,6 +276,7 @@

	d.mu.Lock()
	finfo := vfs.NewFileInfoFromOS(fi, key)
	finfo.AccessCount = accessCount
	d.info[key] = finfo
	d.LRU.Add(key, finfo)
	d.size += n
@@ -381,6 +384,7 @@ func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
		return nil, vfserror.ErrNotFound
	}
	fi.ATime = time.Now()
	fi.AccessCount++ // Increment access count for LFU
	d.LRU.MoveToFront(key)
	d.mu.Unlock()
@@ -7,27 +7,30 @@ import (
)

type FileInfo struct {
	name  string
	size  int64
	MTime time.Time
	ATime time.Time
	name        string
	size        int64
	MTime       time.Time
	ATime       time.Time
	AccessCount int64 // Number of times the file has been accessed
}

func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
	return &FileInfo{
		name:  key,
		size:  size,
		MTime: modTime,
		ATime: time.Now(),
		name:        key,
		size:        size,
		MTime:       modTime,
		ATime:       time.Now(),
		AccessCount: 0,
	}
}

func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
	return &FileInfo{
		name:  key,
		size:  f.Size(),
		MTime: f.ModTime(),
		ATime: time.Now(),
		name:        key,
		size:        f.Size(),
		MTime:       f.ModTime(),
		ATime:       time.Now(),
		AccessCount: 0,
	}
}
vfs/gc/gc.go: 114 lines changed
@@ -75,12 +75,17 @@ func LRUGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LRU GC")

	var reclaimed uint // reclaimed space in bytes
	deleted := false

	for {
		switch fs := vfss.(type) {
		case *disk.DiskFS:
			fi := fs.LRU.Back()
			if fi == nil {
				if deleted {
					logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
					return nil
				}
				return ErrInsufficientSpace // No files to delete
			}
			sz := uint(fi.Size())
@@ -89,9 +94,14 @@
				continue // If delete fails, try the next file
			}
			reclaimed += sz
			deleted = true
		case *memory.MemoryFS:
			fi := fs.LRU.Back()
			if fi == nil {
				if deleted {
					logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
					return nil
				}
				return ErrInsufficientSpace // No files to delete
			}
			sz := uint(fi.Size())
@@ -100,13 +110,14 @@
				continue // If delete fails, try the next file
			}
			reclaimed += sz
			deleted = true
		default:
			panic("unreachable: unsupported VFS type for LRU GC") // panic if the VFS is not disk or memory
		}

		if reclaimed >= size {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC")
			return nil // stop if enough space is reclaimed
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
			return nil // stop if enough space is reclaimed or at least one file deleted for size==0
		}
	}
}
@@ -115,31 +126,32 @@
func LFUGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LFU GC")

	// Get all files and sort by access count (frequency)
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by access count (ascending - least frequently used first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].AccessCount < files[j].AccessCount
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
			return nil
		}
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC")
	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
@@ -149,31 +161,32 @@
func FIFOGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using FIFO GC")

	// Get all files and sort by creation time (oldest first)
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by creation time (ascending - oldest first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].MTime.Before(files[j].MTime)
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
			return nil
		}
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC")
	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
@@ -183,31 +196,32 @@
func LargestGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Largest GC")

	// Get all files and sort by size (largest first)
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by size (descending - largest first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].Size > files[j].Size
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
			return nil
		}
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC")
	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
@@ -217,31 +231,32 @@
func SmallestGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Smallest GC")

	// Get all files and sort by size (smallest first)
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by size (ascending - smallest first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].Size < files[j].Size
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
			return nil
		}
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC")
	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
@@ -251,14 +266,11 @@
func HybridGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Hybrid GC")

	// Get all files and calculate hybrid scores
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Calculate hybrid scores (lower score = more likely to be evicted)
	// Score = (time since last access in seconds) * (file size in MB)
	now := time.Now()
	for i := range files {
		timeSinceAccess := now.Sub(files[i].ATime).Seconds()
@@ -266,25 +278,27 @@
		files[i].HybridScore = timeSinceAccess * sizeMB
	}

	// Sort by hybrid score (ascending - lowest scores first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].HybridScore < files[j].HybridScore
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
			return nil
		}
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC")
	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
@@ -308,25 +322,23 @@ func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
	case *disk.DiskFS:
		allFiles := fs.StatAll()
		for _, fi := range allFiles {
			// For disk, we can't easily track access count, so we'll use 1 as default
			files = append(files, fileInfoWithMetadata{
				Name:        fi.Name(),
				Size:        fi.Size(),
				MTime:       fi.ModTime(),
				ATime:       fi.AccessTime(),
				AccessCount: 1,
				AccessCount: fi.AccessCount,
			})
		}
	case *memory.MemoryFS:
		allFiles := fs.StatAll()
		for _, fi := range allFiles {
			// For memory, we can't easily track access count, so we'll use 1 as default
			files = append(files, fileInfoWithMetadata{
				Name:        fi.Name(),
				Size:        fi.Size(),
				MTime:       fi.ModTime(),
				ATime:       fi.AccessTime(),
				AccessCount: 1,
				AccessCount: fi.AccessCount,
			})
		}
	}
@@ -705,13 +717,17 @@ func New(vfs vfs.VFS, gcHandlerFunc GCHandlerFunc) *GCFS {

// Create overrides the Create method of the VFS interface. It tries to create the key, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error.
func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
	w, err := g.VFS.Create(key, size) // try to create the key
	for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
		errr := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
		if errr == ErrInsufficientSpace {
			return nil, errr // if the GC handler returns no files to delete, return the error
	w, err := g.VFS.Create(key, size) // try to create the key
	for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil {
		errGC := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
		if errGC == ErrInsufficientSpace {
			return nil, errGC // if the GC handler returns no files to delete, return the error
		}
		w, err = g.VFS.Create(key, size)
		if err == vfserror.ErrDiskFull {
			// GC handler did not free enough space, avoid infinite loop
			return nil, ErrInsufficientSpace
		}
	}

	if err != nil {
@@ -169,11 +169,14 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
		onClose: func() error {
			data := buf.Bytes()
			m.mu.Lock()
			var accessCount int64 = 0
			if f, exists := m.files[key]; exists {
				m.size -= int64(len(f.data))
				m.LRU.Remove(key)
				accessCount = f.fileinfo.AccessCount // preserve access count if overwriting
			}
			fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
			fi.AccessCount = accessCount
			m.files[key] = &file{
				fileinfo: fi,
				data:     data,
@@ -232,6 +235,7 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
		return nil, vfserror.ErrNotFound
	}
	f.fileinfo.ATime = time.Now()
	f.fileinfo.AccessCount++ // Increment access count for LFU
	m.LRU.MoveToFront(key)
	dataCopy := make([]byte, len(f.data))
	copy(dataCopy, f.data)