5 Commits

6919358eab Enhance file metadata tracking and garbage collection logic
- Added AccessCount field to FileInfo struct for improved tracking of file access frequency.
- Updated NewFileInfo and NewFileInfoFromOS functions to initialize AccessCount.
- Modified DiskFS and MemoryFS to preserve and increment AccessCount during file operations.
- Enhanced garbage collection methods (LRU, LFU, FIFO, Largest, Smallest, Hybrid) to utilize AccessCount for more effective space reclamation.
2025-07-19 09:07:49 -05:00
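For context on the GC bullets above: with a real per-file access count, LFU eviction reduces to sorting candidates by ascending AccessCount and deleting from the front. A minimal, self-contained sketch of that ordering (fileMeta and its values are hypothetical stand-ins for the repo's metadata type):

package main

import (
	"fmt"
	"sort"
)

// fileMeta is a hypothetical stand-in for the repo's per-file metadata.
type fileMeta struct {
	Name        string
	Size        int64
	AccessCount int64
}

func main() {
	files := []fileMeta{
		{Name: "depot/a", Size: 4 << 20, AccessCount: 12},
		{Name: "depot/b", Size: 8 << 20, AccessCount: 1},
		{Name: "depot/c", Size: 2 << 20, AccessCount: 5},
	}
	// Least frequently used first: eviction candidates sit at the front.
	sort.Slice(files, func(i, j int) bool {
		return files[i].AccessCount < files[j].AccessCount
	})
	fmt.Println("evict first:", files[0].Name) // depot/b
}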
1187f05c77 revert 30e804709f
revert Enhance FileInfo structure and DiskFS functionality

- Added CTime (creation time) and AccessCount fields to FileInfo struct for better file metadata tracking.
- Updated NewFileInfo and NewFileInfoFromOS functions to initialize new fields.
- Enhanced DiskFS to maintain access counts and file metadata, including flushing to JSON files.
- Modified Open and Create methods to increment access counts and set creation times appropriately.
- Updated garbage collection logic to utilize real access counts for files.
2025-07-19 14:02:53 +00:00
f6f93c86c8 Update launch.json to modify memory-gc strategy and comment out upstream server configuration
- Changed memory-gc strategy from 'lfu' to 'lru' for improved cache management.
- Commented out the upstream server configuration to prevent potential connectivity issues during development.
2025-07-19 08:07:36 -05:00
30e804709f Enhance FileInfo structure and DiskFS functionality
- Added CTime (creation time) and AccessCount fields to FileInfo struct for better file metadata tracking.
- Updated NewFileInfo and NewFileInfoFromOS functions to initialize new fields.
- Enhanced DiskFS to maintain access counts and file metadata, including flushing to JSON files.
- Modified Open and Create methods to increment access counts and set creation times appropriately.
- Updated garbage collection logic to utilize real access counts for files.
2025-07-19 05:29:18 -05:00
56bb1ddc12 Add hop-by-hop header handling in ServeHTTP method
- Introduced a map for hop-by-hop headers to be removed from responses.
- Enhanced cache serving logic to read and filter HTTP responses, ensuring only relevant headers are forwarded.
- Updated cache writing to handle full HTTP responses, improving cache integrity and performance.
2025-07-19 05:07:36 -05:00
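The header handling described in this commit amounts to a set lookup while copying headers from an upstream response. A minimal sketch of the same pattern, using only the RFC 7230 hop-by-hop set (the repo's map additionally drops Date and Server as server-specific, so the cache can present its own values):

package main

import (
	"fmt"
	"net/http"
)

// Hop-by-hop headers (RFC 7230 §6.1) must not be forwarded by a proxy.
var hopByHop = map[string]struct{}{
	"Connection":          {},
	"Keep-Alive":          {},
	"Proxy-Authenticate":  {},
	"Proxy-Authorization": {},
	"TE":                  {},
	"Trailer":             {},
	"Transfer-Encoding":   {},
	"Upgrade":             {},
}

// copyEndToEnd copies only forwardable (end-to-end) headers from src to dst.
func copyEndToEnd(dst, src http.Header) {
	for k, vv := range src {
		if _, skip := hopByHop[http.CanonicalHeaderKey(k)]; skip {
			continue
		}
		for _, v := range vv {
			dst.Add(k, v)
		}
	}
}

func main() {
	src := http.Header{
		"Content-Type": {"application/octet-stream"},
		"Connection":   {"keep-alive"},
	}
	dst := http.Header{}
	copyEndToEnd(dst, src)
	fmt.Println(dst) // only Content-Type survives
}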
6 changed files with 161 additions and 91 deletions

.vscode/launch.json (vendored)

@@ -23,8 +23,8 @@
         "lru",
         "--log-level",
         "debug",
-        "--upstream",
-        "http://192.168.2.5:80",
+        // "--upstream",
+        // "http://192.168.2.5:80",
       ],
     },
     {
@@ -42,8 +42,8 @@
         "hybrid",
         "--log-level",
         "debug",
-        "--upstream",
-        "http://192.168.2.5:80",
+        // "--upstream",
+        // "http://192.168.2.5:80",
       ],
     },
     {
@@ -56,11 +56,11 @@
         "--memory",
         "1G",
         "--memory-gc",
-        "lfu",
+        "lru",
         "--log-level",
         "debug",
-        "--upstream",
-        "http://192.168.2.5:80",
+        // "--upstream",
+        // "http://192.168.2.5:80",
       ],
     }
   ]


@@ -2,6 +2,7 @@
 package steamcache
 
 import (
+	"bufio"
 	"context"
 	"crypto/sha1"
 	"encoding/hex"
@@ -24,6 +25,8 @@ import (
 	"sync"
 	"time"
 
+	"bytes"
+
 	"github.com/docker/go-units"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -146,6 +149,19 @@ func verifyResponseHash(resp *http.Response, bodyData []byte, expectedHash string) bool {
 	return strings.EqualFold(actualHash, expectedHash)
 }
 
+var hopByHopHeaders = map[string]struct{}{
+	"Connection":          {},
+	"Keep-Alive":          {},
+	"Proxy-Authenticate":  {},
+	"Proxy-Authorization": {},
+	"TE":                  {},
+	"Trailer":             {},
+	"Transfer-Encoding":   {},
+	"Upgrade":             {},
+	"Date":                {},
+	"Server":              {},
+}
+
 type SteamCache struct {
 	address  string
 	upstream string
@@ -357,26 +373,42 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
 
-	reader, err := sc.vfs.Open(cacheKey)
+	cachePath := cacheKey // You may want to add a .http or .cache extension for clarity
+
+	// Try to serve from cache
+	file, err := sc.vfs.Open(cachePath)
 	if err == nil {
-		defer reader.Close()
-		w.Header().Add("X-LanCache-Status", "HIT")
-
-		io.Copy(w, reader)
+		defer file.Close()
+		buf := bufio.NewReader(file)
+		resp, err := http.ReadResponse(buf, nil)
+		if err == nil {
+			// Remove hop-by-hop and server-specific headers
+			for k, vv := range resp.Header {
+				if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
+					continue
+				}
+				for _, v := range vv {
+					w.Header().Add(k, v)
+				}
+			}
+			// Add our own headers
+			w.Header().Set("X-LanCache-Status", "HIT")
+			w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
+			w.WriteHeader(resp.StatusCode)
+			io.Copy(w, resp.Body)
+			resp.Body.Close()
 
 			logger.Logger.Info().
 				Str("key", cacheKey).
 				Str("host", r.Host).
 				Str("status", "HIT").
 				Dur("duration", time.Since(tstart)).
 				Msg("request")
 			requestsTotal.WithLabelValues(r.Method, "200").Inc()
 			cacheStatusTotal.WithLabelValues("HIT").Inc()
 			responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
 			return
 		}
+	}
 
 	var req *http.Request
 	if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
@@ -448,8 +480,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	defer resp.Body.Close()
 
-	size := resp.ContentLength
-
 	// Read the entire response body into memory for hash verification
 	bodyData, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -500,15 +530,28 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Write to response (always serve the file)
-	w.Header().Add("X-LanCache-Status", "MISS")
+	// Remove hop-by-hop and server-specific headers
+	for k, vv := range resp.Header {
+		if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
+			continue
+		}
+		for _, v := range vv {
+			w.Header().Add(k, v)
+		}
+	}
+	// Add our own headers
+	w.Header().Set("X-LanCache-Status", "MISS")
+	w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
 	w.Write(bodyData)
 
 	// Only cache the file if hash verification passed (or no hash was present)
 	if hashVerified {
-		writer, _ := sc.vfs.Create(cacheKey, size)
+		writer, _ := sc.vfs.Create(cachePath, int64(0)) // size is not known in advance
 		if writer != nil {
 			defer writer.Close()
-			writer.Write(bodyData)
+			// Write the full HTTP response to cache
+			resp.Body = io.NopCloser(bytes.NewReader(bodyData)) // Reset body for writing
+			resp.Write(writer)
 		}
 	} else {
 		logger.Logger.Warn().

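The round-trip this diff relies on is standard library behavior: the MISS path persists the entire HTTP response with (*http.Response).Write, and the HIT path replays it through http.ReadResponse. A minimal sketch of that round-trip, with hypothetical header and body values:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	body := "chunk-data" // hypothetical body
	// A response shaped like one an upstream might return.
	resp := &http.Response{
		StatusCode:    http.StatusOK,
		Proto:         "HTTP/1.1",
		ProtoMajor:    1,
		ProtoMinor:    1,
		Header:        http.Header{"Content-Type": {"application/octet-stream"}},
		Body:          io.NopCloser(strings.NewReader(body)),
		ContentLength: int64(len(body)),
	}

	// Serialize status line, headers, and body, as the cache writer does.
	var cached bytes.Buffer
	if err := resp.Write(&cached); err != nil {
		panic(err)
	}

	// Parse it back the way the HIT path does.
	rt, err := http.ReadResponse(bufio.NewReader(&cached), nil)
	if err != nil {
		panic(err)
	}
	defer rt.Body.Close()
	data, _ := io.ReadAll(rt.Body)
	fmt.Println(rt.StatusCode, string(data)) // 200 chunk-data
}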

@@ -242,10 +242,12 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
 
 	// Check again after lock
 	d.mu.Lock()
+	var accessCount int64 = 0
 	if fi, exists := d.info[key]; exists {
 		d.size -= fi.Size()
 		d.LRU.Remove(key)
 		delete(d.info, key)
+		accessCount = fi.AccessCount // preserve access count if overwriting
 		path := filepath.Join(d.root, key)
 		os.Remove(path) // Ignore error, as file might not exist or other issues
 	}
@@ -274,6 +276,7 @@
 
 	d.mu.Lock()
 	finfo := vfs.NewFileInfoFromOS(fi, key)
+	finfo.AccessCount = accessCount
 	d.info[key] = finfo
 	d.LRU.Add(key, finfo)
 	d.size += n
@@ -381,6 +384,7 @@
 		return nil, vfserror.ErrNotFound
 	}
 	fi.ATime = time.Now()
+	fi.AccessCount++ // Increment access count for LFU
 	d.LRU.MoveToFront(key)
 	d.mu.Unlock()


@@ -11,6 +11,7 @@ type FileInfo struct {
 	size  int64
 	MTime time.Time
 	ATime time.Time
+	AccessCount int64 // Number of times the file has been accessed
 }
 
 func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
@@ -19,6 +20,7 @@ func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
 		size:  size,
 		MTime: modTime,
 		ATime: time.Now(),
+		AccessCount: 0,
 	}
 }
 
@@ -28,6 +30,7 @@ func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
 		size:  f.Size(),
 		MTime: f.ModTime(),
 		ATime: time.Now(),
+		AccessCount: 0,
 	}
 }


@@ -75,12 +75,17 @@ func LRUGC(vfss vfs.VFS, size uint) error {
 	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LRU GC")
 
 	var reclaimed uint // reclaimed space in bytes
+	deleted := false
 
 	for {
 		switch fs := vfss.(type) {
 		case *disk.DiskFS:
 			fi := fs.LRU.Back()
 			if fi == nil {
+				if deleted {
+					logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
+					return nil
+				}
 				return ErrInsufficientSpace // No files to delete
 			}
 			sz := uint(fi.Size())
@@ -89,9 +94,14 @@ func LRUGC(vfss vfs.VFS, size uint) error {
 				continue // If delete fails, try the next file
 			}
 			reclaimed += sz
+			deleted = true
 		case *memory.MemoryFS:
 			fi := fs.LRU.Back()
 			if fi == nil {
+				if deleted {
+					logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
+					return nil
+				}
 				return ErrInsufficientSpace // No files to delete
 			}
 			sz := uint(fi.Size())
@@ -100,13 +110,14 @@ func LRUGC(vfss vfs.VFS, size uint) error {
 				continue // If delete fails, try the next file
 			}
 			reclaimed += sz
+			deleted = true
 		default:
 			panic("unreachable: unsupported VFS type for LRU GC") // panic if the VFS is not disk or memory
 		}
 
-		if reclaimed >= size {
-			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC")
-			return nil // stop if enough space is reclaimed
+		if deleted && (size == 0 || reclaimed >= size) {
+			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
+			return nil // stop if enough space is reclaimed or at least one file deleted for size==0
 		}
 	}
 }
@@ -115,31 +126,32 @@ func LRUGC(vfss vfs.VFS, size uint) error {
 func LFUGC(vfss vfs.VFS, size uint) error {
 	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LFU GC")
 
-	// Get all files and sort by access count (frequency)
 	files := getAllFiles(vfss)
 	if len(files) == 0 {
 		return ErrInsufficientSpace
 	}
 
-	// Sort by access count (ascending - least frequently used first)
 	sort.Slice(files, func(i, j int) bool {
 		return files[i].AccessCount < files[j].AccessCount
 	})
 
 	var reclaimed uint
+	deleted := false
 	for _, fi := range files {
-		if reclaimed >= size {
-			break
-		}
 		err := vfss.Delete(fi.Name)
 		if err != nil {
 			continue
 		}
 		reclaimed += uint(fi.Size)
+		deleted = true
+		if deleted && (size == 0 || reclaimed >= size) {
+			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
+			return nil
+		}
 	}
 
-	if reclaimed >= size {
-		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC")
+	if deleted {
+		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
 		return nil
 	}
 	return ErrInsufficientSpace
@@ -149,31 +161,32 @@ func LFUGC(vfss vfs.VFS, size uint) error {
 func FIFOGC(vfss vfs.VFS, size uint) error {
 	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using FIFO GC")
 
-	// Get all files and sort by creation time (oldest first)
 	files := getAllFiles(vfss)
 	if len(files) == 0 {
 		return ErrInsufficientSpace
 	}
 
-	// Sort by creation time (ascending - oldest first)
 	sort.Slice(files, func(i, j int) bool {
 		return files[i].MTime.Before(files[j].MTime)
 	})
 
 	var reclaimed uint
+	deleted := false
 	for _, fi := range files {
-		if reclaimed >= size {
-			break
-		}
 		err := vfss.Delete(fi.Name)
 		if err != nil {
 			continue
 		}
 		reclaimed += uint(fi.Size)
+		deleted = true
+		if deleted && (size == 0 || reclaimed >= size) {
+			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
+			return nil
+		}
 	}
 
-	if reclaimed >= size {
-		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC")
+	if deleted {
+		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
 		return nil
 	}
 	return ErrInsufficientSpace
@@ -183,31 +196,32 @@ func FIFOGC(vfss vfs.VFS, size uint) error {
 func LargestGC(vfss vfs.VFS, size uint) error {
 	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Largest GC")
 
-	// Get all files and sort by size (largest first)
 	files := getAllFiles(vfss)
 	if len(files) == 0 {
 		return ErrInsufficientSpace
 	}
 
-	// Sort by size (descending - largest first)
 	sort.Slice(files, func(i, j int) bool {
 		return files[i].Size > files[j].Size
 	})
 
 	var reclaimed uint
+	deleted := false
 	for _, fi := range files {
-		if reclaimed >= size {
-			break
-		}
 		err := vfss.Delete(fi.Name)
 		if err != nil {
 			continue
 		}
 		reclaimed += uint(fi.Size)
+		deleted = true
+		if deleted && (size == 0 || reclaimed >= size) {
+			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
+			return nil
+		}
 	}
 
-	if reclaimed >= size {
-		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC")
+	if deleted {
+		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
 		return nil
 	}
 	return ErrInsufficientSpace
@@ -217,31 +231,32 @@ func LargestGC(vfss vfs.VFS, size uint) error {
 func SmallestGC(vfss vfs.VFS, size uint) error {
 	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Smallest GC")
 
-	// Get all files and sort by size (smallest first)
 	files := getAllFiles(vfss)
 	if len(files) == 0 {
 		return ErrInsufficientSpace
 	}
 
-	// Sort by size (ascending - smallest first)
 	sort.Slice(files, func(i, j int) bool {
 		return files[i].Size < files[j].Size
 	})
 
 	var reclaimed uint
+	deleted := false
 	for _, fi := range files {
-		if reclaimed >= size {
-			break
-		}
 		err := vfss.Delete(fi.Name)
 		if err != nil {
 			continue
 		}
 		reclaimed += uint(fi.Size)
+		deleted = true
+		if deleted && (size == 0 || reclaimed >= size) {
+			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
+			return nil
+		}
 	}
 
-	if reclaimed >= size {
-		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC")
+	if deleted {
+		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
 		return nil
 	}
 	return ErrInsufficientSpace
@@ -251,14 +266,11 @@ func SmallestGC(vfss vfs.VFS, size uint) error {
 func HybridGC(vfss vfs.VFS, size uint) error {
 	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Hybrid GC")
 
-	// Get all files and calculate hybrid scores
 	files := getAllFiles(vfss)
 	if len(files) == 0 {
 		return ErrInsufficientSpace
 	}
 
-	// Calculate hybrid scores (lower score = more likely to be evicted)
-	// Score = (time since last access in seconds) * (file size in MB)
 	now := time.Now()
 	for i := range files {
 		timeSinceAccess := now.Sub(files[i].ATime).Seconds()
@@ -266,25 +278,27 @@ func HybridGC(vfss vfs.VFS, size uint) error {
 		files[i].HybridScore = timeSinceAccess * sizeMB
 	}
 
-	// Sort by hybrid score (ascending - lowest scores first)
 	sort.Slice(files, func(i, j int) bool {
 		return files[i].HybridScore < files[j].HybridScore
 	})
 
 	var reclaimed uint
+	deleted := false
 	for _, fi := range files {
-		if reclaimed >= size {
-			break
-		}
 		err := vfss.Delete(fi.Name)
 		if err != nil {
 			continue
 		}
 		reclaimed += uint(fi.Size)
+		deleted = true
+		if deleted && (size == 0 || reclaimed >= size) {
+			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
+			return nil
+		}
 	}
 
-	if reclaimed >= size {
-		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC")
+	if deleted {
+		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
 		return nil
 	}
 	return ErrInsufficientSpace
@@ -308,25 +322,23 @@ func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
 	case *disk.DiskFS:
 		allFiles := fs.StatAll()
 		for _, fi := range allFiles {
-			// For disk, we can't easily track access count, so we'll use 1 as default
 			files = append(files, fileInfoWithMetadata{
 				Name:        fi.Name(),
 				Size:        fi.Size(),
 				MTime:       fi.ModTime(),
 				ATime:       fi.AccessTime(),
-				AccessCount: 1,
+				AccessCount: fi.AccessCount,
 			})
 		}
 	case *memory.MemoryFS:
 		allFiles := fs.StatAll()
 		for _, fi := range allFiles {
-			// For memory, we can't easily track access count, so we'll use 1 as default
 			files = append(files, fileInfoWithMetadata{
 				Name:        fi.Name(),
 				Size:        fi.Size(),
 				MTime:       fi.ModTime(),
 				ATime:       fi.AccessTime(),
-				AccessCount: 1,
+				AccessCount: fi.AccessCount,
 			})
 		}
 	}
@@ -706,12 +718,16 @@ func New(vfs vfs.VFS, gcHandlerFunc GCHandlerFunc) *GCFS {
 // Create overrides the Create method of the VFS interface. It tries to create the key, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error.
 func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
 	w, err := g.VFS.Create(key, size) // try to create the key
-	for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
-		errr := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
-		if errr == ErrInsufficientSpace {
-			return nil, errr // if the GC handler returns no files to delete, return the error
+	for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil {
+		errGC := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
+		if errGC == ErrInsufficientSpace {
+			return nil, errGC // if the GC handler returns no files to delete, return the error
 		}
 		w, err = g.VFS.Create(key, size)
+		if err == vfserror.ErrDiskFull {
+			// GC handler did not free enough space, avoid infinite loop
+			return nil, ErrInsufficientSpace
+		}
 	}
 
 	if err != nil {

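For reference, the hybrid score computed above is (seconds since last access) × (file size in MB), and candidates are deleted in ascending score order, mirroring the sort in the diff. A worked sketch of the arithmetic with hypothetical files and access times:

package main

import (
	"fmt"
	"sort"
	"time"
)

// fileMeta is a hypothetical stand-in for fileInfoWithMetadata.
type fileMeta struct {
	Name        string
	Size        int64
	ATime       time.Time
	HybridScore float64
}

func main() {
	now := time.Now()
	files := []fileMeta{
		{Name: "big-recent", Size: 512 << 20, ATime: now.Add(-time.Minute)},
		{Name: "small-stale", Size: 1 << 20, ATime: now.Add(-48 * time.Hour)},
		{Name: "mid", Size: 64 << 20, ATime: now.Add(-2 * time.Hour)},
	}
	// Score = seconds since last access * size in MB.
	for i := range files {
		age := now.Sub(files[i].ATime).Seconds()
		sizeMB := float64(files[i].Size) / (1024 * 1024)
		files[i].HybridScore = age * sizeMB
	}
	// Lowest score first: that is the deletion order used by HybridGC.
	sort.Slice(files, func(i, j int) bool {
		return files[i].HybridScore < files[j].HybridScore
	})
	for _, f := range files {
		fmt.Printf("%-12s score=%.0f\n", f.Name, f.HybridScore)
	}
	// Output order: big-recent (~30720), small-stale (~172800), mid (~460800).
}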

@@ -169,11 +169,14 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
 		onClose: func() error {
 			data := buf.Bytes()
 			m.mu.Lock()
+			var accessCount int64 = 0
 			if f, exists := m.files[key]; exists {
 				m.size -= int64(len(f.data))
 				m.LRU.Remove(key)
+				accessCount = f.fileinfo.AccessCount // preserve access count if overwriting
 			}
 			fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
+			fi.AccessCount = accessCount
 			m.files[key] = &file{
 				fileinfo: fi,
 				data:     data,
@@ -232,6 +235,7 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
 		return nil, vfserror.ErrNotFound
 	}
 	f.fileinfo.ATime = time.Now()
+	f.fileinfo.AccessCount++ // Increment access count for LFU
 	m.LRU.MoveToFront(key)
 	dataCopy := make([]byte, len(f.data))
 	copy(dataCopy, f.data)
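One rule shared by the DiskFS and MemoryFS diffs above: overwriting a key preserves its accumulated access count, so LFU statistics survive a cache refresh. A minimal map-based sketch of that rule (the store type and method names here are illustrative, not the repo's API):

package main

import "fmt"

// entry pairs cached data with its LFU counter.
type entry struct {
	data        []byte
	accessCount int64
}

type store struct{ files map[string]*entry }

// open returns the data and bumps the access counter, as Open does in the diffs.
func (s *store) open(key string) []byte {
	e, ok := s.files[key]
	if !ok {
		return nil
	}
	e.accessCount++
	return e.data
}

// create overwrites the entry but keeps the old counter, mirroring the
// "preserve access count if overwriting" lines in both diffs.
func (s *store) create(key string, data []byte) {
	var count int64
	if old, ok := s.files[key]; ok {
		count = old.accessCount
	}
	s.files[key] = &entry{data: data, accessCount: count}
}

func main() {
	s := &store{files: map[string]*entry{}}
	s.create("k", []byte("v1"))
	s.open("k")
	s.open("k")
	s.create("k", []byte("v2"))              // overwrite keeps the count
	fmt.Println(s.files["k"].accessCount)    // 2
}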