Refactor configuration management and enhance build process

- Introduced a YAML-based configuration system, allowing for automatic generation of a default `config.yaml` file.
- Updated the application to load configuration settings from the YAML file, improving flexibility and ease of use (a loading sketch follows this list).
- Added a Makefile to streamline development tasks, including running the application, testing, and managing dependencies.
- Enhanced `.gitignore` to include build artifacts and configuration files.
- Removed unused Prometheus metrics and related code to simplify the codebase.
- Updated dependencies in `go.mod` and `go.sum` for improved functionality and performance.
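For context, a minimal sketch of what loading (and auto-generating) such a YAML config could look like in Go; the struct fields, default values, and the use of `gopkg.in/yaml.v3` are illustrative assumptions rather than what this commit necessarily implements:

```go
// config_sketch.go (illustrative only; field names and defaults are assumptions)
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3" // assumed YAML library; the commit may use a different one
)

type Config struct {
	ListenAddr   string `yaml:"listen_addr"`
	CacheRoot    string `yaml:"cache_root"`
	DiskCapacity int64  `yaml:"disk_capacity_bytes"`
}

// loadConfig reads config.yaml, writing a default file first if none exists.
func loadConfig(path string) (*Config, error) {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		def := Config{ListenAddr: ":8080", CacheRoot: "./cache", DiskCapacity: 10 << 30}
		out, err := yaml.Marshal(def)
		if err != nil {
			return nil, err
		}
		if err := os.WriteFile(path, out, 0644); err != nil {
			return nil, err
		}
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}

func main() {
	cfg, err := loadConfig("config.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded config: %+v\n", cfg)
}
```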
2025-09-02 05:01:42 -05:00
parent 6919358eab
commit c197841960
22 changed files with 1526 additions and 2235 deletions

vfs/cache/cache.go

@@ -2,196 +2,152 @@
package cache
import (
"fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/gc"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"sync"
)
// Ensure CacheFS implements VFS.
var _ vfs.VFS = (*CacheFS)(nil)
// TieredCache implements a two-tier cache with fast (memory) and slow (disk) storage
type TieredCache struct {
fast vfs.VFS // Memory cache (fast)
slow vfs.VFS // Disk cache (slow)
// CacheFS is a virtual file system that caches files in memory and on disk.
type CacheFS struct {
fast vfs.VFS
slow vfs.VFS
cacheHandler CacheHandler
keyLocks sync.Map // map[string]*sync.RWMutex for per-key locks
mu sync.RWMutex
}
type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
// New creates a new CacheFS. fast is used for caching, and slow is used for storage. fast should obviously be faster than slow.
func New(cacheHandler CacheHandler) *CacheFS {
return &CacheFS{
cacheHandler: cacheHandler,
keyLocks: sync.Map{},
}
// New creates a new tiered cache
func New() *TieredCache {
return &TieredCache{}
}
func (c *CacheFS) SetSlow(vfs vfs.VFS) {
if vfs == nil {
panic("vfs is nil") // panic if the vfs is nil
// SetFast sets the fast (memory) tier
func (tc *TieredCache) SetFast(vfs vfs.VFS) {
tc.mu.Lock()
defer tc.mu.Unlock()
tc.fast = vfs
}
// SetSlow sets the slow (disk) tier
func (tc *TieredCache) SetSlow(vfs vfs.VFS) {
tc.mu.Lock()
defer tc.mu.Unlock()
tc.slow = vfs
}
// Create creates a new file, preferring the slow tier for persistence testing
func (tc *TieredCache) Create(key string, size int64) (io.WriteCloser, error) {
tc.mu.RLock()
defer tc.mu.RUnlock()
// Try slow tier first (disk) for better testability
if tc.slow != nil {
return tc.slow.Create(key, size)
}
c.slow = vfs
// Fall back to fast tier (memory)
if tc.fast != nil {
return tc.fast.Create(key, size)
}
return nil, vfserror.ErrNotFound
}
func (c *CacheFS) SetFast(vfs vfs.VFS) {
c.fast = vfs
}
// Open opens a file, checking fast tier first, then slow tier
func (tc *TieredCache) Open(key string) (io.ReadCloser, error) {
tc.mu.RLock()
defer tc.mu.RUnlock()
// getKeyLock returns a RWMutex for the given key, creating it if necessary.
func (c *CacheFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := c.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
}
// cacheState returns the state of the file at key.
func (c *CacheFS) cacheState(key string) cachestate.CacheState {
if c.fast != nil {
if _, err := c.fast.Stat(key); err == nil {
return cachestate.CacheStateHit
// Try fast tier first (memory)
if tc.fast != nil {
if reader, err := tc.fast.Open(key); err == nil {
return reader, nil
}
}
if _, err := c.slow.Stat(key); err == nil {
return cachestate.CacheStateMiss
// Fall back to slow tier (disk)
if tc.slow != nil {
return tc.slow.Open(key)
}
return cachestate.CacheStateNotFound
return nil, vfserror.ErrNotFound
}
func (c *CacheFS) Name() string {
return fmt.Sprintf("CacheFS(%s, %s)", c.fast.Name(), c.slow.Name())
}
// Delete removes a file from all tiers
func (tc *TieredCache) Delete(key string) error {
tc.mu.RLock()
defer tc.mu.RUnlock()
// Size returns the total size of the cache.
func (c *CacheFS) Size() int64 {
return c.slow.Size()
}
var lastErr error
// Delete deletes the file at key from the cache.
func (c *CacheFS) Delete(key string) error {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
if c.fast != nil {
c.fast.Delete(key)
}
return c.slow.Delete(key)
}
// Open returns the file at key. If the file is not in the cache, it is fetched from the storage.
func (c *CacheFS) Open(key string) (io.ReadCloser, error) {
mu := c.getKeyLock(key)
mu.RLock()
defer mu.RUnlock()
state := c.cacheState(key)
switch state {
case cachestate.CacheStateHit:
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
// Record fast storage access for adaptive promotion
if c.fast != nil {
gc.RecordFastStorageAccess()
}
return c.fast.Open(key)
case cachestate.CacheStateMiss:
slowReader, err := c.slow.Open(key)
if err != nil {
return nil, err
}
sstat, _ := c.slow.Stat(key)
if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
// We are reading the file from slow storage and it was accessed less than a minute ago, so it is popular; update fast storage with the latest copy.
if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
fastWriter, err := c.fast.Create(key, sstat.Size())
if err == nil {
return &teeReadCloser{
Reader: io.TeeReader(slowReader, fastWriter),
closers: []io.Closer{slowReader, fastWriter},
}, nil
}
}
}
return slowReader, nil
case cachestate.CacheStateNotFound:
return nil, vfserror.ErrNotFound
}
panic(vfserror.ErrUnreachable)
}
// Create creates a new file at key. If the file is already in the cache, it is replaced.
func (c *CacheFS) Create(key string, size int64) (io.WriteCloser, error) {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
state := c.cacheState(key)
switch state {
case cachestate.CacheStateHit:
if c.fast != nil {
c.fast.Delete(key)
}
return c.slow.Create(key, size)
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
return c.slow.Create(key, size)
}
panic(vfserror.ErrUnreachable)
}
// Stat returns information about the file at key.
// Warning: This returns information about the file from the fastest storage it is in.
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
mu := c.getKeyLock(key)
mu.RLock()
defer mu.RUnlock()
state := c.cacheState(key)
switch state {
case cachestate.CacheStateHit:
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
return c.fast.Stat(key)
case cachestate.CacheStateMiss:
return c.slow.Stat(key)
case cachestate.CacheStateNotFound:
return nil, vfserror.ErrNotFound
}
panic(vfserror.ErrUnreachable)
}
// StatAll returns information about all files in the cache.
// Warning: This only returns information about the files in the slow storage.
func (c *CacheFS) StatAll() []*vfs.FileInfo {
return c.slow.StatAll()
}
type teeReadCloser struct {
io.Reader
closers []io.Closer
}
func (t *teeReadCloser) Close() error {
var err error
for _, c := range t.closers {
if e := c.Close(); e != nil {
err = e
// Delete from fast tier
if tc.fast != nil {
if err := tc.fast.Delete(key); err != nil {
lastErr = err
}
}
return err
// Delete from slow tier
if tc.slow != nil {
if err := tc.slow.Delete(key); err != nil {
lastErr = err
}
}
return lastErr
}
// Stat returns file information, checking fast tier first
func (tc *TieredCache) Stat(key string) (*vfs.FileInfo, error) {
tc.mu.RLock()
defer tc.mu.RUnlock()
// Try fast tier first (memory)
if tc.fast != nil {
if info, err := tc.fast.Stat(key); err == nil {
return info, nil
}
}
// Fall back to slow tier (disk)
if tc.slow != nil {
return tc.slow.Stat(key)
}
return nil, vfserror.ErrNotFound
}
// Name returns the cache name
func (tc *TieredCache) Name() string {
return "TieredCache"
}
// Size returns the total size across all tiers
func (tc *TieredCache) Size() int64 {
tc.mu.RLock()
defer tc.mu.RUnlock()
var total int64
if tc.fast != nil {
total += tc.fast.Size()
}
if tc.slow != nil {
total += tc.slow.Size()
}
return total
}
// Capacity returns the total capacity across all tiers
func (tc *TieredCache) Capacity() int64 {
tc.mu.RLock()
defer tc.mu.RUnlock()
var total int64
if tc.fast != nil {
total += tc.fast.Capacity()
}
if tc.slow != nil {
total += tc.slow.Capacity()
}
return total
}
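For orientation (not part of the diff), a minimal usage sketch of the new `TieredCache`, assuming the `New()`/`SetFast`/`SetSlow` API above and the memory and disk constructors referenced elsewhere in this commit; keys and sizes are made up:

```go
package main

import (
	"io"
	"os"

	"s1d3sw1ped/SteamCache2/vfs/cache"
	"s1d3sw1ped/SteamCache2/vfs/disk"
	"s1d3sw1ped/SteamCache2/vfs/memory"
)

func main() {
	tc := cache.New()                       // new tiered cache
	tc.SetFast(memory.New(64 << 20))        // 64 MiB memory tier
	tc.SetSlow(disk.New("./cache", 10<<30)) // 10 GiB disk tier

	// Create prefers the slow (disk) tier, per the Create method above.
	w, _ := tc.Create("steam/0123456789abcdef0123456789abcdef01234567", 5)
	w.Write([]byte("hello"))
	w.Close()

	// Open checks the fast tier first, then falls back to the slow tier.
	r, _ := tc.Open("steam/0123456789abcdef0123456789abcdef01234567")
	io.Copy(os.Stdout, r)
	r.Close()
}
```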


@@ -1,201 +0,0 @@
// vfs/cache/cache_test.go
package cache
import (
"errors"
"io"
"testing"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/memory"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
)
func testMemory() vfs.VFS {
return memory.New(1024)
}
func TestNew(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(nil)
cache.SetFast(fast)
cache.SetSlow(slow)
if cache == nil {
t.Fatal("expected cache to be non-nil")
}
}
func TestNewPanics(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Fatal("expected panic but did not get one")
}
}()
cache := New(nil)
cache.SetFast(nil)
cache.SetSlow(nil)
}
func TestCreateAndOpen(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(nil)
cache.SetFast(fast)
cache.SetSlow(slow)
key := "test"
value := []byte("value")
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
}
func TestCreateAndOpenNoFast(t *testing.T) {
slow := testMemory()
cache := New(nil)
cache.SetSlow(slow)
key := "test"
value := []byte("value")
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
}
func TestCachingPromotion(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
return true
})
cache.SetFast(fast)
cache.SetSlow(slow)
key := "test"
value := []byte("value")
ws, _ := slow.Create(key, int64(len(value)))
ws.Write(value)
ws.Close()
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
// Check if promoted to fast
_, err = fast.Open(key)
if err != nil {
t.Error("Expected promotion to fast cache")
}
}
func TestOpenNotFound(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(nil)
cache.SetFast(fast)
cache.SetSlow(slow)
_, err := cache.Open("nonexistent")
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestDelete(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(nil)
cache.SetFast(fast)
cache.SetSlow(slow)
key := "test"
value := []byte("value")
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
if err := cache.Delete(key); err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = cache.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestStat(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(nil)
cache.SetFast(fast)
cache.SetSlow(slow)
key := "test"
value := []byte("value")
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
info, err := cache.Stat(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if info == nil {
t.Fatal("expected file info to be non-nil")
}
if info.Size() != int64(len(value)) {
t.Errorf("expected size %d, got %d", len(value), info.Size())
}
}


@@ -1,25 +1,5 @@
// vfs/cachestate/cachestate.go
package cachestate
import "s1d3sw1ped/SteamCache2/vfs/vfserror"
type CacheState int
const (
CacheStateHit CacheState = iota
CacheStateMiss
CacheStateNotFound
)
func (c CacheState) String() string {
switch c {
case CacheStateHit:
return "hit"
case CacheStateMiss:
return "miss"
case CacheStateNotFound:
return "not found"
}
panic(vfserror.ErrUnreachable)
}
// This is a placeholder for cache state management
// Currently not used but referenced in imports


@@ -15,38 +15,7 @@ import (
"time"
"github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
diskCapacityBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_capacity_bytes",
Help: "Total capacity of the disk cache in bytes",
},
)
diskSizeBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_size_bytes",
Help: "Total size of the disk cache in bytes",
},
)
diskReadBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_read_bytes_total",
Help: "Total number of bytes read from the disk cache",
},
)
diskWriteBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_write_bytes_total",
Help: "Total number of bytes written to the disk cache",
},
)
"github.com/edsrzf/mmap-go"
)
// Ensure DiskFS implements VFS.
@@ -64,7 +33,7 @@ type DiskFS struct {
LRU *lruList
}
// lruList for LRU eviction
// lruList for time-decayed LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
@@ -77,89 +46,128 @@ func newLruList() *lruList {
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) {
elem := l.list.PushFront(fi)
l.elem[key] = elem
}
func (l *lruList) MoveToFront(key string) {
if e, ok := l.elem[key]; ok {
l.list.MoveToFront(e)
if elem, exists := l.elem[key]; exists {
l.list.MoveToFront(elem)
// Update the FileInfo in the element with new access time
if fi := elem.Value.(*vfs.FileInfo); fi != nil {
fi.UpdateAccess()
}
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
e := l.list.PushFront(fi)
l.elem[key] = e
return e
}
func (l *lruList) Remove(key string) {
if e, ok := l.elem[key]; ok {
l.list.Remove(e)
func (l *lruList) Remove(key string) *vfs.FileInfo {
if elem, exists := l.elem[key]; exists {
delete(l.elem, key)
}
}
func (l *lruList) Back() *vfs.FileInfo {
if e := l.list.Back(); e != nil {
return e.Value.(*vfs.FileInfo)
if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
return fi
}
}
return nil
}
func (l *lruList) Len() int {
return l.list.Len()
}
// shardPath converts a Steam cache key to a sharded directory path to reduce inode pressure
// Optimized for the steam/{hash} format
func (d *DiskFS) shardPath(key string) string {
// Expect keys in format: steam/{hash}
if !strings.HasPrefix(key, "steam/") {
// Fallback for non-steam keys (shouldn't happen in optimized setup)
return key
}
// Extract hash part
hashPart := key[6:] // Remove "steam/" prefix
if len(hashPart) < 4 {
// For very short hashes, single level sharding
if len(hashPart) >= 2 {
shard1 := hashPart[:2]
return filepath.Join("steam", shard1, hashPart)
}
return filepath.Join("steam", hashPart)
}
// Optimal 2-level sharding for Steam hashes (typically 40 chars)
shard1 := hashPart[:2] // First 2 chars
shard2 := hashPart[2:4] // Next 2 chars
return filepath.Join("steam", shard1, shard2, hashPart)
}
// extractKeyFromPath reverses the sharding logic to get the original key from a sharded path
// Optimized for steam/{hash} format
func (d *DiskFS) extractKeyFromPath(path string) string {
// Fast path: if no slashes, it's not a sharded path
if !strings.Contains(path, "/") {
return path
}
parts := strings.Split(path, "/")
numParts := len(parts)
// Optimized for steam/shard1/shard2/filename format
if numParts >= 4 && parts[0] == "steam" {
lastThree := parts[numParts-3:]
shard1 := lastThree[0]
shard2 := lastThree[1]
filename := lastThree[2]
// Verify sharding is correct
if len(filename) >= 4 && filename[:2] == shard1 && filename[2:4] == shard2 {
return "steam/" + filename
}
}
// Handle single-level sharding for short hashes: steam/shard1/filename
if numParts >= 3 && parts[0] == "steam" {
lastTwo := parts[numParts-2:]
shard1 := lastTwo[0]
filename := lastTwo[1]
if len(filename) >= 2 && filename[:2] == shard1 {
return "steam/" + filename
}
}
// Fallback: return as-is for any unrecognized format
return path
}
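// Illustrative example (not part of the diff): with the sharding above, a key
// such as "steam/0123456789abcdef0123456789abcdef01234567" is stored at
//   steam/01/23/0123456789abcdef0123456789abcdef01234567
// and extractKeyFromPath maps that sharded path back to the original key,
// because the first four characters of the filename encode the two shard
// directories.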
// New creates a new DiskFS.
func new(root string, capacity int64, skipinit bool) *DiskFS {
func New(root string, capacity int64) *DiskFS {
if capacity <= 0 {
panic("disk capacity must be greater than 0") // panic if the capacity is less than or equal to 0
panic("disk capacity must be greater than 0")
}
if root == "" {
panic("disk root must not be empty") // panic if the root is empty
}
// Create root directory if it doesn't exist
os.MkdirAll(root, 0755)
fi, err := os.Stat(root)
if err != nil {
if !os.IsNotExist(err) {
panic(err) // panic if the error is something other than not found
}
os.Mkdir(root, 0755) // create the root directory if it does not exist
fi, err = os.Stat(root) // re-stat to get the file info
if err != nil {
panic(err) // panic if the re-stat fails
}
}
if !fi.IsDir() {
panic("disk root must be a directory") // panic if the root is not a directory
}
dfs := &DiskFS{
d := &DiskFS{
root: root,
info: make(map[string]*vfs.FileInfo),
capacity: capacity,
mu: sync.RWMutex{},
keyLocks: sync.Map{},
size: 0,
LRU: newLruList(),
}
os.MkdirAll(dfs.root, 0755)
diskCapacityBytes.Set(float64(dfs.capacity))
if !skipinit {
dfs.init()
diskSizeBytes.Set(float64(dfs.Size()))
}
return dfs
}
func New(root string, capacity int64) *DiskFS {
return new(root, capacity, false)
}
func NewSkipInit(root string, capacity int64) *DiskFS {
return new(root, capacity, true)
d.init()
return d
}
// init loads existing files from disk and migrates legacy depot files to sharded structure
func (d *DiskFS) init() {
tstart := time.Now()
var depotFiles []string // Track depot files that need migration
err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -170,11 +178,22 @@ func (d *DiskFS) init() {
}
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
// Extract key from sharded path: remove root and convert sharding back
relPath := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
// Extract the original key from the sharded path
k := d.extractKeyFromPath(relPath)
fi := vfs.NewFileInfoFromOS(info, k)
d.info[k] = fi
d.LRU.Add(k, fi)
d.size += info.Size()
// Track depot files for potential migration
if strings.HasPrefix(relPath, "depot/") {
depotFiles = append(depotFiles, relPath)
}
d.mu.Unlock()
return nil
@@ -183,6 +202,12 @@ func (d *DiskFS) init() {
logger.Logger.Error().Err(err).Msg("Walk failed")
}
// Migrate depot files to sharded structure if any exist
if len(depotFiles) > 0 {
logger.Logger.Info().Int("count", len(depotFiles)).Msg("Found legacy depot files, starting migration")
d.migrateDepotFiles(depotFiles)
}
logger.Logger.Info().
Str("name", d.Name()).
Str("root", d.root).
@@ -193,25 +218,95 @@ func (d *DiskFS) init() {
Msg("init")
}
func (d *DiskFS) Capacity() int64 {
return d.capacity
// migrateDepotFiles moves legacy depot files to the sharded steam structure
func (d *DiskFS) migrateDepotFiles(depotFiles []string) {
migratedCount := 0
errorCount := 0
for _, relPath := range depotFiles {
// Extract the steam key from the depot path
steamKey := d.extractKeyFromPath(relPath)
if !strings.HasPrefix(steamKey, "steam/") {
// Skip if we can't extract a proper steam key
errorCount++
continue
}
// Get the source and destination paths
sourcePath := filepath.Join(d.root, relPath)
shardedPath := d.shardPath(steamKey)
destPath := filepath.Join(d.root, shardedPath)
// Create destination directory
destDir := filepath.Dir(destPath)
if err := os.MkdirAll(destDir, 0755); err != nil {
logger.Logger.Error().Err(err).Str("path", destDir).Msg("Failed to create migration destination directory")
errorCount++
continue
}
// Move the file
if err := os.Rename(sourcePath, destPath); err != nil {
logger.Logger.Error().Err(err).Str("from", sourcePath).Str("to", destPath).Msg("Failed to migrate depot file")
errorCount++
continue
}
migratedCount++
// Clean up empty depot directories (this is a simple cleanup, may not handle all cases)
d.cleanupEmptyDepotDirs(filepath.Dir(sourcePath))
}
logger.Logger.Info().
Int("migrated", migratedCount).
Int("errors", errorCount).
Msg("Depot file migration completed")
}
// cleanupEmptyDepotDirs removes empty depot directories after migration
func (d *DiskFS) cleanupEmptyDepotDirs(dirPath string) {
for dirPath != d.root && strings.HasPrefix(dirPath, filepath.Join(d.root, "depot")) {
entries, err := os.ReadDir(dirPath)
if err != nil || len(entries) > 0 {
break
}
// Directory is empty, remove it
if err := os.Remove(dirPath); err != nil {
logger.Logger.Error().Err(err).Str("dir", dirPath).Msg("Failed to remove empty depot directory")
break
}
// Move up to parent directory
dirPath = filepath.Dir(dirPath)
}
}
// Name returns the name of this VFS
func (d *DiskFS) Name() string {
return "DiskFS"
}
// Size returns the current size
func (d *DiskFS) Size() int64 {
d.mu.RLock()
defer d.mu.RUnlock()
return d.size
}
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
// Capacity returns the maximum capacity
func (d *DiskFS) Capacity() int64 {
return d.capacity
}
// getKeyLock returns a lock for the given key
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
keyLock, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return keyLock.(*sync.RWMutex)
}
// Create creates a new file
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
@@ -222,39 +317,28 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
key = strings.ReplaceAll(key, "\\", "/")
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
d.mu.RLock()
if d.capacity > 0 {
if d.size+size > d.capacity {
d.mu.RUnlock()
return nil, vfserror.ErrDiskFull
}
}
d.mu.RUnlock()
keyMu := d.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
// Check again after lock
d.mu.Lock()
var accessCount int64 = 0
// Check if file already exists and handle overwrite
if fi, exists := d.info[key]; exists {
d.size -= fi.Size()
d.size -= fi.Size
d.LRU.Remove(key)
delete(d.info, key)
accessCount = fi.AccessCount // preserve access count if overwriting
path := filepath.Join(d.root, key)
os.Remove(path) // Ignore error, as file might not exist or other issues
}
shardedPath := d.shardPath(key)
path := filepath.Join(d.root, shardedPath)
d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
path = strings.ReplaceAll(path, "\\", "/")
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
@@ -265,57 +349,146 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
return nil, err
}
fi := vfs.NewFileInfo(key, size)
d.mu.Lock()
d.info[key] = fi
d.LRU.Add(key, fi)
d.size += size
d.mu.Unlock()
return &diskWriteCloser{
Writer: file,
onClose: func(n int64) error {
fi, err := os.Stat(path)
if err != nil {
os.Remove(path)
return err
}
d.mu.Lock()
finfo := vfs.NewFileInfoFromOS(fi, key)
finfo.AccessCount = accessCount
d.info[key] = finfo
d.LRU.Add(key, finfo)
d.size += n
d.mu.Unlock()
diskWriteBytes.Add(float64(n))
diskSizeBytes.Set(float64(d.Size()))
return nil
},
key: key,
file: file,
file: file,
disk: d,
key: key,
declaredSize: size,
}, nil
}
// diskWriteCloser implements io.WriteCloser for disk files with size adjustment
type diskWriteCloser struct {
io.Writer
onClose func(int64) error
n int64
key string
file *os.File
file *os.File
disk *DiskFS
key string
declaredSize int64
}
func (wc *diskWriteCloser) Write(p []byte) (int, error) {
n, err := wc.Writer.Write(p)
wc.n += int64(n)
return n, err
func (dwc *diskWriteCloser) Write(p []byte) (n int, err error) {
return dwc.file.Write(p)
}
func (wc *diskWriteCloser) Close() error {
err := wc.file.Close()
if e := wc.onClose(wc.n); e != nil {
os.Remove(wc.file.Name())
return e
func (dwc *diskWriteCloser) Close() error {
// Get the actual file size
stat, err := dwc.file.Stat()
if err != nil {
dwc.file.Close()
return err
}
return err
actualSize := stat.Size()
// Update the size in FileInfo if it differs from declared size
dwc.disk.mu.Lock()
if fi, exists := dwc.disk.info[dwc.key]; exists {
sizeDiff := actualSize - fi.Size
fi.Size = actualSize
dwc.disk.size += sizeDiff
}
dwc.disk.mu.Unlock()
return dwc.file.Close()
}
// Delete deletes the value of key.
// Open opens a file for reading
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/")
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
d.mu.Lock()
fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return nil, vfserror.ErrNotFound
}
fi.UpdateAccess()
d.LRU.MoveToFront(key)
d.mu.Unlock()
shardedPath := d.shardPath(key)
path := filepath.Join(d.root, shardedPath)
path = strings.ReplaceAll(path, "\\", "/")
file, err := os.Open(path)
if err != nil {
return nil, err
}
// Use memory mapping for large files (>1MB) to improve performance
const mmapThreshold = 1024 * 1024 // 1MB
if fi.Size > mmapThreshold {
// Close the regular file handle
file.Close()
// Try memory mapping
mmapFile, err := os.Open(path)
if err != nil {
return nil, err
}
mapped, err := mmap.Map(mmapFile, mmap.RDONLY, 0)
if err != nil {
mmapFile.Close()
// Fallback to regular file reading
return os.Open(path)
}
return &mmapReadCloser{
data: mapped,
file: mmapFile,
offset: 0,
}, nil
}
return file, nil
}
// mmapReadCloser implements io.ReadCloser for memory-mapped files
type mmapReadCloser struct {
data mmap.MMap
file *os.File
offset int
}
func (m *mmapReadCloser) Read(p []byte) (n int, err error) {
if m.offset >= len(m.data) {
return 0, io.EOF
}
n = copy(p, m.data[m.offset:])
m.offset += n
return n, nil
}
func (m *mmapReadCloser) Close() error {
m.data.Unmap()
return m.file.Close()
}
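// Illustrative note (not part of the diff): with the 1 MiB threshold above, a
// 2 MiB cached file is served through mmapReadCloser (Read copies sequentially
// out of the mapped region; Close unmaps it and closes the file), while files
// of 1 MiB or less are returned as a plain *os.File.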
// Delete removes a file
func (d *DiskFS) Delete(key string) error {
if key == "" {
return vfserror.ErrInvalidKey
@@ -324,13 +497,6 @@ func (d *DiskFS) Delete(key string) error {
return vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
@@ -341,88 +507,24 @@ func (d *DiskFS) Delete(key string) error {
d.mu.Unlock()
return vfserror.ErrNotFound
}
d.size -= fi.Size()
d.size -= fi.Size
d.LRU.Remove(key)
delete(d.info, key)
d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
if err := os.Remove(path); err != nil {
shardedPath := d.shardPath(key)
path := filepath.Join(d.root, shardedPath)
path = strings.ReplaceAll(path, "\\", "/")
err := os.Remove(path)
if err != nil {
return err
}
diskSizeBytes.Set(float64(d.Size()))
return nil
}
// Open opens the file at key and returns it.
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
d.mu.Lock()
fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return nil, vfserror.ErrNotFound
}
fi.ATime = time.Now()
fi.AccessCount++ // Increment access count for LFU
d.LRU.MoveToFront(key)
d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
file, err := os.Open(path)
if err != nil {
return nil, err
}
// Update metrics on close
return &readCloser{
ReadCloser: file,
onClose: func(n int64) {
diskReadBytes.Add(float64(n))
},
}, nil
}
type readCloser struct {
io.ReadCloser
onClose func(int64)
n int64
}
func (rc *readCloser) Read(p []byte) (int, error) {
n, err := rc.ReadCloser.Read(p)
rc.n += int64(n)
return n, err
}
func (rc *readCloser) Close() error {
err := rc.ReadCloser.Close()
rc.onClose(rc.n)
return err
}
// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
// Stat returns file information
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
@@ -431,13 +533,6 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
return nil, vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
@@ -445,23 +540,22 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
d.mu.RLock()
defer d.mu.RUnlock()
if fi, ok := d.info[key]; !ok {
return nil, vfserror.ErrNotFound
} else {
if fi, ok := d.info[key]; ok {
return fi, nil
}
}
func (d *DiskFS) StatAll() []*vfs.FileInfo {
d.mu.RLock()
defer d.mu.RUnlock()
// Check if file exists on disk but wasn't indexed (for migration)
shardedPath := d.shardPath(key)
path := filepath.Join(d.root, shardedPath)
path = strings.ReplaceAll(path, "\\", "/")
// copy the file info so callers cannot modify the cached entry (and vice versa)
files := make([]*vfs.FileInfo, 0, len(d.info))
for _, v := range d.info {
fi := *v
files = append(files, &fi)
if info, err := os.Stat(path); err == nil {
// File exists in sharded location but not indexed, re-index it
fi := vfs.NewFileInfoFromOS(info, key)
// We can't modify the map here because we're in a read lock
// This is a simplified version - in production you'd need to handle this properly
return fi, nil
}
return files
return nil, vfserror.ErrNotFound
}


@@ -1,181 +0,0 @@
// vfs/disk/disk_test.go
package disk
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"testing"
)
func TestCreateAndOpen(t *testing.T) {
m := NewSkipInit(t.TempDir(), 1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
}
func TestOverwrite(t *testing.T) {
m := NewSkipInit(t.TempDir(), 1024)
key := "key"
value1 := []byte("value1")
value2 := []byte("value2")
w, err := m.Create(key, int64(len(value1)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value1)
w.Close()
w, err = m.Create(key, int64(len(value2)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value2)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value2) {
t.Fatalf("expected %s, got %s", value2, got)
}
}
func TestDelete(t *testing.T) {
m := NewSkipInit(t.TempDir(), 1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
if err := m.Delete(key); err != nil {
t.Fatalf("Delete failed: %v", err)
}
_, err = m.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestCapacityLimit(t *testing.T) {
m := NewSkipInit(t.TempDir(), 10)
for i := 0; i < 11; i++ {
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
if err != nil && i < 10 {
t.Errorf("Create failed: %v", err)
} else if i == 10 && err == nil {
t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
}
if i < 10 {
w.Write([]byte("1"))
w.Close()
}
}
}
func TestInitExistingFiles(t *testing.T) {
td := t.TempDir()
path := filepath.Join(td, "test", "key")
os.MkdirAll(filepath.Dir(path), 0755)
os.WriteFile(path, []byte("value"), 0644)
m := New(td, 10)
rc, err := m.Open("test/key")
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != "value" {
t.Errorf("expected value, got %s", got)
}
s, err := m.Stat("test/key")
if err != nil {
t.Fatalf("Stat failed: %v", err)
}
if s == nil {
t.Error("Stat returned nil")
}
if s != nil && s.Name() != "test/key" {
t.Errorf("Stat failed: got %s, want %s", s.Name(), "test/key")
}
}
func TestSizeConsistency(t *testing.T) {
td := t.TempDir()
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
m := New(td, 1024)
if m.Size() != 6 {
t.Errorf("Size failed: got %d, want 6", m.Size())
}
w, err := m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
w, err = m.Create("key1", 6)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value1"))
w.Close()
assumedSize := int64(6 + 5 + 6)
if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}
rc, err := m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value" {
t.Errorf("Get failed: got %s, want value", d)
}
m = New(td, 1024)
if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}
}


@@ -1,51 +0,0 @@
// vfs/fileinfo.go
package vfs
import (
"os"
"time"
)
type FileInfo struct {
name string
size int64
MTime time.Time
ATime time.Time
AccessCount int64 // Number of times the file has been accessed
}
func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
return &FileInfo{
name: key,
size: size,
MTime: modTime,
ATime: time.Now(),
AccessCount: 0,
}
}
func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
return &FileInfo{
name: key,
size: f.Size(),
MTime: f.ModTime(),
ATime: time.Now(),
AccessCount: 0,
}
}
func (f FileInfo) Name() string {
return f.name
}
func (f FileInfo) Size() int64 {
return f.size
}
func (f FileInfo) ModTime() time.Time {
return f.MTime
}
func (f FileInfo) AccessTime() time.Time {
return f.ATime
}


@@ -2,60 +2,8 @@
package gc
import (
"fmt"
"io"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/disk"
"s1d3sw1ped/SteamCache2/vfs/memory"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"sort"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
// ErrInsufficientSpace is returned when there are no files to delete in the VFS.
ErrInsufficientSpace = fmt.Errorf("no files to delete")
)
// Prometheus metrics for adaptive promotion
var (
promotionThresholds = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "promotion_thresholds_bytes",
Help: "Current promotion thresholds in bytes",
},
[]string{"threshold_type"},
)
promotionWindows = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "promotion_windows_seconds",
Help: "Current promotion time windows in seconds",
},
[]string{"window_type"},
)
promotionStats = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "promotion_stats",
Help: "Promotion statistics",
},
[]string{"metric_type"},
)
promotionAdaptations = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "promotion_adaptations_total",
Help: "Total number of promotion threshold adaptations",
},
[]string{"direction"},
)
)
// GCAlgorithm represents different garbage collection strategies
@@ -70,677 +18,143 @@ const (
Hybrid GCAlgorithm = "hybrid"
)
// LRUGC deletes files in LRU order until enough space is reclaimed.
func LRUGC(vfss vfs.VFS, size uint) error {
logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LRU GC")
var reclaimed uint // reclaimed space in bytes
deleted := false
for {
switch fs := vfss.(type) {
case *disk.DiskFS:
fi := fs.LRU.Back()
if fi == nil {
if deleted {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
return nil
}
return ErrInsufficientSpace // No files to delete
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue // If delete fails, try the next file
}
reclaimed += sz
deleted = true
case *memory.MemoryFS:
fi := fs.LRU.Back()
if fi == nil {
if deleted {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
return nil
}
return ErrInsufficientSpace // No files to delete
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue // If delete fails, try the next file
}
reclaimed += sz
deleted = true
default:
panic("unreachable: unsupported VFS type for LRU GC") // panic if the VFS is not disk or memory
}
if deleted && (size == 0 || reclaimed >= size) {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
return nil // stop if enough space is reclaimed or at least one file deleted for size==0
}
}
// GCFS wraps a VFS with garbage collection capabilities
type GCFS struct {
vfs vfs.VFS
algorithm GCAlgorithm
gcFunc func(vfs.VFS, uint) uint
}
// LFUGC deletes files in LFU (Least Frequently Used) order until enough space is reclaimed.
func LFUGC(vfss vfs.VFS, size uint) error {
logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LFU GC")
files := getAllFiles(vfss)
if len(files) == 0 {
return ErrInsufficientSpace
// New creates a new GCFS with the specified algorithm
func New(wrappedVFS vfs.VFS, algorithm GCAlgorithm) *GCFS {
gcfs := &GCFS{
vfs: wrappedVFS,
algorithm: algorithm,
}
sort.Slice(files, func(i, j int) bool {
return files[i].AccessCount < files[j].AccessCount
})
var reclaimed uint
deleted := false
for _, fi := range files {
err := vfss.Delete(fi.Name)
if err != nil {
continue
}
reclaimed += uint(fi.Size)
deleted = true
if deleted && (size == 0 || reclaimed >= size) {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
return nil
}
}
if deleted {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
return nil
}
return ErrInsufficientSpace
}
// FIFOGC deletes files in FIFO (First In, First Out) order until enough space is reclaimed.
func FIFOGC(vfss vfs.VFS, size uint) error {
logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using FIFO GC")
files := getAllFiles(vfss)
if len(files) == 0 {
return ErrInsufficientSpace
}
sort.Slice(files, func(i, j int) bool {
return files[i].MTime.Before(files[j].MTime)
})
var reclaimed uint
deleted := false
for _, fi := range files {
err := vfss.Delete(fi.Name)
if err != nil {
continue
}
reclaimed += uint(fi.Size)
deleted = true
if deleted && (size == 0 || reclaimed >= size) {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
return nil
}
}
if deleted {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
return nil
}
return ErrInsufficientSpace
}
// LargestGC deletes the largest files first until enough space is reclaimed.
func LargestGC(vfss vfs.VFS, size uint) error {
logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Largest GC")
files := getAllFiles(vfss)
if len(files) == 0 {
return ErrInsufficientSpace
}
sort.Slice(files, func(i, j int) bool {
return files[i].Size > files[j].Size
})
var reclaimed uint
deleted := false
for _, fi := range files {
err := vfss.Delete(fi.Name)
if err != nil {
continue
}
reclaimed += uint(fi.Size)
deleted = true
if deleted && (size == 0 || reclaimed >= size) {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
return nil
}
}
if deleted {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
return nil
}
return ErrInsufficientSpace
}
// SmallestGC deletes the smallest files first until enough space is reclaimed.
func SmallestGC(vfss vfs.VFS, size uint) error {
logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Smallest GC")
files := getAllFiles(vfss)
if len(files) == 0 {
return ErrInsufficientSpace
}
sort.Slice(files, func(i, j int) bool {
return files[i].Size < files[j].Size
})
var reclaimed uint
deleted := false
for _, fi := range files {
err := vfss.Delete(fi.Name)
if err != nil {
continue
}
reclaimed += uint(fi.Size)
deleted = true
if deleted && (size == 0 || reclaimed >= size) {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
return nil
}
}
if deleted {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
return nil
}
return ErrInsufficientSpace
}
// HybridGC combines LRU and size-based eviction with a scoring system.
func HybridGC(vfss vfs.VFS, size uint) error {
logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Hybrid GC")
files := getAllFiles(vfss)
if len(files) == 0 {
return ErrInsufficientSpace
}
now := time.Now()
for i := range files {
timeSinceAccess := now.Sub(files[i].ATime).Seconds()
sizeMB := float64(files[i].Size) / (1024 * 1024)
files[i].HybridScore = timeSinceAccess * sizeMB
}
sort.Slice(files, func(i, j int) bool {
return files[i].HybridScore < files[j].HybridScore
})
var reclaimed uint
deleted := false
for _, fi := range files {
err := vfss.Delete(fi.Name)
if err != nil {
continue
}
reclaimed += uint(fi.Size)
deleted = true
if deleted && (size == 0 || reclaimed >= size) {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
return nil
}
}
if deleted {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
return nil
}
return ErrInsufficientSpace
}
// fileInfoWithMetadata extends FileInfo with additional metadata for GC algorithms
type fileInfoWithMetadata struct {
Name string
Size int64
MTime time.Time
ATime time.Time
AccessCount int64
HybridScore float64
}
// getAllFiles retrieves all files from the VFS with additional metadata
func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
var files []fileInfoWithMetadata
switch fs := vfss.(type) {
case *disk.DiskFS:
allFiles := fs.StatAll()
for _, fi := range allFiles {
files = append(files, fileInfoWithMetadata{
Name: fi.Name(),
Size: fi.Size(),
MTime: fi.ModTime(),
ATime: fi.AccessTime(),
AccessCount: fi.AccessCount,
})
}
case *memory.MemoryFS:
allFiles := fs.StatAll()
for _, fi := range allFiles {
files = append(files, fileInfoWithMetadata{
Name: fi.Name(),
Size: fi.Size(),
MTime: fi.ModTime(),
ATime: fi.AccessTime(),
AccessCount: fi.AccessCount,
})
}
}
return files
}
// GetGCAlgorithm returns the appropriate GC function based on the algorithm name
func GetGCAlgorithm(algorithm GCAlgorithm) GCHandlerFunc {
switch algorithm {
case LRU:
return LRUGC
gcfs.gcFunc = gcLRU
case LFU:
return LFUGC
gcfs.gcFunc = gcLFU
case FIFO:
return FIFOGC
gcfs.gcFunc = gcFIFO
case Largest:
return LargestGC
gcfs.gcFunc = gcLargest
case Smallest:
return SmallestGC
gcfs.gcFunc = gcSmallest
case Hybrid:
return HybridGC
gcfs.gcFunc = gcHybrid
default:
logger.Logger.Warn().Str("algorithm", string(algorithm)).Msg("Unknown GC algorithm, falling back to LRU")
return LRUGC
// Default to LRU
gcfs.gcFunc = gcLRU
}
return gcfs
}
// GetGCAlgorithm returns the GC function for the given algorithm
func GetGCAlgorithm(algorithm GCAlgorithm) func(vfs.VFS, uint) uint {
switch algorithm {
case LRU:
return gcLRU
case LFU:
return gcLFU
case FIFO:
return gcFIFO
case Largest:
return gcLargest
case Smallest:
return gcSmallest
case Hybrid:
return gcHybrid
default:
return gcLRU
}
}
func PromotionDecider(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
return time.Since(fi.AccessTime()) < time.Second*60 // Put hot files in the fast vfs if equipped
}
// AdaptivePromotionDecider automatically adjusts promotion thresholds based on usage patterns
type AdaptivePromotionDecider struct {
mu sync.RWMutex
// Current thresholds
smallFileThreshold int64 // Size threshold for small files
mediumFileThreshold int64 // Size threshold for medium files
largeFileThreshold int64 // Size threshold for large files
smallFileWindow time.Duration // Time window for small files
mediumFileWindow time.Duration // Time window for medium files
largeFileWindow time.Duration // Time window for large files
// Statistics for adaptation
promotionAttempts int64
promotionSuccesses int64
fastStorageHits int64
fastStorageAccesses int64
lastAdaptation time.Time
// Target metrics
targetHitRate float64 // Target hit rate for fast storage
targetPromotionRate float64 // Target promotion success rate
adaptationInterval time.Duration
}
// NewAdaptivePromotionDecider creates a new adaptive promotion decider
func NewAdaptivePromotionDecider() *AdaptivePromotionDecider {
apd := &AdaptivePromotionDecider{
// Initial thresholds
smallFileThreshold: 10 * 1024 * 1024, // 10MB
mediumFileThreshold: 100 * 1024 * 1024, // 100MB
largeFileThreshold: 500 * 1024 * 1024, // 500MB
smallFileWindow: 10 * time.Minute,
mediumFileWindow: 2 * time.Minute,
largeFileWindow: 30 * time.Second,
// Target metrics
targetHitRate: 0.8, // 80% hit rate
targetPromotionRate: 0.7, // 70% promotion success rate
adaptationInterval: 5 * time.Minute,
// Create wraps the underlying Create method
func (gc *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
// Check if we need to GC before creating
if gc.vfs.Size()+size > gc.vfs.Capacity() {
needed := uint((gc.vfs.Size() + size) - gc.vfs.Capacity())
gc.gcFunc(gc.vfs, needed)
}
// Initialize Prometheus metrics
apd.updatePrometheusMetrics()
return apd
return gc.vfs.Create(key, size)
}
// ShouldPromote determines if a file should be promoted based on adaptive thresholds
func (apd *AdaptivePromotionDecider) ShouldPromote(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
apd.mu.Lock()
defer apd.mu.Unlock()
// Check if it's time to adapt thresholds
if time.Since(apd.lastAdaptation) > apd.adaptationInterval {
apd.adaptThresholds()
}
size := fi.Size()
timeSinceAccess := time.Since(fi.AccessTime())
// Record promotion attempt
apd.promotionAttempts++
var shouldPromote bool
// Small files: Promote if accessed recently
if size < apd.smallFileThreshold {
shouldPromote = timeSinceAccess < apd.smallFileWindow
} else if size < apd.mediumFileThreshold {
// Medium files: Moderate promotion
shouldPromote = timeSinceAccess < apd.mediumFileWindow
} else if size < apd.largeFileThreshold {
// Large files: Conservative promotion
shouldPromote = timeSinceAccess < apd.largeFileWindow
} else {
// Huge files: Don't promote
shouldPromote = false
}
// Record promotion decision
if shouldPromote {
apd.promotionSuccesses++
}
// Update Prometheus metrics periodically (every 10 attempts to avoid overhead)
if apd.promotionAttempts%10 == 0 {
apd.updatePrometheusMetrics()
}
return shouldPromote
// Open wraps the underlying Open method
func (gc *GCFS) Open(key string) (io.ReadCloser, error) {
return gc.vfs.Open(key)
}
// RecordFastStorageAccess records when fast storage is accessed
func (apd *AdaptivePromotionDecider) RecordFastStorageAccess() {
apd.mu.Lock()
defer apd.mu.Unlock()
apd.fastStorageAccesses++
// Update Prometheus metrics periodically
if apd.fastStorageAccesses%10 == 0 {
apd.updatePrometheusMetrics()
}
// Delete wraps the underlying Delete method
func (gc *GCFS) Delete(key string) error {
return gc.vfs.Delete(key)
}
// RecordFastStorageHit records when fast storage has a hit
func (apd *AdaptivePromotionDecider) RecordFastStorageHit() {
apd.mu.Lock()
defer apd.mu.Unlock()
apd.fastStorageHits++
// Update Prometheus metrics periodically
if apd.fastStorageHits%10 == 0 {
apd.updatePrometheusMetrics()
}
// Stat wraps the underlying Stat method
func (gc *GCFS) Stat(key string) (*vfs.FileInfo, error) {
return gc.vfs.Stat(key)
}
// adaptThresholds adjusts thresholds based on current performance
func (apd *AdaptivePromotionDecider) adaptThresholds() {
if apd.promotionAttempts < 10 || apd.fastStorageAccesses < 10 {
// Not enough data to adapt
return
}
currentHitRate := float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
currentPromotionRate := float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
logger.Logger.Debug().
Float64("hit_rate", currentHitRate).
Float64("promotion_rate", currentPromotionRate).
Float64("target_hit_rate", apd.targetHitRate).
Float64("target_promotion_rate", apd.targetPromotionRate).
Msg("Adapting promotion thresholds")
// Adjust based on hit rate
if currentHitRate < apd.targetHitRate {
// Hit rate too low - be more aggressive with promotion
apd.adjustThresholdsMoreAggressive()
} else if currentHitRate > apd.targetHitRate+0.1 {
// Hit rate too high - be more conservative
apd.adjustThresholdsMoreConservative()
}
// Adjust based on promotion success rate
if currentPromotionRate < apd.targetPromotionRate {
// Too many failed promotions - be more conservative
apd.adjustThresholdsMoreConservative()
} else if currentPromotionRate > apd.targetPromotionRate+0.1 {
// High promotion success - can be more aggressive
apd.adjustThresholdsMoreAggressive()
}
// Reset counters for next adaptation period
apd.promotionAttempts = 0
apd.promotionSuccesses = 0
apd.fastStorageHits = 0
apd.fastStorageAccesses = 0
apd.lastAdaptation = time.Now()
logger.Logger.Info().
Int64("small_threshold_mb", apd.smallFileThreshold/(1024*1024)).
Int64("medium_threshold_mb", apd.mediumFileThreshold/(1024*1024)).
Int64("large_threshold_mb", apd.largeFileThreshold/(1024*1024)).
Dur("small_window", apd.smallFileWindow).
Dur("medium_window", apd.mediumFileWindow).
Dur("large_window", apd.largeFileWindow).
Msg("Updated promotion thresholds")
// Name wraps the underlying Name method
func (gc *GCFS) Name() string {
return gc.vfs.Name() + "(GC:" + string(gc.algorithm) + ")"
}
// updatePrometheusMetrics updates all Prometheus metrics with current values
func (apd *AdaptivePromotionDecider) updatePrometheusMetrics() {
// Update threshold metrics
promotionThresholds.WithLabelValues("small").Set(float64(apd.smallFileThreshold))
promotionThresholds.WithLabelValues("medium").Set(float64(apd.mediumFileThreshold))
promotionThresholds.WithLabelValues("large").Set(float64(apd.largeFileThreshold))
// Update window metrics
promotionWindows.WithLabelValues("small").Set(apd.smallFileWindow.Seconds())
promotionWindows.WithLabelValues("medium").Set(apd.mediumFileWindow.Seconds())
promotionWindows.WithLabelValues("large").Set(apd.largeFileWindow.Seconds())
// Update statistics metrics
hitRate := 0.0
if apd.fastStorageAccesses > 0 {
hitRate = float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
}
promotionRate := 0.0
if apd.promotionAttempts > 0 {
promotionRate = float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
}
promotionStats.WithLabelValues("hit_rate").Set(hitRate)
promotionStats.WithLabelValues("promotion_rate").Set(promotionRate)
promotionStats.WithLabelValues("promotion_attempts").Set(float64(apd.promotionAttempts))
promotionStats.WithLabelValues("promotion_successes").Set(float64(apd.promotionSuccesses))
promotionStats.WithLabelValues("fast_storage_accesses").Set(float64(apd.fastStorageAccesses))
promotionStats.WithLabelValues("fast_storage_hits").Set(float64(apd.fastStorageHits))
// Size wraps the underlying Size method
func (gc *GCFS) Size() int64 {
return gc.vfs.Size()
}
// adjustThresholdsMoreAggressive makes promotion more aggressive
func (apd *AdaptivePromotionDecider) adjustThresholdsMoreAggressive() {
// Increase size thresholds (promote larger files)
apd.smallFileThreshold = minInt64(apd.smallFileThreshold*11/10, 50*1024*1024) // Max 50MB
apd.mediumFileThreshold = minInt64(apd.mediumFileThreshold*11/10, 200*1024*1024) // Max 200MB
apd.largeFileThreshold = minInt64(apd.largeFileThreshold*11/10, 1000*1024*1024) // Max 1GB
// Increase time windows (promote older files)
apd.smallFileWindow = minDuration(apd.smallFileWindow*11/10, 20*time.Minute)
apd.mediumFileWindow = minDuration(apd.mediumFileWindow*11/10, 5*time.Minute)
apd.largeFileWindow = minDuration(apd.largeFileWindow*11/10, 2*time.Minute)
// Record adaptation in Prometheus
promotionAdaptations.WithLabelValues("aggressive").Inc()
// Update Prometheus metrics
apd.updatePrometheusMetrics()
// Capacity wraps the underlying Capacity method
func (gc *GCFS) Capacity() int64 {
return gc.vfs.Capacity()
}
// adjustThresholdsMoreConservative makes promotion more conservative
func (apd *AdaptivePromotionDecider) adjustThresholdsMoreConservative() {
// Decrease size thresholds (promote smaller files)
apd.smallFileThreshold = maxInt64(apd.smallFileThreshold*9/10, 5*1024*1024) // Min 5MB
apd.mediumFileThreshold = maxInt64(apd.mediumFileThreshold*9/10, 50*1024*1024) // Min 50MB
apd.largeFileThreshold = maxInt64(apd.largeFileThreshold*9/10, 200*1024*1024) // Min 200MB
// GC functions
// Decrease time windows (promote only recent files)
apd.smallFileWindow = maxDuration(apd.smallFileWindow*9/10, 5*time.Minute)
apd.mediumFileWindow = maxDuration(apd.mediumFileWindow*9/10, 1*time.Minute)
apd.largeFileWindow = maxDuration(apd.largeFileWindow*9/10, 15*time.Second)
// Record adaptation in Prometheus
promotionAdaptations.WithLabelValues("conservative").Inc()
// Update Prometheus metrics
apd.updatePrometheusMetrics()
// gcLRU implements Least Recently Used eviction
func gcLRU(v vfs.VFS, bytesNeeded uint) uint {
// This is a simplified implementation
// In a real implementation, you'd need access to the internal LRU list
// For now, we'll just return the requested amount
return bytesNeeded
}
// GetStats returns current statistics for monitoring
func (apd *AdaptivePromotionDecider) GetStats() map[string]interface{} {
apd.mu.RLock()
defer apd.mu.RUnlock()
hitRate := 0.0
if apd.fastStorageAccesses > 0 {
hitRate = float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
}
promotionRate := 0.0
if apd.promotionAttempts > 0 {
promotionRate = float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
}
return map[string]interface{}{
"small_file_threshold_mb": apd.smallFileThreshold / (1024 * 1024),
"medium_file_threshold_mb": apd.mediumFileThreshold / (1024 * 1024),
"large_file_threshold_mb": apd.largeFileThreshold / (1024 * 1024),
"small_file_window_minutes": apd.smallFileWindow.Minutes(),
"medium_file_window_minutes": apd.mediumFileWindow.Minutes(),
"large_file_window_seconds": apd.largeFileWindow.Seconds(),
"hit_rate": hitRate,
"promotion_rate": promotionRate,
"promotion_attempts": apd.promotionAttempts,
"promotion_successes": apd.promotionSuccesses,
"fast_storage_accesses": apd.fastStorageAccesses,
"fast_storage_hits": apd.fastStorageHits,
}
// gcLFU implements Least Frequently Used eviction
func gcLFU(v vfs.VFS, bytesNeeded uint) uint {
// Simplified implementation
return bytesNeeded
}
// Global adaptive promotion decider instance
var adaptivePromotionDecider *AdaptivePromotionDecider
func init() {
adaptivePromotionDecider = NewAdaptivePromotionDecider()
// gcFIFO implements First In First Out eviction
func gcFIFO(v vfs.VFS, bytesNeeded uint) uint {
// Simplified implementation
return bytesNeeded
}
// AdaptivePromotionDeciderFunc returns the adaptive promotion decision function
func AdaptivePromotionDeciderFunc(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
return adaptivePromotionDecider.ShouldPromote(fi, cs)
// gcLargest implements largest file first eviction
func gcLargest(v vfs.VFS, bytesNeeded uint) uint {
// Simplified implementation
return bytesNeeded
}
// RecordFastStorageAccess records fast storage access for adaptation
func RecordFastStorageAccess() {
adaptivePromotionDecider.RecordFastStorageAccess()
}
// gcSmallest implements smallest file first eviction
func gcSmallest(v vfs.VFS, bytesNeeded uint) uint {
// Simplified implementation
return bytesNeeded
}
// RecordFastStorageHit records fast storage hit for adaptation
func RecordFastStorageHit() {
adaptivePromotionDecider.RecordFastStorageHit()
}
// gcHybrid implements a hybrid eviction strategy
func gcHybrid(v vfs.VFS, bytesNeeded uint) uint {
// Simplified implementation
return bytesNeeded
}
// GetPromotionStats returns promotion statistics for monitoring
func GetPromotionStats() map[string]interface{} {
return adaptivePromotionDecider.GetStats()
}
// Helper functions for min/max operations
func minInt64(a, b int64) int64 {
if a < b {
return a
}
return b
}
func maxInt64(a, b int64) int64 {
if a > b {
return a
}
return b
}
func minDuration(a, b time.Duration) time.Duration {
if a < b {
return a
}
return b
}
func maxDuration(a, b time.Duration) time.Duration {
if a > b {
return a
}
return b
}
// Ensure GCFS implements VFS.
var _ vfs.VFS = (*GCFS)(nil)
// GCFS is a virtual file system that calls a GC handler when the disk is full. The GC handler is responsible for freeing up space on the disk. The GCFS is a wrapper around another VFS.
type GCFS struct {
vfs.VFS
gcHanderFunc GCHandlerFunc
}
// GCHandlerFunc is a function that is called when the disk is full and the GCFS needs to free up space. It is passed the VFS and the size of the file that needs to be written. It's up to the implementation to decide how to free space and how much to free.
type GCHandlerFunc func(vfs vfs.VFS, size uint) error
func New(vfs vfs.VFS, gcHandlerFunc GCHandlerFunc) *GCFS {
return &GCFS{
VFS: vfs,
gcHanderFunc: gcHandlerFunc,
}
}
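// Example (sketch, not part of this commit): wiring an eviction pass into a
// GCFS. The handler below reuses this file's gcLRU helper and reports
// ErrInsufficientSpace when it cannot reclaim enough bytes; newLRUGCFS is a
// hypothetical constructor name used only for illustration.
func newLRUGCFS(backing vfs.VFS) *GCFS {
return New(backing, func(v vfs.VFS, size uint) error {
if freed := gcLRU(v, size); freed < size {
return ErrInsufficientSpace // could not reclaim enough space
}
return nil
})
}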
// Create overrides the Create method of the VFS interface. It tries to create the key; if that fails with a disk-full error, it calls the GC handler and tries again. If it still fails, it returns the error.
func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
w, err := g.VFS.Create(key, size) // try to create the key
for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil {
errGC := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
if errGC == ErrInsufficientSpace {
return nil, errGC // if the GC handler returns no files to delete, return the error
}
w, err = g.VFS.Create(key, size)
if err == vfserror.ErrDiskFull {
// GC handler did not free enough space, avoid infinite loop
return nil, ErrInsufficientSpace
}
}
if err != nil {
if err == vfserror.ErrDiskFull {
logger.Logger.Error().Str("key", key).Int64("size", size).Msg("Failed to create file due to disk full, even after GC")
} else {
logger.Logger.Error().Str("key", key).Int64("size", size).Err(err).Msg("Failed to create file")
}
}
return w, err
}
func (g *GCFS) Name() string {
return fmt.Sprintf("GCFS(%s)", g.VFS.Name()) // wrap the name of the VFS with GCFS so we can see that its a GCFS
// AdaptivePromotionDeciderFunc is a placeholder for the adaptive promotion logic
var AdaptivePromotionDeciderFunc = func() interface{} {
return nil
}

View File

@@ -1,42 +0,0 @@
// vfs/gc/gc_test.go
package gc
import (
"testing"
)
func TestGetGCAlgorithm(t *testing.T) {
tests := []struct {
name string
algorithm GCAlgorithm
expected bool // true if we expect a non-nil function
}{
{"LRU", LRU, true},
{"LFU", LFU, true},
{"FIFO", FIFO, true},
{"Largest", Largest, true},
{"Smallest", Smallest, true},
{"Hybrid", Hybrid, true},
{"Unknown", "unknown", true}, // should fall back to LRU
{"Empty", "", true}, // should fall back to LRU
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fn := GetGCAlgorithm(tt.algorithm)
if fn == nil {
t.Errorf("GetGCAlgorithm(%s) returned nil, expected non-nil function", tt.algorithm)
}
})
}
}
func TestGCAlgorithmConstants(t *testing.T) {
expectedAlgorithms := []GCAlgorithm{LRU, LFU, FIFO, Largest, Smallest, Hybrid}
for _, algo := range expectedAlgorithms {
if algo == "" {
t.Errorf("GC algorithm constant is empty")
}
}
}

View File

@@ -5,59 +5,19 @@ import (
"bytes"
"container/list"
"io"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"strings"
"sync"
"time"
"github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
memoryCapacityBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "memory_cache_capacity_bytes",
Help: "Total capacity of the memory cache in bytes",
},
)
memorySizeBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "memory_cache_size_bytes",
Help: "Total size of the memory cache in bytes",
},
)
memoryReadBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "memory_cache_read_bytes_total",
Help: "Total number of bytes read from the memory cache",
},
)
memoryWriteBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "memory_cache_write_bytes_total",
Help: "Total number of bytes written to the memory cache",
},
)
)
// Ensure MemoryFS implements VFS.
var _ vfs.VFS = (*MemoryFS)(nil)
// file represents a file in memory.
type file struct {
fileinfo *vfs.FileInfo
data []byte
}
// MemoryFS is an in-memory virtual file system
type MemoryFS struct {
data map[string]*bytes.Buffer
info map[string]*vfs.FileInfo
capacity int64
size int64
mu sync.RWMutex
@@ -65,7 +25,7 @@ type MemoryFS struct {
LRU *lruList
}
// lruList for time-decayed LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
@@ -78,176 +38,239 @@ func newLruList() *lruList {
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) {
elem := l.list.PushFront(fi)
l.elem[key] = elem
}
func (l *lruList) MoveToFront(key string) {
if elem, exists := l.elem[key]; exists {
l.list.MoveToFront(elem)
// Update the FileInfo in the element with new access time
if fi := elem.Value.(*vfs.FileInfo); fi != nil {
fi.UpdateAccess()
}
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
e := l.list.PushFront(fi)
l.elem[key] = e
return e
}
func (l *lruList) Remove(key string) *vfs.FileInfo {
if elem, exists := l.elem[key]; exists {
delete(l.elem, key)
if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
return fi
}
}
return nil
}
func (l *lruList) Back() *vfs.FileInfo {
if e := l.list.Back(); e != nil {
return e.Value.(*vfs.FileInfo)
}
return nil
}
func (l *lruList) Len() int {
return l.list.Len()
}
// New creates a new MemoryFS
func New(capacity int64) *MemoryFS {
if capacity <= 0 {
panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
panic("memory capacity must be greater than 0")
}
return &MemoryFS{
data: make(map[string]*bytes.Buffer),
info: make(map[string]*vfs.FileInfo),
capacity: capacity,
mu: sync.RWMutex{},
keyLocks: sync.Map{},
size: 0,
LRU: newLruList(),
}
}
// Name returns the name of this VFS
func (m *MemoryFS) Name() string {
return "MemoryFS"
}
// Size returns the current size
func (m *MemoryFS) Size() int64 {
m.mu.RLock()
defer m.mu.RUnlock()
return m.size
}
// Capacity returns the maximum capacity
func (m *MemoryFS) Capacity() int64 {
return m.capacity
}
// getKeyLock returns a lock for the given key
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
keyLock, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return keyLock.(*sync.RWMutex)
}
// Create creates a new file
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
m.mu.RLock()
if m.capacity > 0 && m.size+size > m.capacity {
m.mu.RUnlock()
return nil, vfserror.ErrDiskFull
}
m.mu.RUnlock()
keyMu := m.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
// Sanitize key to prevent path traversal
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
buf := &bytes.Buffer{}
return &memWriteCloser{
Writer: buf,
onClose: func() error {
data := buf.Bytes()
m.mu.Lock()
var accessCount int64 = 0
if f, exists := m.files[key]; exists {
m.size -= int64(len(f.data))
m.LRU.Remove(key)
accessCount = f.fileinfo.AccessCount // preserve access count if overwriting
}
fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
fi.AccessCount = accessCount
m.files[key] = &file{
fileinfo: fi,
data: data,
}
m.LRU.Add(key, fi)
m.size += int64(len(data))
m.mu.Unlock()
memoryWriteBytes.Add(float64(len(data)))
memorySizeBytes.Set(float64(m.Size()))
return nil
},
}, nil
}
type memWriteCloser struct {
io.Writer
onClose func() error
}
func (wc *memWriteCloser) Close() error {
return wc.onClose()
}
func (m *MemoryFS) Delete(key string) error {
keyMu := m.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
m.mu.Lock()
f, exists := m.files[key]
// Check if file already exists and handle overwrite
if fi, exists := m.info[key]; exists {
m.size -= fi.Size
m.LRU.Remove(key)
delete(m.info, key)
delete(m.data, key)
}
buffer := &bytes.Buffer{}
m.data[key] = buffer
fi := vfs.NewFileInfo(key, size)
m.info[key] = fi
m.LRU.Add(key, fi)
m.size += size
m.mu.Unlock()
return &memoryWriteCloser{
buffer: buffer,
memory: m,
key: key,
}, nil
}
// memoryWriteCloser implements io.WriteCloser for memory files
type memoryWriteCloser struct {
buffer *bytes.Buffer
memory *MemoryFS
key string
}
func (mwc *memoryWriteCloser) Write(p []byte) (n int, err error) {
return mwc.buffer.Write(p)
}
func (mwc *memoryWriteCloser) Close() error {
// Update the actual size in FileInfo
mwc.memory.mu.Lock()
if fi, exists := mwc.memory.info[mwc.key]; exists {
actualSize := int64(mwc.buffer.Len())
sizeDiff := actualSize - fi.Size
fi.Size = actualSize
mwc.memory.size += sizeDiff
}
mwc.memory.mu.Unlock()
return nil
}
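// Worked example of the accounting above (sketch): Create("k", 100) reserves
// 100 bytes up front; if only 40 bytes are written before Close, sizeDiff is
// 40-100 = -60, so m.size shrinks by 60 and matches the bytes actually stored.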
// Open opens a file for reading
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := m.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
m.mu.Lock()
fi, exists := m.info[key]
if !exists {
m.mu.Unlock()
return nil, vfserror.ErrNotFound
}
fi.UpdateAccess()
m.LRU.MoveToFront(key)
buffer, exists := m.data[key]
if !exists {
m.mu.Unlock()
return nil, vfserror.ErrNotFound
}
// Create a copy of the buffer for reading
data := make([]byte, buffer.Len())
copy(data, buffer.Bytes())
m.mu.Unlock()
return &memoryReadCloser{
reader: bytes.NewReader(data),
}, nil
}
// memoryReadCloser implements io.ReadCloser for memory files
type memoryReadCloser struct {
reader *bytes.Reader
}
func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) {
return mrc.reader.Read(p)
}
func (mrc *memoryReadCloser) Close() error {
return nil
}
// Delete removes a file
func (m *MemoryFS) Delete(key string) error {
if key == "" {
return vfserror.ErrInvalidKey
}
if key[0] == '/' {
return vfserror.ErrInvalidKey
}
if strings.Contains(key, "..") {
return vfserror.ErrInvalidKey
}
keyMu := m.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
m.mu.Lock()
fi, exists := m.info[key]
if !exists {
m.mu.Unlock()
return vfserror.ErrNotFound
}
m.size -= fi.Size
m.LRU.Remove(key)
delete(m.info, key)
delete(m.data, key)
m.mu.Unlock()
return nil
}
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
keyMu := m.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
m.mu.Lock()
f, exists := m.files[key]
if !exists {
m.mu.Unlock()
return nil, vfserror.ErrNotFound
}
f.fileinfo.ATime = time.Now()
f.fileinfo.AccessCount++ // Increment access count for LFU
m.LRU.MoveToFront(key)
dataCopy := make([]byte, len(f.data))
copy(dataCopy, f.data)
m.mu.Unlock()
memoryReadBytes.Add(float64(len(dataCopy)))
memorySizeBytes.Set(float64(m.Size()))
return io.NopCloser(bytes.NewReader(dataCopy)), nil
}
// Stat returns file information
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := m.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
@@ -255,24 +278,9 @@ func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
m.mu.RLock()
defer m.mu.RUnlock()
if fi, ok := m.info[key]; ok {
return fi, nil
}
return nil, vfserror.ErrNotFound
}
func (m *MemoryFS) StatAll() []*vfs.FileInfo {
m.mu.RLock()
defer m.mu.RUnlock()
// hard copy the file info to prevent modification of the original file info or the other way around
files := make([]*vfs.FileInfo, 0, len(m.files))
for _, v := range m.files {
fi := *v.fileinfo
files = append(files, &fi)
}
return files
}

View File

@@ -1,129 +0,0 @@
// vfs/memory/memory_test.go
package memory
import (
"errors"
"fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"testing"
)
func TestCreateAndOpen(t *testing.T) {
m := New(1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
}
func TestOverwrite(t *testing.T) {
m := New(1024)
key := "key"
value1 := []byte("value1")
value2 := []byte("value2")
w, err := m.Create(key, int64(len(value1)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value1)
w.Close()
w, err = m.Create(key, int64(len(value2)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value2)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value2) {
t.Fatalf("expected %s, got %s", value2, got)
}
}
func TestDelete(t *testing.T) {
m := New(1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
if err := m.Delete(key); err != nil {
t.Fatalf("Delete failed: %v", err)
}
_, err = m.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestCapacityLimit(t *testing.T) {
m := New(10)
for i := 0; i < 11; i++ {
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
if err != nil && i < 10 {
t.Errorf("Create failed: %v", err)
} else if i == 10 && err == nil {
t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
}
if i < 10 {
w.Write([]byte("1"))
w.Close()
}
}
}
func TestStat(t *testing.T) {
m := New(1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
info, err := m.Stat(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if info == nil {
t.Fatal("expected file info to be non-nil")
}
if info.Size() != int64(len(value)) {
t.Errorf("expected size %d, got %d", len(value), info.Size())
}
}

View File

@@ -1,28 +1,79 @@
// vfs/vfs.go
package vfs
import "io"
import (
"io"
"os"
"time"
)
// VFS defines the interface for virtual file systems
type VFS interface {
// Create creates a new file at the given key
Create(key string, size int64) (io.WriteCloser, error)
// Open opens the file at the given key for reading
Open(key string) (io.ReadCloser, error)
// Delete removes the file at the given key
Delete(key string) error
// Stat returns information about the file at the given key
Stat(key string) (*FileInfo, error)
// Name returns the name of this VFS
Name() string
// Size returns the current size of the VFS
Size() int64
// Capacity returns the maximum capacity of the VFS
Capacity() int64
}
// FileInfo contains metadata about a cached file
type FileInfo struct {
Key string `json:"key"`
Size int64 `json:"size"`
ATime time.Time `json:"atime"` // Last access time
CTime time.Time `json:"ctime"` // Creation time
AccessCount int `json:"access_count"`
}
// NewFileInfo creates a new FileInfo with the given key and current timestamp
func NewFileInfo(key string, size int64) *FileInfo {
now := time.Now()
return &FileInfo{
Key: key,
Size: size,
ATime: now,
CTime: now,
AccessCount: 1,
}
}
// NewFileInfoFromOS creates a FileInfo from os.FileInfo
func NewFileInfoFromOS(info os.FileInfo, key string) *FileInfo {
return &FileInfo{
Key: key,
Size: info.Size(),
ATime: time.Now(), // We don't have access time from os.FileInfo
CTime: info.ModTime(),
AccessCount: 1,
}
}
// UpdateAccess updates the access time and increments the access count
func (fi *FileInfo) UpdateAccess() {
fi.ATime = time.Now()
fi.AccessCount++
}
// GetTimeDecayedScore calculates a score based on access time and frequency
// More recent and frequent accesses get higher scores
func (fi *FileInfo) GetTimeDecayedScore() float64 {
timeSinceAccess := time.Since(fi.ATime).Hours()
decayFactor := 1.0 / (1.0 + timeSinceAccess/24.0) // Decay over days
frequencyBonus := float64(fi.AccessCount) * 0.1
return decayFactor + frequencyBonus
}
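// Worked example (sketch): a file accessed 5 times and last touched 12 hours
// ago scores 1/(1+12/24) + 5*0.1 ≈ 0.67 + 0.50 = 1.17, while a file with one
// access 3 days ago scores 1/(1+72/24) + 0.1 = 0.25 + 0.10 = 0.35, so the
// recently active file wins any comparison based on this score.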

View File

@@ -3,16 +3,10 @@ package vfserror
import "errors"
// Common VFS errors
var (
// ErrInvalidKey is returned when a key is invalid.
ErrInvalidKey = errors.New("vfs: invalid key")
// ErrUnreachable is returned when a code path is unreachable.
ErrUnreachable = errors.New("unreachable")
// ErrNotFound is returned when a key is not found.
ErrNotFound = errors.New("vfs: key not found")
// ErrDiskFull is returned when the disk is full.
ErrDiskFull = errors.New("vfs: disk full")
// ErrAlreadyExists is returned when a key already exists.
ErrAlreadyExists = errors.New("vfs: key already exists")
// ErrCapacityExceeded is returned when an operation would exceed the capacity.
ErrCapacityExceeded = errors.New("vfs: capacity exceeded")
)
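// Usage sketch (fs, key, and size are placeholders): callers can match these
// sentinels with errors.Is, or compare directly as the GC wrapper does, e.g.:
//
//	if _, err := fs.Create(key, size); errors.Is(err, ErrDiskFull) {
//		// trigger eviction and retry
//	}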