- Introduced integration tests for SteamCache to validate caching behavior with real Steam URLs.
- Implemented a ServiceManager to manage service configurations, allowing for dynamic detection of services based on User-Agent.
- Updated cache key generation to include service prefixes, enhancing cache organization and retrieval (see the sketch below).
- Enhanced the caching logic to support multiple services, starting with Steam and Epic Games.
- Improved .gitignore to exclude test cache files while retaining necessary structure.
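A minimal sketch of the idea behind the service-prefixed cache keys mentioned above. The function names, User-Agent substrings, and the example hash are illustrative assumptions, not the actual ServiceManager API:

// Hypothetical sketch only: shows how a service name detected from the
// User-Agent could become the key prefix that the disk layer below shards on.
package main

import (
	"fmt"
	"strings"
)

func detectService(userAgent string) string {
	switch {
	case strings.Contains(userAgent, "Valve/Steam"):
		return "steam"
	case strings.Contains(userAgent, "EpicGames"):
		return "epic"
	default:
		return "other"
	}
}

func main() {
	ua := "Valve/Steam HTTP Client 1.0"
	fmt.Println(detectService(ua) + "/ab12cd34ef") // -> steam/ab12cd34ef
}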
// vfs/disk/disk.go
package disk

import (
	"container/list"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/docker/go-units"
	"github.com/edsrzf/mmap-go"
)

// Ensure DiskFS implements VFS.
var _ vfs.VFS = (*DiskFS)(nil)

// DiskFS is a virtual file system that stores files on disk.
type DiskFS struct {
	root string

	info        map[string]*vfs.FileInfo
	capacity    int64
	size        int64
	mu          sync.RWMutex
	keyLocks    []sync.Map // Sharded lock pools for better concurrency
	LRU         *lruList
	timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
}

// Number of lock shards for reducing contention
const numLockShards = 32

// lruList for time-decayed LRU eviction
type lruList struct {
	list *list.List
	elem map[string]*list.Element
}

func newLruList() *lruList {
	return &lruList{
		list: list.New(),
		elem: make(map[string]*list.Element),
	}
}

func (l *lruList) Add(key string, fi *vfs.FileInfo) {
	elem := l.list.PushFront(fi)
	l.elem[key] = elem
}

func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
	if elem, exists := l.elem[key]; exists {
		l.list.MoveToFront(elem)
		// Update the FileInfo in the element with new access time
		if fi := elem.Value.(*vfs.FileInfo); fi != nil {
			fi.UpdateAccessBatched(timeUpdater)
		}
	}
}

func (l *lruList) Remove(key string) *vfs.FileInfo {
	if elem, exists := l.elem[key]; exists {
		delete(l.elem, key)
		if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
			return fi
		}
	}
	return nil
}

func (l *lruList) Len() int {
	return l.list.Len()
}

// shardPath converts a Steam cache key to a sharded directory path to reduce inode pressure
func (d *DiskFS) shardPath(key string) string {
	if !strings.HasPrefix(key, "steam/") {
		return key
	}

	// Extract hash part
	hashPart := key[6:] // Remove "steam/" prefix

	if len(hashPart) < 4 {
		// For very short hashes, single level sharding
		if len(hashPart) >= 2 {
			shard1 := hashPart[:2]
			return filepath.Join("steam", shard1, hashPart)
		}
		return filepath.Join("steam", hashPart)
	}

	// Optimal 2-level sharding for Steam hashes (typically 40 chars)
	shard1 := hashPart[:2]  // First 2 chars
	shard2 := hashPart[2:4] // Next 2 chars
	return filepath.Join("steam", shard1, shard2, hashPart)
}

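// Illustrative example (not part of the original source): under the two-level
// sharding above, a hypothetical key such as "steam/ab12cd34..." is stored at
// "steam/ab/12/ab12cd34...", so no single cache directory accumulates an
// excessive number of entries.
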
// extractKeyFromPath reverses the sharding logic to get the original key from a sharded path
func (d *DiskFS) extractKeyFromPath(path string) string {
	// Fast path: if no slashes, it's not a sharded path
	if !strings.Contains(path, "/") {
		return path
	}

	parts := strings.SplitN(path, "/", 5)
	numParts := len(parts)

	if numParts >= 4 && parts[0] == "steam" {
		lastThree := parts[numParts-3:]
		shard1 := lastThree[0]
		shard2 := lastThree[1]
		filename := lastThree[2]

		// Verify sharding is correct
		if len(filename) >= 4 && filename[:2] == shard1 && filename[2:4] == shard2 {
			return "steam/" + filename
		}
	}

	// Handle single-level sharding for short hashes: steam/shard1/filename
	if numParts >= 3 && parts[0] == "steam" {
		lastTwo := parts[numParts-2:]
		shard1 := lastTwo[0]
		filename := lastTwo[1]

		if len(filename) >= 2 && filename[:2] == shard1 {
			return "steam/" + filename
		}
	}

	// Fallback: return as-is for any unrecognized format
	return path
}

// New creates a new DiskFS.
func New(root string, capacity int64) *DiskFS {
	if capacity <= 0 {
		panic("disk capacity must be greater than 0")
	}

	// Create root directory if it doesn't exist
	os.MkdirAll(root, 0755)

	// Initialize sharded locks
	keyLocks := make([]sync.Map, numLockShards)

	d := &DiskFS{
		root:        root,
		info:        make(map[string]*vfs.FileInfo),
		capacity:    capacity,
		size:        0,
		keyLocks:    keyLocks,
		LRU:         newLruList(),
		timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
	}

	d.init()
	return d
}

// init loads existing files from disk and migrates legacy depot files to sharded structure
func (d *DiskFS) init() {
	tstart := time.Now()

	var depotFiles []string // Track depot files that need migration

	err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			return nil
		}

		d.mu.Lock()
		// Extract key from sharded path: remove root and convert sharding back
		// Handle both "./disk" and "disk" root paths
		rootPath := d.root
		if strings.HasPrefix(rootPath, "./") {
			rootPath = rootPath[2:] // Remove "./" prefix
		}
		relPath := strings.ReplaceAll(npath[len(rootPath)+1:], "\\", "/")

		// Extract the original key from the sharded path
		k := d.extractKeyFromPath(relPath)

		fi := vfs.NewFileInfoFromOS(info, k)
		d.info[k] = fi
		d.LRU.Add(k, fi)
		// Initialize access time with file modification time
		fi.UpdateAccessBatched(d.timeUpdater)
		d.size += info.Size()

		// Track depot files for potential migration
		if strings.HasPrefix(relPath, "depot/") {
			depotFiles = append(depotFiles, relPath)
		}

		d.mu.Unlock()

		return nil
	})
	if err != nil {
		logger.Logger.Error().Err(err).Msg("Walk failed")
	}

	// Migrate depot files to sharded structure if any exist
	if len(depotFiles) > 0 {
		logger.Logger.Info().Int("count", len(depotFiles)).Msg("Found legacy depot files, starting migration")
		d.migrateDepotFiles(depotFiles)
	}

	logger.Logger.Info().
		Str("name", d.Name()).
		Str("root", d.root).
		Str("capacity", units.HumanSize(float64(d.capacity))).
		Str("size", units.HumanSize(float64(d.Size()))).
		Str("files", fmt.Sprint(len(d.info))).
		Str("duration", time.Since(tstart).String()).
		Msg("init")
}

// migrateDepotFiles moves legacy depot files to the sharded steam structure
func (d *DiskFS) migrateDepotFiles(depotFiles []string) {
	migratedCount := 0
	errorCount := 0

	for _, relPath := range depotFiles {
		// Extract the steam key from the depot path
		steamKey := d.extractKeyFromPath(relPath)
		if !strings.HasPrefix(steamKey, "steam/") {
			// Skip if we can't extract a proper steam key
			errorCount++
			continue
		}

		// Get the source and destination paths
		sourcePath := filepath.Join(d.root, relPath)
		shardedPath := d.shardPath(steamKey)
		destPath := filepath.Join(d.root, shardedPath)

		// Create destination directory
		destDir := filepath.Dir(destPath)
		if err := os.MkdirAll(destDir, 0755); err != nil {
			logger.Logger.Error().Err(err).Str("path", destDir).Msg("Failed to create migration destination directory")
			errorCount++
			continue
		}

		// Move the file
		if err := os.Rename(sourcePath, destPath); err != nil {
			logger.Logger.Error().Err(err).Str("from", sourcePath).Str("to", destPath).Msg("Failed to migrate depot file")
			errorCount++
			continue
		}

		migratedCount++

		// Clean up empty depot directories (this is a simple cleanup, may not handle all cases)
		d.cleanupEmptyDepotDirs(filepath.Dir(sourcePath))
	}

	logger.Logger.Info().
		Int("migrated", migratedCount).
		Int("errors", errorCount).
		Msg("Depot file migration completed")
}

// cleanupEmptyDepotDirs removes empty depot directories after migration
func (d *DiskFS) cleanupEmptyDepotDirs(dirPath string) {
	for dirPath != d.root && strings.HasPrefix(dirPath, filepath.Join(d.root, "depot")) {
		entries, err := os.ReadDir(dirPath)
		if err != nil || len(entries) > 0 {
			break
		}

		// Directory is empty, remove it
		if err := os.Remove(dirPath); err != nil {
			logger.Logger.Error().Err(err).Str("dir", dirPath).Msg("Failed to remove empty depot directory")
			break
		}

		// Move up to parent directory
		dirPath = filepath.Dir(dirPath)
	}
}

// Name returns the name of this VFS
func (d *DiskFS) Name() string {
	return "DiskFS"
}

// Size returns the current size
func (d *DiskFS) Size() int64 {
	d.mu.RLock()
	defer d.mu.RUnlock()
	return d.size
}

// Capacity returns the maximum capacity
func (d *DiskFS) Capacity() int64 {
	return d.capacity
}

// getShardIndex returns the shard index for a given key
func getShardIndex(key string) int {
	// Use FNV-1a hash for good distribution
	var h uint32 = 2166136261 // FNV offset basis
	for i := 0; i < len(key); i++ {
		h ^= uint32(key[i])
		h *= 16777619 // FNV prime
	}
	return int(h % numLockShards)
}

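// Added note (not in the original source): hashing each key into one of the 32
// sync.Map shards above means unrelated keys usually obtain their per-key
// locks from different maps, which keeps contention on any single lock map low
// under concurrent access.
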
// getKeyLock returns a lock for the given key using sharding
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
	shardIndex := getShardIndex(key)
	shard := &d.keyLocks[shardIndex]

	keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
	return keyLock.(*sync.RWMutex)
}

// Create creates a new file
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/")
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	d.mu.Lock()
	// Check if file already exists and handle overwrite
	if fi, exists := d.info[key]; exists {
		d.size -= fi.Size
		d.LRU.Remove(key)
		delete(d.info, key)
	}

	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	d.mu.Unlock()

	path = strings.ReplaceAll(path, "\\", "/")
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, err
	}

	file, err := os.Create(path)
	if err != nil {
		return nil, err
	}

	fi := vfs.NewFileInfo(key, size)
	d.mu.Lock()
	d.info[key] = fi
	d.LRU.Add(key, fi)
	// Initialize access time with current time
	fi.UpdateAccessBatched(d.timeUpdater)
	d.size += size
	d.mu.Unlock()

	return &diskWriteCloser{
		file:         file,
		disk:         d,
		key:          key,
		declaredSize: size,
	}, nil
}

// diskWriteCloser implements io.WriteCloser for disk files with size adjustment
type diskWriteCloser struct {
	file         *os.File
	disk         *DiskFS
	key          string
	declaredSize int64
}

func (dwc *diskWriteCloser) Write(p []byte) (n int, err error) {
	return dwc.file.Write(p)
}

func (dwc *diskWriteCloser) Close() error {
	// Get the actual file size
	stat, err := dwc.file.Stat()
	if err != nil {
		dwc.file.Close()
		return err
	}

	actualSize := stat.Size()

	// Update the size in FileInfo if it differs from declared size
	dwc.disk.mu.Lock()
	if fi, exists := dwc.disk.info[dwc.key]; exists {
		sizeDiff := actualSize - fi.Size
		fi.Size = actualSize
		dwc.disk.size += sizeDiff
	}
	dwc.disk.mu.Unlock()

	return dwc.file.Close()
}

// Open opens a file for reading
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/")
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	d.mu.Lock()
	fi, exists := d.info[key]
	if !exists {
		d.mu.Unlock()
		return nil, vfserror.ErrNotFound
	}
	fi.UpdateAccessBatched(d.timeUpdater)
	d.LRU.MoveToFront(key, d.timeUpdater)
	d.mu.Unlock()

	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	path = strings.ReplaceAll(path, "\\", "/")

	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	// Use memory mapping for large files (>1MB) to improve performance
	const mmapThreshold = 1024 * 1024 // 1MB
	if fi.Size > mmapThreshold {
		// Close the regular file handle
		file.Close()

		// Try memory mapping
		mmapFile, err := os.Open(path)
		if err != nil {
			return nil, err
		}

		mapped, err := mmap.Map(mmapFile, mmap.RDONLY, 0)
		if err != nil {
			mmapFile.Close()
			// Fallback to regular file reading
			return os.Open(path)
		}

		return &mmapReadCloser{
			data:   mapped,
			file:   mmapFile,
			offset: 0,
		}, nil
	}

	return file, nil
}

// mmapReadCloser implements io.ReadCloser for memory-mapped files
type mmapReadCloser struct {
	data   mmap.MMap
	file   *os.File
	offset int
}

func (m *mmapReadCloser) Read(p []byte) (n int, err error) {
	if m.offset >= len(m.data) {
		return 0, io.EOF
	}

	n = copy(p, m.data[m.offset:])
	m.offset += n
	return n, nil
}

func (m *mmapReadCloser) Close() error {
	m.data.Unmap()
	return m.file.Close()
}

// Delete removes a file
func (d *DiskFS) Delete(key string) error {
	if key == "" {
		return vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	d.mu.Lock()
	fi, exists := d.info[key]
	if !exists {
		d.mu.Unlock()
		return vfserror.ErrNotFound
	}
	d.size -= fi.Size
	d.LRU.Remove(key)
	delete(d.info, key)
	d.mu.Unlock()

	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	path = strings.ReplaceAll(path, "\\", "/")

	err := os.Remove(path)
	if err != nil {
		return err
	}

	return nil
}

// Stat returns file information
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	d.mu.RLock()
	defer d.mu.RUnlock()

	if fi, ok := d.info[key]; ok {
		return fi, nil
	}

	// Check if file exists on disk but wasn't indexed (for migration)
	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	path = strings.ReplaceAll(path, "\\", "/")

	if info, err := os.Stat(path); err == nil {
		// File exists in sharded location but not indexed, re-index it
		fi := vfs.NewFileInfoFromOS(info, key)
		// We can't modify the map here because we're in a read lock
		// This is a simplified version - in production you'd need to handle this properly
		return fi, nil
	}

	return nil, vfserror.ErrNotFound
}

// EvictLRU evicts the least recently used files to free up space
func (d *DiskFS) EvictLRU(bytesNeeded uint) uint {
	d.mu.Lock()
	defer d.mu.Unlock()

	var evicted uint

	// Evict from LRU list until we free enough space
	for d.size > d.capacity-int64(bytesNeeded) && d.LRU.Len() > 0 {
		// Get the least recently used item
		elem := d.LRU.list.Back()
		if elem == nil {
			break
		}

		fi := elem.Value.(*vfs.FileInfo)
		key := fi.Key

		// Remove from LRU
		d.LRU.Remove(key)

		// Remove from map
		delete(d.info, key)

		// Remove file from disk
		shardedPath := d.shardPath(key)
		path := filepath.Join(d.root, shardedPath)
		path = strings.ReplaceAll(path, "\\", "/")

		if err := os.Remove(path); err != nil {
			// Log error but continue
			continue
		}

		// Update size
		d.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := getShardIndex(key)
		d.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

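// Added note (not in the original source): the guard "d.size > d.capacity-int64(bytesNeeded)"
// used by the eviction loops keeps evicting until size+bytesNeeded <= capacity.
// For example, with a 100 GiB capacity, 99 GiB in use, and 2 GiB needed, files
// are removed until the tracked size drops to 98 GiB or candidates run out.
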
// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
func (d *DiskFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
	d.mu.Lock()
	defer d.mu.Unlock()

	var evicted uint
	var candidates []*vfs.FileInfo

	// Collect all files
	for _, fi := range d.info {
		candidates = append(candidates, fi)
	}

	// Sort by size
	sort.Slice(candidates, func(i, j int) bool {
		if ascending {
			return candidates[i].Size < candidates[j].Size
		}
		return candidates[i].Size > candidates[j].Size
	})

	// Evict files until we free enough space
	for _, fi := range candidates {
		if d.size <= d.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		d.LRU.Remove(key)

		// Remove from map
		delete(d.info, key)

		// Remove file from disk
		shardedPath := d.shardPath(key)
		path := filepath.Join(d.root, shardedPath)
		path = strings.ReplaceAll(path, "\\", "/")

		if err := os.Remove(path); err != nil {
			continue
		}

		// Update size
		d.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := getShardIndex(key)
		d.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

// EvictFIFO evicts files using FIFO (oldest creation time first)
func (d *DiskFS) EvictFIFO(bytesNeeded uint) uint {
	d.mu.Lock()
	defer d.mu.Unlock()

	var evicted uint
	var candidates []*vfs.FileInfo

	// Collect all files
	for _, fi := range d.info {
		candidates = append(candidates, fi)
	}

	// Sort by creation time (oldest first)
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].CTime.Before(candidates[j].CTime)
	})

	// Evict oldest files until we free enough space
	for _, fi := range candidates {
		if d.size <= d.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		d.LRU.Remove(key)

		// Remove from map
		delete(d.info, key)

		// Remove file from disk
		shardedPath := d.shardPath(key)
		path := filepath.Join(d.root, shardedPath)
		path = strings.ReplaceAll(path, "\\", "/")

		if err := os.Remove(path); err != nil {
			continue
		}

		// Update size
		d.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := getShardIndex(key)
		d.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}
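A minimal usage sketch of the DiskFS above, assuming the module path seen in the file's own imports; the key and payload are made up for illustration:

// Hypothetical usage sketch, not part of the repository.
package main

import (
	"fmt"
	"io"
	"strings"

	"s1d3sw1ped/SteamCache2/vfs/disk"
)

func main() {
	// Back the cache with 1 GiB of disk under ./cache.
	d := disk.New("./cache", 1<<30)

	// Write a chunk under a service-prefixed key; Close reconciles the
	// declared size with the bytes actually written.
	payload := "chunk bytes"
	w, err := d.Create("steam/ab12cd34ef", int64(len(payload)))
	if err != nil {
		panic(err)
	}
	io.Copy(w, strings.NewReader(payload))
	w.Close()

	// Read it back; files over 1MB come back memory-mapped, smaller ones as plain files.
	r, err := d.Open("steam/ab12cd34ef")
	if err != nil {
		panic(err)
	}
	defer r.Close()
	data, _ := io.ReadAll(r)
	fmt.Printf("%s (%d/%d bytes used)\n", data, d.Size(), d.Capacity())
}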