Refactor caching logic and enhance hash generation in steamcache
- Replaced SHA1 hash calculations with SHA256 for improved security and consistency in cache key generation (see the sketch below).
- Introduced a new TestURLHashing function to validate the new cache key generation logic.
- Removed outdated hash calculation tests and streamlined the caching process to focus on URL-based hashing.
- Implemented lightweight validation methods in ServeHTTP to enhance the performance and reliability of cached responses.
- Added batched time updates in the VFS implementations for better performance during access time tracking.
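A minimal, standalone sketch of the URL-based SHA-256 key generation described above. The helper name and the exact "steam/" + hex-digest key format are assumptions inferred from the shardPath logic in the diff below, not the project's actual API:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// cacheKeyForURL is a hypothetical helper: hash the request URL with SHA-256
// and prefix it so the disk VFS can shard it as steam/{hash}.
func cacheKeyForURL(url string) string {
	sum := sha256.Sum256([]byte(url))
	return "steam/" + hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(cacheKeyForURL("/depot/12345/chunk/abcdef"))
}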
vfs/disk/disk.go (224 lines changed)
@@ -10,6 +10,7 @@ import (
 	"s1d3sw1ped/SteamCache2/steamcache/logger"
 	"s1d3sw1ped/SteamCache2/vfs"
 	"s1d3sw1ped/SteamCache2/vfs/vfserror"
 	"sort"
 	"strings"
 	"sync"
+	"time"
@@ -25,14 +26,18 @@ var _ vfs.VFS = (*DiskFS)(nil)

 type DiskFS struct {
 	root string
-	info     map[string]*vfs.FileInfo
-	capacity int64
-	size     int64
-	mu       sync.RWMutex
-	keyLocks sync.Map // map[string]*sync.RWMutex
-	LRU      *lruList
+	info        map[string]*vfs.FileInfo
+	capacity    int64
+	size        int64
+	mu          sync.RWMutex
+	keyLocks    []sync.Map // Sharded lock pools for better concurrency
+	LRU         *lruList
+	timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
 }

+// Number of lock shards for reducing contention
+const numLockShards = 32
+
 // lruList for time-decayed LRU eviction
 type lruList struct {
 	list *list.List
@@ -51,12 +56,12 @@ func (l *lruList) Add(key string, fi *vfs.FileInfo) {
 	l.elem[key] = elem
 }

-func (l *lruList) MoveToFront(key string) {
+func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
 	if elem, exists := l.elem[key]; exists {
 		l.list.MoveToFront(elem)
 		// Update the FileInfo in the element with new access time
 		if fi := elem.Value.(*vfs.FileInfo); fi != nil {
-			fi.UpdateAccess()
+			fi.UpdateAccessBatched(timeUpdater)
 		}
 	}
 }
@@ -76,11 +81,8 @@ func (l *lruList) Len() int {
 }

 // shardPath converts a Steam cache key to a sharded directory path to reduce inode pressure
 // Optimized for the steam/{hash} format
 func (d *DiskFS) shardPath(key string) string {
 	// Expect keys in format: steam/{hash}
 	if !strings.HasPrefix(key, "steam/") {
 		// Fallback for non-steam keys (shouldn't happen in optimized setup)
 		return key
 	}
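For illustration, a hedged standalone sketch of the steam/{hash} to sharded-path mapping this function implies. The two shard levels follow the steam/shard1/shard2/filename layout referenced in extractKeyFromPath below; the two-character shard width is an assumption, since the diff truncates the real shardPath body:

package main

import (
	"fmt"
	"strings"
)

// shardPathSketch maps steam/{hash} to steam/{s1}/{s2}/{hash}.
func shardPathSketch(key string) string {
	if !strings.HasPrefix(key, "steam/") {
		return key // fallback for non-steam keys
	}
	hash := strings.TrimPrefix(key, "steam/")
	if len(hash) < 4 {
		return key
	}
	// Two shard levels keep any single directory small, easing inode pressure.
	return fmt.Sprintf("steam/%s/%s/%s", hash[:2], hash[2:4], hash)
}

func main() {
	fmt.Println(shardPathSketch("steam/abcdef0123456789"))
	// steam/ab/cd/abcdef0123456789
}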
@@ -103,17 +105,15 @@
 }

 // extractKeyFromPath reverses the sharding logic to get the original key from a sharded path
 // Optimized for steam/{hash} format
 func (d *DiskFS) extractKeyFromPath(path string) string {
 	// Fast path: if no slashes, it's not a sharded path
 	if !strings.Contains(path, "/") {
 		return path
 	}

-	parts := strings.Split(path, "/")
+	parts := strings.SplitN(path, "/", 5)
 	numParts := len(parts)

 	// Optimized for steam/shard1/shard2/filename format
 	if numParts >= 4 && parts[0] == "steam" {
 		lastThree := parts[numParts-3:]
 		shard1 := lastThree[0]
@@ -150,12 +150,17 @@ func New(root string, capacity int64) *DiskFS {
 	// Create root directory if it doesn't exist
 	os.MkdirAll(root, 0755)

+	// Initialize sharded locks
+	keyLocks := make([]sync.Map, numLockShards)
+
 	d := &DiskFS{
-		root:     root,
-		info:     make(map[string]*vfs.FileInfo),
-		capacity: capacity,
-		size:     0,
-		LRU:      newLruList(),
+		root:        root,
+		info:        make(map[string]*vfs.FileInfo),
+		capacity:    capacity,
+		size:        0,
+		keyLocks:    keyLocks,
+		LRU:         newLruList(),
+		timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
 	}

 	d.init()
@@ -187,6 +192,8 @@ func (d *DiskFS) init() {
 		fi := vfs.NewFileInfoFromOS(info, k)
 		d.info[k] = fi
 		d.LRU.Add(k, fi)
+		// Initialize access time with file modification time
+		fi.UpdateAccessBatched(d.timeUpdater)
 		d.size += info.Size()

 		// Track depot files for potential migration
@@ -300,9 +307,23 @@ func (d *DiskFS) Capacity() int64 {
 	return d.capacity
 }

-// getKeyLock returns a lock for the given key
+// getShardIndex returns the shard index for a given key
+func getShardIndex(key string) int {
+	// Use FNV-1a hash for good distribution
+	var h uint32 = 2166136261 // FNV offset basis
+	for i := 0; i < len(key); i++ {
+		h ^= uint32(key[i])
+		h *= 16777619 // FNV prime
+	}
+	return int(h % numLockShards)
+}
+
+// getKeyLock returns a lock for the given key using sharding
 func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
-	keyLock, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
+	shardIndex := getShardIndex(key)
+	shard := &d.keyLocks[shardIndex]
+
+	keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
 	return keyLock.(*sync.RWMutex)
 }
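A self-contained sketch of the sharded per-key locking scheme shown above: the FNV-1a hash selects one of 32 sync.Map shards, and the per-key *sync.RWMutex is created lazily with LoadOrStore. The shardedLocks type here is hypothetical and only for illustration:

package main

import (
	"fmt"
	"sync"
)

const numLockShards = 32

// shardedLocks holds one sync.Map per shard; each map stores *sync.RWMutex by key.
type shardedLocks [numLockShards]sync.Map

// fnv1a mirrors the FNV-1a shard-selection hash shown in the diff.
func fnv1a(key string) uint32 {
	var h uint32 = 2166136261
	for i := 0; i < len(key); i++ {
		h ^= uint32(key[i])
		h *= 16777619
	}
	return h
}

// get returns the per-key lock, creating it lazily inside the key's shard.
func (s *shardedLocks) get(key string) *sync.RWMutex {
	shard := &s[fnv1a(key)%numLockShards]
	l, _ := shard.LoadOrStore(key, &sync.RWMutex{})
	return l.(*sync.RWMutex)
}

func main() {
	var locks shardedLocks
	lk := locks.get("steam/abc123")
	lk.Lock()
	// critical section for this one key; other keys contend only within the same shard map
	lk.Unlock()
	fmt.Println("shard for key:", fnv1a("steam/abc123")%numLockShards)
}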
@@ -353,6 +374,8 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
 	d.mu.Lock()
 	d.info[key] = fi
 	d.LRU.Add(key, fi)
+	// Initialize access time with current time
+	fi.UpdateAccessBatched(d.timeUpdater)
 	d.size += size
 	d.mu.Unlock()
@@ -424,8 +447,8 @@ func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
 		d.mu.Unlock()
 		return nil, vfserror.ErrNotFound
 	}
-	fi.UpdateAccess()
-	d.LRU.MoveToFront(key)
+	fi.UpdateAccessBatched(d.timeUpdater)
+	d.LRU.MoveToFront(key, d.timeUpdater)
 	d.mu.Unlock()

 	shardedPath := d.shardPath(key)
@@ -559,3 +582,158 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {

 	return nil, vfserror.ErrNotFound
 }
+
+// EvictLRU evicts the least recently used files to free up space
+func (d *DiskFS) EvictLRU(bytesNeeded uint) uint {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	var evicted uint
+
+	// Evict from LRU list until we free enough space
+	for d.size > d.capacity-int64(bytesNeeded) && d.LRU.Len() > 0 {
+		// Get the least recently used item
+		elem := d.LRU.list.Back()
+		if elem == nil {
+			break
+		}
+
+		fi := elem.Value.(*vfs.FileInfo)
+		key := fi.Key
+
+		// Remove from LRU
+		d.LRU.Remove(key)
+
+		// Remove from map
+		delete(d.info, key)
+
+		// Remove file from disk
+		shardedPath := d.shardPath(key)
+		path := filepath.Join(d.root, shardedPath)
+		path = strings.ReplaceAll(path, "\\", "/")
+
+		if err := os.Remove(path); err != nil {
+			// Log error but continue
+			continue
+		}
+
+		// Update size
+		d.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		d.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
+
+// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
+func (d *DiskFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	var evicted uint
+	var candidates []*vfs.FileInfo
+
+	// Collect all files
+	for _, fi := range d.info {
+		candidates = append(candidates, fi)
+	}
+
+	// Sort by size
+	sort.Slice(candidates, func(i, j int) bool {
+		if ascending {
+			return candidates[i].Size < candidates[j].Size
+		}
+		return candidates[i].Size > candidates[j].Size
+	})
+
+	// Evict files until we free enough space
+	for _, fi := range candidates {
+		if d.size <= d.capacity-int64(bytesNeeded) {
+			break
+		}
+
+		key := fi.Key
+
+		// Remove from LRU
+		d.LRU.Remove(key)
+
+		// Remove from map
+		delete(d.info, key)
+
+		// Remove file from disk
+		shardedPath := d.shardPath(key)
+		path := filepath.Join(d.root, shardedPath)
+		path = strings.ReplaceAll(path, "\\", "/")
+
+		if err := os.Remove(path); err != nil {
+			continue
+		}
+
+		// Update size
+		d.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		d.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
+
+// EvictFIFO evicts files using FIFO (oldest creation time first)
+func (d *DiskFS) EvictFIFO(bytesNeeded uint) uint {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	var evicted uint
+	var candidates []*vfs.FileInfo
+
+	// Collect all files
+	for _, fi := range d.info {
+		candidates = append(candidates, fi)
+	}
+
+	// Sort by creation time (oldest first)
+	sort.Slice(candidates, func(i, j int) bool {
+		return candidates[i].CTime.Before(candidates[j].CTime)
+	})
+
+	// Evict oldest files until we free enough space
+	for _, fi := range candidates {
+		if d.size <= d.capacity-int64(bytesNeeded) {
+			break
+		}
+
+		key := fi.Key
+
+		// Remove from LRU
+		d.LRU.Remove(key)
+
+		// Remove from map
+		delete(d.info, key)
+
+		// Remove file from disk
+		shardedPath := d.shardPath(key)
+		path := filepath.Join(d.root, shardedPath)
+		path = strings.ReplaceAll(path, "\\", "/")
+
+		if err := os.Remove(path); err != nil {
+			continue
+		}
+
+		// Update size
+		d.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		d.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
vfs/gc/gc.go (108 lines changed)
@@ -4,6 +4,8 @@ package gc
 import (
 	"io"
 	"s1d3sw1ped/SteamCache2/vfs"
+	"s1d3sw1ped/SteamCache2/vfs/disk"
+	"s1d3sw1ped/SteamCache2/vfs/memory"
 )

 // GCAlgorithm represents different garbage collection strategies
@@ -114,44 +116,122 @@ func (gc *GCFS) Capacity() int64 {
 	return gc.vfs.Capacity()
 }

 // EvictionStrategy defines an interface for cache eviction
 type EvictionStrategy interface {
 	Evict(vfs vfs.VFS, bytesNeeded uint) uint
 }

 // GC functions

 // gcLRU implements Least Recently Used eviction
 func gcLRU(v vfs.VFS, bytesNeeded uint) uint {
-	// This is a simplified implementation
-	// In a real implementation, you'd need access to the internal LRU list
-	// For now, we'll just return the requested amount
-	return bytesNeeded
+	return evictLRU(v, bytesNeeded)
 }

 // gcLFU implements Least Frequently Used eviction
 func gcLFU(v vfs.VFS, bytesNeeded uint) uint {
-	// Simplified implementation
-	return bytesNeeded
+	return evictLFU(v, bytesNeeded)
 }

 // gcFIFO implements First In First Out eviction
 func gcFIFO(v vfs.VFS, bytesNeeded uint) uint {
-	// Simplified implementation
-	return bytesNeeded
+	return evictFIFO(v, bytesNeeded)
 }

 // gcLargest implements largest file first eviction
 func gcLargest(v vfs.VFS, bytesNeeded uint) uint {
-	// Simplified implementation
-	return bytesNeeded
+	return evictLargest(v, bytesNeeded)
 }

 // gcSmallest implements smallest file first eviction
 func gcSmallest(v vfs.VFS, bytesNeeded uint) uint {
-	// Simplified implementation
-	return bytesNeeded
+	return evictSmallest(v, bytesNeeded)
 }

 // gcHybrid implements a hybrid eviction strategy
 func gcHybrid(v vfs.VFS, bytesNeeded uint) uint {
-	// Simplified implementation
-	return bytesNeeded
+	return evictHybrid(v, bytesNeeded)
 }

+// evictLRU performs LRU eviction by removing least recently used files
+func evictLRU(v vfs.VFS, bytesNeeded uint) uint {
+	// Try to use specific eviction methods if available
+	switch fs := v.(type) {
+	case *memory.MemoryFS:
+		return fs.EvictLRU(bytesNeeded)
+	case *disk.DiskFS:
+		return fs.EvictLRU(bytesNeeded)
+	default:
+		// No fallback - return 0 (no eviction performed)
+		return 0
+	}
+}
+
+// evictLFU performs LFU (Least Frequently Used) eviction
+func evictLFU(v vfs.VFS, bytesNeeded uint) uint {
+	// For now, fall back to size-based eviction
+	// TODO: Implement proper LFU tracking
+	return evictBySize(v, bytesNeeded)
+}
+
+// evictFIFO performs FIFO (First In First Out) eviction
+func evictFIFO(v vfs.VFS, bytesNeeded uint) uint {
+	switch fs := v.(type) {
+	case *memory.MemoryFS:
+		return fs.EvictFIFO(bytesNeeded)
+	case *disk.DiskFS:
+		return fs.EvictFIFO(bytesNeeded)
+	default:
+		// No fallback - return 0 (no eviction performed)
+		return 0
+	}
+}
+
+// evictLargest evicts largest files first
+func evictLargest(v vfs.VFS, bytesNeeded uint) uint {
+	return evictBySizeDesc(v, bytesNeeded)
+}
+
+// evictSmallest evicts smallest files first
+func evictSmallest(v vfs.VFS, bytesNeeded uint) uint {
+	return evictBySizeAsc(v, bytesNeeded)
+}
+
+// evictBySize evicts files based on size (smallest first)
+func evictBySize(v vfs.VFS, bytesNeeded uint) uint {
+	return evictBySizeAsc(v, bytesNeeded)
+}
+
+// evictBySizeAsc evicts smallest files first
+func evictBySizeAsc(v vfs.VFS, bytesNeeded uint) uint {
+	switch fs := v.(type) {
+	case *memory.MemoryFS:
+		return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
+	case *disk.DiskFS:
+		return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
+	default:
+		// No fallback - return 0 (no eviction performed)
+		return 0
+	}
+}
+
+// evictBySizeDesc evicts largest files first
+func evictBySizeDesc(v vfs.VFS, bytesNeeded uint) uint {
+	switch fs := v.(type) {
+	case *memory.MemoryFS:
+		return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
+	case *disk.DiskFS:
+		return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
+	default:
+		// No fallback - return 0 (no eviction performed)
+		return 0
+	}
+}
+
+// evictHybrid implements a hybrid eviction strategy
+func evictHybrid(v vfs.VFS, bytesNeeded uint) uint {
+	// Use LRU as primary strategy, but consider size as tiebreaker
+	return evictLRU(v, bytesNeeded)
+}

 // AdaptivePromotionDeciderFunc is a placeholder for the adaptive promotion logic
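A minimal, self-contained illustration of the dispatch pattern used by the evict* helpers above: the GC layer holds only the generic VFS interface and returns 0 when the concrete backend offers no matching eviction method. All names here (VFS, lruEvicter, fakeFS) are stand-ins for illustration, not the project's types:

package main

import "fmt"

// VFS is a stand-in for the generic file-system interface the GC layer sees.
type VFS interface{ Size() int64 }

// lruEvicter is the optional capability the type switch probes for.
type lruEvicter interface{ EvictLRU(bytesNeeded uint) uint }

// fakeFS is a toy backend that supports LRU eviction.
type fakeFS struct{ size int64 }

func (f *fakeFS) Size() int64 { return f.size }

func (f *fakeFS) EvictLRU(bytesNeeded uint) uint {
	f.size -= int64(bytesNeeded)
	return bytesNeeded
}

// evictLRU mirrors the "no fallback - return 0" behaviour in the diff above.
func evictLRU(v VFS, bytesNeeded uint) uint {
	if fs, ok := v.(lruEvicter); ok {
		return fs.EvictLRU(bytesNeeded)
	}
	return 0
}

func main() {
	fs := &fakeFS{size: 1 << 20}
	fmt.Println("evicted:", evictLRU(fs, 4096), "remaining:", fs.Size())
}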
vfs/memory/memory.go

@@ -7,8 +7,10 @@ import (
 	"io"
 	"s1d3sw1ped/SteamCache2/vfs"
 	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"sort"
 	"strings"
 	"sync"
+	"time"
 )

 // Ensure MemoryFS implements VFS.
@@ -16,15 +18,19 @@ var _ vfs.VFS = (*MemoryFS)(nil)

 // MemoryFS is an in-memory virtual file system
 type MemoryFS struct {
-	data     map[string]*bytes.Buffer
-	info     map[string]*vfs.FileInfo
-	capacity int64
-	size     int64
-	mu       sync.RWMutex
-	keyLocks sync.Map // map[string]*sync.RWMutex
-	LRU      *lruList
+	data        map[string]*bytes.Buffer
+	info        map[string]*vfs.FileInfo
+	capacity    int64
+	size        int64
+	mu          sync.RWMutex
+	keyLocks    []sync.Map // Sharded lock pools for better concurrency
+	LRU         *lruList
+	timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
 }

+// Number of lock shards for reducing contention
+const numLockShards = 32
+
 // lruList for time-decayed LRU eviction
 type lruList struct {
 	list *list.List
@@ -43,12 +49,12 @@ func (l *lruList) Add(key string, fi *vfs.FileInfo) {
 	l.elem[key] = elem
 }

-func (l *lruList) MoveToFront(key string) {
+func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
 	if elem, exists := l.elem[key]; exists {
 		l.list.MoveToFront(elem)
 		// Update the FileInfo in the element with new access time
 		if fi := elem.Value.(*vfs.FileInfo); fi != nil {
-			fi.UpdateAccess()
+			fi.UpdateAccessBatched(timeUpdater)
 		}
 	}
 }
@@ -73,12 +79,17 @@ func New(capacity int64) *MemoryFS {
 		panic("memory capacity must be greater than 0")
 	}

+	// Initialize sharded locks
+	keyLocks := make([]sync.Map, numLockShards)
+
 	return &MemoryFS{
-		data:     make(map[string]*bytes.Buffer),
-		info:     make(map[string]*vfs.FileInfo),
-		capacity: capacity,
-		size:     0,
-		LRU:      newLruList(),
+		data:        make(map[string]*bytes.Buffer),
+		info:        make(map[string]*vfs.FileInfo),
+		capacity:    capacity,
+		size:        0,
+		keyLocks:    keyLocks,
+		LRU:         newLruList(),
+		timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
 	}
 }
@@ -99,9 +110,23 @@ func (m *MemoryFS) Capacity() int64 {
 	return m.capacity
 }

-// getKeyLock returns a lock for the given key
+// getShardIndex returns the shard index for a given key
+func getShardIndex(key string) int {
+	// Use FNV-1a hash for good distribution
+	var h uint32 = 2166136261 // FNV offset basis
+	for i := 0; i < len(key); i++ {
+		h ^= uint32(key[i])
+		h *= 16777619 // FNV prime
+	}
+	return int(h % numLockShards)
+}
+
+// getKeyLock returns a lock for the given key using sharding
 func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
-	keyLock, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
+	shardIndex := getShardIndex(key)
+	shard := &m.keyLocks[shardIndex]
+
+	keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
 	return keyLock.(*sync.RWMutex)
 }
@@ -137,6 +162,8 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
 	fi := vfs.NewFileInfo(key, size)
 	m.info[key] = fi
 	m.LRU.Add(key, fi)
+	// Initialize access time with current time
+	fi.UpdateAccessBatched(m.timeUpdater)
 	m.size += size
 	m.mu.Unlock()
@@ -194,8 +221,8 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
 		m.mu.Unlock()
 		return nil, vfserror.ErrNotFound
 	}
-	fi.UpdateAccess()
-	m.LRU.MoveToFront(key)
+	fi.UpdateAccessBatched(m.timeUpdater)
+	m.LRU.MoveToFront(key, m.timeUpdater)

 	buffer, exists := m.data[key]
 	if !exists {
@@ -284,3 +311,133 @@ func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {

 	return nil, vfserror.ErrNotFound
 }
+
+// EvictLRU evicts the least recently used files to free up space
+func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	var evicted uint
+
+	// Evict from LRU list until we free enough space
+	for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
+		// Get the least recently used item
+		elem := m.LRU.list.Back()
+		if elem == nil {
+			break
+		}
+
+		fi := elem.Value.(*vfs.FileInfo)
+		key := fi.Key
+
+		// Remove from LRU
+		m.LRU.Remove(key)
+
+		// Remove from maps
+		delete(m.info, key)
+		delete(m.data, key)
+
+		// Update size
+		m.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		m.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
+
+// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
+func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	var evicted uint
+	var candidates []*vfs.FileInfo
+
+	// Collect all files
+	for _, fi := range m.info {
+		candidates = append(candidates, fi)
+	}
+
+	// Sort by size
+	sort.Slice(candidates, func(i, j int) bool {
+		if ascending {
+			return candidates[i].Size < candidates[j].Size
+		}
+		return candidates[i].Size > candidates[j].Size
+	})
+
+	// Evict files until we free enough space
+	for _, fi := range candidates {
+		if m.size <= m.capacity-int64(bytesNeeded) {
+			break
+		}
+
+		key := fi.Key
+
+		// Remove from LRU
+		m.LRU.Remove(key)
+
+		// Remove from maps
+		delete(m.info, key)
+		delete(m.data, key)
+
+		// Update size
+		m.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		m.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
+
+// EvictFIFO evicts files using FIFO (oldest creation time first)
+func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	var evicted uint
+	var candidates []*vfs.FileInfo
+
+	// Collect all files
+	for _, fi := range m.info {
+		candidates = append(candidates, fi)
+	}
+
+	// Sort by creation time (oldest first)
+	sort.Slice(candidates, func(i, j int) bool {
+		return candidates[i].CTime.Before(candidates[j].CTime)
+	})
+
+	// Evict oldest files until we free enough space
+	for _, fi := range candidates {
+		if m.size <= m.capacity-int64(bytesNeeded) {
+			break
+		}
+
+		key := fi.Key
+
+		// Remove from LRU
+		m.LRU.Remove(key)
+
+		// Remove from maps
+		delete(m.info, key)
+		delete(m.data, key)
+
+		// Update size
+		m.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		m.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
vfs/vfs.go (33 lines changed)
@@ -69,6 +69,39 @@ func (fi *FileInfo) UpdateAccess() {
 	fi.AccessCount++
 }

+// BatchedTimeUpdate provides a way to batch time updates for better performance
+type BatchedTimeUpdate struct {
+	currentTime    time.Time
+	lastUpdate     time.Time
+	updateInterval time.Duration
+}
+
+// NewBatchedTimeUpdate creates a new batched time updater
+func NewBatchedTimeUpdate(interval time.Duration) *BatchedTimeUpdate {
+	now := time.Now()
+	return &BatchedTimeUpdate{
+		currentTime:    now,
+		lastUpdate:     now,
+		updateInterval: interval,
+	}
+}
+
+// GetTime returns the current cached time, updating it if necessary
+func (btu *BatchedTimeUpdate) GetTime() time.Time {
+	now := time.Now()
+	if now.Sub(btu.lastUpdate) >= btu.updateInterval {
+		btu.currentTime = now
+		btu.lastUpdate = now
+	}
+	return btu.currentTime
+}
+
+// UpdateAccessBatched updates the access time using batched time updates
+func (fi *FileInfo) UpdateAccessBatched(btu *BatchedTimeUpdate) {
+	fi.ATime = btu.GetTime()
+	fi.AccessCount++
+}
+
 // GetTimeDecayedScore calculates a score based on access time and frequency
 // More recent and frequent accesses get higher scores
 func (fi *FileInfo) GetTimeDecayedScore() float64 {
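To show the intended behaviour of the batched updater, here is the BatchedTimeUpdate code from the hunk above wrapped into a runnable sketch: calls within one update interval observe the same cached timestamp, and the value refreshes once the interval elapses:

package main

import (
	"fmt"
	"time"
)

// BatchedTimeUpdate mirrors the type added in vfs/vfs.go above.
type BatchedTimeUpdate struct {
	currentTime    time.Time
	lastUpdate     time.Time
	updateInterval time.Duration
}

func NewBatchedTimeUpdate(interval time.Duration) *BatchedTimeUpdate {
	now := time.Now()
	return &BatchedTimeUpdate{currentTime: now, lastUpdate: now, updateInterval: interval}
}

// GetTime returns the cached time, refreshing it only after the interval elapses.
func (btu *BatchedTimeUpdate) GetTime() time.Time {
	now := time.Now()
	if now.Sub(btu.lastUpdate) >= btu.updateInterval {
		btu.currentTime = now
		btu.lastUpdate = now
	}
	return btu.currentTime
}

func main() {
	btu := NewBatchedTimeUpdate(100 * time.Millisecond)
	a := btu.GetTime()
	b := btu.GetTime() // still inside the window: same cached timestamp
	time.Sleep(150 * time.Millisecond)
	c := btu.GetTime() // interval elapsed: timestamp refreshed
	fmt.Println(a.Equal(b), c.After(a)) // true true
}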