Refactor caching and memory management components

- Replaced the legacy cache-warming system with a predictive cache warmer that prefetches content based on observed access patterns, improving performance and resource management (a rough sketch of the idea follows this list).
- Refactored memory management so the cache size is adjusted dynamically based on system memory usage (see the sketch just before the diff below).
- Simplified the VFS interface and improved concurrency handling with sharded key locks, now extracted into a shared locks package, for better performance in multi-threaded environments (sketches of the presumed shared vfs.VFS and locks helpers follow the relevant hunks in the diff).
- Extended the tests to validate the new caching and memory-management behaviors.
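
The warmer itself lives in other files of this commit and is not shown in the diff below. Purely as an illustration of the idea, a predictive warmer can track which key tends to follow which and prefetch the likely successor; every name in this sketch is hypothetical, not the actual steamcache2 API:

```go
// Hypothetical sketch of access-pattern-driven prefetching; the real
// predictive warmer in this commit may be structured very differently.
package warmer

import "sync"

type PredictiveWarmer struct {
	mu       sync.Mutex
	lastKey  string
	follows  map[string]map[string]int // key -> successor -> observation count
	prefetch func(key string)          // injected callback that loads a key into the cache
}

func NewPredictiveWarmer(prefetch func(string)) *PredictiveWarmer {
	return &PredictiveWarmer{
		follows:  make(map[string]map[string]int),
		prefetch: prefetch,
	}
}

// RecordAccess updates the successor counts for the previous key and
// warms the most likely next key in the background.
func (w *PredictiveWarmer) RecordAccess(key string) {
	w.mu.Lock()
	if w.lastKey != "" {
		m := w.follows[w.lastKey]
		if m == nil {
			m = make(map[string]int)
			w.follows[w.lastKey] = m
		}
		m[key]++
	}
	w.lastKey = key
	var next string
	best := 0
	for candidate, count := range w.follows[key] {
		if count > best {
			next, best = candidate, count
		}
	}
	w.mu.Unlock()
	if next != "" {
		go w.prefetch(next) // prefetch asynchronously so the caller is not blocked
	}
}
```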
2025-09-22 01:59:15 -05:00
parent 9b2affe95a
commit bfe29dea75
13 changed files with 612 additions and 1215 deletions
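
The dynamic sizing logic is likewise outside the file shown below. A minimal sketch of the general approach, assuming a placeholder availableMemoryBytes function (e.g. parsed from /proc/meminfo); only the Size and EvictLRU methods mirror what is visible in the diff:

```go
// Hypothetical sketch: periodically derive a target cache size from
// available system memory and evict down to it. availableMemoryBytes is
// a placeholder supplied by the caller.
package sizing

import "time"

type shrinkableCache interface {
	Size() int64
	EvictLRU(bytesNeeded uint) uint
}

func AdjustPeriodically(cache shrinkableCache, availableMemoryBytes func() int64, interval time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			// Allow the cache to use at most half of the currently available memory.
			target := availableMemoryBytes() / 2
			if over := cache.Size() - target; over > 0 {
				cache.EvictLRU(uint(over)) // free the excess
			}
		}
	}
}
```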


@@ -3,8 +3,10 @@ package memory
import (
"bytes"
"container/list"
"io"
"s1d3sw1ped/steamcache2/vfs"
"s1d3sw1ped/steamcache2/vfs/locks"
"s1d3sw1ped/steamcache2/vfs/lru"
"s1d3sw1ped/steamcache2/vfs/types"
"s1d3sw1ped/steamcache2/vfs/vfserror"
"sort"
@@ -13,32 +15,8 @@ import (
"time"
)
// VFS defines the interface for virtual file systems
type VFS interface {
// Create creates a new file at the given key
Create(key string, size int64) (io.WriteCloser, error)
// Open opens the file at the given key for reading
Open(key string) (io.ReadCloser, error)
// Delete removes the file at the given key
Delete(key string) error
// Stat returns information about the file at the given key
Stat(key string) (*types.FileInfo, error)
// Name returns the name of this VFS
Name() string
// Size returns the current size of the VFS
Size() int64
// Capacity returns the maximum capacity of the VFS
Capacity() int64
}
// Ensure MemoryFS implements VFS.
var _ VFS = (*MemoryFS)(nil)
var _ vfs.VFS = (*MemoryFS)(nil)
// MemoryFS is an in-memory virtual file system
type MemoryFS struct {
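
The in-file VFS interface removed above gives way to the shared vfs.VFS. Judging from the replaced compile-time assertion, the shared interface presumably carries the same method set, roughly:

```go
// Presumed shape of the shared interface in s1d3sw1ped/steamcache2/vfs,
// mirroring the in-file interface removed above; not verified against
// the actual package.
package vfs

import (
	"io"

	"s1d3sw1ped/steamcache2/vfs/types"
)

type VFS interface {
	Create(key string, size int64) (io.WriteCloser, error)
	Open(key string) (io.ReadCloser, error)
	Delete(key string) error
	Stat(key string) (*types.FileInfo, error)
	Name() string
	Size() int64
	Capacity() int64
}
```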
@@ -48,55 +26,10 @@ type MemoryFS struct {
size int64
mu sync.RWMutex
keyLocks []sync.Map // Sharded lock pools for better concurrency
LRU *lruList
LRU *lru.LRUList[*types.FileInfo]
timeUpdater *types.BatchedTimeUpdate // Batched time updates for better performance
}
// Number of lock shards for reducing contention
const numLockShards = 32
// lruList for time-decayed LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
}
func newLruList() *lruList {
return &lruList{
list: list.New(),
elem: make(map[string]*list.Element),
}
}
func (l *lruList) Add(key string, fi *types.FileInfo) {
elem := l.list.PushFront(fi)
l.elem[key] = elem
}
func (l *lruList) MoveToFront(key string, timeUpdater *types.BatchedTimeUpdate) {
if elem, exists := l.elem[key]; exists {
l.list.MoveToFront(elem)
// Update the FileInfo in the element with new access time
if fi := elem.Value.(*types.FileInfo); fi != nil {
fi.UpdateAccessBatched(timeUpdater)
}
}
}
func (l *lruList) Remove(key string) *types.FileInfo {
if elem, exists := l.elem[key]; exists {
delete(l.elem, key)
if fi := l.list.Remove(elem).(*types.FileInfo); fi != nil {
return fi
}
}
return nil
}
func (l *lruList) Len() int {
return l.list.Len()
}
// New creates a new MemoryFS
func New(capacity int64) *MemoryFS {
if capacity <= 0 {
@@ -104,7 +37,7 @@ func New(capacity int64) *MemoryFS {
}
// Initialize sharded locks
keyLocks := make([]sync.Map, numLockShards)
keyLocks := make([]sync.Map, locks.NumLockShards)
return &MemoryFS{
data: make(map[string]*bytes.Buffer),
@@ -112,7 +45,7 @@ func New(capacity int64) *MemoryFS {
capacity: capacity,
size: 0,
keyLocks: keyLocks,
LRU: newLruList(),
LRU: lru.NewLRUList[*types.FileInfo](),
timeUpdater: types.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
}
}
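
The 100ms timeUpdater suggests access times are batched so hot paths avoid a time.Now() call on every access. A rough sketch of how such a batched time source can work; the actual types.BatchedTimeUpdate may differ:

```go
// Hypothetical sketch of a batched time source: the wall clock is sampled
// once per interval and cached, so frequent access-time updates read an
// atomic value instead of calling time.Now() each time.
package types

import (
	"sync/atomic"
	"time"
)

type BatchedTimeUpdate struct {
	now atomic.Int64 // cached time as UnixNano
}

func NewBatchedTimeUpdate(interval time.Duration) *BatchedTimeUpdate {
	b := &BatchedTimeUpdate{}
	b.now.Store(time.Now().UnixNano())
	go func() {
		// The sampling goroutine runs for the life of the process.
		for t := range time.Tick(interval) {
			b.now.Store(t.UnixNano())
		}
	}()
	return b
}

// Now returns the cached timestamp, at most one interval stale.
func (b *BatchedTimeUpdate) Now() time.Time {
	return time.Unix(0, b.now.Load())
}
```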
@@ -163,24 +96,9 @@ func (m *MemoryFS) GetFragmentationStats() map[string]interface{} {
}
}
// getShardIndex returns the shard index for a given key
func getShardIndex(key string) int {
// Use FNV-1a hash for good distribution
var h uint32 = 2166136261 // FNV offset basis
for i := 0; i < len(key); i++ {
h ^= uint32(key[i])
h *= 16777619 // FNV prime
}
return int(h % numLockShards)
}
// getKeyLock returns a lock for the given key using sharding
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
shardIndex := getShardIndex(key)
shard := &m.keyLocks[shardIndex]
keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
return keyLock.(*sync.RWMutex)
return locks.GetKeyLock(m.keyLocks, key)
}
// Create creates a new file
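
The helpers deleted here are what moved into the shared locks package. Based on the removed code and the new call sites (locks.NumLockShards, locks.GetShardIndex, locks.GetKeyLock), the extracted version presumably looks roughly like this:

```go
// Presumed contents of s1d3sw1ped/steamcache2/vfs/locks, reconstructed
// from the in-file helpers removed above; not verified against the
// actual package.
package locks

import "sync"

// NumLockShards is the number of lock shards used to reduce contention.
const NumLockShards = 32

// GetShardIndex maps a key to a shard using the FNV-1a hash.
func GetShardIndex(key string) int {
	var h uint32 = 2166136261 // FNV offset basis
	for i := 0; i < len(key); i++ {
		h ^= uint32(key[i])
		h *= 16777619 // FNV prime
	}
	return int(h % NumLockShards)
}

// GetKeyLock returns the per-key RWMutex from the appropriate shard,
// creating it on first use.
func GetKeyLock(shards []sync.Map, key string) *sync.RWMutex {
	shard := &shards[GetShardIndex(key)]
	keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
	return keyLock.(*sync.RWMutex)
}
```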
@@ -391,7 +309,7 @@ func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
// Evict from LRU list until we free enough space
for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
// Get the least recently used item
elem := m.LRU.list.Back()
elem := m.LRU.Back()
if elem == nil {
break
}
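
m.LRU.Back() now comes from the extracted generic list. Going by the removed lruList earlier in this file and the call sites here, lru.LRUList is presumably a thin generic wrapper over container/list along these lines (the real package may also fold in the batched access-time update):

```go
// Presumed shape of s1d3sw1ped/steamcache2/vfs/lru, generalizing the
// removed lruList; not verified against the actual package.
package lru

import "container/list"

type LRUList[T any] struct {
	list *list.List
	elem map[string]*list.Element
}

func NewLRUList[T any]() *LRUList[T] {
	return &LRUList[T]{list: list.New(), elem: make(map[string]*list.Element)}
}

// Add inserts the value at the front (most recently used position).
func (l *LRUList[T]) Add(key string, v T) {
	l.elem[key] = l.list.PushFront(v)
}

// MoveToFront marks the key as most recently used.
func (l *LRUList[T]) MoveToFront(key string) {
	if e, ok := l.elem[key]; ok {
		l.list.MoveToFront(e)
	}
}

// Remove deletes the key and returns its value (zero value if absent).
func (l *LRUList[T]) Remove(key string) T {
	var zero T
	if e, ok := l.elem[key]; ok {
		delete(l.elem, key)
		return l.list.Remove(e).(T)
	}
	return zero
}

// Back returns the least recently used element, or nil if the list is empty.
func (l *LRUList[T]) Back() *list.Element { return l.list.Back() }

func (l *LRUList[T]) Len() int { return l.list.Len() }
```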
@@ -411,7 +329,7 @@ func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
evicted += uint(fi.Size)
// Clean up key lock
shardIndex := getShardIndex(key)
shardIndex := locks.GetShardIndex(key)
m.keyLocks[shardIndex].Delete(key)
}
@@ -459,7 +377,7 @@ func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
evicted += uint(fi.Size)
// Clean up key lock
shardIndex := getShardIndex(key)
shardIndex := locks.GetShardIndex(key)
m.keyLocks[shardIndex].Delete(key)
}
@@ -504,7 +422,7 @@ func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
evicted += uint(fi.Size)
// Clean up key lock
shardIndex := getShardIndex(key)
shardIndex := locks.GetShardIndex(key)
m.keyLocks[shardIndex].Delete(key)
}