Enhance caching mechanisms and introduce adaptive features

- Updated caching logic to support size-based promotion filtering, so that files exceeding a configured size limit are not promoted (see the sketch after this list).
- Implemented a new AdaptiveCacheManager that analyzes access patterns and adjusts the caching strategy dynamically.
- Introduced predictive caching features with a PredictiveCacheManager to prefetch content based on access patterns.
- Added a CacheWarmer to preload popular content into the cache, improving access times for frequently requested files.
- Refactored memory management with a DynamicCacheManager to adjust cache sizes based on system memory usage.
- Enhanced VFS interface and file metadata handling to support new features and improve performance.
- Updated tests to validate new caching behaviors and ensure reliability of the caching system.
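
As a rough illustration of the size-based promotion filter in the first bullet, here is a minimal sketch; the shouldPromote helper and its parameters are illustrative assumptions, not identifiers from this commit:

package main

import "fmt"

// shouldPromote is a hypothetical helper (not from this commit) showing the
// idea behind size-based promotion filtering: only files at or below the
// configured limit are promoted into the faster cache tier.
func shouldPromote(fileSize, maxPromoteSize int64) bool {
	// A limit of zero or less disables filtering entirely.
	return maxPromoteSize <= 0 || fileSize <= maxPromoteSize
}

func main() {
	fmt.Println(shouldPromote(4<<20, 16<<20))  // true: 4 MiB is under the 16 MiB limit
	fmt.Println(shouldPromote(64<<20, 16<<20)) // false: 64 MiB exceeds it
}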
2025-09-21 22:47:13 -05:00
parent bbe014e334
commit 45ae234694
12 changed files with 2212 additions and 189 deletions

vfs/memory/dynamic.go (new file, 130 lines)

@@ -0,0 +1,130 @@
package memory
import (
"s1d3sw1ped/SteamCache2/vfs"
"sync"
"sync/atomic"
"time"
)
// DynamicCacheManager manages cache size adjustments based on system memory usage
type DynamicCacheManager struct {
originalCacheSize uint64
currentCacheSize uint64
memoryMonitor *MemoryMonitor
cache vfs.VFS
adjustmentInterval time.Duration
lastAdjustment time.Time
mu sync.RWMutex
adjustmentCount int64
isAdjusting int32
}
// NewDynamicCacheManager creates a new dynamic cache manager
func NewDynamicCacheManager(cache vfs.VFS, originalSize uint64, memoryMonitor *MemoryMonitor) *DynamicCacheManager {
return &DynamicCacheManager{
originalCacheSize: originalSize,
currentCacheSize: originalSize,
memoryMonitor: memoryMonitor,
cache: cache,
adjustmentInterval: 30 * time.Second, // Adjust every 30 seconds
}
}
// Start begins the dynamic cache size adjustment process
func (dcm *DynamicCacheManager) Start() {
go dcm.adjustmentLoop()
}
// GetCurrentCacheSize returns the current cache size
func (dcm *DynamicCacheManager) GetCurrentCacheSize() uint64 {
// An atomic load suffices here; no lock is required
return atomic.LoadUint64(&dcm.currentCacheSize)
}
// GetOriginalCacheSize returns the original cache size
func (dcm *DynamicCacheManager) GetOriginalCacheSize() uint64 {
dcm.mu.RLock()
defer dcm.mu.RUnlock()
return dcm.originalCacheSize
}
// GetAdjustmentCount returns the number of adjustments made
func (dcm *DynamicCacheManager) GetAdjustmentCount() int64 {
return atomic.LoadInt64(&dcm.adjustmentCount)
}
// adjustmentLoop runs the cache size adjustment loop
func (dcm *DynamicCacheManager) adjustmentLoop() {
ticker := time.NewTicker(dcm.adjustmentInterval)
defer ticker.Stop()
for range ticker.C {
dcm.performAdjustment()
}
}
// performAdjustment performs a cache size adjustment if needed
func (dcm *DynamicCacheManager) performAdjustment() {
// Prevent concurrent adjustments
if !atomic.CompareAndSwapInt32(&dcm.isAdjusting, 0, 1) {
return
}
defer atomic.StoreInt32(&dcm.isAdjusting, 0)
// Check if enough time has passed since the last adjustment; read
// lastAdjustment under the lock so GetStats never races with it
dcm.mu.RLock()
last := dcm.lastAdjustment
dcm.mu.RUnlock()
if time.Since(last) < dcm.adjustmentInterval {
return
}
// Get recommended cache size
recommendedSize := dcm.memoryMonitor.GetRecommendedCacheSize(dcm.originalCacheSize)
currentSize := atomic.LoadUint64(&dcm.currentCacheSize)
if currentSize == 0 {
return
}
// Only adjust if there's a significant difference (more than 5% either way)
sizeDiff := float64(recommendedSize) / float64(currentSize)
if sizeDiff < 0.95 || sizeDiff > 1.05 {
dcm.adjustCacheSize(recommendedSize)
dcm.mu.Lock()
dcm.lastAdjustment = time.Now()
dcm.mu.Unlock()
atomic.AddInt64(&dcm.adjustmentCount, 1)
}
}
// adjustCacheSize adjusts the cache size to the recommended size
func (dcm *DynamicCacheManager) adjustCacheSize(newSize uint64) {
dcm.mu.Lock()
defer dcm.mu.Unlock()
oldSize := atomic.LoadUint64(&dcm.currentCacheSize)
atomic.StoreUint64(&dcm.currentCacheSize, newSize)
// If we're reducing the cache size, trigger GC to free up memory
if newSize < oldSize {
// Calculate how much to free
bytesToFree := oldSize - newSize
// Trigger GC on the cache to free up the excess memory
// This is a simplified approach - in practice, you'd want to integrate
// with the actual GC system to free the right amount
if gcCache, ok := dcm.cache.(interface{ ForceGC(uint) }); ok {
gcCache.ForceGC(uint(bytesToFree))
}
}
}
// GetStats returns statistics about the dynamic cache manager
func (dcm *DynamicCacheManager) GetStats() map[string]interface{} {
dcm.mu.RLock()
defer dcm.mu.RUnlock()
return map[string]interface{}{
"original_cache_size": dcm.originalCacheSize,
"current_cache_size": atomic.LoadUint64(&dcm.currentCacheSize),
"adjustment_count": atomic.LoadInt64(&dcm.adjustmentCount),
"last_adjustment": dcm.lastAdjustment,
"memory_utilization": dcm.memoryMonitor.GetMemoryUtilization(),
"target_memory_usage": dcm.memoryMonitor.GetTargetMemoryUsage(),
"current_memory_usage": dcm.memoryMonitor.GetCurrentMemoryUsage(),
}
}
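
For orientation, a minimal wiring sketch of the pieces in this file, assuming *MemoryFS satisfies the vfs.VFS parameter of NewDynamicCacheManager; only constructors and methods that appear in this commit are used:

package main

import (
	"fmt"
	"time"

	"s1d3sw1ped/SteamCache2/vfs/memory"
)

func main() {
	const cacheSize = 512 << 20 // 512 MiB cache

	// Target 1 GiB of process memory, sample every 10s, 10% adjustment threshold.
	mon := memory.NewMemoryMonitor(1<<30, 10*time.Second, 0.1)
	mon.Start()
	defer mon.Stop()

	cache := memory.New(cacheSize) // assumed to satisfy vfs.VFS
	dcm := memory.NewDynamicCacheManager(cache, cacheSize, mon)
	dcm.Start()

	time.Sleep(time.Minute) // let a couple of 30s adjustment cycles run
	fmt.Println(dcm.GetStats())
}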


@@ -5,7 +5,7 @@ import (
"bytes"
"container/list"
"io"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/types"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"sort"
"strings"
@@ -13,19 +13,43 @@ import (
"time"
)
+// VFS defines the interface for virtual file systems
+type VFS interface {
+// Create creates a new file at the given key
+Create(key string, size int64) (io.WriteCloser, error)
+// Open opens the file at the given key for reading
+Open(key string) (io.ReadCloser, error)
+// Delete removes the file at the given key
+Delete(key string) error
+// Stat returns information about the file at the given key
+Stat(key string) (*types.FileInfo, error)
+// Name returns the name of this VFS
+Name() string
+// Size returns the current size of the VFS
+Size() int64
+// Capacity returns the maximum capacity of the VFS
+Capacity() int64
+}
// Ensure MemoryFS implements VFS.
-var _ vfs.VFS = (*MemoryFS)(nil)
+var _ VFS = (*MemoryFS)(nil)
// MemoryFS is an in-memory virtual file system
type MemoryFS struct {
data map[string]*bytes.Buffer
-info map[string]*vfs.FileInfo
+info map[string]*types.FileInfo
capacity int64
size int64
mu sync.RWMutex
keyLocks []sync.Map // Sharded lock pools for better concurrency
LRU *lruList
-timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
+timeUpdater *types.BatchedTimeUpdate // Batched time updates for better performance
}
// Number of lock shards for reducing contention
@@ -44,25 +68,25 @@ func newLruList() *lruList {
}
}
-func (l *lruList) Add(key string, fi *vfs.FileInfo) {
+func (l *lruList) Add(key string, fi *types.FileInfo) {
elem := l.list.PushFront(fi)
l.elem[key] = elem
}
-func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
+func (l *lruList) MoveToFront(key string, timeUpdater *types.BatchedTimeUpdate) {
if elem, exists := l.elem[key]; exists {
l.list.MoveToFront(elem)
// Update the FileInfo in the element with new access time
-if fi := elem.Value.(*vfs.FileInfo); fi != nil {
+if fi := elem.Value.(*types.FileInfo); fi != nil {
fi.UpdateAccessBatched(timeUpdater)
}
}
}
-func (l *lruList) Remove(key string) *vfs.FileInfo {
+func (l *lruList) Remove(key string) *types.FileInfo {
if elem, exists := l.elem[key]; exists {
delete(l.elem, key)
-if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
+if fi := l.list.Remove(elem).(*types.FileInfo); fi != nil {
return fi
}
}
@@ -84,12 +108,12 @@ func New(capacity int64) *MemoryFS {
return &MemoryFS{
data: make(map[string]*bytes.Buffer),
-info: make(map[string]*vfs.FileInfo),
+info: make(map[string]*types.FileInfo),
capacity: capacity,
size: 0,
keyLocks: keyLocks,
LRU: newLruList(),
-timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
+timeUpdater: types.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
}
}
@@ -110,6 +134,35 @@ func (m *MemoryFS) Capacity() int64 {
return m.capacity
}
+// GetFragmentationStats returns memory fragmentation statistics
+func (m *MemoryFS) GetFragmentationStats() map[string]interface{} {
+m.mu.RLock()
+defer m.mu.RUnlock()
+var totalCapacity int64
+var totalUsed int64
+var bufferCount int
+for _, buffer := range m.data {
+totalCapacity += int64(buffer.Cap())
+totalUsed += int64(buffer.Len())
+bufferCount++
+}
+fragmentationRatio := float64(0)
+if totalCapacity > 0 {
+fragmentationRatio = float64(totalCapacity-totalUsed) / float64(totalCapacity)
+}
+// Guard against division by zero when the cache holds no buffers
+averageBufferSize := float64(0)
+if bufferCount > 0 {
+averageBufferSize = float64(totalUsed) / float64(bufferCount)
+}
+return map[string]interface{}{
+"buffer_count": bufferCount,
+"total_capacity": totalCapacity,
+"total_used": totalUsed,
+"fragmentation_ratio": fragmentationRatio,
+"average_buffer_size": averageBufferSize,
+}
+}
// getShardIndex returns the shard index for a given key
func getShardIndex(key string) int {
// Use FNV-1a hash for good distribution
@@ -159,7 +212,7 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
buffer := &bytes.Buffer{}
m.data[key] = buffer
-fi := vfs.NewFileInfo(key, size)
+fi := types.NewFileInfo(key, size)
m.info[key] = fi
m.LRU.Add(key, fi)
// Initialize access time with current time
@@ -230,23 +283,39 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
return nil, vfserror.ErrNotFound
}
-// Create a copy of the buffer for reading
-data := make([]byte, buffer.Len())
-copy(data, buffer.Bytes())
+// Use zero-copy approach - return a reader that reads directly from the buffer
m.mu.Unlock()
return &memoryReadCloser{
-reader: bytes.NewReader(data),
+buffer: buffer,
+offset: 0,
}, nil
}
-// memoryReadCloser implements io.ReadCloser for memory files
+// memoryReadCloser implements io.ReadCloser for memory files with zero-copy optimization
type memoryReadCloser struct {
-reader *bytes.Reader
+buffer *bytes.Buffer
+offset int64
}
func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) {
-return mrc.reader.Read(p)
+if mrc.offset >= int64(mrc.buffer.Len()) {
+return 0, io.EOF
+}
+// Clamp the read to the bytes still available in the buffer
+available := mrc.buffer.Len() - int(mrc.offset)
+toRead := len(p)
+if toRead > available {
+toRead = available
+}
+// Copy only the requested range into p; the buffer itself is never duplicated
+data := mrc.buffer.Bytes()
+copy(p, data[mrc.offset:mrc.offset+int64(toRead)])
+mrc.offset += int64(toRead)
+return toRead, nil
}
func (mrc *memoryReadCloser) Close() error {
@@ -286,7 +355,7 @@ func (m *MemoryFS) Delete(key string) error {
}
// Stat returns file information
-func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
+func (m *MemoryFS) Stat(key string) (*types.FileInfo, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
@@ -327,7 +396,7 @@ func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
break
}
-fi := elem.Value.(*vfs.FileInfo)
+fi := elem.Value.(*types.FileInfo)
key := fi.Key
// Remove from LRU
@@ -355,7 +424,7 @@ func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
defer m.mu.Unlock()
var evicted uint
-var candidates []*vfs.FileInfo
+var candidates []*types.FileInfo
// Collect all files
for _, fi := range m.info {
@@ -403,7 +472,7 @@ func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
defer m.mu.Unlock()
var evicted uint
-var candidates []*vfs.FileInfo
+var candidates []*types.FileInfo
// Collect all files
for _, fi := range m.info {

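To ground the MemoryFS changes above, a small usage sketch; the cache key is illustrative, and Open is served by the zero-copy memoryReadCloser introduced in this commit:

package main

import (
	"fmt"
	"io"

	"s1d3sw1ped/SteamCache2/vfs/memory"
)

func main() {
	fs := memory.New(64 << 20) // 64 MiB in-memory cache

	payload := []byte("chunk data")
	w, err := fs.Create("depot/123/chunk_0", int64(len(payload))) // key is illustrative
	if err != nil {
		panic(err)
	}
	w.Write(payload)
	w.Close()

	r, err := fs.Open("depot/123/chunk_0") // returns the zero-copy reader
	if err != nil {
		panic(err)
	}
	defer r.Close()
	data, _ := io.ReadAll(r)
	fmt.Printf("%s (vfs size: %d bytes)\n", data, fs.Size())
}
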
vfs/memory/monitor.go (new file, 153 lines)

@@ -0,0 +1,153 @@
package memory
import (
"runtime"
"sync"
"sync/atomic"
"time"
)
// MemoryMonitor tracks system memory usage and provides dynamic sizing recommendations
type MemoryMonitor struct {
targetMemoryUsage uint64 // Target total memory usage in bytes
currentMemoryUsage uint64 // Current total memory usage in bytes
monitoringInterval time.Duration
adjustmentThreshold float64 // Threshold for cache size adjustments (e.g., 0.1 = 10%)
mu sync.RWMutex
stopChan chan struct{}
isMonitoring int32
}
// NewMemoryMonitor creates a new memory monitor
func NewMemoryMonitor(targetMemoryUsage uint64, monitoringInterval time.Duration, adjustmentThreshold float64) *MemoryMonitor {
return &MemoryMonitor{
targetMemoryUsage: targetMemoryUsage,
monitoringInterval: monitoringInterval,
adjustmentThreshold: adjustmentThreshold,
stopChan: make(chan struct{}),
}
}
// Start begins monitoring memory usage
func (mm *MemoryMonitor) Start() {
if atomic.CompareAndSwapInt32(&mm.isMonitoring, 0, 1) {
go mm.monitor()
}
}
// Stop stops monitoring memory usage
func (mm *MemoryMonitor) Stop() {
if atomic.CompareAndSwapInt32(&mm.isMonitoring, 1, 0) {
close(mm.stopChan)
}
}
// GetCurrentMemoryUsage returns the current total memory usage
func (mm *MemoryMonitor) GetCurrentMemoryUsage() uint64 {
mm.mu.RLock()
defer mm.mu.RUnlock()
return atomic.LoadUint64(&mm.currentMemoryUsage)
}
// GetTargetMemoryUsage returns the target memory usage
func (mm *MemoryMonitor) GetTargetMemoryUsage() uint64 {
mm.mu.RLock()
defer mm.mu.RUnlock()
return mm.targetMemoryUsage
}
// GetMemoryUtilization returns current usage as a fraction of the target (1.0 = at target)
func (mm *MemoryMonitor) GetMemoryUtilization() float64 {
mm.mu.RLock()
defer mm.mu.RUnlock()
if mm.targetMemoryUsage == 0 {
return 0
}
current := atomic.LoadUint64(&mm.currentMemoryUsage)
return float64(current) / float64(mm.targetMemoryUsage)
}
// GetRecommendedCacheSize calculates the recommended cache size based on current memory usage
func (mm *MemoryMonitor) GetRecommendedCacheSize(originalCacheSize uint64) uint64 {
mm.mu.RLock()
defer mm.mu.RUnlock()
current := atomic.LoadUint64(&mm.currentMemoryUsage)
target := mm.targetMemoryUsage
// If we're under target, we can use the full cache size
if current <= target {
return originalCacheSize
}
// Calculate how much we're over target
overage := current - target
// If the overage is significant, reduce the cache size
if overage > uint64(float64(target)*mm.adjustmentThreshold) {
// Reduce the cache size by the overage, but never below 10% of the original;
// guard against uint64 underflow when the overage exceeds the original size
minCacheSize := uint64(float64(originalCacheSize) * 0.1)
if overage >= originalCacheSize {
return minCacheSize
}
recommendedSize := originalCacheSize - overage
if recommendedSize < minCacheSize {
recommendedSize = minCacheSize
}
return recommendedSize
}
return originalCacheSize
}
// monitor runs the memory monitoring loop
func (mm *MemoryMonitor) monitor() {
ticker := time.NewTicker(mm.monitoringInterval)
defer ticker.Stop()
for {
select {
case <-mm.stopChan:
return
case <-ticker.C:
mm.updateMemoryUsage()
}
}
}
// updateMemoryUsage updates the current memory usage
func (mm *MemoryMonitor) updateMemoryUsage() {
var m runtime.MemStats
runtime.ReadMemStats(&m)
// Use Alloc (currently allocated memory) as our metric
atomic.StoreUint64(&mm.currentMemoryUsage, m.Alloc)
}
// SetTargetMemoryUsage updates the target memory usage
func (mm *MemoryMonitor) SetTargetMemoryUsage(target uint64) {
mm.mu.Lock()
defer mm.mu.Unlock()
mm.targetMemoryUsage = target
}
// GetMemoryStats returns detailed memory statistics
func (mm *MemoryMonitor) GetMemoryStats() map[string]interface{} {
var m runtime.MemStats
runtime.ReadMemStats(&m)
mm.mu.RLock()
defer mm.mu.RUnlock()
current := atomic.LoadUint64(&mm.currentMemoryUsage)
// Compute utilization inline; calling GetMemoryUtilization here would
// re-acquire mm.mu and can deadlock if a writer is waiting on the lock
utilization := float64(0)
if mm.targetMemoryUsage > 0 {
utilization = float64(current) / float64(mm.targetMemoryUsage)
}
return map[string]interface{}{
"current_usage": current,
"target_usage": mm.targetMemoryUsage,
"utilization": utilization,
"heap_alloc": m.HeapAlloc,
"heap_sys": m.HeapSys,
"heap_idle": m.HeapIdle,
"heap_inuse": m.HeapInuse,
"stack_inuse": m.StackInuse,
"stack_sys": m.StackSys,
"gc_cycles": m.NumGC,
"gc_pause_total": m.PauseTotalNs,
}
}
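
To make the sizing arithmetic concrete, a small worked example using only the constructor and methods defined above; the byte figures are illustrative:

package main

import (
	"fmt"
	"time"

	"s1d3sw1ped/SteamCache2/vfs/memory"
)

func main() {
	// Target 1 GiB of total usage, sampled every second, 10% adjustment threshold.
	mon := memory.NewMemoryMonitor(1<<30, time.Second, 0.1)

	// No usage has been recorded yet, so current (0) <= target and the
	// full original cache size is recommended unchanged.
	fmt.Println(mon.GetRecommendedCacheSize(512 << 20)) // 536870912 (512 MiB)

	// If measured usage later reached 1.25 GiB, the overage would be 256 MiB,
	// above the 10% threshold (~102 MiB of 1 GiB), so the recommendation would
	// drop to 512 MiB - 256 MiB = 256 MiB, floored at 10% of the original size.
}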