Enhance caching mechanisms and introduce adaptive features
- Updated the caching logic to support size-based promotion filtering, so that files can be excluded from promotion based on size constraints.
- Implemented adaptive caching via a new AdaptiveCacheManager that analyzes access patterns and adjusts the caching strategy dynamically.
- Introduced predictive caching via a PredictiveCacheManager that prefetches content based on access patterns.
- Added a CacheWarmer to preload popular content into the cache, improving access times for frequently requested files.
- Refactored memory management with a DynamicCacheManager that adjusts cache sizes based on system memory usage (see the wiring sketch below).
- Enhanced the VFS interface and file metadata handling to support the new features and improve performance.
- Updated tests to validate the new caching behaviors and ensure the reliability of the caching system.
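For review context, a minimal sketch of how the new DynamicCacheManager is wired up, inferred only from the constructor and methods in the diff below. NewMemoryMonitor and NewMemoryCache are placeholder constructor names, not APIs confirmed by this commit:

package main

import (
	"log"

	"s1d3sw1ped/SteamCache2/vfs/memory"
)

func main() {
	// Placeholder constructors: NewMemoryMonitor and NewMemoryCache are
	// assumed names for package-level constructors not shown in this commit.
	monitor := memory.NewMemoryMonitor()
	cache := memory.NewMemoryCache() // any vfs.VFS implementation works here
	dcm := memory.NewDynamicCacheManager(cache, 8<<30, monitor) // 8 GiB budget
	dcm.Start() // spawns the background adjustment loop

	stats := dcm.GetStats()
	log.Printf("cache size %v of %v bytes after %v adjustments",
		stats["current_cache_size"], stats["original_cache_size"],
		stats["adjustment_count"])
}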
vfs/memory/dynamic.go (new file, 130 lines)
@@ -0,0 +1,130 @@
package memory

import (
	"s1d3sw1ped/SteamCache2/vfs"
	"sync"
	"sync/atomic"
	"time"
)

// DynamicCacheManager manages cache size adjustments based on system memory usage.
type DynamicCacheManager struct {
	originalCacheSize  uint64
	currentCacheSize   uint64
	memoryMonitor      *MemoryMonitor
	cache              vfs.VFS
	adjustmentInterval time.Duration
	lastAdjustment     time.Time // guarded by mu
	mu                 sync.RWMutex
	adjustmentCount    int64 // accessed atomically
	isAdjusting        int32 // accessed atomically; 1 while an adjustment is in flight
}

// NewDynamicCacheManager creates a new dynamic cache manager.
func NewDynamicCacheManager(cache vfs.VFS, originalSize uint64, memoryMonitor *MemoryMonitor) *DynamicCacheManager {
	return &DynamicCacheManager{
		originalCacheSize:  originalSize,
		currentCacheSize:   originalSize,
		memoryMonitor:      memoryMonitor,
		cache:              cache,
		adjustmentInterval: 30 * time.Second, // adjust every 30 seconds
	}
}

// Start begins the dynamic cache size adjustment process.
func (dcm *DynamicCacheManager) Start() {
	go dcm.adjustmentLoop()
}

// GetCurrentCacheSize returns the current cache size.
func (dcm *DynamicCacheManager) GetCurrentCacheSize() uint64 {
	// The atomic load is sufficient on its own; no mutex is needed here.
	return atomic.LoadUint64(&dcm.currentCacheSize)
}

// GetOriginalCacheSize returns the original cache size.
func (dcm *DynamicCacheManager) GetOriginalCacheSize() uint64 {
	// originalCacheSize is immutable after construction, so no locking is needed.
	return dcm.originalCacheSize
}

// GetAdjustmentCount returns the number of adjustments made.
func (dcm *DynamicCacheManager) GetAdjustmentCount() int64 {
	return atomic.LoadInt64(&dcm.adjustmentCount)
}

// adjustmentLoop runs the cache size adjustment loop.
func (dcm *DynamicCacheManager) adjustmentLoop() {
	ticker := time.NewTicker(dcm.adjustmentInterval)
	defer ticker.Stop()

	for range ticker.C {
		dcm.performAdjustment()
	}
}

// performAdjustment performs a cache size adjustment if needed.
func (dcm *DynamicCacheManager) performAdjustment() {
	// Prevent concurrent adjustments.
	if !atomic.CompareAndSwapInt32(&dcm.isAdjusting, 0, 1) {
		return
	}
	defer atomic.StoreInt32(&dcm.isAdjusting, 0)

	// Check that enough time has passed since the last adjustment.
	// lastAdjustment is read under the lock because GetStats reads it concurrently.
	dcm.mu.RLock()
	last := dcm.lastAdjustment
	dcm.mu.RUnlock()
	if time.Since(last) < dcm.adjustmentInterval {
		return
	}

	// Get the recommended cache size.
	recommendedSize := dcm.memoryMonitor.GetRecommendedCacheSize(dcm.originalCacheSize)
	currentSize := atomic.LoadUint64(&dcm.currentCacheSize)

	// Only adjust if there's a significant difference (more than 5% either way).
	// For example, a 7.5 GiB recommendation against an 8 GiB current size gives
	// a ratio of 0.9375, which falls below 0.95 and triggers a shrink.
	sizeDiff := float64(recommendedSize) / float64(currentSize)
	if sizeDiff < 0.95 || sizeDiff > 1.05 {
		dcm.adjustCacheSize(recommendedSize)

		dcm.mu.Lock()
		dcm.lastAdjustment = time.Now()
		dcm.mu.Unlock()

		atomic.AddInt64(&dcm.adjustmentCount, 1)
	}
}

// adjustCacheSize adjusts the cache size to the recommended size.
func (dcm *DynamicCacheManager) adjustCacheSize(newSize uint64) {
	dcm.mu.Lock()
	defer dcm.mu.Unlock()

	oldSize := atomic.LoadUint64(&dcm.currentCacheSize)
	atomic.StoreUint64(&dcm.currentCacheSize, newSize)

	// If we're reducing the cache size, trigger GC to free up memory.
	if newSize < oldSize {
		// Calculate how much to free.
		bytesToFree := oldSize - newSize

		// Trigger GC on the cache to free the excess memory. This is a
		// simplified approach; in practice you'd integrate with the actual
		// GC system to free exactly the right amount.
		if gcCache, ok := dcm.cache.(interface{ ForceGC(uint) }); ok {
			gcCache.ForceGC(uint(bytesToFree))
		}
	}
}

// GetStats returns statistics about the dynamic cache manager.
func (dcm *DynamicCacheManager) GetStats() map[string]interface{} {
	dcm.mu.RLock()
	defer dcm.mu.RUnlock()

	return map[string]interface{}{
		"original_cache_size":  dcm.originalCacheSize,
		"current_cache_size":   atomic.LoadUint64(&dcm.currentCacheSize),
		"adjustment_count":     atomic.LoadInt64(&dcm.adjustmentCount),
		"last_adjustment":      dcm.lastAdjustment,
		"memory_utilization":   dcm.memoryMonitor.GetMemoryUtilization(),
		"target_memory_usage":  dcm.memoryMonitor.GetTargetMemoryUsage(),
		"current_memory_usage": dcm.memoryMonitor.GetCurrentMemoryUsage(),
	}
}
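dynamic.go compiles against a MemoryMonitor defined elsewhere in the memory package; this diff pins only the four methods called above and the uint64-in/uint64-out shape of GetRecommendedCacheSize. Below is a minimal sketch of a compatible monitor, purely to make the assumed contract concrete. The field, the thresholds, and the use of runtime.ReadMemStats are all assumptions, not the real implementation:

package memory

import "runtime"

// MemoryMonitor sketch: a stand-in compatible with the calls in dynamic.go.
// The real type is defined elsewhere in this package and is not in this diff.
type MemoryMonitor struct {
	targetMemoryUsage uint64 // assumed field: memory budget the cache should respect
}

// GetCurrentMemoryUsage reports the process's live heap allocation.
func (m *MemoryMonitor) GetCurrentMemoryUsage() uint64 {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	return ms.HeapAlloc
}

// GetTargetMemoryUsage returns the configured memory budget.
func (m *MemoryMonitor) GetTargetMemoryUsage() uint64 {
	return m.targetMemoryUsage
}

// GetMemoryUtilization returns current usage as a fraction of the target.
func (m *MemoryMonitor) GetMemoryUtilization() float64 {
	return float64(m.GetCurrentMemoryUsage()) / float64(m.targetMemoryUsage)
}

// GetRecommendedCacheSize shrinks the cache as utilization approaches the
// target and restores it as pressure eases. The thresholds are illustrative.
func (m *MemoryMonitor) GetRecommendedCacheSize(originalSize uint64) uint64 {
	switch util := m.GetMemoryUtilization(); {
	case util > 0.90: // heavy pressure: halve the cache
		return originalSize / 2
	case util > 0.75: // moderate pressure: trim a quarter
		return originalSize * 3 / 4
	default:
		return originalSize
	}
}

Under this sketch, an 8 GiB cache under heavy pressure gets a 4 GiB recommendation; the resulting ratio of 0.5 clears the 5% hysteresis band, so performAdjustment shrinks the cache and, through the ForceGC type assertion, asks the backing store to evict the 4 GiB difference.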