Files
steamcache2/vfs/memory/monitor.go
Justin Harms 45ae234694 Enhance caching mechanisms and introduce adaptive features
- Updated caching logic to support size-based promotion filtering, so that files exceeding the configured size constraints are not promoted.
- Implemented adaptive caching with a new AdaptiveCacheManager that analyzes access patterns and adjusts the caching strategy dynamically.
- Introduced predictive caching features with a PredictiveCacheManager to prefetch content based on access patterns.
- Added a CacheWarmer to preload popular content into the cache, improving access times for frequently requested files.
- Refactored memory management with a DynamicCacheManager to adjust cache sizes based on system memory usage.
- Enhanced VFS interface and file metadata handling to support new features and improve performance.
- Updated tests to validate new caching behaviors and ensure reliability of the caching system.
2025-09-21 22:47:13 -05:00

154 lines
4.2 KiB
Go

package memory

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// MemoryMonitor tracks system memory usage and provides dynamic sizing recommendations.
type MemoryMonitor struct {
	targetMemoryUsage   uint64 // Target total memory usage in bytes
	currentMemoryUsage  uint64 // Current total memory usage in bytes (updated atomically)
	monitoringInterval  time.Duration
	adjustmentThreshold float64 // Threshold for cache size adjustments (e.g., 0.1 = 10%)
	mu                  sync.RWMutex
	stopChan            chan struct{}
	isMonitoring        int32
}

// NewMemoryMonitor creates a new memory monitor.
func NewMemoryMonitor(targetMemoryUsage uint64, monitoringInterval time.Duration, adjustmentThreshold float64) *MemoryMonitor {
	return &MemoryMonitor{
		targetMemoryUsage:   targetMemoryUsage,
		monitoringInterval:  monitoringInterval,
		adjustmentThreshold: adjustmentThreshold,
		stopChan:            make(chan struct{}),
	}
}
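
// Example usage (an illustrative sketch; the parameter values below are
// examples, not defaults from this package):
//
//	mon := NewMemoryMonitor(512<<20, 5*time.Second, 0.1) // 512 MiB target, poll every 5s, act on >10% overage
//	mon.Start()
//	defer mon.Stop()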

// Start begins monitoring memory usage. It is a no-op if monitoring is
// already running.
func (mm *MemoryMonitor) Start() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 0, 1) {
		go mm.monitor()
	}
}

// Stop stops monitoring memory usage. Note the monitor is one-shot: stopChan
// is closed here, so a Start after Stop launches a goroutine that exits
// immediately.
func (mm *MemoryMonitor) Stop() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 1, 0) {
		close(mm.stopChan)
	}
}

// GetCurrentMemoryUsage returns the current total memory usage.
func (mm *MemoryMonitor) GetCurrentMemoryUsage() uint64 {
	return atomic.LoadUint64(&mm.currentMemoryUsage)
}

// GetTargetMemoryUsage returns the target memory usage.
func (mm *MemoryMonitor) GetTargetMemoryUsage() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return mm.targetMemoryUsage
}

// GetMemoryUtilization returns current usage as a fraction of the target
// (1.0 = exactly at target). It returns 0 if no target is set.
func (mm *MemoryMonitor) GetMemoryUtilization() float64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	if mm.targetMemoryUsage == 0 {
		return 0
	}
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	return float64(current) / float64(mm.targetMemoryUsage)
}

// GetRecommendedCacheSize calculates the recommended cache size based on
// current memory usage.
func (mm *MemoryMonitor) GetRecommendedCacheSize(originalCacheSize uint64) uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	target := mm.targetMemoryUsage

	// If we're at or under target, we can use the full cache size.
	if current <= target {
		return originalCacheSize
	}

	// Calculate how far we're over target.
	overage := current - target

	// If the overage is significant, reduce the cache size.
	if overage > uint64(float64(target)*mm.adjustmentThreshold) {
		// Reduce the cache size by the overage amount, but don't go below
		// 10% of the original size.
		minCacheSize := uint64(float64(originalCacheSize) * 0.1)
		// Guard against uint64 underflow when the overage exceeds the
		// original cache size.
		if overage >= originalCacheSize {
			return minCacheSize
		}
		recommendedSize := originalCacheSize - overage
		if recommendedSize < minCacheSize {
			recommendedSize = minCacheSize
		}
		return recommendedSize
	}
	return originalCacheSize
}
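
// Worked example (illustrative numbers, not values from this package): with
// target = 1024 MiB, adjustmentThreshold = 0.1, and an original cache size of
// 768 MiB, a current usage of 1224 MiB gives an overage of 200 MiB. That
// exceeds 0.1*1024 = ~102 MiB, so the recommendation is 768 - 200 = 568 MiB.
// A current usage of 1074 MiB (overage 50 MiB) stays under the threshold and
// the full 768 MiB is recommended.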

// monitor runs the memory monitoring loop.
func (mm *MemoryMonitor) monitor() {
	ticker := time.NewTicker(mm.monitoringInterval)
	defer ticker.Stop()
	for {
		select {
		case <-mm.stopChan:
			return
		case <-ticker.C:
			mm.updateMemoryUsage()
		}
	}
}

// updateMemoryUsage updates the current memory usage.
func (mm *MemoryMonitor) updateMemoryUsage() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// Use Alloc (currently allocated heap memory) as our metric.
	atomic.StoreUint64(&mm.currentMemoryUsage, m.Alloc)
}

// SetTargetMemoryUsage updates the target memory usage.
func (mm *MemoryMonitor) SetTargetMemoryUsage(target uint64) {
	mm.mu.Lock()
	defer mm.mu.Unlock()
	mm.targetMemoryUsage = target
}

// GetMemoryStats returns detailed memory statistics.
func (mm *MemoryMonitor) GetMemoryStats() map[string]interface{} {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	// Compute utilization inline rather than calling GetMemoryUtilization,
	// which would try to re-acquire mm.mu and can deadlock if a writer is
	// waiting between the two read locks.
	utilization := 0.0
	if mm.targetMemoryUsage > 0 {
		utilization = float64(current) / float64(mm.targetMemoryUsage)
	}
	return map[string]interface{}{
		"current_usage":  current,
		"target_usage":   mm.targetMemoryUsage,
		"utilization":    utilization,
		"heap_alloc":     m.HeapAlloc,
		"heap_sys":       m.HeapSys,
		"heap_idle":      m.HeapIdle,
		"heap_inuse":     m.HeapInuse,
		"stack_inuse":    m.StackInuse,
		"stack_sys":      m.StackSys,
		"gc_cycles":      m.NumGC,
		"gc_pause_total": m.PauseTotalNs,
	}
}
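
A minimal caller-side sketch of how this monitor might be wired up. The import path is assumed from the file's location in the repository, and the 128 MiB cache budget and polling loop are hypothetical, not part of monitor.go:

package main

import (
	"fmt"
	"time"

	"steamcache2/vfs/memory" // assumed import path, taken from this file's location
)

func main() {
	// Target ~256 MiB of heap; sample every 2s; react to >10% overage.
	mon := memory.NewMemoryMonitor(256<<20, 2*time.Second, 0.1)
	mon.Start()
	defer mon.Stop()

	const originalCacheSize uint64 = 128 << 20 // hypothetical 128 MiB cache budget

	// Periodically ask for a recommended cache size; a real caller would
	// hand this value to its cache's resize hook.
	for i := 0; i < 5; i++ {
		time.Sleep(2 * time.Second)
		recommended := mon.GetRecommendedCacheSize(originalCacheSize)
		fmt.Printf("utilization=%.0f%% recommended=%d MiB\n",
			mon.GetMemoryUtilization()*100, recommended>>20)
	}
}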