steamcache2/vfs/memory/monitor.go
Justin Harms bfe29dea75
Refactor caching and memory management components
- Updated the caching logic to use a predictive cache warmer that prefetches content based on observed access patterns.
- Replaced the legacy warming system with the predictive approach, improving performance and resource management.
- Refactored memory management to adjust cache size dynamically based on memory usage.
- Simplified the VFS interface and improved concurrency with sharded locks for multi-threaded workloads.
- Extended the tests to cover the new caching and memory management behaviors.
2025-09-22 01:59:15 -05:00

package memory

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)
// MemoryMonitor tracks the process's Go heap usage (via runtime.MemStats)
// and provides dynamic cache sizing recommendations.
type MemoryMonitor struct {
	targetMemoryUsage   uint64 // Target total memory usage in bytes
	currentMemoryUsage  uint64 // Current total memory usage in bytes
	monitoringInterval  time.Duration
	adjustmentThreshold float64 // Threshold for cache size adjustments (e.g., 0.1 = 10%)
	mu                  sync.RWMutex
	stopChan            chan struct{}
	isMonitoring        int32

	// Dynamic cache management fields
	originalCacheSize  uint64
	currentCacheSize   uint64
	cache              interface{} // Generic cache interface
	adjustmentInterval time.Duration
	lastAdjustment     time.Time
	adjustmentCount    int64
	isAdjusting        int32
}
// NewMemoryMonitor creates a new memory monitor.
func NewMemoryMonitor(targetMemoryUsage uint64, monitoringInterval time.Duration, adjustmentThreshold float64) *MemoryMonitor {
	return &MemoryMonitor{
		targetMemoryUsage:   targetMemoryUsage,
		monitoringInterval:  monitoringInterval,
		adjustmentThreshold: adjustmentThreshold,
		stopChan:            make(chan struct{}),
		adjustmentInterval:  30 * time.Second, // Default adjustment interval
	}
}

// NewMemoryMonitorWithCache creates a new memory monitor with cache management.
func NewMemoryMonitorWithCache(targetMemoryUsage uint64, monitoringInterval time.Duration, adjustmentThreshold float64, cache interface{}, originalCacheSize uint64) *MemoryMonitor {
	mm := NewMemoryMonitor(targetMemoryUsage, monitoringInterval, adjustmentThreshold)
	mm.cache = cache
	mm.originalCacheSize = originalCacheSize
	mm.currentCacheSize = originalCacheSize
	return mm
}
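
// Hypothetical usage sketch (not part of the original file): construct a
// monitor targeting 512 MiB, sample every 5 seconds, and tolerate a 10%
// overage before recommending a smaller cache. The sizes and intervals
// below are illustrative assumptions, not values from the calling code.
//
//	mm := NewMemoryMonitor(512<<20, 5*time.Second, 0.1)
//	mm.Start()
//	defer mm.Stop() // Stop is one-shot; see the note on Stop below.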

// Start begins monitoring memory usage. Calling Start on an already
// running monitor is a no-op.
func (mm *MemoryMonitor) Start() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 0, 1) {
		go mm.monitor()
	}
}

// Stop stops monitoring memory usage. Stop is one-shot: it closes stopChan,
// so the monitor (and any adjustment loop) cannot be restarted afterwards.
func (mm *MemoryMonitor) Stop() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 1, 0) {
		close(mm.stopChan)
	}
}

// GetCurrentMemoryUsage returns the current total memory usage.
func (mm *MemoryMonitor) GetCurrentMemoryUsage() uint64 {
	// currentMemoryUsage is only ever accessed atomically, so no lock is needed.
	return atomic.LoadUint64(&mm.currentMemoryUsage)
}

// GetTargetMemoryUsage returns the target memory usage.
func (mm *MemoryMonitor) GetTargetMemoryUsage() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return mm.targetMemoryUsage
}

// GetMemoryUtilization returns the current memory utilization as a fraction
// of the target (e.g., 1.2 means 20% over target).
func (mm *MemoryMonitor) GetMemoryUtilization() float64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	target := mm.targetMemoryUsage
	if target == 0 {
		return 0 // Avoid division by zero when no target is configured.
	}
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	return float64(current) / float64(target)
}

// GetRecommendedCacheSize calculates the recommended cache size based on
// current memory usage.
func (mm *MemoryMonitor) GetRecommendedCacheSize(originalCacheSize uint64) uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	target := mm.targetMemoryUsage

	// If we're at or under target, we can use the full cache size.
	if current <= target {
		return originalCacheSize
	}

	// Calculate how much we're over target.
	overage := current - target

	// If the overage is significant, reduce the cache size.
	if overage > uint64(float64(target)*mm.adjustmentThreshold) {
		// Reduce the cache size by the overage, but never below 10% of the
		// original. Guard against unsigned underflow when the overage
		// exceeds the original size.
		minCacheSize := uint64(float64(originalCacheSize) * 0.1)
		if overage >= originalCacheSize {
			return minCacheSize
		}
		recommendedSize := originalCacheSize - overage
		if recommendedSize < minCacheSize {
			recommendedSize = minCacheSize
		}
		return recommendedSize
	}
	return originalCacheSize
}
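
// Worked example (illustrative numbers, not from the calling code): with a
// 1024 MiB target, 1324 MiB currently allocated, and a 0.1 threshold, the
// overage is 300 MiB, which exceeds the 102.4 MiB threshold. For a 512 MiB
// original cache the recommendation is 512 MiB - 300 MiB = 212 MiB, still
// above the 51.2 MiB (10%) floor, so 212 MiB is returned.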

// monitor runs the memory monitoring loop.
func (mm *MemoryMonitor) monitor() {
	ticker := time.NewTicker(mm.monitoringInterval)
	defer ticker.Stop()
	for {
		select {
		case <-mm.stopChan:
			return
		case <-ticker.C:
			mm.updateMemoryUsage()
		}
	}
}

// updateMemoryUsage samples the Go runtime and records the current usage.
func (mm *MemoryMonitor) updateMemoryUsage() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// Use Alloc (bytes of allocated heap objects in this process) as the
	// metric; this tracks the Go heap, not whole-system memory.
	atomic.StoreUint64(&mm.currentMemoryUsage, m.Alloc)
}

// SetTargetMemoryUsage updates the target memory usage.
func (mm *MemoryMonitor) SetTargetMemoryUsage(target uint64) {
	mm.mu.Lock()
	defer mm.mu.Unlock()
	mm.targetMemoryUsage = target
}

// GetMemoryStats returns detailed memory statistics.
func (mm *MemoryMonitor) GetMemoryStats() map[string]interface{} {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	mm.mu.RLock()
	defer mm.mu.RUnlock()

	// Compute utilization inline rather than calling GetMemoryUtilization,
	// which would re-acquire mm.mu and can deadlock if a writer is waiting.
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	utilization := 0.0
	if mm.targetMemoryUsage > 0 {
		utilization = float64(current) / float64(mm.targetMemoryUsage)
	}

	return map[string]interface{}{
		"current_usage":  current,
		"target_usage":   mm.targetMemoryUsage,
		"utilization":    utilization,
		"heap_alloc":     m.HeapAlloc,
		"heap_sys":       m.HeapSys,
		"heap_idle":      m.HeapIdle,
		"heap_inuse":     m.HeapInuse,
		"stack_inuse":    m.StackInuse,
		"stack_sys":      m.StackSys,
		"gc_cycles":      m.NumGC,
		"gc_pause_total": m.PauseTotalNs,
	}
}

// Dynamic cache management methods.

// StartDynamicAdjustment begins the dynamic cache size adjustment process.
// It should be called at most once: each call starts a new adjustment loop.
func (mm *MemoryMonitor) StartDynamicAdjustment() {
	if mm.cache != nil {
		go mm.adjustmentLoop()
	}
}
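
// Hypothetical usage sketch (not part of the original file): wire the
// monitor to a cache and let it shrink the cache when the process heap
// runs over target. The cache value and sizes are assumptions for
// illustration only.
//
//	// cache is any value whose type implements ForceGC(uint).
//	mm := NewMemoryMonitorWithCache(1<<30, 5*time.Second, 0.1, cache, 512<<20)
//	mm.Start()                  // begin sampling runtime.MemStats
//	mm.StartDynamicAdjustment() // begin resizing the cache toward target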

// GetCurrentCacheSize returns the current cache size.
func (mm *MemoryMonitor) GetCurrentCacheSize() uint64 {
	// currentCacheSize is only ever accessed atomically, so no lock is needed.
	return atomic.LoadUint64(&mm.currentCacheSize)
}

// GetOriginalCacheSize returns the original cache size.
func (mm *MemoryMonitor) GetOriginalCacheSize() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return mm.originalCacheSize
}

// GetAdjustmentCount returns the number of adjustments made.
func (mm *MemoryMonitor) GetAdjustmentCount() int64 {
	return atomic.LoadInt64(&mm.adjustmentCount)
}

// adjustmentLoop runs the cache size adjustment loop until Stop is called.
func (mm *MemoryMonitor) adjustmentLoop() {
	ticker := time.NewTicker(mm.adjustmentInterval)
	defer ticker.Stop()
	for {
		select {
		case <-mm.stopChan:
			// Exit with the monitor so the goroutine does not leak.
			return
		case <-ticker.C:
			mm.performAdjustment()
		}
	}
}

// performAdjustment performs a cache size adjustment if needed.
func (mm *MemoryMonitor) performAdjustment() {
	// Prevent concurrent adjustments.
	if !atomic.CompareAndSwapInt32(&mm.isAdjusting, 0, 1) {
		return
	}
	defer atomic.StoreInt32(&mm.isAdjusting, 0)

	// Check if enough time has passed since the last adjustment. Read
	// lastAdjustment under the lock; it is also read by GetDynamicStats.
	mm.mu.RLock()
	last := mm.lastAdjustment
	mm.mu.RUnlock()
	if time.Since(last) < mm.adjustmentInterval {
		return
	}

	// Get the recommended cache size.
	recommendedSize := mm.GetRecommendedCacheSize(mm.originalCacheSize)
	currentSize := atomic.LoadUint64(&mm.currentCacheSize)
	if currentSize == 0 {
		return // Nothing to compare against; avoid division by zero.
	}

	// Only adjust if there's a significant difference (more than 5%).
	sizeDiff := float64(recommendedSize) / float64(currentSize)
	if sizeDiff < 0.95 || sizeDiff > 1.05 {
		mm.adjustCacheSize(recommendedSize)
		mm.mu.Lock()
		mm.lastAdjustment = time.Now()
		mm.mu.Unlock()
		atomic.AddInt64(&mm.adjustmentCount, 1)
	}
}

// adjustCacheSize adjusts the cache size to the recommended size.
func (mm *MemoryMonitor) adjustCacheSize(newSize uint64) {
	mm.mu.Lock()
	defer mm.mu.Unlock()
	oldSize := atomic.LoadUint64(&mm.currentCacheSize)
	atomic.StoreUint64(&mm.currentCacheSize, newSize)

	// If we're reducing the cache size, trigger GC to free up memory.
	if newSize < oldSize {
		// Calculate how much to free.
		bytesToFree := oldSize - newSize
		// Trigger GC on the cache to free up the excess memory. This is a
		// simplified approach - in practice, you'd want to integrate with
		// the actual GC system to free the right amount.
		if gcCache, ok := mm.cache.(interface{ ForceGC(uint) }); ok {
			gcCache.ForceGC(uint(bytesToFree))
		}
	}
}
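
// Hypothetical sketch (not part of the original file): any cache whose type
// has a ForceGC(uint) method satisfies the structural interface asserted
// above. The type name and eviction strategy are assumptions for
// illustration only.
//
//	type lruCache struct{ /* ... */ }
//
//	// ForceGC evicts entries until roughly n bytes have been freed.
//	func (c *lruCache) ForceGC(n uint) { /* evict oldest entries */ }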

// GetDynamicStats returns statistics about the dynamic cache manager.
func (mm *MemoryMonitor) GetDynamicStats() map[string]interface{} {
	mm.mu.RLock()
	defer mm.mu.RUnlock()

	// Compute derived values inline; calling the exported getters here
	// would re-acquire mm.mu and can deadlock if a writer is waiting.
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	utilization := 0.0
	if mm.targetMemoryUsage > 0 {
		utilization = float64(current) / float64(mm.targetMemoryUsage)
	}

	return map[string]interface{}{
		"original_cache_size":  mm.originalCacheSize,
		"current_cache_size":   atomic.LoadUint64(&mm.currentCacheSize),
		"adjustment_count":     atomic.LoadInt64(&mm.adjustmentCount),
		"last_adjustment":      mm.lastAdjustment,
		"memory_utilization":   utilization,
		"target_memory_usage":  mm.targetMemoryUsage,
		"current_memory_usage": current,
	}
}