All checks were successful
Release Tag / release (push) Successful in 9s
- Changed module name from `s1d3sw1ped/SteamCache2` to `s1d3sw1ped/steamcache2` for consistency. - Updated all import paths and references throughout the codebase to reflect the new module name. - Adjusted README and Makefile to use the updated module name, ensuring clarity in usage instructions.
131 lines
4.0 KiB
Go
package memory
|
|
|
|
import (
|
|
"s1d3sw1ped/steamcache2/vfs"
|
|
"sync"
|
|
"sync/atomic"
|
|
"time"
|
|
)
|
|
|
|
// DynamicCacheManager manages cache size adjustments based on system memory usage.
//
// A background loop (see Start/adjustmentLoop) periodically asks the
// MemoryMonitor for a recommended cache size and resizes when the
// recommendation differs significantly from the current size.
type DynamicCacheManager struct {
	originalCacheSize  uint64         // size configured at construction; never modified afterwards
	currentCacheSize   uint64         // effective size; read/written via sync/atomic
	memoryMonitor      *MemoryMonitor // supplies memory-usage readings and size recommendations
	cache              vfs.VFS        // the cache being resized; may optionally implement ForceGC(uint)
	adjustmentInterval time.Duration  // minimum time between two adjustments (set to 30s by the constructor)
	// lastAdjustment records when the last resize happened.
	// NOTE(review): GetStats reads this under mu, but performAdjustment
	// writes it without holding mu — verify the intended locking discipline.
	lastAdjustment time.Time
	mu             sync.RWMutex // read/write lock taken by the getters and adjustCacheSize
	adjustmentCount int64 // number of resizes performed; accessed atomically
	isAdjusting     int32 // CAS flag ensuring only one performAdjustment runs at a time
}
|
|
|
|
// NewDynamicCacheManager creates a new dynamic cache manager
|
|
func NewDynamicCacheManager(cache vfs.VFS, originalSize uint64, memoryMonitor *MemoryMonitor) *DynamicCacheManager {
|
|
return &DynamicCacheManager{
|
|
originalCacheSize: originalSize,
|
|
currentCacheSize: originalSize,
|
|
memoryMonitor: memoryMonitor,
|
|
cache: cache,
|
|
adjustmentInterval: 30 * time.Second, // Adjust every 30 seconds
|
|
}
|
|
}
|
|
|
|
// Start begins the dynamic cache size adjustment process.
//
// The adjustment loop runs in its own goroutine for the remaining lifetime
// of the process; no stop mechanism is provided here.
// NOTE(review): consider accepting a context (or adding a Stop method) so
// the goroutine can be shut down — confirm the intended lifetime.
func (dcm *DynamicCacheManager) Start() {
	go dcm.adjustmentLoop()
}
|
|
|
|
// GetCurrentCacheSize returns the current cache size
|
|
func (dcm *DynamicCacheManager) GetCurrentCacheSize() uint64 {
|
|
dcm.mu.RLock()
|
|
defer dcm.mu.RUnlock()
|
|
return atomic.LoadUint64(&dcm.currentCacheSize)
|
|
}
|
|
|
|
// GetOriginalCacheSize returns the original cache size
|
|
func (dcm *DynamicCacheManager) GetOriginalCacheSize() uint64 {
|
|
dcm.mu.RLock()
|
|
defer dcm.mu.RUnlock()
|
|
return dcm.originalCacheSize
|
|
}
|
|
|
|
// GetAdjustmentCount returns the number of adjustments made
|
|
func (dcm *DynamicCacheManager) GetAdjustmentCount() int64 {
|
|
return atomic.LoadInt64(&dcm.adjustmentCount)
|
|
}
|
|
|
|
// adjustmentLoop runs the cache size adjustment loop
|
|
func (dcm *DynamicCacheManager) adjustmentLoop() {
|
|
ticker := time.NewTicker(dcm.adjustmentInterval)
|
|
defer ticker.Stop()
|
|
|
|
for range ticker.C {
|
|
dcm.performAdjustment()
|
|
}
|
|
}
|
|
|
|
// performAdjustment performs a cache size adjustment if needed
|
|
func (dcm *DynamicCacheManager) performAdjustment() {
|
|
// Prevent concurrent adjustments
|
|
if !atomic.CompareAndSwapInt32(&dcm.isAdjusting, 0, 1) {
|
|
return
|
|
}
|
|
defer atomic.StoreInt32(&dcm.isAdjusting, 0)
|
|
|
|
// Check if enough time has passed since last adjustment
|
|
if time.Since(dcm.lastAdjustment) < dcm.adjustmentInterval {
|
|
return
|
|
}
|
|
|
|
// Get recommended cache size
|
|
recommendedSize := dcm.memoryMonitor.GetRecommendedCacheSize(dcm.originalCacheSize)
|
|
currentSize := atomic.LoadUint64(&dcm.currentCacheSize)
|
|
|
|
// Only adjust if there's a significant difference (more than 5%)
|
|
sizeDiff := float64(recommendedSize) / float64(currentSize)
|
|
if sizeDiff < 0.95 || sizeDiff > 1.05 {
|
|
dcm.adjustCacheSize(recommendedSize)
|
|
dcm.lastAdjustment = time.Now()
|
|
atomic.AddInt64(&dcm.adjustmentCount, 1)
|
|
}
|
|
}
|
|
|
|
// adjustCacheSize adjusts the cache size to the recommended size
|
|
func (dcm *DynamicCacheManager) adjustCacheSize(newSize uint64) {
|
|
dcm.mu.Lock()
|
|
defer dcm.mu.Unlock()
|
|
|
|
oldSize := atomic.LoadUint64(&dcm.currentCacheSize)
|
|
atomic.StoreUint64(&dcm.currentCacheSize, newSize)
|
|
|
|
// If we're reducing the cache size, trigger GC to free up memory
|
|
if newSize < oldSize {
|
|
// Calculate how much to free
|
|
bytesToFree := oldSize - newSize
|
|
|
|
// Trigger GC on the cache to free up the excess memory
|
|
// This is a simplified approach - in practice, you'd want to integrate
|
|
// with the actual GC system to free the right amount
|
|
if gcCache, ok := dcm.cache.(interface{ ForceGC(uint) }); ok {
|
|
gcCache.ForceGC(uint(bytesToFree))
|
|
}
|
|
}
|
|
}
|
|
|
|
// GetStats returns statistics about the dynamic cache manager
|
|
func (dcm *DynamicCacheManager) GetStats() map[string]interface{} {
|
|
dcm.mu.RLock()
|
|
defer dcm.mu.RUnlock()
|
|
|
|
return map[string]interface{}{
|
|
"original_cache_size": dcm.originalCacheSize,
|
|
"current_cache_size": atomic.LoadUint64(&dcm.currentCacheSize),
|
|
"adjustment_count": atomic.LoadInt64(&dcm.adjustmentCount),
|
|
"last_adjustment": dcm.lastAdjustment,
|
|
"memory_utilization": dcm.memoryMonitor.GetMemoryUtilization(),
|
|
"target_memory_usage": dcm.memoryMonitor.GetTargetMemoryUsage(),
|
|
"current_memory_usage": dcm.memoryMonitor.GetCurrentMemoryUsage(),
|
|
}
|
|
}
|