- Updated caching logic to support size-based promotion filtering, so that files exceeding the configured size constraints are not promoted.
- Implemented adaptive caching with a new AdaptiveCacheManager that analyzes access patterns and adjusts the caching strategy dynamically (a usage sketch follows this list).
- Introduced predictive caching with a PredictiveCacheManager that prefetches content based on access patterns.
- Added a CacheWarmer to preload popular content into the cache, improving access times for frequently requested files.
- Refactored memory management with a DynamicCacheManager that adjusts cache sizes based on system memory usage.
- Enhanced the VFS interface and file metadata handling to support the new features and improve performance.
- Updated tests to validate the new caching behaviors and ensure the reliability of the caching system.
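For context, here is a minimal sketch of how the AdaptiveCacheManager defined in the file below might be driven from a cache read path. The import path, the cache key, and the one-hour analysis interval are illustrative assumptions, not part of this change; only the calls themselves (NewAdaptiveCacheManager, RecordAccess, AdaptStrategy, GetAdaptationCount, Stop) come from the package shown here.

```go
package main

import (
	"fmt"
	"time"

	"example.com/lancache/internal/cache/adaptive" // hypothetical import path for the package below
)

func main() {
	// Analyze access patterns over one-hour windows (illustrative interval).
	acm := adaptive.NewAdaptiveCacheManager(time.Hour)
	defer acm.Stop()

	// On every cache read, record the key and object size so the analyzer
	// can classify the workload (burst, steady, sequential, random).
	acm.RecordAccess("depot/chunk-example", 1<<20) // hypothetical key and size

	// Periodically (or on a cache-manager tick) re-evaluate the strategy.
	strategy := acm.AdaptStrategy()
	fmt.Println("current strategy:", strategy, "adaptations:", acm.GetAdaptationCount())
}
```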
package adaptive

import (
	"context"
	"math"
	"sync"
	"sync/atomic"
	"time"
)

// WorkloadPattern represents different types of workload patterns
type WorkloadPattern int

const (
	PatternUnknown    WorkloadPattern = iota
	PatternSequential // Sequential file access (e.g., game installation)
	PatternRandom     // Random file access (e.g., game updates)
	PatternBurst      // Burst access (e.g., multiple users downloading same game)
	PatternSteady     // Steady access (e.g., popular games being accessed regularly)
)

// CacheStrategy represents different caching strategies
type CacheStrategy int

const (
	StrategyLRU CacheStrategy = iota
	StrategyLFU
	StrategySizeBased
	StrategyHybrid
	StrategyPredictive
)

// WorkloadAnalyzer analyzes access patterns to determine optimal caching strategies
type WorkloadAnalyzer struct {
	accessHistory    map[string]*AccessInfo
	patternCounts    map[WorkloadPattern]int64
	mu               sync.RWMutex
	analysisInterval time.Duration
	ctx              context.Context
	cancel           context.CancelFunc
}

// AccessInfo tracks access patterns for individual files
type AccessInfo struct {
	Key           string
	AccessCount   int64
	LastAccess    time.Time
	FirstAccess   time.Time
	AccessTimes   []time.Time
	Size          int64
	AccessPattern WorkloadPattern
	mu            sync.RWMutex
}

// AdaptiveCacheManager manages adaptive caching strategies
type AdaptiveCacheManager struct {
	analyzer        *WorkloadAnalyzer
	currentStrategy CacheStrategy
	adaptationCount int64
	mu              sync.RWMutex
}

// NewWorkloadAnalyzer creates a new workload analyzer
func NewWorkloadAnalyzer(analysisInterval time.Duration) *WorkloadAnalyzer {
	ctx, cancel := context.WithCancel(context.Background())

	analyzer := &WorkloadAnalyzer{
		accessHistory:    make(map[string]*AccessInfo),
		patternCounts:    make(map[WorkloadPattern]int64),
		analysisInterval: analysisInterval,
		ctx:              ctx,
		cancel:           cancel,
	}

	// Start background analysis with much longer interval to reduce overhead
	go analyzer.analyzePatterns()

	return analyzer
}

// RecordAccess records a file access for pattern analysis (lightweight version)
func (wa *WorkloadAnalyzer) RecordAccess(key string, size int64) {
	// Use read lock first for better performance
	wa.mu.RLock()
	info, exists := wa.accessHistory[key]
	wa.mu.RUnlock()

	if !exists {
		// Only acquire write lock when creating new entry
		wa.mu.Lock()
		// Double-check after acquiring write lock
		if _, exists = wa.accessHistory[key]; !exists {
			now := time.Now()
			info = &AccessInfo{
				Key:         key,
				AccessCount: 1,
				LastAccess:  now,
				FirstAccess: now,
				AccessTimes: []time.Time{now},
				Size:        size,
			}
			wa.accessHistory[key] = info
		}
		wa.mu.Unlock()
	} else {
		// Lightweight update - just increment counter and update timestamp
		info.mu.Lock()
		info.AccessCount++
		info.LastAccess = time.Now()
		// Append the new access time, then keep only the last 10 entries to
		// bound memory overhead
		info.AccessTimes = append(info.AccessTimes, time.Now())
		if len(info.AccessTimes) > 10 {
			info.AccessTimes = info.AccessTimes[len(info.AccessTimes)-10:]
		}
		info.mu.Unlock()
	}
}

// analyzePatterns analyzes access patterns in the background
func (wa *WorkloadAnalyzer) analyzePatterns() {
	ticker := time.NewTicker(wa.analysisInterval)
	defer ticker.Stop()

	for {
		select {
		case <-wa.ctx.Done():
			return
		case <-ticker.C:
			wa.performAnalysis()
		}
	}
}

// performAnalysis analyzes current access patterns
func (wa *WorkloadAnalyzer) performAnalysis() {
	wa.mu.Lock()
	defer wa.mu.Unlock()

	// Reset pattern counts
	wa.patternCounts = make(map[WorkloadPattern]int64)

	now := time.Now()
	cutoff := now.Add(-wa.analysisInterval * 2) // Analyze last 2 intervals

	for _, info := range wa.accessHistory {
		// Take the write lock because AccessPattern is updated below
		info.mu.Lock()
		if info.LastAccess.After(cutoff) {
			pattern := wa.determinePattern(info)
			info.AccessPattern = pattern
			wa.patternCounts[pattern]++
		}
		info.mu.Unlock()
	}
}

// determinePattern determines the access pattern for a file
func (wa *WorkloadAnalyzer) determinePattern(info *AccessInfo) WorkloadPattern {
	if len(info.AccessTimes) < 3 {
		return PatternUnknown
	}

	// Analyze access timing patterns
	intervals := make([]time.Duration, len(info.AccessTimes)-1)
	for i := 1; i < len(info.AccessTimes); i++ {
		intervals[i-1] = info.AccessTimes[i].Sub(info.AccessTimes[i-1])
	}

	// Calculate the spread (standard deviation) of access intervals. The math
	// is done in float64 seconds so squaring large durations cannot overflow
	// int64, and the result stays comparable to time.Duration thresholds.
	var sum, sumSquares float64
	for _, interval := range intervals {
		s := interval.Seconds()
		sum += s
		sumSquares += s * s
	}
	n := float64(len(intervals))
	avg := sum / n
	variance := sumSquares/n - avg*avg
	if variance < 0 {
		variance = 0 // guard against floating-point rounding
	}
	spread := time.Duration(math.Sqrt(variance) * float64(time.Second))

	// Determine pattern based on interval spread and access count
	switch {
	case info.AccessCount > 10 && spread < time.Minute:
		return PatternBurst
	case info.AccessCount > 5 && spread < time.Hour:
		return PatternSteady
	case spread < 5*time.Minute:
		return PatternSequential
	default:
		return PatternRandom
	}
}

// GetDominantPattern returns the most common access pattern
func (wa *WorkloadAnalyzer) GetDominantPattern() WorkloadPattern {
	wa.mu.RLock()
	defer wa.mu.RUnlock()

	var maxCount int64
	var dominantPattern WorkloadPattern

	for pattern, count := range wa.patternCounts {
		if count > maxCount {
			maxCount = count
			dominantPattern = pattern
		}
	}

	return dominantPattern
}

// GetAccessInfo returns access information for a key
func (wa *WorkloadAnalyzer) GetAccessInfo(key string) *AccessInfo {
	wa.mu.RLock()
	defer wa.mu.RUnlock()

	return wa.accessHistory[key]
}

// Stop stops the workload analyzer
func (wa *WorkloadAnalyzer) Stop() {
	wa.cancel()
}

// NewAdaptiveCacheManager creates a new adaptive cache manager
func NewAdaptiveCacheManager(analysisInterval time.Duration) *AdaptiveCacheManager {
	return &AdaptiveCacheManager{
		analyzer:        NewWorkloadAnalyzer(analysisInterval),
		currentStrategy: StrategyLRU, // Start with LRU
	}
}

// AdaptStrategy adapts the caching strategy based on workload patterns
func (acm *AdaptiveCacheManager) AdaptStrategy() CacheStrategy {
	acm.mu.Lock()
	defer acm.mu.Unlock()

	dominantPattern := acm.analyzer.GetDominantPattern()

	// Adapt strategy based on dominant pattern
	switch dominantPattern {
	case PatternBurst:
		acm.currentStrategy = StrategyLFU // LFU is good for burst patterns
	case PatternSteady:
		acm.currentStrategy = StrategyHybrid // Hybrid for steady patterns
	case PatternSequential:
		acm.currentStrategy = StrategySizeBased // Size-based for sequential
	case PatternRandom:
		acm.currentStrategy = StrategyLRU // LRU for random patterns
	default:
		acm.currentStrategy = StrategyLRU // Default to LRU
	}

	atomic.AddInt64(&acm.adaptationCount, 1)
	return acm.currentStrategy
}

// GetCurrentStrategy returns the current caching strategy
func (acm *AdaptiveCacheManager) GetCurrentStrategy() CacheStrategy {
	acm.mu.RLock()
	defer acm.mu.RUnlock()
	return acm.currentStrategy
}

// RecordAccess records a file access for analysis
func (acm *AdaptiveCacheManager) RecordAccess(key string, size int64) {
	acm.analyzer.RecordAccess(key, size)
}

// GetAdaptationCount returns the number of strategy adaptations
func (acm *AdaptiveCacheManager) GetAdaptationCount() int64 {
	return atomic.LoadInt64(&acm.adaptationCount)
}

// Stop stops the adaptive cache manager
func (acm *AdaptiveCacheManager) Stop() {
	acm.analyzer.Stop()
}