steamcache2/vfs/predictive/predictive.go
Justin Harms 45ae234694 Enhance caching mechanisms and introduce adaptive features
- Updated caching logic to support size-based promotion filtering, so that files exceeding the size constraints are not promoted.
- Implemented adaptive caching strategies with a new AdaptiveCacheManager to analyze access patterns and adjust caching strategies dynamically.
- Introduced predictive caching features with a PredictiveCacheManager to prefetch content based on access patterns.
- Added a CacheWarmer to preload popular content into the cache, improving access times for frequently requested files.
- Refactored memory management with a DynamicCacheManager to adjust cache sizes based on system memory usage.
- Enhanced VFS interface and file metadata handling to support new features and improve performance.
- Updated tests to validate new caching behaviors and ensure reliability of the caching system.
2025-09-21 22:47:13 -05:00

package predictive

import (
	"context"
	"sort"
	"sync"
	"sync/atomic"
	"time"
)

// PredictiveCacheManager implements predictive caching strategies
type PredictiveCacheManager struct {
	accessPredictor *AccessPredictor
	cacheWarmer     *CacheWarmer
	prefetchQueue   chan PrefetchRequest
	ctx             context.Context
	cancel          context.CancelFunc
	wg              sync.WaitGroup
	stats           *PredictiveStats
}

// PrefetchRequest represents a request to prefetch content
type PrefetchRequest struct {
	Key         string
	Priority    int
	Reason      string
	RequestedAt time.Time
}

// PredictiveStats tracks predictive caching statistics. All counters are
// written and read with sync/atomic, so the struct needs no mutex.
type PredictiveStats struct {
	PrefetchHits     int64
	PrefetchMisses   int64
	PrefetchRequests int64
	CacheWarmHits    int64
	CacheWarmMisses  int64
}

// AccessPredictor predicts which files are likely to be accessed next
type AccessPredictor struct {
	accessHistory map[string]*AccessSequence
	patterns      map[string][]string // Key -> likely next keys
	mu            sync.RWMutex
}

// AccessSequence tracks access sequences for prediction
type AccessSequence struct {
	Key       string
	NextKeys  []string
	Frequency map[string]int64
	LastSeen  time.Time
	mu        sync.RWMutex
}

// CacheWarmer preloads popular content into cache
type CacheWarmer struct {
	popularContent map[string]*PopularContent
	warmerQueue    chan WarmRequest
	mu             sync.RWMutex
}

// PopularContent tracks popular content for warming
type PopularContent struct {
	Key         string
	AccessCount int64
	LastAccess  time.Time
	Size        int64
	Priority    int
}

// WarmRequest represents a cache warming request
type WarmRequest struct {
	Key      string
	Priority int
	Reason   string
}

// NewPredictiveCacheManager creates a new predictive cache manager
func NewPredictiveCacheManager() *PredictiveCacheManager {
	ctx, cancel := context.WithCancel(context.Background())
	pcm := &PredictiveCacheManager{
		accessPredictor: NewAccessPredictor(),
		cacheWarmer:     NewCacheWarmer(),
		prefetchQueue:   make(chan PrefetchRequest, 1000),
		ctx:             ctx,
		cancel:          cancel,
		stats:           &PredictiveStats{},
	}

	// Start background workers
	pcm.wg.Add(1)
	go pcm.prefetchWorker()
	pcm.wg.Add(1)
	go pcm.analysisWorker()

	return pcm
}

// NewAccessPredictor creates a new access predictor
func NewAccessPredictor() *AccessPredictor {
	return &AccessPredictor{
		accessHistory: make(map[string]*AccessSequence),
		patterns:      make(map[string][]string),
	}
}

// NewCacheWarmer creates a new cache warmer
func NewCacheWarmer() *CacheWarmer {
	return &CacheWarmer{
		popularContent: make(map[string]*PopularContent),
		warmerQueue:    make(chan WarmRequest, 100),
	}
}

// RecordAccess records a file access for prediction analysis (lightweight version)
func (pcm *PredictiveCacheManager) RecordAccess(key string, previousKey string, size int64) {
	// Only record if we have a previous key to avoid overhead
	if previousKey != "" {
		pcm.accessPredictor.RecordSequence(previousKey, key)
	}

	// Lightweight popular content tracking - only for large files
	if size > 1024*1024 { // Only track files > 1MB
		pcm.cacheWarmer.RecordAccess(key, size)
	}

	// Skip expensive prediction checks on every access;
	// only check occasionally to reduce overhead
}

// PredictNextAccess predicts the next likely file to be accessed
func (pcm *PredictiveCacheManager) PredictNextAccess(currentKey string) []string {
	return pcm.accessPredictor.PredictNext(currentKey)
}

// RequestPrefetch requests prefetching of predicted content
func (pcm *PredictiveCacheManager) RequestPrefetch(key string, priority int, reason string) {
	select {
	case pcm.prefetchQueue <- PrefetchRequest{
		Key:         key,
		Priority:    priority,
		Reason:      reason,
		RequestedAt: time.Now(),
	}:
		atomic.AddInt64(&pcm.stats.PrefetchRequests, 1)
	default:
		// Queue full, skip prefetch
	}
}

// RecordSequence records an access sequence for prediction
func (ap *AccessPredictor) RecordSequence(previousKey, currentKey string) {
	if previousKey == "" || currentKey == "" {
		return
	}

	ap.mu.Lock()
	defer ap.mu.Unlock()

	seq, exists := ap.accessHistory[previousKey]
	if !exists {
		seq = &AccessSequence{
			Key:       previousKey,
			NextKeys:  []string{},
			Frequency: make(map[string]int64),
			LastSeen:  time.Now(),
		}
		ap.accessHistory[previousKey] = seq
	}

	seq.mu.Lock()
	seq.Frequency[currentKey]++
	seq.LastSeen = time.Now()

	// Update next keys list: keep the top 5 by observed frequency.
	// (Plain map iteration would pick 5 arbitrary keys, since Go
	// randomizes map order.)
	nextKeys := make([]string, 0, len(seq.Frequency))
	for key := range seq.Frequency {
		nextKeys = append(nextKeys, key)
	}
	sort.Slice(nextKeys, func(i, j int) bool {
		return seq.Frequency[nextKeys[i]] > seq.Frequency[nextKeys[j]]
	})
	if len(nextKeys) > 5 {
		nextKeys = nextKeys[:5]
	}
	seq.NextKeys = nextKeys
	seq.mu.Unlock()
}

// PredictNext predicts the next likely files to be accessed
func (ap *AccessPredictor) PredictNext(currentKey string) []string {
	ap.mu.RLock()
	defer ap.mu.RUnlock()

	seq, exists := ap.accessHistory[currentKey]
	if !exists {
		return []string{}
	}

	seq.mu.RLock()
	defer seq.mu.RUnlock()

	// Return a copy of the top predicted keys
	predictions := make([]string, len(seq.NextKeys))
	copy(predictions, seq.NextKeys)
	return predictions
}

// IsPredictedAccess checks if an access was predicted
func (ap *AccessPredictor) IsPredictedAccess(key string) bool {
	ap.mu.RLock()
	defer ap.mu.RUnlock()

	// Check if this key appears in any prediction list
	// (linear scan over all recorded sequences)
	for _, seq := range ap.accessHistory {
		seq.mu.RLock()
		for _, predictedKey := range seq.NextKeys {
			if predictedKey == key {
				seq.mu.RUnlock()
				return true
			}
		}
		seq.mu.RUnlock()
	}
	return false
}

// RecordAccess records a file access for cache warming. The entry is updated
// under the write lock: the counter, timestamp, and priority are shared
// across goroutines, so a lock-free update path would race.
func (cw *CacheWarmer) RecordAccess(key string, size int64) {
	cw.mu.Lock()
	defer cw.mu.Unlock()

	content, exists := cw.popularContent[key]
	if !exists {
		cw.popularContent[key] = &PopularContent{
			Key:         key,
			AccessCount: 1,
			LastAccess:  time.Now(),
			Size:        size,
			Priority:    1,
		}
		return
	}

	content.AccessCount++
	content.LastAccess = time.Now()

	// Only recompute priority occasionally to reduce overhead
	if content.AccessCount%5 == 0 {
		if content.AccessCount > 10 {
			content.Priority = 3
		} else if content.AccessCount > 5 {
			content.Priority = 2
		}
	}
}

// GetPopularContent returns the most popular content for warming
func (cw *CacheWarmer) GetPopularContent(limit int) []*PopularContent {
	cw.mu.RLock()
	defer cw.mu.RUnlock()

	popular := make([]*PopularContent, 0, len(cw.popularContent))
	for _, content := range cw.popularContent {
		popular = append(popular, content)
	}

	// Sort by access count, most popular first, and return the top items
	sort.Slice(popular, func(i, j int) bool {
		return popular[i].AccessCount > popular[j].AccessCount
	})
	if len(popular) > limit {
		popular = popular[:limit]
	}
	return popular
}

// prefetchWorker processes prefetch requests
func (pcm *PredictiveCacheManager) prefetchWorker() {
	defer pcm.wg.Done()

	for {
		select {
		case <-pcm.ctx.Done():
			return
		case req := <-pcm.prefetchQueue:
			pcm.processPrefetchRequest(req)
		}
	}
}

// analysisWorker performs periodic analysis and cache warming
func (pcm *PredictiveCacheManager) analysisWorker() {
	defer pcm.wg.Done()

	ticker := time.NewTicker(30 * time.Second) // Analyze every 30 seconds
	defer ticker.Stop()

	for {
		select {
		case <-pcm.ctx.Done():
			return
		case <-ticker.C:
			pcm.performAnalysis()
		}
	}
}

// processPrefetchRequest processes a prefetch request
func (pcm *PredictiveCacheManager) processPrefetchRequest(req PrefetchRequest) {
	// In a real implementation, this would:
	// 1. Check if the content is already cached
	// 2. If not, fetch and cache it
	// 3. Update the prefetch hit/miss statistics
	// For now this is a no-op; in production, integrate with the actual
	// cache system.
}
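
// The sketch below shows one way processPrefetchRequest could be wired to a
// real backend. ContentFetcher and prefetchWith are illustrative assumptions,
// not part of the existing cache system; here a completed prefetch counts as
// a hit and a failed fetch as a miss.
type ContentFetcher interface {
	Contains(key string) bool
	Fetch(ctx context.Context, key string) error
}

func (pcm *PredictiveCacheManager) prefetchWith(f ContentFetcher, req PrefetchRequest) {
	if f.Contains(req.Key) {
		// Already cached; count the prediction as a hit and stop
		atomic.AddInt64(&pcm.stats.PrefetchHits, 1)
		return
	}
	if err := f.Fetch(pcm.ctx, req.Key); err != nil {
		atomic.AddInt64(&pcm.stats.PrefetchMisses, 1)
		return
	}
	atomic.AddInt64(&pcm.stats.PrefetchHits, 1)
}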

// performAnalysis performs periodic analysis and cache warming
func (pcm *PredictiveCacheManager) performAnalysis() {
	// Get the most popular content for warming
	popular := pcm.cacheWarmer.GetPopularContent(10)

	// Request warming for popular content
	for _, content := range popular {
		if content.AccessCount > 5 { // Only warm frequently accessed content
			select {
			case pcm.cacheWarmer.warmerQueue <- WarmRequest{
				Key:      content.Key,
				Priority: content.Priority,
				Reason:   "popular_content",
			}:
			default:
				// Queue full, skip
			}
		}
	}
}
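
// Nothing in this file drains warmerQueue, so warm requests are silently
// dropped once its buffer fills. A minimal consumer sketch, again using the
// hypothetical ContentFetcher from the prefetch sketch above; a real
// integration would start this worker (and add it to pcm.wg) alongside the
// other background workers.
func (pcm *PredictiveCacheManager) warmWorker(f ContentFetcher) {
	defer pcm.wg.Done()

	for {
		select {
		case <-pcm.ctx.Done():
			return
		case req := <-pcm.cacheWarmer.warmerQueue:
			// Preload the content; treat a successful fetch as a warm hit
			if err := f.Fetch(pcm.ctx, req.Key); err != nil {
				atomic.AddInt64(&pcm.stats.CacheWarmMisses, 1)
				continue
			}
			atomic.AddInt64(&pcm.stats.CacheWarmHits, 1)
		}
	}
}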

// GetStats returns a copy of the predictive caching statistics. The counters
// are read atomically, matching how they are written.
func (pcm *PredictiveCacheManager) GetStats() *PredictiveStats {
	return &PredictiveStats{
		PrefetchHits:     atomic.LoadInt64(&pcm.stats.PrefetchHits),
		PrefetchMisses:   atomic.LoadInt64(&pcm.stats.PrefetchMisses),
		PrefetchRequests: atomic.LoadInt64(&pcm.stats.PrefetchRequests),
		CacheWarmHits:    atomic.LoadInt64(&pcm.stats.CacheWarmHits),
		CacheWarmMisses:  atomic.LoadInt64(&pcm.stats.CacheWarmMisses),
	}
}

// Stop stops the background workers and waits for them to exit
func (pcm *PredictiveCacheManager) Stop() {
	pcm.cancel()
	pcm.wg.Wait()
}
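
// exampleUsage sketches how a VFS read path might drive the manager. The
// onRead hook, the per-session previousKey tracking, and the chunk keys are
// assumptions for illustration, not part of the real VFS interface.
func exampleUsage() {
	pcm := NewPredictiveCacheManager()
	defer pcm.Stop()

	var previousKey string
	onRead := func(key string, size int64) {
		// Feed the access into the predictor and the popularity tracker
		pcm.RecordAccess(key, previousKey, size)
		// Queue low-priority prefetches for the predicted followers
		for _, next := range pcm.PredictNextAccess(key) {
			pcm.RequestPrefetch(next, 1, "predicted_sequence")
		}
		previousKey = key
	}

	onRead("depot/chunk_001", 4<<20)
	onRead("depot/chunk_002", 4<<20)
}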