All checks were successful
Release Tag / release (push) Successful in 9s
- Updated the caching logic to utilize a predictive cache warmer, enhancing content prefetching based on access patterns. - Replaced the legacy warming system with a more efficient predictive approach, allowing for better performance and resource management. - Refactored memory management to integrate dynamic cache size adjustments based on system memory usage, improving overall efficiency. - Simplified the VFS interface and improved concurrency handling with sharded locks for better performance in multi-threaded environments. - Enhanced tests to validate the new caching and memory management behaviors, ensuring reliability and performance improvements.
426 lines
10 KiB
Go
426 lines
10 KiB
Go
package predictive
|
|
|
|
import (
|
|
"context"
|
|
"sync"
|
|
"sync/atomic"
|
|
"time"
|
|
)
|
|
|
|
// PredictiveCacheManager implements predictive caching strategies
|
|
type PredictiveCacheManager struct {
|
|
accessPredictor *AccessPredictor
|
|
cacheWarmer *CacheWarmer
|
|
prefetchQueue chan PrefetchRequest
|
|
ctx context.Context
|
|
cancel context.CancelFunc
|
|
wg sync.WaitGroup
|
|
stats *PredictiveStats
|
|
}
|
|
|
|
// PrefetchRequest represents a request to prefetch content
|
|
type PrefetchRequest struct {
|
|
Key string
|
|
Priority int
|
|
Reason string
|
|
RequestedAt time.Time
|
|
}
|
|
|
|
// PredictiveStats tracks predictive caching statistics
|
|
type PredictiveStats struct {
|
|
PrefetchHits int64
|
|
PrefetchMisses int64
|
|
PrefetchRequests int64
|
|
CacheWarmHits int64
|
|
CacheWarmMisses int64
|
|
mu sync.RWMutex
|
|
}
|
|
|
|
// AccessPredictor predicts which files are likely to be accessed next
|
|
type AccessPredictor struct {
|
|
accessHistory map[string]*AccessSequence
|
|
patterns map[string][]string // Key -> likely next keys
|
|
mu sync.RWMutex
|
|
}
|
|
|
|
// AccessSequence tracks access sequences for prediction
|
|
type AccessSequence struct {
|
|
Key string
|
|
NextKeys []string
|
|
Frequency map[string]int64
|
|
LastSeen time.Time
|
|
mu sync.RWMutex
|
|
}
|
|
|
|
// CacheWarmer preloads popular content into cache
|
|
type CacheWarmer struct {
|
|
popularContent map[string]*PopularContent
|
|
warmerQueue chan WarmRequest
|
|
mu sync.RWMutex
|
|
}
|
|
|
|
// PopularContent tracks popular content for warming
|
|
type PopularContent struct {
|
|
Key string
|
|
AccessCount int64
|
|
LastAccess time.Time
|
|
Size int64
|
|
Priority int
|
|
}
|
|
|
|
// WarmRequest represents a cache warming request
|
|
type WarmRequest struct {
|
|
Key string
|
|
Priority int
|
|
Reason string
|
|
Size int64
|
|
RequestedAt time.Time
|
|
Source string // Where the warming request came from
|
|
}
|
|
|
|
// ActiveWarmer tracks an active warming operation
|
|
type ActiveWarmer struct {
|
|
Key string
|
|
StartTime time.Time
|
|
Priority int
|
|
Reason string
|
|
mu sync.RWMutex
|
|
}
|
|
|
|
// WarmingStats tracks cache warming statistics
|
|
type WarmingStats struct {
|
|
WarmRequests int64
|
|
WarmSuccesses int64
|
|
WarmFailures int64
|
|
WarmBytes int64
|
|
WarmDuration time.Duration
|
|
PrefetchRequests int64
|
|
PrefetchSuccesses int64
|
|
PrefetchFailures int64
|
|
PrefetchBytes int64
|
|
PrefetchDuration time.Duration
|
|
}
|
|
|
|
// NewPredictiveCacheManager creates a new predictive cache manager
|
|
func NewPredictiveCacheManager() *PredictiveCacheManager {
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
pcm := &PredictiveCacheManager{
|
|
accessPredictor: NewAccessPredictor(),
|
|
cacheWarmer: NewCacheWarmer(),
|
|
prefetchQueue: make(chan PrefetchRequest, 1000),
|
|
ctx: ctx,
|
|
cancel: cancel,
|
|
stats: &PredictiveStats{},
|
|
}
|
|
|
|
// Start background workers
|
|
pcm.wg.Add(1)
|
|
go pcm.prefetchWorker()
|
|
|
|
pcm.wg.Add(1)
|
|
go pcm.analysisWorker()
|
|
|
|
return pcm
|
|
}
|
|
|
|
// NewAccessPredictor creates a new access predictor
|
|
func NewAccessPredictor() *AccessPredictor {
|
|
return &AccessPredictor{
|
|
accessHistory: make(map[string]*AccessSequence),
|
|
patterns: make(map[string][]string),
|
|
}
|
|
}
|
|
|
|
// NewCacheWarmer creates a new cache warmer
|
|
func NewCacheWarmer() *CacheWarmer {
|
|
return &CacheWarmer{
|
|
popularContent: make(map[string]*PopularContent),
|
|
warmerQueue: make(chan WarmRequest, 100),
|
|
}
|
|
}
|
|
|
|
// NewWarmingStats creates a new warming stats tracker
|
|
func NewWarmingStats() *WarmingStats {
|
|
return &WarmingStats{}
|
|
}
|
|
|
|
// NewActiveWarmer creates a new active warmer tracker
|
|
func NewActiveWarmer(key string, priority int, reason string) *ActiveWarmer {
|
|
return &ActiveWarmer{
|
|
Key: key,
|
|
StartTime: time.Now(),
|
|
Priority: priority,
|
|
Reason: reason,
|
|
}
|
|
}
|
|
|
|
// RecordAccess records a file access for prediction analysis (lightweight version)
|
|
func (pcm *PredictiveCacheManager) RecordAccess(key string, previousKey string, size int64) {
|
|
// Only record if we have a previous key to avoid overhead
|
|
if previousKey != "" {
|
|
pcm.accessPredictor.RecordSequence(previousKey, key)
|
|
}
|
|
|
|
// Lightweight popular content tracking - only for large files
|
|
if size > 1024*1024 { // Only track files > 1MB
|
|
pcm.cacheWarmer.RecordAccess(key, size)
|
|
}
|
|
|
|
// Skip expensive prediction checks on every access
|
|
// Only check occasionally to reduce overhead
|
|
}
|
|
|
|
// PredictNextAccess predicts the next likely file to be accessed
|
|
func (pcm *PredictiveCacheManager) PredictNextAccess(currentKey string) []string {
|
|
return pcm.accessPredictor.PredictNext(currentKey)
|
|
}
|
|
|
|
// RequestPrefetch requests prefetching of predicted content
|
|
func (pcm *PredictiveCacheManager) RequestPrefetch(key string, priority int, reason string) {
|
|
select {
|
|
case pcm.prefetchQueue <- PrefetchRequest{
|
|
Key: key,
|
|
Priority: priority,
|
|
Reason: reason,
|
|
RequestedAt: time.Now(),
|
|
}:
|
|
atomic.AddInt64(&pcm.stats.PrefetchRequests, 1)
|
|
default:
|
|
// Queue full, skip prefetch
|
|
}
|
|
}
|
|
|
|
// RecordSequence records an access sequence for prediction
|
|
func (ap *AccessPredictor) RecordSequence(previousKey, currentKey string) {
|
|
if previousKey == "" || currentKey == "" {
|
|
return
|
|
}
|
|
|
|
ap.mu.Lock()
|
|
defer ap.mu.Unlock()
|
|
|
|
seq, exists := ap.accessHistory[previousKey]
|
|
if !exists {
|
|
seq = &AccessSequence{
|
|
Key: previousKey,
|
|
NextKeys: []string{},
|
|
Frequency: make(map[string]int64),
|
|
LastSeen: time.Now(),
|
|
}
|
|
ap.accessHistory[previousKey] = seq
|
|
}
|
|
|
|
seq.mu.Lock()
|
|
seq.Frequency[currentKey]++
|
|
seq.LastSeen = time.Now()
|
|
|
|
// Update next keys list (keep top 5)
|
|
nextKeys := make([]string, 0, 5)
|
|
for key, _ := range seq.Frequency {
|
|
nextKeys = append(nextKeys, key)
|
|
if len(nextKeys) >= 5 {
|
|
break
|
|
}
|
|
}
|
|
seq.NextKeys = nextKeys
|
|
seq.mu.Unlock()
|
|
}
|
|
|
|
// PredictNext predicts the next likely files to be accessed
|
|
func (ap *AccessPredictor) PredictNext(currentKey string) []string {
|
|
ap.mu.RLock()
|
|
defer ap.mu.RUnlock()
|
|
|
|
seq, exists := ap.accessHistory[currentKey]
|
|
if !exists {
|
|
return []string{}
|
|
}
|
|
|
|
seq.mu.RLock()
|
|
defer seq.mu.RUnlock()
|
|
|
|
// Return top predicted keys
|
|
predictions := make([]string, len(seq.NextKeys))
|
|
copy(predictions, seq.NextKeys)
|
|
return predictions
|
|
}
|
|
|
|
// IsPredictedAccess checks if an access was predicted
|
|
func (ap *AccessPredictor) IsPredictedAccess(key string) bool {
|
|
ap.mu.RLock()
|
|
defer ap.mu.RUnlock()
|
|
|
|
// Check if this key appears in any prediction lists
|
|
for _, seq := range ap.accessHistory {
|
|
seq.mu.RLock()
|
|
for _, predictedKey := range seq.NextKeys {
|
|
if predictedKey == key {
|
|
seq.mu.RUnlock()
|
|
return true
|
|
}
|
|
}
|
|
seq.mu.RUnlock()
|
|
}
|
|
return false
|
|
}
|
|
|
|
// RecordAccess records a file access for cache warming (lightweight version)
|
|
func (cw *CacheWarmer) RecordAccess(key string, size int64) {
|
|
// Use read lock first for better performance
|
|
cw.mu.RLock()
|
|
content, exists := cw.popularContent[key]
|
|
cw.mu.RUnlock()
|
|
|
|
if !exists {
|
|
// Only acquire write lock when creating new entry
|
|
cw.mu.Lock()
|
|
// Double-check after acquiring write lock
|
|
if content, exists = cw.popularContent[key]; !exists {
|
|
content = &PopularContent{
|
|
Key: key,
|
|
AccessCount: 1,
|
|
LastAccess: time.Now(),
|
|
Size: size,
|
|
Priority: 1,
|
|
}
|
|
cw.popularContent[key] = content
|
|
}
|
|
cw.mu.Unlock()
|
|
} else {
|
|
// Lightweight update - just increment counter
|
|
content.AccessCount++
|
|
content.LastAccess = time.Now()
|
|
|
|
// Only update priority occasionally to reduce overhead
|
|
if content.AccessCount%5 == 0 {
|
|
if content.AccessCount > 10 {
|
|
content.Priority = 3
|
|
} else if content.AccessCount > 5 {
|
|
content.Priority = 2
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// GetPopularContent returns the most popular content for warming
|
|
func (cw *CacheWarmer) GetPopularContent(limit int) []*PopularContent {
|
|
cw.mu.RLock()
|
|
defer cw.mu.RUnlock()
|
|
|
|
// Sort by access count and return top items
|
|
popular := make([]*PopularContent, 0, len(cw.popularContent))
|
|
for _, content := range cw.popularContent {
|
|
popular = append(popular, content)
|
|
}
|
|
|
|
// Simple sort by access count (in production, use proper sorting)
|
|
// For now, just return the first 'limit' items
|
|
if len(popular) > limit {
|
|
popular = popular[:limit]
|
|
}
|
|
|
|
return popular
|
|
}
|
|
|
|
// RequestWarming requests warming of a specific key
|
|
func (cw *CacheWarmer) RequestWarming(key string, priority int, reason string, size int64) {
|
|
select {
|
|
case cw.warmerQueue <- WarmRequest{
|
|
Key: key,
|
|
Priority: priority,
|
|
Reason: reason,
|
|
Size: size,
|
|
RequestedAt: time.Now(),
|
|
Source: "predictive",
|
|
}:
|
|
// Successfully queued
|
|
default:
|
|
// Queue full, skip warming
|
|
}
|
|
}
|
|
|
|
// prefetchWorker processes prefetch requests
|
|
func (pcm *PredictiveCacheManager) prefetchWorker() {
|
|
defer pcm.wg.Done()
|
|
|
|
for {
|
|
select {
|
|
case <-pcm.ctx.Done():
|
|
return
|
|
case req := <-pcm.prefetchQueue:
|
|
// Process prefetch request
|
|
pcm.processPrefetchRequest(req)
|
|
}
|
|
}
|
|
}
|
|
|
|
// analysisWorker performs periodic analysis and cache warming
|
|
func (pcm *PredictiveCacheManager) analysisWorker() {
|
|
defer pcm.wg.Done()
|
|
|
|
ticker := time.NewTicker(30 * time.Second) // Analyze every 30 seconds
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
select {
|
|
case <-pcm.ctx.Done():
|
|
return
|
|
case <-ticker.C:
|
|
pcm.performAnalysis()
|
|
}
|
|
}
|
|
}
|
|
|
|
// processPrefetchRequest processes a prefetch request
|
|
func (pcm *PredictiveCacheManager) processPrefetchRequest(req PrefetchRequest) {
|
|
// In a real implementation, this would:
|
|
// 1. Check if content is already cached
|
|
// 2. If not, fetch and cache it
|
|
// 3. Update statistics
|
|
|
|
// For now, just log the prefetch request
|
|
// In production, integrate with the actual cache system
|
|
}
|
|
|
|
// performAnalysis performs periodic analysis and cache warming
|
|
func (pcm *PredictiveCacheManager) performAnalysis() {
|
|
// Get popular content for warming
|
|
popular := pcm.cacheWarmer.GetPopularContent(10)
|
|
|
|
// Request warming for popular content
|
|
for _, content := range popular {
|
|
if content.AccessCount > 5 { // Only warm frequently accessed content
|
|
select {
|
|
case pcm.cacheWarmer.warmerQueue <- WarmRequest{
|
|
Key: content.Key,
|
|
Priority: content.Priority,
|
|
Reason: "popular_content",
|
|
}:
|
|
default:
|
|
// Queue full, skip
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// GetStats returns predictive caching statistics
|
|
func (pcm *PredictiveCacheManager) GetStats() *PredictiveStats {
|
|
pcm.stats.mu.RLock()
|
|
defer pcm.stats.mu.RUnlock()
|
|
|
|
return &PredictiveStats{
|
|
PrefetchHits: atomic.LoadInt64(&pcm.stats.PrefetchHits),
|
|
PrefetchMisses: atomic.LoadInt64(&pcm.stats.PrefetchMisses),
|
|
PrefetchRequests: atomic.LoadInt64(&pcm.stats.PrefetchRequests),
|
|
CacheWarmHits: atomic.LoadInt64(&pcm.stats.CacheWarmHits),
|
|
CacheWarmMisses: atomic.LoadInt64(&pcm.stats.CacheWarmMisses),
|
|
}
|
|
}
|
|
|
|
// Stop stops the predictive cache manager
|
|
func (pcm *PredictiveCacheManager) Stop() {
|
|
pcm.cancel()
|
|
pcm.wg.Wait()
|
|
}
|