Refactor caching and memory management components
All checks were successful
Release Tag / release (push) Successful in 9s
- Updated the caching logic to use a predictive cache warmer that prefetches content based on observed access patterns.
- Replaced the legacy warming system with the more efficient predictive approach, improving performance and resource management.
- Refactored memory management to adjust cache sizes dynamically based on system memory usage.
- Simplified the VFS interface and improved concurrency handling with sharded locks for multi-threaded workloads (see the sketch after this list).
- Extended the tests to validate the new caching and memory management behavior.
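The sharded-lock change to the VFS is not part of the hunks shown below. As a rough sketch only, one common way to shard per-key locking across a fixed pool of mutexes looks like the following; the names (shardedLocks, shardFor, shardCount) are illustrative and not taken from this commit:

package vfs

import (
	"hash/fnv"
	"sync"
)

// shardCount is the fixed number of lock shards; a power of two keeps the
// modulo cheap and spreads hashed keys evenly.
const shardCount = 64

// shardedLocks replaces a single global mutex with a pool of RWMutexes so
// that operations on unrelated keys do not contend with each other.
type shardedLocks struct {
	shards [shardCount]sync.RWMutex
}

// shardFor hashes the key and returns the mutex responsible for it.
func (s *shardedLocks) shardFor(key string) *sync.RWMutex {
	h := fnv.New32a()
	h.Write([]byte(key)) // hash.Hash32 writes never fail
	return &s.shards[h.Sum32()%shardCount]
}

// RLockKey and RUnlockKey guard read access to a single key's shard.
func (s *shardedLocks) RLockKey(key string)   { s.shardFor(key).RLock() }
func (s *shardedLocks) RUnlockKey(key string) { s.shardFor(key).RUnlock() }

// LockKey and UnlockKey guard write access to a single key's shard.
func (s *shardedLocks) LockKey(key string)   { s.shardFor(key).Lock() }
func (s *shardedLocks) UnlockKey(key string) { s.shardFor(key).Unlock() }

The actual shard count and locking granularity used by the VFS in this commit may differ.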
@@ -70,9 +70,35 @@ type PopularContent struct {
 
 // WarmRequest represents a cache warming request
 type WarmRequest struct {
-	Key      string
-	Priority int
-	Reason   string
+	Key         string
+	Priority    int
+	Reason      string
+	Size        int64
+	RequestedAt time.Time
+	Source      string // Where the warming request came from
 }
 
+// ActiveWarmer tracks an active warming operation
+type ActiveWarmer struct {
+	Key       string
+	StartTime time.Time
+	Priority  int
+	Reason    string
+	mu        sync.RWMutex
+}
+
+// WarmingStats tracks cache warming statistics
+type WarmingStats struct {
+	WarmRequests      int64
+	WarmSuccesses     int64
+	WarmFailures      int64
+	WarmBytes         int64
+	WarmDuration      time.Duration
+	PrefetchRequests  int64
+	PrefetchSuccesses int64
+	PrefetchFailures  int64
+	PrefetchBytes     int64
+	PrefetchDuration  time.Duration
+}
+
 // NewPredictiveCacheManager creates a new predictive cache manager
@@ -114,6 +140,21 @@ func NewCacheWarmer() *CacheWarmer {
 	}
 }
 
+// NewWarmingStats creates a new warming stats tracker
+func NewWarmingStats() *WarmingStats {
+	return &WarmingStats{}
+}
+
+// NewActiveWarmer creates a new active warmer tracker
+func NewActiveWarmer(key string, priority int, reason string) *ActiveWarmer {
+	return &ActiveWarmer{
+		Key:       key,
+		StartTime: time.Now(),
+		Priority:  priority,
+		Reason:    reason,
+	}
+}
+
 // RecordAccess records a file access for prediction analysis (lightweight version)
 func (pcm *PredictiveCacheManager) RecordAccess(key string, previousKey string, size int64) {
 	// Only record if we have a previous key to avoid overhead
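Usage note (illustrative, not part of the diff): RecordAccess is meant to be called on the read path with the key just served and the key served before it, so the predictor can learn sequential access patterns. The session type below is hypothetical and exists only to show a plausible call site:

// session is a hypothetical caller-side type holding the last key it served.
type session struct {
	lastKey string
}

// afterRead feeds the access into the predictive layer; per the comment in
// RecordAccess above, accesses with an empty previous key are skipped.
func (s *session) afterRead(pcm *PredictiveCacheManager, key string, size int64) {
	pcm.RecordAccess(key, s.lastKey, size)
	s.lastKey = key
}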
@@ -282,6 +323,23 @@ func (cw *CacheWarmer) GetPopularContent(limit int) []*PopularContent {
 	return popular
 }
 
+// RequestWarming requests warming of a specific key
+func (cw *CacheWarmer) RequestWarming(key string, priority int, reason string, size int64) {
+	select {
+	case cw.warmerQueue <- WarmRequest{
+		Key:         key,
+		Priority:    priority,
+		Reason:      reason,
+		Size:        size,
+		RequestedAt: time.Now(),
+		Source:      "predictive",
+	}:
+		// Successfully queued
+	default:
+		// Queue full, skip warming
+	}
+}
+
 // prefetchWorker processes prefetch requests
 func (pcm *PredictiveCacheManager) prefetchWorker() {
 	defer pcm.wg.Done()
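Usage note (illustrative, not part of the diff): because the enqueue is a non-blocking select with a default case, callers can fire and forget warming requests without risking a stall. The key, priority, and size values below are made up:

// warmPopular asks the warmer to prefetch one known-hot key. If the internal
// warmerQueue is full, the select in RequestWarming falls through to default
// and the request is silently dropped.
func warmPopular(cw *CacheWarmer) {
	cw.RequestWarming("objects/ab/cd1234.blob", 5, "popular", 64<<10)
}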