package warming

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"s1d3sw1ped/steamcache2/vfs"
)

// CacheWarmer implements intelligent cache warming strategies
type CacheWarmer struct {
	vfs            vfs.VFS
	warmingQueue   chan WarmRequest
	activeWarmers  map[string]*ActiveWarmer
	stats          *WarmingStats
	ctx            context.Context
	cancel         context.CancelFunc
	wg             sync.WaitGroup
	mu             sync.RWMutex
	maxConcurrent  int
	warmingEnabled bool
}

// WarmRequest represents a cache warming request
type WarmRequest struct {
	Key         string
	Priority    int
	Reason      string
	Size        int64
	RequestedAt time.Time
	Source      string // Where the warming request came from
}

// ActiveWarmer tracks an active warming operation
type ActiveWarmer struct {
	Key       string
	StartTime time.Time
	Priority  int
	Reason    string
	mu        sync.RWMutex
}

// WarmingStats tracks cache warming statistics
type WarmingStats struct {
	WarmRequests  int64
	WarmSuccesses int64
	WarmFailures  int64
	WarmBytes     int64
	WarmDuration  time.Duration
	ActiveWarmers int64
	mu            sync.RWMutex
}

// WarmingStrategy defines different warming strategies
type WarmingStrategy int

const (
	StrategyImmediate WarmingStrategy = iota
	StrategyBackground
	StrategyScheduled
	StrategyPredictive
)
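
// WarmingStrategy is not referenced elsewhere in this file. One way a caller
// might map a strategy onto a request priority (an illustrative sketch, not
// behavior defined by this package):
//
//	func priorityFor(s WarmingStrategy) int {
//		switch s {
//		case StrategyImmediate:
//			return 9
//		case StrategyPredictive:
//			return 6
//		case StrategyScheduled:
//			return 3
//		default: // StrategyBackground
//			return 1
//		}
//	}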

// NewCacheWarmer creates a new cache warmer
func NewCacheWarmer(vfs vfs.VFS, maxConcurrent int) *CacheWarmer {
	ctx, cancel := context.WithCancel(context.Background())

	cw := &CacheWarmer{
		vfs:            vfs,
		warmingQueue:   make(chan WarmRequest, 1000),
		activeWarmers:  make(map[string]*ActiveWarmer),
		stats:          &WarmingStats{},
		ctx:            ctx,
		cancel:         cancel,
		maxConcurrent:  maxConcurrent,
		warmingEnabled: true,
	}

	// Start warming workers
	for i := 0; i < maxConcurrent; i++ {
		cw.wg.Add(1)
		go cw.warmingWorker(i)
	}

	// Start cleanup worker
	cw.wg.Add(1)
	go cw.cleanupWorker()

	return cw
}
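
// Illustrative lifecycle (a sketch; the VFS value, key, and labels are
// placeholders chosen for the example, not values defined by this package):
//
//	cw := NewCacheWarmer(fs, 4) // fs is some vfs.VFS implementation
//	defer cw.Stop()
//
//	cw.RequestWarming("depot/100/chunk/abc", 8, "manual_prefetch", 1<<20, "example")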

// RequestWarming requests warming of content
func (cw *CacheWarmer) RequestWarming(key string, priority int, reason string, size int64, source string) {
	// Read warmingEnabled and the active-warmer set under the read lock so this
	// does not race with SetWarmingEnabled or the warming workers.
	cw.mu.RLock()
	if !cw.warmingEnabled {
		cw.mu.RUnlock()
		return
	}
	if _, exists := cw.activeWarmers[key]; exists {
		cw.mu.RUnlock()
		return // Already warming
	}
	cw.mu.RUnlock()

	// Check if already cached
	if _, err := cw.vfs.Stat(key); err == nil {
		return // Already cached
	}

	select {
	case cw.warmingQueue <- WarmRequest{
		Key:         key,
		Priority:    priority,
		Reason:      reason,
		Size:        size,
		RequestedAt: time.Now(),
		Source:      source,
	}:
		atomic.AddInt64(&cw.stats.WarmRequests, 1)
	default:
		// Queue full, skip warming
	}
}

// warmingWorker processes warming requests
func (cw *CacheWarmer) warmingWorker(workerID int) {
	defer cw.wg.Done()

	for {
		select {
		case <-cw.ctx.Done():
			return
		case req := <-cw.warmingQueue:
			cw.processWarmingRequest(req, workerID)
		}
	}
}

// processWarmingRequest processes a warming request
func (cw *CacheWarmer) processWarmingRequest(req WarmRequest, workerID int) {
	// Mark as active warmer
	cw.mu.Lock()
	cw.activeWarmers[req.Key] = &ActiveWarmer{
		Key:       req.Key,
		StartTime: time.Now(),
		Priority:  req.Priority,
		Reason:    req.Reason,
	}
	cw.mu.Unlock()

	atomic.AddInt64(&cw.stats.ActiveWarmers, 1)

	// Simulate warming process
	// In a real implementation, this would:
	// 1. Fetch content from upstream
	// 2. Store in cache
	// 3. Update statistics

	startTime := time.Now()

	// Simulate warming delay based on priority
	warmingDelay := time.Duration(100-req.Priority*10) * time.Millisecond
	if warmingDelay < 10*time.Millisecond {
		warmingDelay = 10 * time.Millisecond
	}

	select {
	case <-time.After(warmingDelay):
		// Warming completed successfully
		atomic.AddInt64(&cw.stats.WarmSuccesses, 1)
		atomic.AddInt64(&cw.stats.WarmBytes, req.Size)
	case <-cw.ctx.Done():
		// Context cancelled
		atomic.AddInt64(&cw.stats.WarmFailures, 1)
	}

	duration := time.Since(startTime)
	cw.stats.mu.Lock()
	cw.stats.WarmDuration += duration
	cw.stats.mu.Unlock()

	// Remove from active warmers
	cw.mu.Lock()
	delete(cw.activeWarmers, req.Key)
	cw.mu.Unlock()

	atomic.AddInt64(&cw.stats.ActiveWarmers, -1)
}
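
// The select above only simulates the transfer. A real warming step would
// fetch the object and write it through the VFS before updating the counters.
// A minimal sketch, assuming a hypothetical fetchFromUpstream helper and a
// hypothetical Create method on vfs.VFS (neither exists in this file):
//
//	func (cw *CacheWarmer) warmFromUpstream(ctx context.Context, req WarmRequest) error {
//		body, err := fetchFromUpstream(ctx, req.Key) // assumed to return an io.ReadCloser
//		if err != nil {
//			return err
//		}
//		defer body.Close()
//
//		dst, err := cw.vfs.Create(req.Key) // assumed VFS write API
//		if err != nil {
//			return err
//		}
//		defer dst.Close()
//
//		_, err = io.Copy(dst, body)
//		return err
//	}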

// cleanupWorker cleans up old warming requests
func (cw *CacheWarmer) cleanupWorker() {
	defer cw.wg.Done()

	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-cw.ctx.Done():
			return
		case <-ticker.C:
			cw.cleanupOldWarmers()
		}
	}
}

// cleanupOldWarmers removes old warming requests
func (cw *CacheWarmer) cleanupOldWarmers() {
	cw.mu.Lock()
	defer cw.mu.Unlock()

	now := time.Now()
	cutoff := now.Add(-5 * time.Minute) // Remove warmers older than 5 minutes

	for key, warmer := range cw.activeWarmers {
		warmer.mu.RLock()
		if warmer.StartTime.Before(cutoff) {
			warmer.mu.RUnlock()
			delete(cw.activeWarmers, key)
			atomic.AddInt64(&cw.stats.WarmFailures, 1)
		} else {
			warmer.mu.RUnlock()
		}
	}
}

// GetActiveWarmers returns currently active warming operations
func (cw *CacheWarmer) GetActiveWarmers() []*ActiveWarmer {
	cw.mu.RLock()
	defer cw.mu.RUnlock()

	warmers := make([]*ActiveWarmer, 0, len(cw.activeWarmers))
	for _, warmer := range cw.activeWarmers {
		warmers = append(warmers, warmer)
	}

	return warmers
}

// GetStats returns warming statistics
func (cw *CacheWarmer) GetStats() *WarmingStats {
	cw.stats.mu.RLock()
	defer cw.stats.mu.RUnlock()

	return &WarmingStats{
		WarmRequests:  atomic.LoadInt64(&cw.stats.WarmRequests),
		WarmSuccesses: atomic.LoadInt64(&cw.stats.WarmSuccesses),
		WarmFailures:  atomic.LoadInt64(&cw.stats.WarmFailures),
		WarmBytes:     atomic.LoadInt64(&cw.stats.WarmBytes),
		WarmDuration:  cw.stats.WarmDuration,
		ActiveWarmers: atomic.LoadInt64(&cw.stats.ActiveWarmers),
	}
}
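
// Illustrative read of a stats snapshot (the success-rate calculation is the
// caller's, not part of this package):
//
//	s := cw.GetStats()
//	if s.WarmRequests > 0 {
//		successRate := float64(s.WarmSuccesses) / float64(s.WarmRequests)
//		_ = successRate
//	}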

// SetWarmingEnabled enables or disables cache warming
func (cw *CacheWarmer) SetWarmingEnabled(enabled bool) {
	cw.mu.Lock()
	defer cw.mu.Unlock()
	cw.warmingEnabled = enabled
}

// IsWarmingEnabled returns whether warming is enabled
func (cw *CacheWarmer) IsWarmingEnabled() bool {
	cw.mu.RLock()
	defer cw.mu.RUnlock()
	return cw.warmingEnabled
}

// Stop stops the cache warmer
func (cw *CacheWarmer) Stop() {
	cw.cancel()
	cw.wg.Wait()
}

// WarmPopularContent warms popular content based on access patterns
func (cw *CacheWarmer) WarmPopularContent(popularKeys []string, priority int) {
	for _, key := range popularKeys {
		cw.RequestWarming(key, priority, "popular_content", 0, "popular_analyzer")
	}
}

// WarmPredictedContent warms predicted content
func (cw *CacheWarmer) WarmPredictedContent(predictedKeys []string, priority int) {
	for _, key := range predictedKeys {
		cw.RequestWarming(key, priority, "predicted_access", 0, "predictor")
	}
}

// WarmSequentialContent warms content in sequential order
func (cw *CacheWarmer) WarmSequentialContent(sequentialKeys []string, priority int) {
	for i, key := range sequentialKeys {
		// Stagger warming requests to avoid overwhelming the system
		go func(k string, delay time.Duration) {
			time.Sleep(delay)
			cw.RequestWarming(k, priority, "sequential_access", 0, "sequential_analyzer")
		}(key, time.Duration(i)*100*time.Millisecond)
	}
}
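
// Illustrative wiring of the helpers above (the key lists are placeholders
// that would normally come from access-pattern analysis elsewhere):
//
//	cw.WarmPopularContent([]string{"depot/100/chunk/aa"}, 8)
//	cw.WarmPredictedContent([]string{"depot/100/chunk/bb"}, 5)
//	cw.WarmSequentialContent([]string{"depot/100/chunk/cc", "depot/100/chunk/dd"}, 3)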