Refactor caching and memory management components
All checks were successful
Release Tag / release (push) Successful in 9s
All checks were successful
Release Tag / release (push) Successful in 9s
- Updated the caching logic to utilize a predictive cache warmer, enhancing content prefetching based on access patterns.
- Replaced the legacy warming system with a more efficient predictive approach, allowing for better performance and resource management.
- Refactored memory management to integrate dynamic cache size adjustments based on system memory usage, improving overall efficiency.
- Simplified the VFS interface and improved concurrency handling with sharded locks for better performance in multi-threaded environments.
- Enhanced tests to validate the new caching and memory management behaviors, ensuring reliability and performance improvements.
This commit is contained in:
@@ -21,7 +21,6 @@ import (
|
||||
"s1d3sw1ped/steamcache2/vfs/gc"
|
||||
"s1d3sw1ped/steamcache2/vfs/memory"
|
||||
"s1d3sw1ped/steamcache2/vfs/predictive"
|
||||
"s1d3sw1ped/steamcache2/vfs/warming"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -781,14 +780,14 @@ type SteamCache struct {
|
||||
// Adaptive and predictive caching
|
||||
adaptiveManager *adaptive.AdaptiveCacheManager
|
||||
predictiveManager *predictive.PredictiveCacheManager
|
||||
cacheWarmer *warming.CacheWarmer
|
||||
cacheWarmer *predictive.CacheWarmer
|
||||
lastAccessKey string // Track last accessed key for sequence analysis
|
||||
lastAccessKeyMu sync.RWMutex
|
||||
adaptiveEnabled bool // Flag to enable/disable adaptive features
|
||||
|
||||
// Dynamic memory management
|
||||
memoryMonitor *memory.MemoryMonitor
|
||||
dynamicCacheMgr *memory.DynamicCacheManager
|
||||
dynamicCacheMgr *memory.MemoryMonitor
|
||||
}
|
||||
|
||||
func New(address string, memorySize string, diskSize string, diskPath, upstream, memoryGC, diskGC string, maxConcurrentRequests int64, maxRequestsPerClient int64) *SteamCache {
|
||||
@@ -925,8 +924,8 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
|
||||
// Initialize adaptive and predictive caching (lightweight)
|
||||
adaptiveManager: adaptive.NewAdaptiveCacheManager(5 * time.Minute), // Much longer interval
|
||||
predictiveManager: predictive.NewPredictiveCacheManager(),
|
||||
cacheWarmer: warming.NewCacheWarmer(c, 2), // Reduced to 2 concurrent warmers
|
||||
adaptiveEnabled: true, // Enable by default but can be disabled
|
||||
cacheWarmer: predictive.NewCacheWarmer(), // Use predictive cache warmer
|
||||
adaptiveEnabled: true, // Enable by default but can be disabled
|
||||
|
||||
// Initialize dynamic memory management
|
||||
memoryMonitor: memory.NewMemoryMonitor(uint64(memorysize), 10*time.Second, 0.1), // 10% threshold
|
||||
@@ -935,7 +934,7 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
|
||||
|
||||
// Initialize dynamic cache manager if we have memory cache
|
||||
if m != nil && sc.memoryMonitor != nil {
|
||||
sc.dynamicCacheMgr = memory.NewDynamicCacheManager(mgc, uint64(memorysize), sc.memoryMonitor)
|
||||
sc.dynamicCacheMgr = memory.NewMemoryMonitorWithCache(uint64(memorysize), 10*time.Second, 0.1, mgc, uint64(memorysize))
|
||||
sc.dynamicCacheMgr.Start()
|
||||
sc.memoryMonitor.Start()
|
||||
}
|
||||
@@ -1535,6 +1534,6 @@ func (sc *SteamCache) recordCacheMiss(key string, size int64) {
|
||||
|
||||
// Only trigger warming for very large files to reduce overhead
|
||||
if size > 10*1024*1024 { // Only warm files > 10MB
|
||||
sc.cacheWarmer.RequestWarming(key, 3, "cache_miss", size, "cache_miss_analyzer")
|
||||
sc.cacheWarmer.RequestWarming(key, 3, "cache_miss", size)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,20 +3,25 @@ package steamcache
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestCaching(t *testing.T) {
|
||||
td := t.TempDir()
|
||||
|
||||
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
||||
|
||||
sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru", 200, 5)
|
||||
|
||||
w, err := sc.vfs.Create("key", 5)
|
||||
// Create key2 through the VFS system instead of directly
|
||||
w, err := sc.vfs.Create("key2", 6)
|
||||
if err != nil {
|
||||
t.Errorf("Create key2 failed: %v", err)
|
||||
}
|
||||
w.Write([]byte("value2"))
|
||||
w.Close()
|
||||
|
||||
w, err = sc.vfs.Create("key", 5)
|
||||
if err != nil {
|
||||
t.Errorf("Create failed: %v", err)
|
||||
}
|
||||
@@ -82,9 +87,18 @@ func TestCaching(t *testing.T) {
|
||||
t.Errorf("Total size too large: got %d, want at most 34", sc.vfs.Size())
|
||||
}
|
||||
|
||||
// First ensure the file is indexed by opening it
|
||||
rc, err = sc.vfs.Open("key2")
|
||||
if err != nil {
|
||||
t.Errorf("Open key2 failed: %v", err)
|
||||
}
|
||||
rc.Close()
|
||||
|
||||
// Give promotion goroutine time to complete before deleting
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
sc.memory.Delete("key2")
|
||||
sc.disk.Delete("key2") // Also delete from disk cache
|
||||
os.Remove(filepath.Join(td, "key2"))
|
||||
|
||||
if _, err := sc.vfs.Open("key2"); err == nil {
|
||||
t.Errorf("Open failed: got nil, want error")
|
||||
|
||||
Reference in New Issue
Block a user