Refactor caching logic and enhance hash generation in steamcache
- Replaced SHA1 hash calculations with SHA256 for improved security and consistency in cache key generation.
- Introduced a new TestURLHashing function to validate the new cache key generation logic.
- Removed outdated hash calculation tests and streamlined the caching process to focus on URL-based hashing.
- Implemented lightweight validation methods in ServeHTTP to enhance performance and reliability of cached responses.
- Added batched time updates in VFS implementations for better performance during access time tracking.
This commit is contained in:
@@ -7,8 +7,10 @@ import (
|
||||
"io"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Ensure MemoryFS implements VFS.
|
||||
@@ -16,15 +18,19 @@ var _ vfs.VFS = (*MemoryFS)(nil)
|
||||
|
||||
// MemoryFS is an in-memory virtual file system
|
||||
type MemoryFS struct {
|
||||
data map[string]*bytes.Buffer
|
||||
info map[string]*vfs.FileInfo
|
||||
capacity int64
|
||||
size int64
|
||||
mu sync.RWMutex
|
||||
keyLocks sync.Map // map[string]*sync.RWMutex
|
||||
LRU *lruList
|
||||
data map[string]*bytes.Buffer
|
||||
info map[string]*vfs.FileInfo
|
||||
capacity int64
|
||||
size int64
|
||||
mu sync.RWMutex
|
||||
keyLocks []sync.Map // Sharded lock pools for better concurrency
|
||||
LRU *lruList
|
||||
timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
|
||||
}
|
||||
|
||||
// Number of lock shards for reducing contention
|
||||
const numLockShards = 32
|
||||
|
||||
// lruList for time-decayed LRU eviction
|
||||
type lruList struct {
|
||||
list *list.List
|
||||
@@ -43,12 +49,12 @@ func (l *lruList) Add(key string, fi *vfs.FileInfo) {
|
||||
l.elem[key] = elem
|
||||
}
|
||||
|
||||
func (l *lruList) MoveToFront(key string) {
|
||||
func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
|
||||
if elem, exists := l.elem[key]; exists {
|
||||
l.list.MoveToFront(elem)
|
||||
// Update the FileInfo in the element with new access time
|
||||
if fi := elem.Value.(*vfs.FileInfo); fi != nil {
|
||||
fi.UpdateAccess()
|
||||
fi.UpdateAccessBatched(timeUpdater)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -73,12 +79,17 @@ func New(capacity int64) *MemoryFS {
|
||||
panic("memory capacity must be greater than 0")
|
||||
}
|
||||
|
||||
// Initialize sharded locks
|
||||
keyLocks := make([]sync.Map, numLockShards)
|
||||
|
||||
return &MemoryFS{
|
||||
data: make(map[string]*bytes.Buffer),
|
||||
info: make(map[string]*vfs.FileInfo),
|
||||
capacity: capacity,
|
||||
size: 0,
|
||||
LRU: newLruList(),
|
||||
data: make(map[string]*bytes.Buffer),
|
||||
info: make(map[string]*vfs.FileInfo),
|
||||
capacity: capacity,
|
||||
size: 0,
|
||||
keyLocks: keyLocks,
|
||||
LRU: newLruList(),
|
||||
timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,9 +110,23 @@ func (m *MemoryFS) Capacity() int64 {
|
||||
return m.capacity
|
||||
}
|
||||
|
||||
// getKeyLock returns a lock for the given key
|
||||
// getShardIndex returns the shard index for a given key
|
||||
func getShardIndex(key string) int {
|
||||
// Use FNV-1a hash for good distribution
|
||||
var h uint32 = 2166136261 // FNV offset basis
|
||||
for i := 0; i < len(key); i++ {
|
||||
h ^= uint32(key[i])
|
||||
h *= 16777619 // FNV prime
|
||||
}
|
||||
return int(h % numLockShards)
|
||||
}
|
||||
|
||||
// getKeyLock returns a lock for the given key using sharding
|
||||
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
|
||||
keyLock, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
|
||||
shardIndex := getShardIndex(key)
|
||||
shard := &m.keyLocks[shardIndex]
|
||||
|
||||
keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
|
||||
return keyLock.(*sync.RWMutex)
|
||||
}
|
||||
|
||||
@@ -137,6 +162,8 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
fi := vfs.NewFileInfo(key, size)
|
||||
m.info[key] = fi
|
||||
m.LRU.Add(key, fi)
|
||||
// Initialize access time with current time
|
||||
fi.UpdateAccessBatched(m.timeUpdater)
|
||||
m.size += size
|
||||
m.mu.Unlock()
|
||||
|
||||
@@ -194,8 +221,8 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
|
||||
m.mu.Unlock()
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
fi.UpdateAccess()
|
||||
m.LRU.MoveToFront(key)
|
||||
fi.UpdateAccessBatched(m.timeUpdater)
|
||||
m.LRU.MoveToFront(key, m.timeUpdater)
|
||||
|
||||
buffer, exists := m.data[key]
|
||||
if !exists {
|
||||
@@ -284,3 +311,133 @@ func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
// EvictLRU evicts the least recently used files to free up space
|
||||
func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
|
||||
// Evict from LRU list until we free enough space
|
||||
for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
|
||||
// Get the least recently used item
|
||||
elem := m.LRU.list.Back()
|
||||
if elem == nil {
|
||||
break
|
||||
}
|
||||
|
||||
fi := elem.Value.(*vfs.FileInfo)
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
m.LRU.Remove(key)
|
||||
|
||||
// Remove from maps
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
|
||||
// Update size
|
||||
m.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
m.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
|
||||
func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
var candidates []*vfs.FileInfo
|
||||
|
||||
// Collect all files
|
||||
for _, fi := range m.info {
|
||||
candidates = append(candidates, fi)
|
||||
}
|
||||
|
||||
// Sort by size
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
if ascending {
|
||||
return candidates[i].Size < candidates[j].Size
|
||||
}
|
||||
return candidates[i].Size > candidates[j].Size
|
||||
})
|
||||
|
||||
// Evict files until we free enough space
|
||||
for _, fi := range candidates {
|
||||
if m.size <= m.capacity-int64(bytesNeeded) {
|
||||
break
|
||||
}
|
||||
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
m.LRU.Remove(key)
|
||||
|
||||
// Remove from maps
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
|
||||
// Update size
|
||||
m.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
m.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
// EvictFIFO evicts files using FIFO (oldest creation time first)
|
||||
func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
var candidates []*vfs.FileInfo
|
||||
|
||||
// Collect all files
|
||||
for _, fi := range m.info {
|
||||
candidates = append(candidates, fi)
|
||||
}
|
||||
|
||||
// Sort by creation time (oldest first)
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
return candidates[i].CTime.Before(candidates[j].CTime)
|
||||
})
|
||||
|
||||
// Evict oldest files until we free enough space
|
||||
for _, fi := range candidates {
|
||||
if m.size <= m.capacity-int64(bytesNeeded) {
|
||||
break
|
||||
}
|
||||
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
m.LRU.Remove(key)
|
||||
|
||||
// Remove from maps
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
|
||||
// Update size
|
||||
m.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
m.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user