All checks were successful
Release Tag / release (push) Successful in 9s
- Updated the caching logic to use a predictive cache warmer that prefetches content based on observed access patterns.
- Replaced the legacy warming system with the more efficient predictive approach for better performance and resource management.
- Refactored memory management to adjust cache size dynamically based on system memory usage.
- Simplified the VFS interface and improved concurrency handling with sharded per-key locks for better performance in multi-threaded environments.
- Enhanced tests to validate the new caching and memory management behaviors (an illustrative test sketch follows the source listing below).
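For orientation, here is a minimal usage sketch of the in-memory VFS defined in the file below. It is not part of the repository: it assumes only the API declared in memory.go, and the key name and sizes are made up.

// usage_sketch.go - illustrative only, not part of the repository
package main

import (
	"io"
	"os"

	"s1d3sw1ped/steamcache2/vfs/memory"
)

func main() {
	fs := memory.New(64 << 20) // 64 MiB in-memory cache

	// Write an object under a key; Close records the actual size.
	w, err := fs.Create("depot/1234/chunk0", 0)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("chunk payload"))
	w.Close()

	// Read it back; Open also bumps the entry to the front of the LRU.
	r, err := fs.Open("depot/1234/chunk0")
	if err != nil {
		panic(err)
	}
	io.Copy(os.Stdout, r)
	r.Close()

	// Explicitly ask for ~1 MiB of headroom (normally driven by the cache layer).
	fs.EvictLRU(1 << 20)
}

Note that the size passed to Create is only an initial estimate; memoryWriteCloser.Close reconciles the accounted size with the bytes actually written.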
431 lines
9.0 KiB
Go
// vfs/memory/memory.go

package memory

import (
	"bytes"
	"io"
	"s1d3sw1ped/steamcache2/vfs"
	"s1d3sw1ped/steamcache2/vfs/locks"
	"s1d3sw1ped/steamcache2/vfs/lru"
	"s1d3sw1ped/steamcache2/vfs/types"
	"s1d3sw1ped/steamcache2/vfs/vfserror"
	"sort"
	"strings"
	"sync"
	"time"
)

// Ensure MemoryFS implements VFS.
var _ vfs.VFS = (*MemoryFS)(nil)

// MemoryFS is an in-memory virtual file system.
type MemoryFS struct {
	data        map[string]*bytes.Buffer
	info        map[string]*types.FileInfo
	capacity    int64
	size        int64
	mu          sync.RWMutex
	keyLocks    []sync.Map // Sharded lock pools for better concurrency
	LRU         *lru.LRUList[*types.FileInfo]
	timeUpdater *types.BatchedTimeUpdate // Batched time updates for better performance
}

// New creates a new MemoryFS.
func New(capacity int64) *MemoryFS {
	if capacity <= 0 {
		panic("memory capacity must be greater than 0")
	}

	// Initialize sharded locks
	keyLocks := make([]sync.Map, locks.NumLockShards)

	return &MemoryFS{
		data:        make(map[string]*bytes.Buffer),
		info:        make(map[string]*types.FileInfo),
		capacity:    capacity,
		size:        0,
		keyLocks:    keyLocks,
		LRU:         lru.NewLRUList[*types.FileInfo](),
		timeUpdater: types.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
	}
}

// Name returns the name of this VFS.
func (m *MemoryFS) Name() string {
	return "MemoryFS"
}

// Size returns the current size.
func (m *MemoryFS) Size() int64 {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.size
}

// Capacity returns the maximum capacity.
func (m *MemoryFS) Capacity() int64 {
	return m.capacity
}

// GetFragmentationStats returns memory fragmentation statistics.
func (m *MemoryFS) GetFragmentationStats() map[string]interface{} {
	m.mu.RLock()
	defer m.mu.RUnlock()

	var totalCapacity int64
	var totalUsed int64
	var bufferCount int

	for _, buffer := range m.data {
		totalCapacity += int64(buffer.Cap())
		totalUsed += int64(buffer.Len())
		bufferCount++
	}

	fragmentationRatio := float64(0)
	if totalCapacity > 0 {
		fragmentationRatio = float64(totalCapacity-totalUsed) / float64(totalCapacity)
	}

	// Guard against division by zero when the cache is empty.
	averageBufferSize := float64(0)
	if bufferCount > 0 {
		averageBufferSize = float64(totalUsed) / float64(bufferCount)
	}

	return map[string]interface{}{
		"buffer_count":        bufferCount,
		"total_capacity":      totalCapacity,
		"total_used":          totalUsed,
		"fragmentation_ratio": fragmentationRatio,
		"average_buffer_size": averageBufferSize,
	}
}

// getKeyLock returns a lock for the given key using sharding.
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
	return locks.GetKeyLock(m.keyLocks, key)
}

// Create creates a new file.
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := m.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	m.mu.Lock()
	// Check if file already exists and handle overwrite
	if fi, exists := m.info[key]; exists {
		m.size -= fi.Size
		m.LRU.Remove(key)
		delete(m.info, key)
		delete(m.data, key)
	}

	buffer := &bytes.Buffer{}
	m.data[key] = buffer
	fi := types.NewFileInfo(key, size)
	m.info[key] = fi
	m.LRU.Add(key, fi)
	// Initialize access time with current time
	fi.UpdateAccessBatched(m.timeUpdater)
	m.size += size
	m.mu.Unlock()

	return &memoryWriteCloser{
		buffer: buffer,
		memory: m,
		key:    key,
	}, nil
}

// memoryWriteCloser implements io.WriteCloser for memory files.
type memoryWriteCloser struct {
	buffer *bytes.Buffer
	memory *MemoryFS
	key    string
}

func (mwc *memoryWriteCloser) Write(p []byte) (n int, err error) {
	return mwc.buffer.Write(p)
}

func (mwc *memoryWriteCloser) Close() error {
	// Update the actual size in FileInfo
	mwc.memory.mu.Lock()
	if fi, exists := mwc.memory.info[mwc.key]; exists {
		actualSize := int64(mwc.buffer.Len())
		sizeDiff := actualSize - fi.Size
		fi.Size = actualSize
		mwc.memory.size += sizeDiff
	}
	mwc.memory.mu.Unlock()
	return nil
}

// Open opens a file for reading.
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := m.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	m.mu.Lock()
	fi, exists := m.info[key]
	if !exists {
		m.mu.Unlock()
		return nil, vfserror.ErrNotFound
	}
	fi.UpdateAccessBatched(m.timeUpdater)
	m.LRU.MoveToFront(key, m.timeUpdater)

	buffer, exists := m.data[key]
	if !exists {
		m.mu.Unlock()
		return nil, vfserror.ErrNotFound
	}

	// Use zero-copy approach - return reader that reads directly from buffer
	m.mu.Unlock()

	return &memoryReadCloser{
		buffer: buffer,
		offset: 0,
	}, nil
}

// memoryReadCloser implements io.ReadCloser for memory files with zero-copy optimization.
type memoryReadCloser struct {
	buffer *bytes.Buffer
	offset int64
}

func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) {
	if mrc.offset >= int64(mrc.buffer.Len()) {
		return 0, io.EOF
	}

	// Zero-copy read directly from buffer
	available := mrc.buffer.Len() - int(mrc.offset)
	toRead := len(p)
	if toRead > available {
		toRead = available
	}

	// Read directly from buffer without copying
	data := mrc.buffer.Bytes()
	copy(p, data[mrc.offset:mrc.offset+int64(toRead)])
	mrc.offset += int64(toRead)

	return toRead, nil
}

func (mrc *memoryReadCloser) Close() error {
	return nil
}

// Delete removes a file.
func (m *MemoryFS) Delete(key string) error {
	if key == "" {
		return vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return vfserror.ErrInvalidKey
	}

	if strings.Contains(key, "..") {
		return vfserror.ErrInvalidKey
	}

	keyMu := m.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	m.mu.Lock()
	fi, exists := m.info[key]
	if !exists {
		m.mu.Unlock()
		return vfserror.ErrNotFound
	}
	m.size -= fi.Size
	m.LRU.Remove(key)
	delete(m.info, key)
	delete(m.data, key)
	m.mu.Unlock()

	return nil
}

// Stat returns file information.
func (m *MemoryFS) Stat(key string) (*types.FileInfo, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := m.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if fi, ok := m.info[key]; ok {
		return fi, nil
	}

	return nil, vfserror.ErrNotFound
}

// EvictLRU evicts the least recently used files to free up space.
func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
	m.mu.Lock()
	defer m.mu.Unlock()

	var evicted uint

	// Evict from LRU list until we free enough space
	for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
		// Get the least recently used item
		elem := m.LRU.Back()
		if elem == nil {
			break
		}

		fi := elem.Value.(*types.FileInfo)
		key := fi.Key

		// Remove from LRU
		m.LRU.Remove(key)

		// Remove from maps
		delete(m.info, key)
		delete(m.data, key)

		// Update size
		m.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		m.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

// EvictBySize evicts files by size (ascending = smallest first, descending = largest first).
func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
	m.mu.Lock()
	defer m.mu.Unlock()

	var evicted uint
	var candidates []*types.FileInfo

	// Collect all files
	for _, fi := range m.info {
		candidates = append(candidates, fi)
	}

	// Sort by size
	sort.Slice(candidates, func(i, j int) bool {
		if ascending {
			return candidates[i].Size < candidates[j].Size
		}
		return candidates[i].Size > candidates[j].Size
	})

	// Evict files until we free enough space
	for _, fi := range candidates {
		if m.size <= m.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		m.LRU.Remove(key)

		// Remove from maps
		delete(m.info, key)
		delete(m.data, key)

		// Update size
		m.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		m.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

// EvictFIFO evicts files using FIFO (oldest creation time first).
func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
	m.mu.Lock()
	defer m.mu.Unlock()

	var evicted uint
	var candidates []*types.FileInfo

	// Collect all files
	for _, fi := range m.info {
		candidates = append(candidates, fi)
	}

	// Sort by creation time (oldest first)
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].CTime.Before(candidates[j].CTime)
	})

	// Evict oldest files until we free enough space
	for _, fi := range candidates {
		if m.size <= m.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		m.LRU.Remove(key)

		// Remove from maps
		delete(m.info, key)
		delete(m.data, key)

		// Update size
		m.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		m.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}
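The last bullet of the commit message mentions enhanced tests for the new caching and memory-management behaviors. Those tests are not shown on this page; the following is only a hedged sketch, assuming a conventional memory_test.go in the same package, of how the EvictLRU path above could be exercised.

// memory_test.go (illustrative sketch; file name and layout are assumptions)
package memory

import "testing"

func TestEvictLRUFreesRequestedSpace(t *testing.T) {
	fs := New(1024) // 1 KiB capacity for the test

	// Fill the cache with four 256-byte entries.
	for _, key := range []string{"a", "b", "c", "d"} {
		w, err := fs.Create(key, 256)
		if err != nil {
			t.Fatalf("Create(%q): %v", key, err)
		}
		w.Write(make([]byte, 256))
		w.Close()
	}

	// Request 512 bytes of headroom; the two least recently used
	// entries should be dropped.
	freed := fs.EvictLRU(512)
	if freed < 512 {
		t.Fatalf("EvictLRU freed %d bytes, want at least 512", freed)
	}
	if fs.Size() > fs.Capacity()-512 {
		t.Fatalf("size %d leaves less than 512 bytes free", fs.Size())
	}
}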