// vfs/memory/memory.go package memory import ( "bytes" "container/list" "io" "s1d3sw1ped/steamcache2/vfs/types" "s1d3sw1ped/steamcache2/vfs/vfserror" "sort" "strings" "sync" "time" ) // VFS defines the interface for virtual file systems type VFS interface { // Create creates a new file at the given key Create(key string, size int64) (io.WriteCloser, error) // Open opens the file at the given key for reading Open(key string) (io.ReadCloser, error) // Delete removes the file at the given key Delete(key string) error // Stat returns information about the file at the given key Stat(key string) (*types.FileInfo, error) // Name returns the name of this VFS Name() string // Size returns the current size of the VFS Size() int64 // Capacity returns the maximum capacity of the VFS Capacity() int64 } // Ensure MemoryFS implements VFS. var _ VFS = (*MemoryFS)(nil) // MemoryFS is an in-memory virtual file system type MemoryFS struct { data map[string]*bytes.Buffer info map[string]*types.FileInfo capacity int64 size int64 mu sync.RWMutex keyLocks []sync.Map // Sharded lock pools for better concurrency LRU *lruList timeUpdater *types.BatchedTimeUpdate // Batched time updates for better performance } // Number of lock shards for reducing contention const numLockShards = 32 // lruList for time-decayed LRU eviction type lruList struct { list *list.List elem map[string]*list.Element } func newLruList() *lruList { return &lruList{ list: list.New(), elem: make(map[string]*list.Element), } } func (l *lruList) Add(key string, fi *types.FileInfo) { elem := l.list.PushFront(fi) l.elem[key] = elem } func (l *lruList) MoveToFront(key string, timeUpdater *types.BatchedTimeUpdate) { if elem, exists := l.elem[key]; exists { l.list.MoveToFront(elem) // Update the FileInfo in the element with new access time if fi := elem.Value.(*types.FileInfo); fi != nil { fi.UpdateAccessBatched(timeUpdater) } } } func (l *lruList) Remove(key string) *types.FileInfo { if elem, exists := l.elem[key]; exists { delete(l.elem, key) if fi := l.list.Remove(elem).(*types.FileInfo); fi != nil { return fi } } return nil } func (l *lruList) Len() int { return l.list.Len() } // New creates a new MemoryFS func New(capacity int64) *MemoryFS { if capacity <= 0 { panic("memory capacity must be greater than 0") } // Initialize sharded locks keyLocks := make([]sync.Map, numLockShards) return &MemoryFS{ data: make(map[string]*bytes.Buffer), info: make(map[string]*types.FileInfo), capacity: capacity, size: 0, keyLocks: keyLocks, LRU: newLruList(), timeUpdater: types.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms } } // Name returns the name of this VFS func (m *MemoryFS) Name() string { return "MemoryFS" } // Size returns the current size func (m *MemoryFS) Size() int64 { m.mu.RLock() defer m.mu.RUnlock() return m.size } // Capacity returns the maximum capacity func (m *MemoryFS) Capacity() int64 { return m.capacity } // GetFragmentationStats returns memory fragmentation statistics func (m *MemoryFS) GetFragmentationStats() map[string]interface{} { m.mu.RLock() defer m.mu.RUnlock() var totalCapacity int64 var totalUsed int64 var bufferCount int for _, buffer := range m.data { totalCapacity += int64(buffer.Cap()) totalUsed += int64(buffer.Len()) bufferCount++ } fragmentationRatio := float64(0) if totalCapacity > 0 { fragmentationRatio = float64(totalCapacity-totalUsed) / float64(totalCapacity) } return map[string]interface{}{ "buffer_count": bufferCount, "total_capacity": totalCapacity, "total_used": totalUsed, "fragmentation_ratio": fragmentationRatio, "average_buffer_size": float64(totalUsed) / float64(bufferCount), } } // getShardIndex returns the shard index for a given key func getShardIndex(key string) int { // Use FNV-1a hash for good distribution var h uint32 = 2166136261 // FNV offset basis for i := 0; i < len(key); i++ { h ^= uint32(key[i]) h *= 16777619 // FNV prime } return int(h % numLockShards) } // getKeyLock returns a lock for the given key using sharding func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex { shardIndex := getShardIndex(key) shard := &m.keyLocks[shardIndex] keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{}) return keyLock.(*sync.RWMutex) } // Create creates a new file func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) { if key == "" { return nil, vfserror.ErrInvalidKey } if key[0] == '/' { return nil, vfserror.ErrInvalidKey } // Sanitize key to prevent path traversal if strings.Contains(key, "..") { return nil, vfserror.ErrInvalidKey } keyMu := m.getKeyLock(key) keyMu.Lock() defer keyMu.Unlock() m.mu.Lock() // Check if file already exists and handle overwrite if fi, exists := m.info[key]; exists { m.size -= fi.Size m.LRU.Remove(key) delete(m.info, key) delete(m.data, key) } buffer := &bytes.Buffer{} m.data[key] = buffer fi := types.NewFileInfo(key, size) m.info[key] = fi m.LRU.Add(key, fi) // Initialize access time with current time fi.UpdateAccessBatched(m.timeUpdater) m.size += size m.mu.Unlock() return &memoryWriteCloser{ buffer: buffer, memory: m, key: key, }, nil } // memoryWriteCloser implements io.WriteCloser for memory files type memoryWriteCloser struct { buffer *bytes.Buffer memory *MemoryFS key string } func (mwc *memoryWriteCloser) Write(p []byte) (n int, err error) { return mwc.buffer.Write(p) } func (mwc *memoryWriteCloser) Close() error { // Update the actual size in FileInfo mwc.memory.mu.Lock() if fi, exists := mwc.memory.info[mwc.key]; exists { actualSize := int64(mwc.buffer.Len()) sizeDiff := actualSize - fi.Size fi.Size = actualSize mwc.memory.size += sizeDiff } mwc.memory.mu.Unlock() return nil } // Open opens a file for reading func (m *MemoryFS) Open(key string) (io.ReadCloser, error) { if key == "" { return nil, vfserror.ErrInvalidKey } if key[0] == '/' { return nil, vfserror.ErrInvalidKey } if strings.Contains(key, "..") { return nil, vfserror.ErrInvalidKey } keyMu := m.getKeyLock(key) keyMu.RLock() defer keyMu.RUnlock() m.mu.Lock() fi, exists := m.info[key] if !exists { m.mu.Unlock() return nil, vfserror.ErrNotFound } fi.UpdateAccessBatched(m.timeUpdater) m.LRU.MoveToFront(key, m.timeUpdater) buffer, exists := m.data[key] if !exists { m.mu.Unlock() return nil, vfserror.ErrNotFound } // Use zero-copy approach - return reader that reads directly from buffer m.mu.Unlock() return &memoryReadCloser{ buffer: buffer, offset: 0, }, nil } // memoryReadCloser implements io.ReadCloser for memory files with zero-copy optimization type memoryReadCloser struct { buffer *bytes.Buffer offset int64 } func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) { if mrc.offset >= int64(mrc.buffer.Len()) { return 0, io.EOF } // Zero-copy read directly from buffer available := mrc.buffer.Len() - int(mrc.offset) toRead := len(p) if toRead > available { toRead = available } // Read directly from buffer without copying data := mrc.buffer.Bytes() copy(p, data[mrc.offset:mrc.offset+int64(toRead)]) mrc.offset += int64(toRead) return toRead, nil } func (mrc *memoryReadCloser) Close() error { return nil } // Delete removes a file func (m *MemoryFS) Delete(key string) error { if key == "" { return vfserror.ErrInvalidKey } if key[0] == '/' { return vfserror.ErrInvalidKey } if strings.Contains(key, "..") { return vfserror.ErrInvalidKey } keyMu := m.getKeyLock(key) keyMu.Lock() defer keyMu.Unlock() m.mu.Lock() fi, exists := m.info[key] if !exists { m.mu.Unlock() return vfserror.ErrNotFound } m.size -= fi.Size m.LRU.Remove(key) delete(m.info, key) delete(m.data, key) m.mu.Unlock() return nil } // Stat returns file information func (m *MemoryFS) Stat(key string) (*types.FileInfo, error) { if key == "" { return nil, vfserror.ErrInvalidKey } if key[0] == '/' { return nil, vfserror.ErrInvalidKey } if strings.Contains(key, "..") { return nil, vfserror.ErrInvalidKey } keyMu := m.getKeyLock(key) keyMu.RLock() defer keyMu.RUnlock() m.mu.RLock() defer m.mu.RUnlock() if fi, ok := m.info[key]; ok { return fi, nil } return nil, vfserror.ErrNotFound } // EvictLRU evicts the least recently used files to free up space func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint { m.mu.Lock() defer m.mu.Unlock() var evicted uint // Evict from LRU list until we free enough space for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 { // Get the least recently used item elem := m.LRU.list.Back() if elem == nil { break } fi := elem.Value.(*types.FileInfo) key := fi.Key // Remove from LRU m.LRU.Remove(key) // Remove from maps delete(m.info, key) delete(m.data, key) // Update size m.size -= fi.Size evicted += uint(fi.Size) // Clean up key lock shardIndex := getShardIndex(key) m.keyLocks[shardIndex].Delete(key) } return evicted } // EvictBySize evicts files by size (ascending = smallest first, descending = largest first) func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint { m.mu.Lock() defer m.mu.Unlock() var evicted uint var candidates []*types.FileInfo // Collect all files for _, fi := range m.info { candidates = append(candidates, fi) } // Sort by size sort.Slice(candidates, func(i, j int) bool { if ascending { return candidates[i].Size < candidates[j].Size } return candidates[i].Size > candidates[j].Size }) // Evict files until we free enough space for _, fi := range candidates { if m.size <= m.capacity-int64(bytesNeeded) { break } key := fi.Key // Remove from LRU m.LRU.Remove(key) // Remove from maps delete(m.info, key) delete(m.data, key) // Update size m.size -= fi.Size evicted += uint(fi.Size) // Clean up key lock shardIndex := getShardIndex(key) m.keyLocks[shardIndex].Delete(key) } return evicted } // EvictFIFO evicts files using FIFO (oldest creation time first) func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint { m.mu.Lock() defer m.mu.Unlock() var evicted uint var candidates []*types.FileInfo // Collect all files for _, fi := range m.info { candidates = append(candidates, fi) } // Sort by creation time (oldest first) sort.Slice(candidates, func(i, j int) bool { return candidates[i].CTime.Before(candidates[j].CTime) }) // Evict oldest files until we free enough space for _, fi := range candidates { if m.size <= m.capacity-int64(bytesNeeded) { break } key := fi.Key // Remove from LRU m.LRU.Remove(key) // Remove from maps delete(m.info, key) delete(m.data, key) // Update size m.size -= fi.Size evicted += uint(fi.Size) // Clean up key lock shardIndex := getShardIndex(key) m.keyLocks[shardIndex].Delete(key) } return evicted }