steamcache2/vfs/memory/memory.go
Justin Harms f378d0e81f
feat: update dependencies and improve caching mechanism
- Added Prometheus client library for metrics collection.
- Refactored the garbage collection strategy from random deletion to LRU (least recently used) eviction; a sketch of the idea follows this summary.
- Introduced per-key locking in cache to prevent race conditions.
- Enhanced logging with structured log messages for cache hits and misses.
- Implemented a retry mechanism for upstream requests with exponential backoff.
- Updated Go modules and indirect dependencies for better compatibility and performance.
- Removed unused sync filesystem implementation.
- Added version initialization to ensure a default version string.
2025-07-12 06:43:00 -05:00
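
For context on the LRU change above: the eviction logic itself is not part of memory.go, but a minimal sketch of the idea is shown below. It drives eviction off the ATime field that Get updates. The evictLRU helper and the Name field on vfs.FileInfo are assumptions for illustration; only ATime is confirmed by this file.

package gc

import (
	"s1d3sw1ped/SteamCache2/vfs/memory"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
)

// evictLRU is an illustrative helper, not the project's actual GC code. It
// deletes the least recently used entries from a MemoryFS until `need` more
// bytes fit under its capacity.
func evictLRU(m *memory.MemoryFS, need int64) error {
	for m.Size()+need > m.Capacity() {
		infos := m.StatAll()
		if len(infos) == 0 {
			return vfserror.ErrDiskFull // nothing left to evict
		}
		// Find the entry with the oldest access time.
		oldest := infos[0]
		for _, fi := range infos[1:] {
			if fi.ATime.Before(oldest.ATime) {
				oldest = fi
			}
		}
		// Assumption: FileInfo exposes the stored key as Name.
		if err := m.Delete(oldest.Name); err != nil {
			return err
		}
	}
	return nil
}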


package memory

import (
	"sync"
	"time"

	"github.com/docker/go-units"

	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
)

// Ensure MemoryFS implements VFS.
var _ vfs.VFS = (*MemoryFS)(nil)

// file represents a file in memory.
type file struct {
	fileinfo *vfs.FileInfo
	data     []byte
}

// MemoryFS is a virtual file system that stores files in memory.
type MemoryFS struct {
	files    map[string]*file
	capacity int64
	mu       sync.Mutex
	bytePool sync.Pool // pool of reusable []byte buffers for file data
}

// New creates a new MemoryFS with the given capacity in bytes.
func New(capacity int64) *MemoryFS {
	if capacity <= 0 {
		panic("memory capacity must be greater than 0")
	}
	logger.Logger.Info().
		Str("name", "MemoryFS").
		Str("capacity", units.HumanSize(float64(capacity))).
		Msg("init")
	return &MemoryFS{
		files:    make(map[string]*file),
		capacity: capacity,
		bytePool: sync.Pool{
			// Pooled slices start empty; Set allocates larger buffers as
			// needed and they are recycled via Put.
			New: func() interface{} { return make([]byte, 0) },
		},
	}
}
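
// Capacity returns the configured maximum size of the cache in bytes.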
func (m *MemoryFS) Capacity() int64 {
	return m.capacity
}
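
// Name returns the name of this file system implementation.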
func (m *MemoryFS) Name() string {
	return "MemoryFS"
}
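
// Size returns the combined size in bytes of all files currently stored.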
func (m *MemoryFS) Size() int64 {
	var size int64
	m.mu.Lock()
	defer m.mu.Unlock()
	for _, v := range m.files {
		size += int64(len(v.data))
	}
	return size
}
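
// Set stores a copy of src under key. It returns vfserror.ErrDiskFull when
// the data would not fit within the configured capacity.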
func (m *MemoryFS) Set(key string, src []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.capacity > 0 {
		// Check capacity under the lock so concurrent writers cannot both
		// pass the check. An existing entry under the same key is about to
		// be replaced, so its bytes are not counted.
		var size int64
		for k, v := range m.files {
			if k == key {
				continue
			}
			size += int64(len(v.data))
		}
		if size+int64(len(src)) > m.capacity {
			return vfserror.ErrDiskFull
		}
	}
	// Reuse a pooled slice when it is large enough, otherwise allocate.
	data := m.bytePool.Get().([]byte)
	if cap(data) < len(src) {
		data = make([]byte, len(src))
	} else {
		data = data[:len(src)]
	}
	copy(data, src)
	// Return the buffer of a replaced entry to the pool.
	if old, ok := m.files[key]; ok {
		m.bytePool.Put(old.data)
	}
	m.files[key] = &file{
		fileinfo: vfs.NewFileInfo(
			key,
			int64(len(src)),
			time.Now(),
		),
		data: data,
	}
	return nil
}
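
// Delete removes key from the cache, returning its buffer to the byte pool.
// It returns vfserror.ErrNotFound if the key does not exist.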
func (m *MemoryFS) Delete(key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	f, ok := m.files[key]
	if !ok {
		return vfserror.ErrNotFound
	}
	// Return the entry's buffer to the pool before dropping it from the map.
	m.bytePool.Put(f.data)
	delete(m.files, key)
	return nil
}
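
// Get returns a copy of the data stored under key and updates its access time.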
func (m *MemoryFS) Get(key string) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	f, ok := m.files[key]
	if !ok {
		return nil, vfserror.ErrNotFound
	}
	// Record the access time for recency tracking and hand back a copy so
	// callers cannot mutate the cached buffer.
	f.fileinfo.ATime = time.Now()
	dst := make([]byte, len(f.data))
	copy(dst, f.data)
	logger.Logger.Debug().
		Str("name", key).
		Str("status", "GET").
		Int64("size", int64(len(dst))).
		Msg("get file from memory")
	return dst, nil
}
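
// Stat returns the FileInfo for key, or vfserror.ErrNotFound if the key does not exist.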
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	f, ok := m.files[key]
	if !ok {
		return nil, vfserror.ErrNotFound
	}
	return f.fileinfo, nil
}
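
// StatAll returns a copy of the FileInfo for every file currently stored.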
func (m *MemoryFS) StatAll() []*vfs.FileInfo {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Copy each FileInfo so callers and the cache cannot mutate each other's view.
	files := make([]*vfs.FileInfo, 0, len(m.files))
	for _, v := range m.files {
		fi := *v.fileinfo
		files = append(files, &fi)
	}
	return files
}
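
Finally, a quick usage sketch of the API above (illustrative only; the key is an arbitrary example, and it assumes the project's logger is usable at startup, since New logs on construction):

package main

import (
	"fmt"

	"s1d3sw1ped/SteamCache2/vfs/memory"
)

func main() {
	fs := memory.New(64 << 20) // 64 MiB in-memory cache

	if err := fs.Set("depot/123/chunk_0", []byte("payload")); err != nil {
		panic(err)
	}

	data, err := fs.Get("depot/123/chunk_0")
	if err != nil {
		panic(err)
	}

	fmt.Printf("read %d bytes, cache usage %d/%d bytes\n", len(data), fs.Size(), fs.Capacity())

	_ = fs.Delete("depot/123/chunk_0")
}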