refactor: move the GC helpers from steamcache into the vfs/gc package and correct all tests
All checks were successful
PR Check / check-and-test (pull_request) Successful in 30s

2025-07-13 04:20:12 -05:00
parent 1673e9554a
commit 539f14e8ec
9 changed files with 368 additions and 376 deletions

View File

@@ -1,63 +0,0 @@
// steamcache/gc.go
package steamcache
import (
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/disk"
"s1d3sw1ped/SteamCache2/vfs/memory"
"time"
)
// lruGC deletes files in LRU order until enough space is reclaimed.
func lruGC(vfss vfs.VFS, size uint) {
deletions := 0
var reclaimed uint
for reclaimed < size {
switch fs := vfss.(type) {
case *disk.DiskFS:
fi := fs.LRU.Back()
if fi == nil {
break
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue
}
reclaimed += sz
deletions++
case *memory.MemoryFS:
fi := fs.LRU.Back()
if fi == nil {
break
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue
}
reclaimed += sz
deletions++
default:
// Fallback to old method if not supported
stats := vfss.StatAll()
if len(stats) == 0 {
break
}
fi := stats[0] // Assume sorted or pick first
sz := uint(fi.Size())
err := vfss.Delete(fi.Name())
if err != nil {
continue
}
reclaimed += sz
deletions++
}
}
}
func cachehandler(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
return time.Since(fi.AccessTime()) < time.Second*60 // Put hot files in the fast vfs if equipped
}

View File

@@ -80,21 +80,21 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
}
c := cache.New(
cachehandler,
gc.PromotionDecider,
)
var m *memory.MemoryFS
var mgc *gc.GCFS
if memorysize > 0 {
m = memory.New(memorysize)
mgc = gc.New(m, memoryMultiplier, lruGC)
mgc = gc.New(m, memoryMultiplier, gc.LRUGC)
}
var d *disk.DiskFS
var dgc *gc.GCFS
if disksize > 0 {
d = disk.New(diskPath, disksize)
dgc = gc.New(d, diskMultiplier, lruGC)
dgc = gc.New(d, diskMultiplier, gc.LRUGC)
}
// configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes
@@ -154,7 +154,7 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
if d != nil {
if d.Size() > d.Capacity() {
lruGC(d, uint(d.Size()-d.Capacity()))
gc.LRUGC(d, uint(d.Size()-d.Capacity()))
}
}
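For orientation, the hunks above show only the changed lines. Below is a hypothetical wiring sketch assembled from the calls that appear in this file and in the cache tests later in this commit; the variable names and the SetFast setter are assumptions (SetSlow is the only setter the tests actually show):

// Hypothetical sketch, not the actual body of New(); uses only calls visible in this commit.
m := memory.New(memorysize)                  // fast tier, when memorysize > 0
mgc := gc.New(m, memoryMultiplier, gc.LRUGC) // evict in LRU order when full

d := disk.New(diskPath, disksize)            // slow tier, when disksize > 0
dgc := gc.New(d, diskMultiplier, gc.LRUGC)

c := cache.New(gc.PromotionDecider) // promotes files accessed within the last 60s
c.SetFast(mgc)                      // assumed counterpart of the SetSlow seen in the tests
c.SetSlow(dgc)

// If the pre-existing on-disk data already exceeds capacity, reclaim the excess up front.
if d != nil && d.Size() > d.Capacity() {
	gc.LRUGC(d, uint(d.Size()-d.Capacity()))
}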

View File

@@ -9,8 +9,6 @@ import (
)
func TestCaching(t *testing.T) {
t.Parallel()
td := t.TempDir()
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
@@ -84,3 +82,29 @@ func TestCaching(t *testing.T) {
t.Errorf("Open failed: got nil, want error")
}
}
func TestCacheMissAndHit(t *testing.T) {
sc := New("localhost:8080", "0", 0, "1G", 100, t.TempDir(), "")
key := "testkey"
value := []byte("testvalue")
// Simulate miss: but since no upstream, skip full ServeHTTP, test VFS
w, err := sc.vfs.Create(key, int64(len(value)))
if err != nil {
t.Fatal(err)
}
w.Write(value)
w.Close()
rc, err := sc.vfs.Open(key)
if err != nil {
t.Fatal(err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Errorf("expected %s, got %s", value, got)
}
}

View File

@@ -17,8 +17,6 @@ func testMemory() vfs.VFS {
}
func TestNew(t *testing.T) {
t.Parallel()
fast := testMemory()
slow := testMemory()
@@ -31,8 +29,6 @@ func TestNew(t *testing.T) {
}
func TestNewPanics(t *testing.T) {
t.Parallel()
defer func() {
if r := recover(); r == nil {
t.Fatal("expected panic but did not get one")
@@ -44,9 +40,7 @@ func TestNewPanics(t *testing.T) {
cache.SetSlow(nil)
}
func TestSetAndGet(t *testing.T) {
t.Parallel()
func TestCreateAndOpen(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(nil)
@@ -75,9 +69,7 @@ func TestSetAndGet(t *testing.T) {
}
}
func TestSetAndGetNoFast(t *testing.T) {
t.Parallel()
func TestCreateAndOpenNoFast(t *testing.T) {
slow := testMemory()
cache := New(nil)
cache.SetSlow(slow)
@@ -104,9 +96,7 @@ func TestSetAndGetNoFast(t *testing.T) {
}
}
func TestCaching(t *testing.T) {
t.Parallel()
func TestCachingPromotion(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
@@ -118,57 +108,29 @@ func TestCaching(t *testing.T) {
key := "test"
value := []byte("value")
wf, _ := fast.Create(key, int64(len(value)))
wf.Write(value)
wf.Close()
ws, _ := slow.Create(key, int64(len(value)))
ws.Write(value)
ws.Close()
state := cache.cacheState(key)
if state != cachestate.CacheStateHit {
t.Fatalf("expected %v, got %v", cachestate.CacheStateHit, state)
}
err := fast.Delete(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
state = cache.cacheState(key)
if state != cachestate.CacheStateMiss {
t.Fatalf("expected %v, got %v", cachestate.CacheStateMiss, state)
}
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
err = cache.Delete(key)
// Check if promoted to fast
_, err = fast.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = cache.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
state = cache.cacheState(key)
if state != cachestate.CacheStateNotFound {
t.Fatalf("expected %v, got %v", cachestate.CacheStateNotFound, state)
t.Error("Expected promotion to fast cache")
}
}
func TestGetNotFound(t *testing.T) {
t.Parallel()
func TestOpenNotFound(t *testing.T) {
fast := testMemory()
slow := testMemory()
cache := New(nil)
@@ -182,8 +144,6 @@ func TestGetNotFound(t *testing.T) {
}
func TestDelete(t *testing.T) {
t.Parallel()
fast := testMemory()
slow := testMemory()
cache := New(nil)
@@ -211,8 +171,6 @@ func TestDelete(t *testing.T) {
}
func TestStat(t *testing.T) {
t.Parallel()
fast := testMemory()
slow := testMemory()
cache := New(nil)
@@ -237,4 +195,7 @@ func TestStat(t *testing.T) {
if info == nil {
t.Fatal("expected file info to be non-nil")
}
if info.Size() != int64(len(value)) {
t.Errorf("expected size %d, got %d", len(value), info.Size())
}
}
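The decider passed to New controls whether a slow-tier hit is copied into the fast tier on the next Open. A minimal sketch of a custom decider, assuming the signature used in TestCachingPromotion above; the 5-minute window and the SetFast call are illustrative assumptions:

// Promote anything read within the last five minutes. The CacheState argument
// could also be inspected, for example to promote only when the fast tier missed.
hot := func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
	return time.Since(fi.AccessTime()) < 5*time.Minute
}

c := New(hot)
c.SetFast(testMemory()) // assumed counterpart of the SetSlow call used above
c.SetSlow(testMemory())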

View File

@@ -61,7 +61,6 @@ type DiskFS struct {
size int64
mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
sg sync.WaitGroup
LRU *lruList
}
@@ -135,7 +134,6 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
capacity: capacity,
mu: sync.RWMutex{},
keyLocks: sync.Map{},
sg: sync.WaitGroup{},
LRU: newLruList(),
}
@@ -162,8 +160,28 @@ func NewSkipInit(root string, capacity int64) *DiskFS {
func (d *DiskFS) init() {
tstart := time.Now()
d.walk(d.root)
d.sg.Wait()
err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
fi := vfs.NewFileInfoFromOS(info, k)
d.info[k] = fi
d.LRU.Add(k, fi)
d.size += info.Size()
d.mu.Unlock()
return nil
})
if err != nil {
logger.Logger.Error().Err(err).Msg("Walk failed")
}
logger.Logger.Info().
Str("name", d.Name()).
@@ -175,37 +193,6 @@ func (d *DiskFS) init() {
Msg("init")
}
func (d *DiskFS) walk(path string) {
d.sg.Add(1)
go func() {
defer d.sg.Done()
filepath.Walk(path, func(npath string, info os.FileInfo, err error) error {
if path == npath {
return nil
}
if err != nil {
return err
}
if info.IsDir() {
d.walk(npath)
return filepath.SkipDir
}
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
fi := vfs.NewFileInfoFromOS(info, k)
d.info[k] = fi
d.LRU.Add(k, fi)
d.size += info.Size()
d.mu.Unlock()
return nil
})
}()
}
func (d *DiskFS) Capacity() int64 {
return d.capacity
}
@@ -235,6 +222,7 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
@@ -257,11 +245,14 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
if fi, exists := d.info[key]; exists {
d.size -= fi.Size()
d.LRU.Remove(key)
d.Delete(key)
delete(d.info, key)
path := filepath.Join(d.root, key)
os.Remove(path) // Ignore error, as file might not exist or other issues
}
d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
@@ -332,6 +323,7 @@ func (d *DiskFS) Delete(key string) error {
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return vfserror.ErrInvalidKey
}
@@ -352,6 +344,7 @@ func (d *DiskFS) Delete(key string) error {
d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
if err := os.Remove(path); err != nil {
return err
}
@@ -372,6 +365,7 @@ func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
@@ -391,6 +385,7 @@ func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
file, err := os.Open(path)
if err != nil {
return nil, err
@@ -434,6 +429,7 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
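The same three-step sanitization (clean the key, normalize separators, reject any remaining "..") is now applied in Create, Delete, Open and Stat. For reference, the repeated pattern amounts to a helper along these lines; sanitizeKey is hypothetical and not part of this commit:

package disk

import (
	"path/filepath"
	"strings"

	"s1d3sw1ped/SteamCache2/vfs/vfserror"
)

// sanitizeKey captures the pattern repeated above: clean the path, force
// forward slashes for consistency, and reject path-traversal attempts.
func sanitizeKey(key string) (string, error) {
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/")
	if strings.Contains(key, "..") {
		return "", vfserror.ErrInvalidKey
	}
	return key, nil
}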

View File

@@ -2,6 +2,7 @@
package disk
import (
"errors"
"fmt"
"io"
"os"
@@ -10,65 +11,85 @@ import (
"testing"
)
func TestAllDisk(t *testing.T) {
t.Parallel()
func TestCreateAndOpen(t *testing.T) {
m := NewSkipInit(t.TempDir(), 1024)
w, err := m.Create("key", 5)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Errorf("Create failed: %v", err)
t.Fatalf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Write(value)
w.Close()
w, err = m.Create("key", 6)
rc, err := m.Open(key)
if err != nil {
t.Errorf("Create failed: %v", err)
t.Fatalf("Open failed: %v", err)
}
w.Write([]byte("value1"))
w.Close()
rc, err := m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ := io.ReadAll(rc)
got, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}
if err := m.Delete("key"); err != nil {
t.Errorf("Delete failed: %v", err)
}
if _, err := m.Open("key"); err == nil {
t.Errorf("Open failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Delete("key"); err == nil {
t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
}
if _, err := m.Stat("key"); err == nil {
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
}
w, err = m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
if _, err := m.Stat("key"); err != nil {
t.Errorf("Stat failed: %v", err)
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
}
func TestLimited(t *testing.T) {
t.Parallel()
func TestOverwrite(t *testing.T) {
m := NewSkipInit(t.TempDir(), 1024)
key := "key"
value1 := []byte("value1")
value2 := []byte("value2")
w, err := m.Create(key, int64(len(value1)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value1)
w.Close()
w, err = m.Create(key, int64(len(value2)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value2)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value2) {
t.Fatalf("expected %s, got %s", value2, got)
}
}
func TestDelete(t *testing.T) {
m := NewSkipInit(t.TempDir(), 1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
if err := m.Delete(key); err != nil {
t.Fatalf("Delete failed: %v", err)
}
_, err = m.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestCapacityLimit(t *testing.T) {
m := NewSkipInit(t.TempDir(), 10)
for i := 0; i < 11; i++ {
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
@@ -84,15 +105,11 @@ func TestLimited(t *testing.T) {
}
}
func TestInit(t *testing.T) {
t.Parallel()
func TestInitExistingFiles(t *testing.T) {
td := t.TempDir()
path := filepath.Join(td, "test", "key")
os.MkdirAll(filepath.Dir(path), 0755)
os.WriteFile(path, []byte("value"), 0644)
m := New(td, 10)
@@ -100,8 +117,13 @@ func TestInit(t *testing.T) {
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != "value" {
t.Errorf("expected value, got %s", got)
}
s, err := m.Stat("test/key")
if err != nil {
t.Fatalf("Stat failed: %v", err)
@@ -114,16 +136,13 @@ func TestInit(t *testing.T) {
}
}
func TestDiskSizeDiscrepancy(t *testing.T) {
t.Parallel()
func TestSizeConsistency(t *testing.T) {
td := t.TempDir()
assumedSize := int64(6 + 5 + 6) // 6 + 5 + 6 bytes for key, key1, key2
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
m := New(td, 1024)
if m.Size() != 6 {
t.Errorf("Size failed: got %d, want %d", m.Size(), 6)
t.Errorf("Size failed: got %d, want 6", m.Size())
}
w, err := m.Create("key", 5)
@@ -140,6 +159,7 @@ func TestDiskSizeDiscrepancy(t *testing.T) {
w.Write([]byte("value1"))
w.Close()
assumedSize := int64(6 + 5 + 6)
if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}
@@ -151,45 +171,10 @@ func TestDiskSizeDiscrepancy(t *testing.T) {
d, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value" {
t.Errorf("Get failed: got %s, want %s", d, "value")
}
rc, err = m.Open("key1")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
t.Errorf("Get failed: got %s, want value", d)
}
m = New(td, 1024)
if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}
rc, err = m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value" {
t.Errorf("Get failed: got %s, want %s", d, "value")
}
rc, err = m.Open("key1")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}
if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}

View File

@@ -4,10 +4,76 @@ package gc
import (
"fmt"
"io"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/disk"
"s1d3sw1ped/SteamCache2/vfs/memory"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"time"
)
// LRUGC deletes files in LRU order until enough space is reclaimed.
func LRUGC(vfss vfs.VFS, size uint) {
attempts := 0
deletions := 0
var reclaimed uint
for reclaimed < size {
if attempts > 10 {
logger.Logger.Debug().
Int("attempts", attempts).
Msg("GC: Too many attempts to reclaim space, giving up")
return
}
attempts++
switch fs := vfss.(type) {
case *disk.DiskFS:
fi := fs.LRU.Back()
if fi == nil {
break
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue
}
reclaimed += sz
deletions++
case *memory.MemoryFS:
fi := fs.LRU.Back()
if fi == nil {
break
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue
}
reclaimed += sz
deletions++
default:
// Fallback to old method if not supported
stats := vfss.StatAll()
if len(stats) == 0 {
break
}
fi := stats[0] // Assume sorted or pick first
sz := uint(fi.Size())
err := vfss.Delete(fi.Name())
if err != nil {
continue
}
reclaimed += sz
deletions++
}
}
}
func PromotionDecider(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
return time.Since(fi.AccessTime()) < time.Second*60 // Put hot files in the fast vfs if equipped
}
// Ensure GCFS implements VFS.
var _ vfs.VFS = (*GCFS)(nil)
@@ -39,7 +105,7 @@ func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
w, err := g.VFS.Create(key, size) // try to create the key
// if it fails due to disk full error, call the GC handler and try again in a loop that will continue until it succeeds or the error is not disk full
for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
if err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
g.gcHanderFunc(g.VFS, uint(size*int64(g.multiplier))) // call the GC handler
w, err = g.VFS.Create(key, size)
}
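When the wrapped Create returns ErrDiskFull, the wrapper asks the GC handler to reclaim multiplier*size bytes and then retries the Create once. A minimal usage sketch, assuming only the constructors shown in this file and exercised by TestGCOnFull below:

// A 10-byte MemoryFS behind a GCFS with multiplier 2: a full cache asks LRUGC
// to reclaim 2*size bytes before the single retry of Create.
m := memory.New(10)
g := New(m, 2, LRUGC)

for i := 0; i < 6; i++ {
	w, err := g.Create(fmt.Sprintf("key%d", i), 2) // the 6th write overflows capacity
	if err != nil {
		panic(err) // illustrative; the tests below use t.Fatalf instead
	}
	w.Write([]byte("ab"))
	w.Close()
}
// The least recently used entries have been evicted and g.Size() stays within 10 bytes.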

View File

@@ -1,96 +1,73 @@
// vfs/gc/gc_test.go
package gc
// func TestGCSmallRandom(t *testing.T) {
// t.Parallel()
import (
"errors"
"fmt"
"s1d3sw1ped/SteamCache2/vfs/memory"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"testing"
)
// m := memory.New(1024 * 1024 * 16)
// gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
// deletions := 0
// var reclaimed uint
func TestGCOnFull(t *testing.T) {
m := memory.New(10)
gc := New(m, 2, LRUGC)
// t.Logf("GC starting to reclaim %d bytes", size)
for i := 0; i < 5; i++ {
w, err := gc.Create(fmt.Sprintf("key%d", i), 2)
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write([]byte("ab"))
w.Close()
}
// stats := vfs.StatAll()
// sort.Slice(stats, func(i, j int) bool {
// // Sort by access time so we can remove the oldest files first.
// return stats[i].AccessTime().Before(stats[j].AccessTime())
// })
// Cache full at 10 bytes
w, err := gc.Create("key5", 2)
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write([]byte("cd"))
w.Close()
// // Delete the oldest files until we've reclaimed enough space.
// for _, s := range stats {
// sz := uint(s.Size()) // Get the size of the file
// err := vfs.Delete(s.Name())
// if err != nil {
// panic(err)
// }
// reclaimed += sz // Track how much space we've reclaimed
// deletions++ // Track how many files we've deleted
if gc.Size() > 10 {
t.Errorf("Size exceeded: %d > 10", gc.Size())
}
// // t.Logf("GC deleting %s, %v", s.Name(), s.AccessTime().Format(time.RFC3339Nano))
// Check if older keys were evicted
_, err = m.Open("key0")
if err == nil {
t.Error("Expected key0 to be evicted")
}
}
// if reclaimed >= size { // We've reclaimed enough space
// break
// }
// }
// return uint(reclaimed), uint(deletions)
// })
func TestNoGCNeeded(t *testing.T) {
m := memory.New(20)
gc := New(m, 2, LRUGC)
// for i := 0; i < 10000; i++ {
// if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024*1, 1024*4)); err != nil {
// t.Errorf("Set failed: %v", err)
// }
// }
for i := 0; i < 5; i++ {
w, err := gc.Create(fmt.Sprintf("key%d", i), 2)
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write([]byte("ab"))
w.Close()
}
// if gc.Size() > 1024*1024*16 {
// t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
// }
// }
if gc.Size() != 10 {
t.Errorf("Size: got %d, want 10", gc.Size())
}
}
// func genRandomData(min int, max int) []byte {
// data := make([]byte, rand.Intn(max-min)+min)
// rand.Read(data)
// return data
// }
func TestGCInsufficientSpace(t *testing.T) {
m := memory.New(5)
gc := New(m, 1, LRUGC)
// func TestGCLargeRandom(t *testing.T) {
// t.Parallel()
// m := memory.New(1024 * 1024 * 16) // 16MB
// gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
// deletions := 0
// var reclaimed uint
// t.Logf("GC starting to reclaim %d bytes", size)
// stats := vfs.StatAll()
// sort.Slice(stats, func(i, j int) bool {
// // Sort by access time so we can remove the oldest files first.
// return stats[i].AccessTime().Before(stats[j].AccessTime())
// })
// // Delete the oldest files until we've reclaimed enough space.
// for _, s := range stats {
// sz := uint(s.Size()) // Get the size of the file
// vfs.Delete(s.Name())
// reclaimed += sz // Track how much space we've reclaimed
// deletions++ // Track how many files we've deleted
// if reclaimed >= size { // We've reclaimed enough space
// break
// }
// }
// return uint(reclaimed), uint(deletions)
// })
// for i := 0; i < 10000; i++ {
// if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024, 1024*1024)); err != nil {
// t.Errorf("Set failed: %v", err)
// }
// }
// if gc.Size() > 1024*1024*16 {
// t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
// }
// }
w, err := gc.Create("key0", 10)
if err == nil {
w.Close()
t.Error("Expected ErrDiskFull")
} else if !errors.Is(err, vfserror.ErrDiskFull) {
t.Errorf("Unexpected error: %v", err)
}
}

View File

@@ -2,71 +2,92 @@
package memory
import (
"errors"
"fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"testing"
)
func TestAllMemory(t *testing.T) {
t.Parallel()
func TestCreateAndOpen(t *testing.T) {
m := New(1024)
w, err := m.Create("key", 5)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Errorf("Create failed: %v", err)
t.Fatalf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Write(value)
w.Close()
w, err = m.Create("key", 6)
rc, err := m.Open(key)
if err != nil {
t.Errorf("Create failed: %v", err)
t.Fatalf("Open failed: %v", err)
}
w.Write([]byte("value1"))
w.Close()
rc, err := m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ := io.ReadAll(rc)
got, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}
if err := m.Delete("key"); err != nil {
t.Errorf("Delete failed: %v", err)
}
if _, err := m.Open("key"); err == nil {
t.Errorf("Open failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Delete("key"); err == nil {
t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
}
if _, err := m.Stat("key"); err == nil {
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
}
w, err = m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
if _, err := m.Stat("key"); err != nil {
t.Errorf("Stat failed: %v", err)
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
}
func TestLimited(t *testing.T) {
t.Parallel()
func TestOverwrite(t *testing.T) {
m := New(1024)
key := "key"
value1 := []byte("value1")
value2 := []byte("value2")
w, err := m.Create(key, int64(len(value1)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value1)
w.Close()
w, err = m.Create(key, int64(len(value2)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value2)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value2) {
t.Fatalf("expected %s, got %s", value2, got)
}
}
func TestDelete(t *testing.T) {
m := New(1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
if err := m.Delete(key); err != nil {
t.Fatalf("Delete failed: %v", err)
}
_, err = m.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestCapacityLimit(t *testing.T) {
m := New(10)
for i := 0; i < 11; i++ {
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
@@ -81,3 +102,28 @@ func TestLimited(t *testing.T) {
}
}
}
func TestStat(t *testing.T) {
m := New(1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
info, err := m.Stat(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if info == nil {
t.Fatal("expected file info to be non-nil")
}
if info.Size() != int64(len(value)) {
t.Errorf("expected size %d, got %d", len(value), info.Size())
}
}