Refactor VFS implementation to use Create and Open methods
Some checks failed
PR Check / check-and-test (pull_request) Failing after 11m4s

- Updated disk_test.go to replace Set and Get with Create and Open methods for better clarity and functionality.
- Modified fileinfo.go to include package comment.
- Refactored gc.go to streamline garbage collection handling and removed unused statistics.
- Updated gc_test.go to comment out large random tests for future implementation.
- Enhanced memory.go to implement LRU caching and metrics for memory usage.
- Updated memory_test.go to replace Set and Get with Create and Open methods.
- Removed sync.go as it was redundant and not utilized.
- Updated vfs.go to reflect changes in the VFS interface, replacing Set and Get with Create and Open.
- Added package comments to vfserror.go for consistency.
This commit is contained in:
2025-07-13 03:17:22 -05:00
parent b83836f914
commit 1673e9554a
24 changed files with 945 additions and 631 deletions

88
vfs/cache/cache.go vendored
View File

@@ -1,7 +1,9 @@
// vfs/cache/cache.go
package cache
import (
"fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
@@ -73,27 +75,6 @@ func (c *CacheFS) Size() int64 {
return c.slow.Size()
}
// Set sets the file at key to src. If the file is already in the cache, it is replaced.
func (c *CacheFS) Set(key string, src []byte) error {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
state := c.cacheState(key)
switch state {
case cachestate.CacheStateHit:
if c.fast != nil {
c.fast.Delete(key)
}
return c.slow.Set(key, src)
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
return c.slow.Set(key, src)
}
panic(vfserror.ErrUnreachable)
}
// Delete deletes the file at key from the cache.
func (c *CacheFS) Delete(key string) error {
mu := c.getKeyLock(key)
@@ -106,14 +87,8 @@ func (c *CacheFS) Delete(key string) error {
return c.slow.Delete(key)
}
// Get returns the file at key. If the file is not in the cache, it is fetched from the storage.
func (c *CacheFS) Get(key string) ([]byte, error) {
src, _, err := c.GetS(key)
return src, err
}
// GetS returns the file at key. If the file is not in the cache, it is fetched from the storage. It also returns the cache state.
func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
// Open returns the file at key. If the file is not in the cache, it is fetched from the storage.
func (c *CacheFS) Open(key string) (io.ReadCloser, error) {
mu := c.getKeyLock(key)
mu.RLock()
defer mu.RUnlock()
@@ -123,27 +98,51 @@ func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
switch state {
case cachestate.CacheStateHit:
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
src, err := c.fast.Get(key)
return src, state, err
return c.fast.Open(key)
case cachestate.CacheStateMiss:
src, err := c.slow.Get(key)
slowReader, err := c.slow.Open(key)
if err != nil {
return nil, state, err
return nil, err
}
sstat, _ := c.slow.Stat(key)
if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
// We are accessing the file from the slow storage, and the file has been accessed less then a minute ago so it popular, so we should update the fast storage with the latest file.
if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
if err := c.fast.Set(key, src); err != nil {
return nil, state, err
fastWriter, err := c.fast.Create(key, sstat.Size())
if err == nil {
return &teeReadCloser{
Reader: io.TeeReader(slowReader, fastWriter),
closers: []io.Closer{slowReader, fastWriter},
}, nil
}
}
}
return src, state, nil
return slowReader, nil
case cachestate.CacheStateNotFound:
return nil, state, vfserror.ErrNotFound
return nil, vfserror.ErrNotFound
}
panic(vfserror.ErrUnreachable)
}
// Create creates a new file at key. If the file is already in the cache, it is replaced.
func (c *CacheFS) Create(key string, size int64) (io.WriteCloser, error) {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
state := c.cacheState(key)
switch state {
case cachestate.CacheStateHit:
if c.fast != nil {
c.fast.Delete(key)
}
return c.slow.Create(key, size)
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
return c.slow.Create(key, size)
}
panic(vfserror.ErrUnreachable)
@@ -176,3 +175,18 @@ func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
func (c *CacheFS) StatAll() []*vfs.FileInfo {
return c.slow.StatAll()
}
type teeReadCloser struct {
io.Reader
closers []io.Closer
}
func (t *teeReadCloser) Close() error {
var err error
for _, c := range t.closers {
if e := c.Close(); e != nil {
err = e
}
}
return err
}

View File

@@ -1,7 +1,9 @@
// vfs/cache/cache_test.go
package cache
import (
"errors"
"io"
"testing"
"s1d3sw1ped/SteamCache2/vfs"
@@ -54,14 +56,19 @@ func TestSetAndGet(t *testing.T) {
key := "test"
value := []byte("value")
if err := cache.Set(key, value); err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, err := cache.Get(key)
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
@@ -78,19 +85,25 @@ func TestSetAndGetNoFast(t *testing.T) {
key := "test"
value := []byte("value")
if err := cache.Set(key, value); err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, err := cache.Get(key)
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got)
}
}
func TestCaching(t *testing.T) {
t.Parallel()
@@ -105,31 +118,31 @@ func TestCaching(t *testing.T) {
key := "test"
value := []byte("value")
if err := fast.Set(key, value); err != nil {
t.Fatalf("unexpected error: %v", err)
}
wf, _ := fast.Create(key, int64(len(value)))
wf.Write(value)
wf.Close()
if err := slow.Set(key, value); err != nil {
t.Fatalf("unexpected error: %v", err)
}
ws, _ := slow.Create(key, int64(len(value)))
ws.Write(value)
ws.Close()
_, state, err := cache.GetS(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
state := cache.cacheState(key)
if state != cachestate.CacheStateHit {
t.Fatalf("expected %v, got %v", cachestate.CacheStateHit, state)
}
err = fast.Delete(key)
err := fast.Delete(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, state, err := cache.GetS(key)
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
state = cache.cacheState(key)
if state != cachestate.CacheStateMiss {
t.Fatalf("expected %v, got %v", cachestate.CacheStateMiss, state)
}
@@ -143,10 +156,11 @@ func TestCaching(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
_, state, err = cache.GetS(key)
_, err = cache.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
state = cache.cacheState(key)
if state != cachestate.CacheStateNotFound {
t.Fatalf("expected %v, got %v", cachestate.CacheStateNotFound, state)
}
@@ -161,7 +175,7 @@ func TestGetNotFound(t *testing.T) {
cache.SetFast(fast)
cache.SetSlow(slow)
_, err := cache.Get("nonexistent")
_, err := cache.Open("nonexistent")
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
@@ -179,15 +193,18 @@ func TestDelete(t *testing.T) {
key := "test"
value := []byte("value")
if err := cache.Set(key, value); err != nil {
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
if err := cache.Delete(key); err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err := cache.Get(key)
_, err = cache.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
@@ -205,9 +222,12 @@ func TestStat(t *testing.T) {
key := "test"
value := []byte("value")
if err := cache.Set(key, value); err != nil {
w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
w.Write(value)
w.Close()
info, err := cache.Stat(key)
if err != nil {

View File

@@ -1,3 +1,4 @@
// vfs/cachestate/cachestate.go
package cachestate
import "s1d3sw1ped/SteamCache2/vfs/vfserror"

View File

@@ -1,7 +1,10 @@
// vfs/disk/disk.go
package disk
import (
"container/list"
"fmt"
"io"
"os"
"path/filepath"
"s1d3sw1ped/SteamCache2/steamcache/logger"
@@ -12,6 +15,38 @@ import (
"time"
"github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
diskCapacityBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_capacity_bytes",
Help: "Total capacity of the disk cache in bytes",
},
)
diskSizeBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_size_bytes",
Help: "Total size of the disk cache in bytes",
},
)
diskReadBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_read_bytes_total",
Help: "Total number of bytes read from the disk cache",
},
)
diskWriteBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_write_bytes_total",
Help: "Total number of bytes written to the disk cache",
},
)
)
// Ensure DiskFS implements VFS.
@@ -23,10 +58,50 @@ type DiskFS struct {
info map[string]*vfs.FileInfo
capacity int64
mu sync.Mutex
size int64
mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
sg sync.WaitGroup
LRU *lruList
}
bytePool sync.Pool // Pool for []byte slices
// lruList for LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
}
func newLruList() *lruList {
return &lruList{
list: list.New(),
elem: make(map[string]*list.Element),
}
}
func (l *lruList) MoveToFront(key string) {
if e, ok := l.elem[key]; ok {
l.list.MoveToFront(e)
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
e := l.list.PushFront(fi)
l.elem[key] = e
return e
}
func (l *lruList) Remove(key string) {
if e, ok := l.elem[key]; ok {
l.list.Remove(e)
delete(l.elem, key)
}
}
func (l *lruList) Back() *vfs.FileInfo {
if e := l.list.Back(); e != nil {
return e.Value.(*vfs.FileInfo)
}
return nil
}
// New creates a new DiskFS.
@@ -58,17 +133,19 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
root: root,
info: make(map[string]*vfs.FileInfo),
capacity: capacity,
mu: sync.Mutex{},
mu: sync.RWMutex{},
keyLocks: sync.Map{},
sg: sync.WaitGroup{},
bytePool: sync.Pool{
New: func() interface{} { return make([]byte, 0) }, // Initial capacity for pooled slices is 0, will grow as needed
},
LRU: newLruList(),
}
os.MkdirAll(dfs.root, 0755)
diskCapacityBytes.Set(float64(dfs.capacity))
if !skipinit {
dfs.init()
diskSizeBytes.Set(float64(dfs.Size()))
}
return dfs
@@ -118,7 +195,10 @@ func (d *DiskFS) walk(path string) {
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
d.info[k] = vfs.NewFileInfoFromOS(info, k)
fi := vfs.NewFileInfoFromOS(info, k)
d.info[k] = fi
d.LRU.Add(k, fi)
d.size += info.Size()
d.mu.Unlock()
return nil
@@ -135,49 +215,110 @@ func (d *DiskFS) Name() string {
}
func (d *DiskFS) Size() int64 {
d.mu.Lock()
defer d.mu.Unlock()
var size int64
for _, v := range d.info {
size += v.Size()
}
return size
d.mu.RLock()
defer d.mu.RUnlock()
return d.size
}
func (d *DiskFS) Set(key string, src []byte) error {
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
}
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
if key == "" {
return vfserror.ErrInvalidKey
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return vfserror.ErrInvalidKey
return nil, vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
d.mu.RLock()
if d.capacity > 0 {
if size := d.Size() + int64(len(src)); size > d.capacity {
return vfserror.ErrDiskFull
if d.size+size > d.capacity {
d.mu.RUnlock()
return nil, vfserror.ErrDiskFull
}
}
d.mu.RUnlock()
if _, err := d.Stat(key); err == nil {
keyMu := d.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
// Check again after lock
d.mu.Lock()
if fi, exists := d.info[key]; exists {
d.size -= fi.Size()
d.LRU.Remove(key)
d.Delete(key)
}
d.mu.Unlock()
d.mu.Lock()
defer d.mu.Unlock()
os.MkdirAll(d.root+"/"+filepath.Dir(key), 0755)
if err := os.WriteFile(d.root+"/"+key, src, 0644); err != nil {
return err
path := filepath.Join(d.root, key)
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
}
fi, err := os.Stat(d.root + "/" + key)
file, err := os.Create(path)
if err != nil {
panic(err)
return nil, err
}
d.info[key] = vfs.NewFileInfoFromOS(fi, key)
return &diskWriteCloser{
Writer: file,
onClose: func(n int64) error {
fi, err := os.Stat(path)
if err != nil {
os.Remove(path)
return err
}
return nil
d.mu.Lock()
finfo := vfs.NewFileInfoFromOS(fi, key)
d.info[key] = finfo
d.LRU.Add(key, finfo)
d.size += n
d.mu.Unlock()
diskWriteBytes.Add(float64(n))
diskSizeBytes.Set(float64(d.Size()))
return nil
},
key: key,
file: file,
}, nil
}
type diskWriteCloser struct {
io.Writer
onClose func(int64) error
n int64
key string
file *os.File
}
func (wc *diskWriteCloser) Write(p []byte) (int, error) {
n, err := wc.Writer.Write(p)
wc.n += int64(n)
return n, err
}
func (wc *diskWriteCloser) Close() error {
err := wc.file.Close()
if e := wc.onClose(wc.n); e != nil {
os.Remove(wc.file.Name())
return e
}
return err
}
// Delete deletes the value of key.
@@ -189,24 +330,39 @@ func (d *DiskFS) Delete(key string) error {
return vfserror.ErrInvalidKey
}
_, err := d.Stat(key)
if err != nil {
return err
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
d.mu.Lock()
defer d.mu.Unlock()
fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return vfserror.ErrNotFound
}
d.size -= fi.Size()
d.LRU.Remove(key)
delete(d.info, key)
if err := os.Remove(filepath.Join(d.root, key)); err != nil {
d.mu.Unlock()
path := filepath.Join(d.root, key)
if err := os.Remove(path); err != nil {
return err
}
diskSizeBytes.Set(float64(d.Size()))
return nil
}
// Get gets the value of key and returns it.
func (d *DiskFS) Get(key string) ([]byte, error) {
// Open opens the file at key and returns it.
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
@@ -214,29 +370,57 @@ func (d *DiskFS) Get(key string) ([]byte, error) {
return nil, vfserror.ErrInvalidKey
}
_, err := d.Stat(key)
if err != nil {
return nil, err
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
d.mu.Lock()
defer d.mu.Unlock()
fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return nil, vfserror.ErrNotFound
}
fi.ATime = time.Now()
d.LRU.MoveToFront(key)
d.mu.Unlock()
data, err := os.ReadFile(filepath.Join(d.root, key))
path := filepath.Join(d.root, key)
file, err := os.Open(path)
if err != nil {
return nil, err
}
// Use pooled slice for return if possible, but since ReadFile allocates new, copy to pool if beneficial
dst := d.bytePool.Get().([]byte)
if cap(dst) < len(data) {
dst = make([]byte, len(data)) // create a new slice if the pool slice is too small
} else {
dst = dst[:len(data)] // reuse the pool slice, but resize it to fit
}
dst = dst[:len(data)]
copy(dst, data)
return dst, nil
// Update metrics on close
return &readCloser{
ReadCloser: file,
onClose: func(n int64) {
diskReadBytes.Add(float64(n))
},
}, nil
}
type readCloser struct {
io.ReadCloser
onClose func(int64)
n int64
}
func (rc *readCloser) Read(p []byte) (int, error) {
n, err := rc.ReadCloser.Read(p)
rc.n += int64(n)
return n, err
}
func (rc *readCloser) Close() error {
err := rc.ReadCloser.Close()
rc.onClose(rc.n)
return err
}
// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
@@ -248,8 +432,18 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
return nil, vfserror.ErrInvalidKey
}
d.mu.Lock()
defer d.mu.Unlock()
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
d.mu.RLock()
defer d.mu.RUnlock()
if fi, ok := d.info[key]; !ok {
return nil, vfserror.ErrNotFound
@@ -258,13 +452,13 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
}
}
func (m *DiskFS) StatAll() []*vfs.FileInfo {
m.mu.Lock()
defer m.mu.Unlock()
func (d *DiskFS) StatAll() []*vfs.FileInfo {
d.mu.RLock()
defer d.mu.RUnlock()
// hard copy the file info to prevent modification of the original file info or the other way around
files := make([]*vfs.FileInfo, 0, len(m.info))
for _, v := range m.info {
files := make([]*vfs.FileInfo, 0, len(d.info))
for _, v := range d.info {
fi := *v
files = append(files, &fi)
}

View File

@@ -1,7 +1,9 @@
// vfs/disk/disk_test.go
package disk
import (
"fmt"
"io"
"os"
"path/filepath"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
@@ -12,17 +14,27 @@ func TestAllDisk(t *testing.T) {
t.Parallel()
m := NewSkipInit(t.TempDir(), 1024)
if err := m.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
w, err := m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
if err := m.Set("key", []byte("value1")); err != nil {
t.Errorf("Set failed: %v", err)
w, err = m.Create("key", 6)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value1"))
w.Close()
if d, err := m.Get("key"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value1" {
rc, err := m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}
@@ -30,8 +42,8 @@ func TestAllDisk(t *testing.T) {
t.Errorf("Delete failed: %v", err)
}
if _, err := m.Get("key"); err == nil {
t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
if _, err := m.Open("key"); err == nil {
t.Errorf("Open failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Delete("key"); err == nil {
@@ -42,9 +54,12 @@ func TestAllDisk(t *testing.T) {
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
w, err = m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
if _, err := m.Stat("key"); err != nil {
t.Errorf("Stat failed: %v", err)
@@ -56,10 +71,15 @@ func TestLimited(t *testing.T) {
m := NewSkipInit(t.TempDir(), 10)
for i := 0; i < 11; i++ {
if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 {
t.Errorf("Set failed: %v", err)
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
if err != nil && i < 10 {
t.Errorf("Create failed: %v", err)
} else if i == 10 && err == nil {
t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull)
t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
}
if i < 10 {
w.Write([]byte("1"))
w.Close()
}
}
}
@@ -76,13 +96,21 @@ func TestInit(t *testing.T) {
os.WriteFile(path, []byte("value"), 0644)
m := New(td, 10)
if _, err := m.Get("test/key"); err != nil {
t.Errorf("Get failed: %v", err)
rc, err := m.Open("test/key")
if err != nil {
t.Fatalf("Open failed: %v", err)
}
rc.Close()
s, _ := m.Stat("test/key")
if s.Name() != "test/key" {
t.Errorf("Stat failed: got %s, want %s", s.Name(), "key")
s, err := m.Stat("test/key")
if err != nil {
t.Fatalf("Stat failed: %v", err)
}
if s == nil {
t.Error("Stat returned nil")
}
if s != nil && s.Name() != "test/key" {
t.Errorf("Stat failed: got %s, want %s", s.Name(), "test/key")
}
}
@@ -94,31 +122,45 @@ func TestDiskSizeDiscrepancy(t *testing.T) {
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
m := New(td, 1024)
if 6 != m.Size() {
if m.Size() != 6 {
t.Errorf("Size failed: got %d, want %d", m.Size(), 6)
}
if err := m.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
w, err := m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
if err := m.Set("key1", []byte("value1")); err != nil {
t.Errorf("Set failed: %v", err)
w, err = m.Create("key1", 6)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value1"))
w.Close()
if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}
if d, err := m.Get("key"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value" {
rc, err := m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value" {
t.Errorf("Get failed: got %s, want %s", d, "value")
}
if d, err := m.Get("key1"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value1" {
rc, err = m.Open("key1")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}
@@ -128,15 +170,23 @@ func TestDiskSizeDiscrepancy(t *testing.T) {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}
if d, err := m.Get("key"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value" {
rc, err = m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value" {
t.Errorf("Get failed: got %s, want %s", d, "value")
}
if d, err := m.Get("key1"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value1" {
rc, err = m.Open("key1")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}

View File

@@ -1,3 +1,4 @@
// vfs/fileinfo.go
package vfs
import (

View File

@@ -1,11 +1,11 @@
// vfs/gc/gc.go
package gc
import (
"fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"sync"
"time"
)
// Ensure GCFS implements VFS.
@@ -17,15 +17,11 @@ type GCFS struct {
multiplier int
// protected by mu
gcHanderFunc GCHandlerFunc
lifetimeBytes, lifetimeFiles uint
reclaimedBytes, deletedFiles uint
gcTime time.Duration
mu sync.Mutex
gcHanderFunc GCHandlerFunc
}
// GCHandlerFunc is a function that is called when the disk is full and the GCFS needs to free up space. It is passed the VFS and the size of the file that needs to be written. Its up to the implementation to free up space. How much space is freed is also up to the implementation.
type GCHandlerFunc func(vfs vfs.VFS, size uint) (reclaimedBytes uint, deletedFiles uint)
type GCHandlerFunc func(vfs vfs.VFS, size uint)
func New(vfs vfs.VFS, multiplier int, gcHandlerFunc GCHandlerFunc) *GCFS {
if multiplier <= 0 {
@@ -38,47 +34,17 @@ func New(vfs vfs.VFS, multiplier int, gcHandlerFunc GCHandlerFunc) *GCFS {
}
}
// Stats returns the lifetime bytes, lifetime files, reclaimed bytes and deleted files.
// The lifetime bytes and lifetime files are the total bytes and files that have been freed up by the GC handler.
// The reclaimed bytes and deleted files are the bytes and files that have been freed up by the GC handler since last call to Stats.
// The gc time is the total time spent in the GC handler since last call to Stats.
// The reclaimed bytes and deleted files and gc time are reset to 0 after the call to Stats.
func (g *GCFS) Stats() (lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles uint, gcTime time.Duration) {
g.mu.Lock()
defer g.mu.Unlock()
// Create overrides the Create method of the VFS interface. It tries to create the key, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error.
func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
w, err := g.VFS.Create(key, size) // try to create the key
g.lifetimeBytes += g.reclaimedBytes
g.lifetimeFiles += g.deletedFiles
lifetimeBytes = g.lifetimeBytes
lifetimeFiles = g.lifetimeFiles
reclaimedBytes = g.reclaimedBytes
deletedFiles = g.deletedFiles
gcTime = g.gcTime
g.reclaimedBytes = 0
g.deletedFiles = 0
g.gcTime = time.Duration(0)
return
}
// Set overrides the Set method of the VFS interface. It tries to set the key and src, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error.
func (g *GCFS) Set(key string, src []byte) error {
g.mu.Lock()
defer g.mu.Unlock()
err := g.VFS.Set(key, src) // try to set the key and src
if err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
tstart := time.Now()
reclaimedBytes, deletedFiles := g.gcHanderFunc(g.VFS, uint(len(src)*g.multiplier)) // call the GC handler
g.gcTime += time.Since(tstart)
g.reclaimedBytes += reclaimedBytes
g.deletedFiles += deletedFiles
err = g.VFS.Set(key, src) // try again after GC if it still fails return the error
// if it fails due to disk full error, call the GC handler and try again in a loop that will continue until it succeeds or the error is not disk full
for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
g.gcHanderFunc(g.VFS, uint(size*int64(g.multiplier))) // call the GC handler
w, err = g.VFS.Create(key, size)
}
return err
return w, err
}
func (g *GCFS) Name() string {

View File

@@ -1,105 +1,96 @@
// vfs/gc/gc_test.go
package gc
import (
"fmt"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/memory"
"sort"
"testing"
// func TestGCSmallRandom(t *testing.T) {
// t.Parallel()
"golang.org/x/exp/rand"
)
// m := memory.New(1024 * 1024 * 16)
// gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
// deletions := 0
// var reclaimed uint
func TestGCSmallRandom(t *testing.T) {
t.Parallel()
// t.Logf("GC starting to reclaim %d bytes", size)
m := memory.New(1024 * 1024 * 16)
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
deletions := 0
var reclaimed uint
// stats := vfs.StatAll()
// sort.Slice(stats, func(i, j int) bool {
// // Sort by access time so we can remove the oldest files first.
// return stats[i].AccessTime().Before(stats[j].AccessTime())
// })
t.Logf("GC starting to reclaim %d bytes", size)
// // Delete the oldest files until we've reclaimed enough space.
// for _, s := range stats {
// sz := uint(s.Size()) // Get the size of the file
// err := vfs.Delete(s.Name())
// if err != nil {
// panic(err)
// }
// reclaimed += sz // Track how much space we've reclaimed
// deletions++ // Track how many files we've deleted
stats := vfs.StatAll()
sort.Slice(stats, func(i, j int) bool {
// Sort by access time so we can remove the oldest files first.
return stats[i].AccessTime().Before(stats[j].AccessTime())
})
// // t.Logf("GC deleting %s, %v", s.Name(), s.AccessTime().Format(time.RFC3339Nano))
// Delete the oldest files until we've reclaimed enough space.
for _, s := range stats {
sz := uint(s.Size()) // Get the size of the file
err := vfs.Delete(s.Name())
if err != nil {
panic(err)
}
reclaimed += sz // Track how much space we've reclaimed
deletions++ // Track how many files we've deleted
// if reclaimed >= size { // We've reclaimed enough space
// break
// }
// }
// return uint(reclaimed), uint(deletions)
// })
// t.Logf("GC deleting %s, %v", s.Name(), s.AccessTime().Format(time.RFC3339Nano))
// for i := 0; i < 10000; i++ {
// if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024*1, 1024*4)); err != nil {
// t.Errorf("Set failed: %v", err)
// }
// }
if reclaimed >= size { // We've reclaimed enough space
break
}
}
return uint(reclaimed), uint(deletions)
})
// if gc.Size() > 1024*1024*16 {
// t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
// }
// }
for i := 0; i < 10000; i++ {
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024*1, 1024*4)); err != nil {
t.Errorf("Set failed: %v", err)
}
}
// func genRandomData(min int, max int) []byte {
// data := make([]byte, rand.Intn(max-min)+min)
// rand.Read(data)
// return data
// }
if gc.Size() > 1024*1024*16 {
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
}
}
// func TestGCLargeRandom(t *testing.T) {
// t.Parallel()
func genRandomData(min int, max int) []byte {
data := make([]byte, rand.Intn(max-min)+min)
rand.Read(data)
return data
}
// m := memory.New(1024 * 1024 * 16) // 16MB
// gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
// deletions := 0
// var reclaimed uint
func TestGCLargeRandom(t *testing.T) {
t.Parallel()
// t.Logf("GC starting to reclaim %d bytes", size)
m := memory.New(1024 * 1024 * 16) // 16MB
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
deletions := 0
var reclaimed uint
// stats := vfs.StatAll()
// sort.Slice(stats, func(i, j int) bool {
// // Sort by access time so we can remove the oldest files first.
// return stats[i].AccessTime().Before(stats[j].AccessTime())
// })
t.Logf("GC starting to reclaim %d bytes", size)
// // Delete the oldest files until we've reclaimed enough space.
// for _, s := range stats {
// sz := uint(s.Size()) // Get the size of the file
// vfs.Delete(s.Name())
// reclaimed += sz // Track how much space we've reclaimed
// deletions++ // Track how many files we've deleted
stats := vfs.StatAll()
sort.Slice(stats, func(i, j int) bool {
// Sort by access time so we can remove the oldest files first.
return stats[i].AccessTime().Before(stats[j].AccessTime())
})
// if reclaimed >= size { // We've reclaimed enough space
// break
// }
// }
// Delete the oldest files until we've reclaimed enough space.
for _, s := range stats {
sz := uint(s.Size()) // Get the size of the file
vfs.Delete(s.Name())
reclaimed += sz // Track how much space we've reclaimed
deletions++ // Track how many files we've deleted
// return uint(reclaimed), uint(deletions)
// })
if reclaimed >= size { // We've reclaimed enough space
break
}
}
// for i := 0; i < 10000; i++ {
// if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024, 1024*1024)); err != nil {
// t.Errorf("Set failed: %v", err)
// }
// }
return uint(reclaimed), uint(deletions)
})
for i := 0; i < 10000; i++ {
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024, 1024*1024)); err != nil {
t.Errorf("Set failed: %v", err)
}
}
if gc.Size() > 1024*1024*16 {
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
}
}
// if gc.Size() > 1024*1024*16 {
// t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
// }
// }

View File

@@ -1,6 +1,10 @@
// vfs/memory/memory.go
package memory
import (
"bytes"
"container/list"
"io"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
@@ -8,6 +12,38 @@ import (
"time"
"github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
memoryCapacityBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "memory_cache_capacity_bytes",
Help: "Total capacity of the memory cache in bytes",
},
)
memorySizeBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "memory_cache_size_bytes",
Help: "Total size of the memory cache in bytes",
},
)
memoryReadBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "memory_cache_read_bytes_total",
Help: "Total number of bytes read from the memory cache",
},
)
memoryWriteBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "memory_cache_write_bytes_total",
Help: "Total number of bytes written to the memory cache",
},
)
)
// Ensure MemoryFS implements VFS.
@@ -23,9 +59,49 @@ type file struct {
type MemoryFS struct {
files map[string]*file
capacity int64
mu sync.Mutex
size int64
mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
LRU *lruList
}
bytePool sync.Pool // Pool for []byte slices
// lruList for LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
}
func newLruList() *lruList {
return &lruList{
list: list.New(),
elem: make(map[string]*list.Element),
}
}
func (l *lruList) MoveToFront(key string) {
if e, ok := l.elem[key]; ok {
l.list.MoveToFront(e)
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
e := l.list.PushFront(fi)
l.elem[key] = e
return e
}
func (l *lruList) Remove(key string) {
if e, ok := l.elem[key]; ok {
l.list.Remove(e)
delete(l.elem, key)
}
}
func (l *lruList) Back() *vfs.FileInfo {
if e := l.list.Back(); e != nil {
return e.Value.(*vfs.FileInfo)
}
return nil
}
// New creates a new MemoryFS.
@@ -39,14 +115,18 @@ func New(capacity int64) *MemoryFS {
Str("capacity", units.HumanSize(float64(capacity))).
Msg("init")
return &MemoryFS{
mfs := &MemoryFS{
files: make(map[string]*file),
capacity: capacity,
mu: sync.Mutex{},
bytePool: sync.Pool{
New: func() interface{} { return make([]byte, 0) }, // Initial capacity for pooled slices
},
mu: sync.RWMutex{},
keyLocks: sync.Map{},
LRU: newLruList(),
}
memoryCapacityBytes.Set(float64(capacity))
memorySizeBytes.Set(float64(mfs.Size()))
return mfs
}
func (m *MemoryFS) Capacity() int64 {
@@ -58,93 +138,118 @@ func (m *MemoryFS) Name() string {
}
func (m *MemoryFS) Size() int64 {
var size int64
m.mu.Lock()
defer m.mu.Unlock()
for _, v := range m.files {
size += int64(len(v.data))
}
return size
m.mu.RLock()
defer m.mu.RUnlock()
return m.size
}
func (m *MemoryFS) Set(key string, src []byte) error {
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
}
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
m.mu.RLock()
if m.capacity > 0 {
if size := m.Size() + int64(len(src)); size > m.capacity {
return vfserror.ErrDiskFull
if m.size+size > m.capacity {
m.mu.RUnlock()
return nil, vfserror.ErrDiskFull
}
}
m.mu.RUnlock()
m.mu.Lock()
defer m.mu.Unlock()
keyMu := m.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
// Use pooled slice
data := m.bytePool.Get().([]byte)
if cap(data) < len(src) {
data = make([]byte, len(src)) // expand the slice if the pool slice is too small
} else {
data = data[:len(src)] // reuse the pool slice, but resize it to fit
}
copy(data, src)
buf := &bytes.Buffer{}
m.files[key] = &file{
fileinfo: vfs.NewFileInfo(
key,
int64(len(src)),
time.Now(),
),
data: data,
}
return &memWriteCloser{
Writer: buf,
onClose: func() error {
data := buf.Bytes()
m.mu.Lock()
if f, exists := m.files[key]; exists {
m.size -= int64(len(f.data))
m.LRU.Remove(key)
}
fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
m.files[key] = &file{
fileinfo: fi,
data: data,
}
m.LRU.Add(key, fi)
m.size += int64(len(data))
m.mu.Unlock()
return nil
memoryWriteBytes.Add(float64(len(data)))
memorySizeBytes.Set(float64(m.Size()))
return nil
},
}, nil
}
type memWriteCloser struct {
io.Writer
onClose func() error
}
func (wc *memWriteCloser) Close() error {
return wc.onClose()
}
func (m *MemoryFS) Delete(key string) error {
_, err := m.Stat(key)
if err != nil {
return err
}
keyMu := m.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
// Return data to pool
if f, ok := m.files[key]; ok {
m.bytePool.Put(f.data)
f, exists := m.files[key]
if !exists {
m.mu.Unlock()
return vfserror.ErrNotFound
}
m.size -= int64(len(f.data))
m.LRU.Remove(key)
delete(m.files, key)
m.mu.Unlock()
memorySizeBytes.Set(float64(m.Size()))
return nil
}
func (m *MemoryFS) Get(key string) ([]byte, error) {
_, err := m.Stat(key)
if err != nil {
return nil, err
}
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
keyMu := m.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
m.mu.Lock()
defer m.mu.Unlock()
f, exists := m.files[key]
if !exists {
m.mu.Unlock()
return nil, vfserror.ErrNotFound
}
f.fileinfo.ATime = time.Now()
m.LRU.MoveToFront(key)
dataCopy := make([]byte, len(f.data))
copy(dataCopy, f.data)
m.mu.Unlock()
m.files[key].fileinfo.ATime = time.Now()
dst := make([]byte, len(m.files[key].data))
copy(dst, m.files[key].data)
memoryReadBytes.Add(float64(len(dataCopy)))
memorySizeBytes.Set(float64(m.Size()))
logger.Logger.Debug().
Str("name", key).
Str("status", "GET").
Int64("size", int64(len(dst))).
Msg("get file from memory")
return dst, nil
return io.NopCloser(bytes.NewReader(dataCopy)), nil
}
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
m.mu.Lock()
defer m.mu.Unlock()
keyMu := m.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
m.mu.RLock()
defer m.mu.RUnlock()
f, ok := m.files[key]
if !ok {
@@ -155,8 +260,8 @@ func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
}
func (m *MemoryFS) StatAll() []*vfs.FileInfo {
m.mu.Lock()
defer m.mu.Unlock()
m.mu.RLock()
defer m.mu.RUnlock()
// hard copy the file info to prevent modification of the original file info or the other way around
files := make([]*vfs.FileInfo, 0, len(m.files))

View File

@@ -1,7 +1,9 @@
// vfs/memory/memory_test.go
package memory
import (
"fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"testing"
)
@@ -10,17 +12,27 @@ func TestAllMemory(t *testing.T) {
t.Parallel()
m := New(1024)
if err := m.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
w, err := m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
if err := m.Set("key", []byte("value1")); err != nil {
t.Errorf("Set failed: %v", err)
w, err = m.Create("key", 6)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value1"))
w.Close()
if d, err := m.Get("key"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value1" {
rc, err := m.Open("key")
if err != nil {
t.Errorf("Open failed: %v", err)
}
d, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}
@@ -28,8 +40,8 @@ func TestAllMemory(t *testing.T) {
t.Errorf("Delete failed: %v", err)
}
if _, err := m.Get("key"); err == nil {
t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
if _, err := m.Open("key"); err == nil {
t.Errorf("Open failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Delete("key"); err == nil {
@@ -40,9 +52,12 @@ func TestAllMemory(t *testing.T) {
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
w, err = m.Create("key", 5)
if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value"))
w.Close()
if _, err := m.Stat("key"); err != nil {
t.Errorf("Stat failed: %v", err)
@@ -54,10 +69,15 @@ func TestLimited(t *testing.T) {
m := New(10)
for i := 0; i < 11; i++ {
if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 {
t.Errorf("Set failed: %v", err)
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
if err != nil && i < 10 {
t.Errorf("Create failed: %v", err)
} else if i == 10 && err == nil {
t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull)
t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
}
if i < 10 {
w.Write([]byte("1"))
w.Close()
}
}
}

View File

@@ -1,76 +0,0 @@
package sync
// import (
// "fmt"
// "s1d3sw1ped/SteamCache2/vfs"
// "sync"
// )
// // Ensure SyncFS implements VFS.
// var _ vfs.VFS = (*SyncFS)(nil)
// type SyncFS struct {
// vfs vfs.VFS
// mu sync.RWMutex
// }
// func New(vfs vfs.VFS) *SyncFS {
// return &SyncFS{
// vfs: vfs,
// mu: sync.RWMutex{},
// }
// }
// // Name returns the name of the file system.
// func (sfs *SyncFS) Name() string {
// return fmt.Sprintf("SyncFS(%s)", sfs.vfs.Name())
// }
// // Size returns the total size of all files in the file system.
// func (sfs *SyncFS) Size() int64 {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
// return sfs.vfs.Size()
// }
// // Set sets the value of key as src.
// // Setting the same key multiple times, the last set call takes effect.
// func (sfs *SyncFS) Set(key string, src []byte) error {
// sfs.mu.Lock()
// defer sfs.mu.Unlock()
// return sfs.vfs.Set(key, src)
// }
// // Delete deletes the value of key.
// func (sfs *SyncFS) Delete(key string) error {
// sfs.mu.Lock()
// defer sfs.mu.Unlock()
// return sfs.vfs.Delete(key)
// }
// // Get gets the value of key to dst, and returns dst no matter whether or not there is an error.
// func (sfs *SyncFS) Get(key string) ([]byte, error) {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
// return sfs.vfs.Get(key)
// }
// // Stat returns the FileInfo of key.
// func (sfs *SyncFS) Stat(key string) (*vfs.FileInfo, error) {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
// return sfs.vfs.Stat(key)
// }
// // StatAll returns the FileInfo of all keys.
// func (sfs *SyncFS) StatAll() []*vfs.FileInfo {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
// return sfs.vfs.StatAll()
// }

View File

@@ -1,5 +1,8 @@
// vfs/vfs.go
package vfs
import "io"
// VFS is the interface that wraps the basic methods of a virtual file system.
type VFS interface {
// Name returns the name of the file system.
@@ -8,15 +11,14 @@ type VFS interface {
// Size returns the total size of all files in the file system.
Size() int64
// Set sets the value of key as src.
// Setting the same key multiple times, the last set call takes effect.
Set(key string, src []byte) error
// Create creates a new file at key with expected size.
Create(key string, size int64) (io.WriteCloser, error)
// Delete deletes the value of key.
Delete(key string) error
// Get gets the value of key to dst, and returns dst no matter whether or not there is an error.
Get(key string) ([]byte, error)
// Open opens the file at key.
Open(key string) (io.ReadCloser, error)
// Stat returns the FileInfo of key.
Stat(key string) (*FileInfo, error)

View File

@@ -1,3 +1,4 @@
// vfs/vfserror/vfserror.go
package vfserror
import "errors"