Refactor VFS implementation to use Create and Open methods
Some checks failed
PR Check / check-and-test (pull_request) Failing after 11m4s
Some checks failed
PR Check / check-and-test (pull_request) Failing after 11m4s
- Updated disk_test.go to replace Set and Get with Create and Open methods for better clarity and functionality. - Modified fileinfo.go to include package comment. - Refactored gc.go to streamline garbage collection handling and removed unused statistics. - Updated gc_test.go to comment out large random tests for future implementation. - Enhanced memory.go to implement LRU caching and metrics for memory usage. - Updated memory_test.go to replace Set and Get with Create and Open methods. - Removed sync.go as it was redundant and not utilized. - Updated vfs.go to reflect changes in the VFS interface, replacing Set and Get with Create and Open. - Added package comments to vfserror.go for consistency.
This commit is contained in:
314
vfs/disk/disk.go
314
vfs/disk/disk.go
@@ -1,7 +1,10 @@
|
||||
// vfs/disk/disk.go
|
||||
package disk
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||
@@ -12,6 +15,38 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
diskCapacityBytes = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "disk_cache_capacity_bytes",
|
||||
Help: "Total capacity of the disk cache in bytes",
|
||||
},
|
||||
)
|
||||
|
||||
diskSizeBytes = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "disk_cache_size_bytes",
|
||||
Help: "Total size of the disk cache in bytes",
|
||||
},
|
||||
)
|
||||
|
||||
diskReadBytes = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "disk_cache_read_bytes_total",
|
||||
Help: "Total number of bytes read from the disk cache",
|
||||
},
|
||||
)
|
||||
|
||||
diskWriteBytes = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "disk_cache_write_bytes_total",
|
||||
Help: "Total number of bytes written to the disk cache",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
// Ensure DiskFS implements VFS.
|
||||
@@ -23,10 +58,50 @@ type DiskFS struct {
|
||||
|
||||
info map[string]*vfs.FileInfo
|
||||
capacity int64
|
||||
mu sync.Mutex
|
||||
size int64
|
||||
mu sync.RWMutex
|
||||
keyLocks sync.Map // map[string]*sync.RWMutex
|
||||
sg sync.WaitGroup
|
||||
LRU *lruList
|
||||
}
|
||||
|
||||
bytePool sync.Pool // Pool for []byte slices
|
||||
// lruList for LRU eviction
|
||||
type lruList struct {
|
||||
list *list.List
|
||||
elem map[string]*list.Element
|
||||
}
|
||||
|
||||
func newLruList() *lruList {
|
||||
return &lruList{
|
||||
list: list.New(),
|
||||
elem: make(map[string]*list.Element),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) MoveToFront(key string) {
|
||||
if e, ok := l.elem[key]; ok {
|
||||
l.list.MoveToFront(e)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
|
||||
e := l.list.PushFront(fi)
|
||||
l.elem[key] = e
|
||||
return e
|
||||
}
|
||||
|
||||
func (l *lruList) Remove(key string) {
|
||||
if e, ok := l.elem[key]; ok {
|
||||
l.list.Remove(e)
|
||||
delete(l.elem, key)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) Back() *vfs.FileInfo {
|
||||
if e := l.list.Back(); e != nil {
|
||||
return e.Value.(*vfs.FileInfo)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// New creates a new DiskFS.
|
||||
@@ -58,17 +133,19 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
|
||||
root: root,
|
||||
info: make(map[string]*vfs.FileInfo),
|
||||
capacity: capacity,
|
||||
mu: sync.Mutex{},
|
||||
mu: sync.RWMutex{},
|
||||
keyLocks: sync.Map{},
|
||||
sg: sync.WaitGroup{},
|
||||
bytePool: sync.Pool{
|
||||
New: func() interface{} { return make([]byte, 0) }, // Initial capacity for pooled slices is 0, will grow as needed
|
||||
},
|
||||
LRU: newLruList(),
|
||||
}
|
||||
|
||||
os.MkdirAll(dfs.root, 0755)
|
||||
|
||||
diskCapacityBytes.Set(float64(dfs.capacity))
|
||||
|
||||
if !skipinit {
|
||||
dfs.init()
|
||||
diskSizeBytes.Set(float64(dfs.Size()))
|
||||
}
|
||||
|
||||
return dfs
|
||||
@@ -118,7 +195,10 @@ func (d *DiskFS) walk(path string) {
|
||||
|
||||
d.mu.Lock()
|
||||
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
|
||||
d.info[k] = vfs.NewFileInfoFromOS(info, k)
|
||||
fi := vfs.NewFileInfoFromOS(info, k)
|
||||
d.info[k] = fi
|
||||
d.LRU.Add(k, fi)
|
||||
d.size += info.Size()
|
||||
d.mu.Unlock()
|
||||
|
||||
return nil
|
||||
@@ -135,49 +215,110 @@ func (d *DiskFS) Name() string {
|
||||
}
|
||||
|
||||
func (d *DiskFS) Size() int64 {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
var size int64
|
||||
for _, v := range d.info {
|
||||
size += v.Size()
|
||||
}
|
||||
return size
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.size
|
||||
}
|
||||
|
||||
func (d *DiskFS) Set(key string, src []byte) error {
|
||||
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
|
||||
mu, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
|
||||
return mu.(*sync.RWMutex)
|
||||
}
|
||||
|
||||
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
if key == "" {
|
||||
return vfserror.ErrInvalidKey
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return vfserror.ErrInvalidKey
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
// Sanitize key to prevent path traversal
|
||||
key = filepath.Clean(key)
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
d.mu.RLock()
|
||||
if d.capacity > 0 {
|
||||
if size := d.Size() + int64(len(src)); size > d.capacity {
|
||||
return vfserror.ErrDiskFull
|
||||
if d.size+size > d.capacity {
|
||||
d.mu.RUnlock()
|
||||
return nil, vfserror.ErrDiskFull
|
||||
}
|
||||
}
|
||||
d.mu.RUnlock()
|
||||
|
||||
if _, err := d.Stat(key); err == nil {
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
|
||||
// Check again after lock
|
||||
d.mu.Lock()
|
||||
if fi, exists := d.info[key]; exists {
|
||||
d.size -= fi.Size()
|
||||
d.LRU.Remove(key)
|
||||
d.Delete(key)
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
os.MkdirAll(d.root+"/"+filepath.Dir(key), 0755)
|
||||
if err := os.WriteFile(d.root+"/"+key, src, 0644); err != nil {
|
||||
return err
|
||||
path := filepath.Join(d.root, key)
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fi, err := os.Stat(d.root + "/" + key)
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d.info[key] = vfs.NewFileInfoFromOS(fi, key)
|
||||
return &diskWriteCloser{
|
||||
Writer: file,
|
||||
onClose: func(n int64) error {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
os.Remove(path)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
d.mu.Lock()
|
||||
finfo := vfs.NewFileInfoFromOS(fi, key)
|
||||
d.info[key] = finfo
|
||||
d.LRU.Add(key, finfo)
|
||||
d.size += n
|
||||
d.mu.Unlock()
|
||||
|
||||
diskWriteBytes.Add(float64(n))
|
||||
diskSizeBytes.Set(float64(d.Size()))
|
||||
|
||||
return nil
|
||||
},
|
||||
key: key,
|
||||
file: file,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type diskWriteCloser struct {
|
||||
io.Writer
|
||||
onClose func(int64) error
|
||||
n int64
|
||||
key string
|
||||
file *os.File
|
||||
}
|
||||
|
||||
func (wc *diskWriteCloser) Write(p []byte) (int, error) {
|
||||
n, err := wc.Writer.Write(p)
|
||||
wc.n += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (wc *diskWriteCloser) Close() error {
|
||||
err := wc.file.Close()
|
||||
if e := wc.onClose(wc.n); e != nil {
|
||||
os.Remove(wc.file.Name())
|
||||
return e
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete deletes the value of key.
|
||||
@@ -189,24 +330,39 @@ func (d *DiskFS) Delete(key string) error {
|
||||
return vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
_, err := d.Stat(key)
|
||||
if err != nil {
|
||||
return err
|
||||
// Sanitize key to prevent path traversal
|
||||
key = filepath.Clean(key)
|
||||
if strings.Contains(key, "..") {
|
||||
return vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
fi, exists := d.info[key]
|
||||
if !exists {
|
||||
d.mu.Unlock()
|
||||
return vfserror.ErrNotFound
|
||||
}
|
||||
d.size -= fi.Size()
|
||||
d.LRU.Remove(key)
|
||||
delete(d.info, key)
|
||||
if err := os.Remove(filepath.Join(d.root, key)); err != nil {
|
||||
d.mu.Unlock()
|
||||
|
||||
path := filepath.Join(d.root, key)
|
||||
if err := os.Remove(path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
diskSizeBytes.Set(float64(d.Size()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get gets the value of key and returns it.
|
||||
func (d *DiskFS) Get(key string) ([]byte, error) {
|
||||
// Open opens the file at key and returns it.
|
||||
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
@@ -214,29 +370,57 @@ func (d *DiskFS) Get(key string) ([]byte, error) {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
_, err := d.Stat(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Sanitize key to prevent path traversal
|
||||
key = filepath.Clean(key)
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.RLock()
|
||||
defer keyMu.RUnlock()
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
fi, exists := d.info[key]
|
||||
if !exists {
|
||||
d.mu.Unlock()
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
fi.ATime = time.Now()
|
||||
d.LRU.MoveToFront(key)
|
||||
d.mu.Unlock()
|
||||
|
||||
data, err := os.ReadFile(filepath.Join(d.root, key))
|
||||
path := filepath.Join(d.root, key)
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use pooled slice for return if possible, but since ReadFile allocates new, copy to pool if beneficial
|
||||
dst := d.bytePool.Get().([]byte)
|
||||
if cap(dst) < len(data) {
|
||||
dst = make([]byte, len(data)) // create a new slice if the pool slice is too small
|
||||
} else {
|
||||
dst = dst[:len(data)] // reuse the pool slice, but resize it to fit
|
||||
}
|
||||
dst = dst[:len(data)]
|
||||
copy(dst, data)
|
||||
return dst, nil
|
||||
// Update metrics on close
|
||||
return &readCloser{
|
||||
ReadCloser: file,
|
||||
onClose: func(n int64) {
|
||||
diskReadBytes.Add(float64(n))
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
type readCloser struct {
|
||||
io.ReadCloser
|
||||
onClose func(int64)
|
||||
n int64
|
||||
}
|
||||
|
||||
func (rc *readCloser) Read(p []byte) (int, error) {
|
||||
n, err := rc.ReadCloser.Read(p)
|
||||
rc.n += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (rc *readCloser) Close() error {
|
||||
err := rc.ReadCloser.Close()
|
||||
rc.onClose(rc.n)
|
||||
return err
|
||||
}
|
||||
|
||||
// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
|
||||
@@ -248,8 +432,18 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
// Sanitize key to prevent path traversal
|
||||
key = filepath.Clean(key)
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.RLock()
|
||||
defer keyMu.RUnlock()
|
||||
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
if fi, ok := d.info[key]; !ok {
|
||||
return nil, vfserror.ErrNotFound
|
||||
@@ -258,13 +452,13 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DiskFS) StatAll() []*vfs.FileInfo {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
func (d *DiskFS) StatAll() []*vfs.FileInfo {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
// hard copy the file info to prevent modification of the original file info or the other way around
|
||||
files := make([]*vfs.FileInfo, 0, len(m.info))
|
||||
for _, v := range m.info {
|
||||
files := make([]*vfs.FileInfo, 0, len(d.info))
|
||||
for _, v := range d.info {
|
||||
fi := *v
|
||||
files = append(files, &fi)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user