steamcache2/vfs/disk/disk.go
Justin Harms 1673e9554a
Refactor VFS implementation to use Create and Open methods
- Updated disk_test.go to replace Set and Get with Create and Open methods for better clarity and functionality.
- Modified fileinfo.go to include package comment.
- Refactored gc.go to streamline garbage collection handling and removed unused statistics.
- Updated gc_test.go to comment out large random tests for future implementation.
- Enhanced memory.go to implement LRU caching and metrics for memory usage.
- Updated memory_test.go to replace Set and Get with Create and Open methods.
- Removed sync.go as it was redundant and not utilized.
- Updated vfs.go to reflect changes in the VFS interface, replacing Set and Get with Create and Open.
- Added package comments to vfserror.go for consistency.
2025-07-13 03:17:22 -05:00
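Below is a minimal usage sketch of the new Create/Open API implemented by disk.go (not part of this commit); the cache root, capacity, and key are placeholder values, and the import path is assumed from the file's own import block.

package main

import (
	"fmt"
	"io"
	"strings"

	"s1d3sw1ped/SteamCache2/vfs/disk"
)

func main() {
	d := disk.New("/tmp/steamcache", 1<<30) // 1 GiB disk cache

	// Create returns an io.WriteCloser; the entry is indexed and counted
	// toward the cache size when the writer is closed.
	w, err := d.Create("depot/123/chunk0", 11)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, strings.NewReader("hello world")); err != nil {
		panic(err)
	}
	w.Close()

	// Open returns an io.ReadCloser and marks the entry as recently used.
	r, err := d.Open("depot/123/chunk0")
	if err != nil {
		panic(err)
	}
	defer r.Close()
	b, _ := io.ReadAll(r)
	fmt.Println(string(b))
}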

// vfs/disk/disk.go
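// Package disk provides a disk-backed VFS cache: files are stored under a
// root directory, indexed in memory, tracked in an LRU list, and exported
// through Prometheus metrics.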
package disk

import (
	"container/list"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/docker/go-units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
)
var (
diskCapacityBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_capacity_bytes",
Help: "Total capacity of the disk cache in bytes",
},
)
diskSizeBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_size_bytes",
Help: "Total size of the disk cache in bytes",
},
)
diskReadBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_read_bytes_total",
Help: "Total number of bytes read from the disk cache",
},
)
diskWriteBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_write_bytes_total",
Help: "Total number of bytes written to the disk cache",
},
)
)
// Ensure DiskFS implements VFS.
var _ vfs.VFS = (*DiskFS)(nil)
// DiskFS is a virtual file system that stores files on disk.
type DiskFS struct {
root string
info map[string]*vfs.FileInfo
capacity int64
size int64
mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
sg sync.WaitGroup
LRU *lruList
}
// lruList for LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
}
func newLruList() *lruList {
return &lruList{
list: list.New(),
elem: make(map[string]*list.Element),
}
}
func (l *lruList) MoveToFront(key string) {
if e, ok := l.elem[key]; ok {
l.list.MoveToFront(e)
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
e := l.list.PushFront(fi)
l.elem[key] = e
return e
}
func (l *lruList) Remove(key string) {
if e, ok := l.elem[key]; ok {
l.list.Remove(e)
delete(l.elem, key)
}
}
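// Back returns the FileInfo of the least recently used entry, or nil if the
// list is empty.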
func (l *lruList) Back() *vfs.FileInfo {
if e := l.list.Back(); e != nil {
return e.Value.(*vfs.FileInfo)
}
return nil
}
// newDiskFS is the shared constructor behind New and NewSkipInit. It panics
// if the capacity or root is invalid, or if the root cannot be created.
func newDiskFS(root string, capacity int64, skipinit bool) *DiskFS {
if capacity <= 0 {
panic("disk capacity must be greater than 0") // panic if the capacity is less than or equal to 0
}
if root == "" {
panic("disk root must not be empty") // panic if the root is empty
}
fi, err := os.Stat(root)
if err != nil {
if !os.IsNotExist(err) {
panic(err) // panic if the error is something other than not found
}
os.Mkdir(root, 0755) // create the root directory if it does not exist
fi, err = os.Stat(root) // re-stat to get the file info
if err != nil {
panic(err) // panic if the re-stat fails
}
}
if !fi.IsDir() {
panic("disk root must be a directory") // panic if the root is not a directory
}
dfs := &DiskFS{
root: root,
info: make(map[string]*vfs.FileInfo),
capacity: capacity,
mu: sync.RWMutex{},
keyLocks: sync.Map{},
sg: sync.WaitGroup{},
LRU: newLruList(),
}
os.MkdirAll(dfs.root, 0755)
diskCapacityBytes.Set(float64(dfs.capacity))
if !skipinit {
dfs.init()
diskSizeBytes.Set(float64(dfs.Size()))
}
return dfs
}
// New creates a DiskFS rooted at root and scans the root directory to
// populate the in-memory index.
func New(root string, capacity int64) *DiskFS {
	return newDiskFS(root, capacity, false)
}

// NewSkipInit creates a DiskFS without scanning the root directory; the
// reported size starts at zero.
func NewSkipInit(root string, capacity int64) *DiskFS {
	return newDiskFS(root, capacity, true)
}
func (d *DiskFS) init() {
tstart := time.Now()
d.walk(d.root)
d.sg.Wait()
logger.Logger.Info().
Str("name", d.Name()).
Str("root", d.root).
Str("capacity", units.HumanSize(float64(d.capacity))).
Str("size", units.HumanSize(float64(d.Size()))).
Str("files", fmt.Sprint(len(d.info))).
Str("duration", time.Since(tstart).String()).
Msg("init")
}
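// walk scans path in a background goroutine, spawning another goroutine per
// subdirectory, and adds every file it finds to the index.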
func (d *DiskFS) walk(path string) {
d.sg.Add(1)
go func() {
defer d.sg.Done()
filepath.Walk(path, func(npath string, info os.FileInfo, err error) error {
if path == npath {
return nil
}
if err != nil {
return err
}
if info.IsDir() {
d.walk(npath)
return filepath.SkipDir
}
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
fi := vfs.NewFileInfoFromOS(info, k)
d.info[k] = fi
d.LRU.Add(k, fi)
d.size += info.Size()
d.mu.Unlock()
return nil
})
}()
}
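// Capacity returns the configured maximum size of the cache in bytes.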
func (d *DiskFS) Capacity() int64 {
return d.capacity
}
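// Name returns the name of this VFS implementation.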
func (d *DiskFS) Name() string {
return "DiskFS"
}
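// Size returns the combined size of all indexed files in bytes.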
func (d *DiskFS) Size() int64 {
d.mu.RLock()
defer d.mu.RUnlock()
return d.size
}
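// getKeyLock returns the per-key lock for key, creating it on first use.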
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
}
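// Create creates (or replaces) the file at key and returns a writer for it.
// The entry is indexed and counted toward the cache size when the writer is
// closed.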
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
d.mu.RLock()
if d.capacity > 0 {
if d.size+size > d.capacity {
d.mu.RUnlock()
return nil, vfserror.ErrDiskFull
}
}
d.mu.RUnlock()
keyMu := d.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
	// Check again after acquiring the key lock. If the key already exists,
	// drop the stale index entry in place; calling Delete here would try to
	// re-acquire both the key lock and d.mu and deadlock. The file itself is
	// truncated by os.Create below.
	d.mu.Lock()
	if fi, exists := d.info[key]; exists {
		d.size -= fi.Size()
		d.LRU.Remove(key)
		delete(d.info, key)
	}
	d.mu.Unlock()
path := filepath.Join(d.root, key)
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
}
file, err := os.Create(path)
if err != nil {
return nil, err
}
return &diskWriteCloser{
Writer: file,
onClose: func(n int64) error {
fi, err := os.Stat(path)
if err != nil {
os.Remove(path)
return err
}
d.mu.Lock()
finfo := vfs.NewFileInfoFromOS(fi, key)
d.info[key] = finfo
d.LRU.Add(key, finfo)
d.size += n
d.mu.Unlock()
diskWriteBytes.Add(float64(n))
diskSizeBytes.Set(float64(d.Size()))
return nil
},
key: key,
file: file,
}, nil
}
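// diskWriteCloser wraps the destination file, counts the bytes written, and
// registers the finished entry with the cache on Close.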
type diskWriteCloser struct {
io.Writer
onClose func(int64) error
n int64
key string
file *os.File
}
func (wc *diskWriteCloser) Write(p []byte) (int, error) {
n, err := wc.Writer.Write(p)
wc.n += int64(n)
return n, err
}
// Close closes the underlying file and then registers the finished entry
// with the cache. If either step fails, the partial file is removed and the
// entry is never indexed.
func (wc *diskWriteCloser) Close() error {
	if err := wc.file.Close(); err != nil {
		os.Remove(wc.file.Name())
		return err
	}
	if err := wc.onClose(wc.n); err != nil {
		os.Remove(wc.file.Name())
		return err
	}
	return nil
}
// Delete removes the file stored at key from disk and drops it from the
// cache index.
func (d *DiskFS) Delete(key string) error {
if key == "" {
return vfserror.ErrInvalidKey
}
if key[0] == '/' {
return vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
d.mu.Lock()
fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return vfserror.ErrNotFound
}
d.size -= fi.Size()
d.LRU.Remove(key)
delete(d.info, key)
d.mu.Unlock()
path := filepath.Join(d.root, key)
if err := os.Remove(path); err != nil {
return err
}
diskSizeBytes.Set(float64(d.Size()))
return nil
}
// Open opens the file at key and returns a reader for it, marking the entry
// as recently used.
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
d.mu.Lock()
fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return nil, vfserror.ErrNotFound
}
fi.ATime = time.Now()
d.LRU.MoveToFront(key)
d.mu.Unlock()
path := filepath.Join(d.root, key)
file, err := os.Open(path)
if err != nil {
return nil, err
}
// Update metrics on close
return &readCloser{
ReadCloser: file,
onClose: func(n int64) {
diskReadBytes.Add(float64(n))
},
}, nil
}
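// readCloser wraps an open cache file, counts the bytes read, and reports
// them to the read-bytes metric on Close.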
type readCloser struct {
io.ReadCloser
onClose func(int64)
n int64
}
func (rc *readCloser) Read(p []byte) (int, error) {
n, err := rc.ReadCloser.Read(p)
rc.n += int64(n)
return n, err
}
func (rc *readCloser) Close() error {
err := rc.ReadCloser.Close()
rc.onClose(rc.n)
return err
}
// Stat returns the FileInfo for key from the in-memory index. If key is not
// indexed it returns vfserror.ErrNotFound; it does not fall back to statting
// the file on disk.
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
if key == "" {
return nil, vfserror.ErrInvalidKey
}
if key[0] == '/' {
return nil, vfserror.ErrInvalidKey
}
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
d.mu.RLock()
defer d.mu.RUnlock()
	fi, ok := d.info[key]
	if !ok {
		return nil, vfserror.ErrNotFound
	}
	return fi, nil
}
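// StatAll returns a snapshot copy of the FileInfo for every indexed file.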
func (d *DiskFS) StatAll() []*vfs.FileInfo {
d.mu.RLock()
defer d.mu.RUnlock()
	// Copy each FileInfo so callers cannot mutate the cached entries (and
	// vice versa).
files := make([]*vfs.FileInfo, 0, len(d.info))
for _, v := range d.info {
fi := *v
files = append(files, &fi)
}
return files
}