- Added Prometheus client library for metrics collection.
- Refactored garbage collection strategy from random deletion to LRU (Least Recently Used) deletion.
- Introduced per-key locking in cache to prevent race conditions.
- Enhanced logging with structured log messages for cache hits and misses.
- Implemented a retry mechanism for upstream requests with exponential backoff (see the sketch after this list).
- Updated Go modules and indirect dependencies for better compatibility and performance.
- Removed unused sync filesystem implementation.
- Added version initialization to ensure a default version string.
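
The exponential backoff behaviour could look roughly like the sketch below. This is a minimal illustration only; `retryUpstream`, its parameters, and the delays are hypothetical names and values, not code from this PR.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUpstream retries do() up to maxAttempts times, doubling the wait
// between attempts (hypothetical helper for illustration).
func retryUpstream(do func() error, maxAttempts int) error {
	delay := 100 * time.Millisecond
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = do(); err == nil {
			return nil
		}
		time.Sleep(delay)
		delay *= 2 // exponential backoff
	}
	return fmt.Errorf("upstream request failed after %d attempts: %w", maxAttempts, err)
}

func main() {
	calls := 0
	err := retryUpstream(func() error {
		calls++
		if calls < 3 {
			return errors.New("temporary upstream error")
		}
		return nil
	}, 5)
	fmt.Println(err, "after", calls, "calls") // <nil> after 3 calls
}
```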

274 lines · 5.5 KiB · Go
package disk

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"

	"github.com/docker/go-units"
)

// Ensure DiskFS implements VFS.
var _ vfs.VFS = (*DiskFS)(nil)

// DiskFS is a virtual file system that stores files on disk.
type DiskFS struct {
	root string // root directory on disk that backs the cache

	info     map[string]*vfs.FileInfo // in-memory index of stored files, keyed by cache key
	capacity int64                    // maximum total size in bytes
	mu       sync.Mutex               // guards info and on-disk operations
	sg       sync.WaitGroup           // tracks walk goroutines spawned during init

	bytePool sync.Pool // Pool for []byte slices
}

// new is the shared constructor behind New and NewSkipInit. It validates the
// root directory and capacity, creating the root if needed, and optionally
// indexes the files already present on disk.
func new(root string, capacity int64, skipinit bool) *DiskFS {
	if capacity <= 0 {
		panic("disk capacity must be greater than 0") // panic if the capacity is less than or equal to 0
	}

	if root == "" {
		panic("disk root must not be empty") // panic if the root is empty
	}

	fi, err := os.Stat(root)
	if err != nil {
		if !os.IsNotExist(err) {
			panic(err) // panic if the error is something other than not found
		}
		os.Mkdir(root, 0755)    // create the root directory if it does not exist
		fi, err = os.Stat(root) // re-stat to get the file info
		if err != nil {
			panic(err) // panic if the re-stat fails
		}
	}
	if !fi.IsDir() {
		panic("disk root must be a directory") // panic if the root is not a directory
	}

	dfs := &DiskFS{
		root:     root,
		info:     make(map[string]*vfs.FileInfo),
		capacity: capacity,
		mu:       sync.Mutex{},
		sg:       sync.WaitGroup{},
		bytePool: sync.Pool{
			New: func() interface{} { return make([]byte, 0) }, // Initial capacity for pooled slices is 0, will grow as needed
		},
	}

	os.MkdirAll(dfs.root, 0755)

	if !skipinit {
		dfs.init()
	}

	return dfs
}
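
// New creates a DiskFS rooted at root with the given capacity in bytes and
// indexes any files already present under root.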
func New(root string, capacity int64) *DiskFS {
	return new(root, capacity, false)
}
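
// NewSkipInit creates a DiskFS without indexing the files already present
// under root.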
func NewSkipInit(root string, capacity int64) *DiskFS {
	return new(root, capacity, true)
}
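
// Example usage (an illustrative sketch only; the path, key, and capacity
// below are made up and error handling is elided):
//
//	d := New("/var/cache/steamcache", 10<<30) // 10 GiB capacity
//	_ = d.Set("depot/1234/chunk0", []byte("payload"))
//	data, _ := d.Get("depot/1234/chunk0")
//	fi, _ := d.Stat("depot/1234/chunk0")
//	_ = d.Delete("depot/1234/chunk0")
//	_, _ = data, fi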

// init indexes the files already on disk and logs a summary.
func (d *DiskFS) init() {
	tstart := time.Now()

	d.walk(d.root)
	d.sg.Wait()

	logger.Logger.Info().
		Str("name", d.Name()).
		Str("root", d.root).
		Str("capacity", units.HumanSize(float64(d.capacity))).
		Str("size", units.HumanSize(float64(d.Size()))).
		Str("files", fmt.Sprint(len(d.info))).
		Str("duration", time.Since(tstart).String()).
		Msg("init")
}

// walk indexes path in a new goroutine, spawning further goroutines for
// subdirectories; callers wait on d.sg for completion.
func (d *DiskFS) walk(path string) {
	d.sg.Add(1)
	go func() {
		defer d.sg.Done()
		filepath.Walk(path, func(npath string, info os.FileInfo, err error) error {
			if path == npath {
				return nil
			}

			if err != nil {
				return err
			}

			if info.IsDir() {
				d.walk(npath)
				return filepath.SkipDir
			}

			d.mu.Lock()
			k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
			d.info[k] = vfs.NewFileInfoFromOS(info, k)
			d.mu.Unlock()

			return nil
		})
	}()
}
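
// Capacity returns the configured maximum size of the cache in bytes.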
func (d *DiskFS) Capacity() int64 {
	return d.capacity
}
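
// Name returns the name of this VFS implementation.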
func (d *DiskFS) Name() string {
	return "DiskFS"
}
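
// Size returns the combined size in bytes of all files currently tracked.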
func (d *DiskFS) Size() int64 {
	d.mu.Lock()
	defer d.mu.Unlock()

	var size int64
	for _, v := range d.info {
		size += v.Size()
	}
	return size
}
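
// Set writes src to disk under key, replacing any existing entry. It returns
// vfserror.ErrDiskFull if the write would exceed the configured capacity; it
// never evicts other entries itself.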
func (d *DiskFS) Set(key string, src []byte) error {
	if key == "" {
		return vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return vfserror.ErrInvalidKey
	}

	if d.capacity > 0 {
		if size := d.Size() + int64(len(src)); size > d.capacity {
			return vfserror.ErrDiskFull
		}
	}

	if _, err := d.Stat(key); err == nil {
		d.Delete(key)
	}

	d.mu.Lock()
	defer d.mu.Unlock()
	os.MkdirAll(d.root+"/"+filepath.Dir(key), 0755)
	if err := os.WriteFile(d.root+"/"+key, src, 0644); err != nil {
		return err
	}

	fi, err := os.Stat(d.root + "/" + key)
	if err != nil {
		panic(err)
	}

	d.info[key] = vfs.NewFileInfoFromOS(fi, key)

	return nil
}

// Delete deletes the value of key.
func (d *DiskFS) Delete(key string) error {
	if key == "" {
		return vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return vfserror.ErrInvalidKey
	}

	_, err := d.Stat(key)
	if err != nil {
		return err
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	delete(d.info, key)
	if err := os.Remove(filepath.Join(d.root, key)); err != nil {
		return err
	}

	return nil
}

// Get gets the value of key and returns it.
func (d *DiskFS) Get(key string) ([]byte, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	_, err := d.Stat(key)
	if err != nil {
		return nil, err
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	data, err := os.ReadFile(filepath.Join(d.root, key))
	if err != nil {
		return nil, err
	}

	// Copy into a pooled slice; os.ReadFile always allocates, so reuse a
	// buffer from bytePool when it is large enough.
	dst := d.bytePool.Get().([]byte)
	if cap(dst) < len(data) {
		dst = make([]byte, len(data)) // allocate a new slice if the pooled one is too small
	} else {
		dst = dst[:len(data)] // reuse the pooled slice, resliced to fit
	}
	copy(dst, data)
	return dst, nil
}

// Stat returns the FileInfo of key from the in-memory index. If the key is
// not tracked it returns vfserror.ErrNotFound.
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	fi, ok := d.info[key]
	if !ok {
		return nil, vfserror.ErrNotFound
	}
	return fi, nil
}
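
// StatAll returns a copy of the FileInfo for every tracked file.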
func (d *DiskFS) StatAll() []*vfs.FileInfo {
	d.mu.Lock()
	defer d.mu.Unlock()

	// Deep-copy each FileInfo so callers cannot mutate the cached entries (and vice versa).
	files := make([]*vfs.FileInfo, 0, len(d.info))
	for _, v := range d.info {
		fi := *v
		files = append(files, &fi)
	}

	return files
}