Enhance FileInfo structure and DiskFS functionality

- Added CTime (creation time) and AccessCount fields to FileInfo struct for better file metadata tracking.
- Updated NewFileInfo and NewFileInfoFromOS functions to initialize new fields.
- Enhanced DiskFS to maintain access counts and file metadata, including flushing to JSON files.
- Modified Open and Create methods to increment access counts and set creation times appropriately.
- Updated garbage collection logic to utilize real access counts for files.
2025-07-19 05:29:18 -05:00
parent 56bb1ddc12
commit 30e804709f
4 changed files with 90 additions and 39 deletions
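
Before the diffs, a minimal, self-contained sketch of the bookkeeping pattern this commit introduces: each key carries an access count and a creation timestamp, Create seeds the count at 1, Open increments it, and the whole map is flushed to a JSON file after every change. The store/fileMeta names, the missing locking, and the use of os.WriteFile are simplifications for illustration; only the field names and the filemeta.json file name come from the diffs below.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// fileMeta mirrors the per-file record the commit persists: how often a
// key has been opened and when it was created (Unix seconds).
type fileMeta struct {
	AccessCount int64
	CTime       int64
}

type store struct {
	root string
	meta map[string]fileMeta
}

// create records a brand-new key; its access count starts at 1.
func (s *store) create(key string) {
	s.meta[key] = fileMeta{AccessCount: 1, CTime: time.Now().Unix()}
	s.flush()
}

// open bumps the access count of an existing key.
func (s *store) open(key string) {
	m := s.meta[key]
	m.AccessCount++
	s.meta[key] = m
	s.flush()
}

// flush writes the whole map to filemeta.json under root, like flushFileMeta
// in the diff (this sketch uses os.WriteFile instead of ioutil.WriteFile).
func (s *store) flush() {
	data, _ := json.MarshalIndent(s.meta, "", "  ")
	_ = os.WriteFile(filepath.Join(s.root, "filemeta.json"), data, 0644)
}

func main() {
	s := &store{root: os.TempDir(), meta: make(map[string]fileMeta)}
	s.create("depot-chunk-0001")
	s.open("depot-chunk-0001")
	fmt.Printf("%+v\n", s.meta["depot-chunk-0001"]) // AccessCount:2 plus the CTime stamp
}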

View File

@@ -3,8 +3,10 @@ package disk
 import (
     "container/list"
+    "encoding/json"
     "fmt"
     "io"
+    "io/ioutil"
     "os"
     "path/filepath"

     "s1d3sw1ped/SteamCache2/steamcache/logger"
@@ -56,12 +58,17 @@ var _ vfs.VFS = (*DiskFS)(nil)
 type DiskFS struct {
     root     string
     info     map[string]*vfs.FileInfo

     capacity int64
     size     int64
     mu       sync.RWMutex
     keyLocks sync.Map // map[string]*sync.RWMutex
     LRU      *lruList
+    accessCounts map[string]int64 // key: filename, value: access count
+    fileMeta     map[string]struct {
+        AccessCount int64
+        CTime       int64
+    } // key: filename
 }

 // lruList for LRU eviction
@@ -129,12 +136,17 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
     }

     dfs := &DiskFS{
         root:     root,
         info:     make(map[string]*vfs.FileInfo),
         capacity: capacity,
         mu:       sync.RWMutex{},
         keyLocks: sync.Map{},
         LRU:      newLruList(),
+        accessCounts: make(map[string]int64),
+        fileMeta: make(map[string]struct {
+            AccessCount int64
+            CTime       int64
+        }),
     }

     os.MkdirAll(dfs.root, 0755)
@@ -282,6 +294,15 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
             diskWriteBytes.Add(float64(n))
             diskSizeBytes.Set(float64(d.Size()))

+            // On new file, set access count to 1
+            finfo.AccessCount = 1
+            finfo.CTime = time.Now()
+            d.fileMeta[key] = struct {
+                AccessCount int64
+                CTime       int64
+            }{1, finfo.CTime.Unix()}
+            flushFileMeta(d.root, d.fileMeta)
+
             return nil
         },
         key: key,
@@ -351,6 +372,10 @@ func (d *DiskFS) Delete(key string) error {
     diskSizeBytes.Set(float64(d.Size()))

+    delete(d.accessCounts, key)
+    delete(d.fileMeta, key)
+    flushFileMeta(d.root, d.fileMeta)
+
     return nil
 }
@@ -381,9 +406,17 @@ func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
         return nil, vfserror.ErrNotFound
     }
     fi.ATime = time.Now()
+    fi.AccessCount++ // Increment access count
     d.LRU.MoveToFront(key)
     d.mu.Unlock()

+    fi.AccessCount++
+    d.fileMeta[key] = struct {
+        AccessCount int64
+        CTime       int64
+    }{fi.AccessCount, fi.CTime.Unix()}
+    flushFileMeta(d.root, d.fileMeta)
+
     path := filepath.Join(d.root, key)
     path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
     file, err := os.Open(path)
@@ -461,3 +494,22 @@ func (d *DiskFS) StatAll() []*vfs.FileInfo {
     return files
 }
+
+func flushAccessCounts(root string, counts map[string]int64) {
+    path := filepath.Join(root, "access_counts.json")
+    data, _ := json.MarshalIndent(counts, "", " ")
+    _ = ioutil.WriteFile(path, data, 0644)
+}
+
+func flushFileMeta(root string, meta map[string]struct {
+    AccessCount int64
+    CTime       int64
+}) {
+    path := filepath.Join(root, "filemeta.json")
+    if len(meta) == 0 {
+        _ = os.Remove(path)
+        return
+    }
+    data, _ := json.MarshalIndent(meta, "", " ")
+    _ = ioutil.WriteFile(path, data, 0644)
+}
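
flushFileMeta only writes the metadata out; the "Will be overwritten if loaded from disk" comment in the next file suggests a matching loader exists (or is planned) elsewhere. That loader is not part of this diff, so the following is a hypothetical sketch of what reading filemeta.json back could look like, reusing the same anonymous struct and the ioutil-style calls this file already uses.

// Hypothetical counterpart to flushFileMeta, not part of this commit:
// read filemeta.json back into the in-memory map. A missing or unreadable
// file simply yields an empty map.
func loadFileMeta(root string) map[string]struct {
	AccessCount int64
	CTime       int64
} {
	meta := make(map[string]struct {
		AccessCount int64
		CTime       int64
	})
	data, err := ioutil.ReadFile(filepath.Join(root, "filemeta.json"))
	if err != nil {
		return meta
	}
	_ = json.Unmarshal(data, &meta)
	return meta
}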

View File

@@ -7,27 +7,35 @@ import (
 )

 type FileInfo struct {
     name string
     size int64

     MTime time.Time
     ATime time.Time
+    CTime time.Time // Creation time
+    AccessCount int64
 }

 func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
+    now := time.Now()
     return &FileInfo{
         name: key,
         size: size,
         MTime: modTime,
-        ATime: time.Now(),
+        ATime: now,
+        CTime: now,
+        AccessCount: 0,
     }
 }

 func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
+    now := time.Now()
     return &FileInfo{
         name: key,
         size: f.Size(),
         MTime: f.ModTime(),
-        ATime: time.Now(),
+        ATime: now,
+        CTime: now, // Will be overwritten if loaded from disk
+        AccessCount: 0,
     }
 }
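
A quick usage sketch of the updated constructor: a fresh FileInfo starts with AccessCount 0 and a CTime stamped from the same time.Now() as its ATime. The vfs import path below is inferred from the logger import in the first file and may differ from the real module layout.

package main

import (
	"fmt"
	"time"

	"s1d3sw1ped/SteamCache2/steamcache/vfs" // assumed path; adjust to the actual layout
)

func main() {
	fi := vfs.NewFileInfo("depot-chunk-0001", 4096, time.Now())
	fmt.Println(fi.AccessCount)           // 0 until the file is opened
	fmt.Println(fi.CTime.Equal(fi.ATime)) // true: both stamped from the same now
}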

View File

@@ -308,25 +308,23 @@ func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
     case *disk.DiskFS:
         allFiles := fs.StatAll()
         for _, fi := range allFiles {
-            // For disk, we can't easily track access count, so we'll use 1 as default
             files = append(files, fileInfoWithMetadata{
                 Name:        fi.Name(),
                 Size:        fi.Size(),
                 MTime:       fi.ModTime(),
                 ATime:       fi.AccessTime(),
-                AccessCount: 1,
+                AccessCount: fi.AccessCount, // Use real access count
             })
         }
     case *memory.MemoryFS:
         allFiles := fs.StatAll()
         for _, fi := range allFiles {
-            // For memory, we can't easily track access count, so we'll use 1 as default
             files = append(files, fileInfoWithMetadata{
                 Name:        fi.Name(),
                 Size:        fi.Size(),
                 MTime:       fi.ModTime(),
                 ATime:       fi.AccessTime(),
-                AccessCount: 1,
+                AccessCount: fi.AccessCount, // Use real access count
             })
         }
     }
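
This hunk only changes what getAllFiles reports; per the commit message, the garbage collector consumes these real access counts downstream. The exact scoring is not shown in this diff, so the sketch below is just one illustrative ordering over the same fileInfoWithMetadata shape (fewest accesses first, oldest ATime as tie-break), not the repository's actual eviction policy.

package main

import (
	"fmt"
	"sort"
	"time"
)

// fileInfoWithMetadata mirrors the fields populated by getAllFiles above.
type fileInfoWithMetadata struct {
	Name        string
	Size        int64
	MTime       time.Time
	ATime       time.Time
	AccessCount int64
}

// evictionOrder sorts files so the first entry is the strongest eviction
// candidate: fewest accesses, then least recently accessed.
func evictionOrder(files []fileInfoWithMetadata) {
	sort.Slice(files, func(i, j int) bool {
		if files[i].AccessCount != files[j].AccessCount {
			return files[i].AccessCount < files[j].AccessCount
		}
		return files[i].ATime.Before(files[j].ATime)
	})
}

func main() {
	files := []fileInfoWithMetadata{
		{Name: "a", AccessCount: 5, ATime: time.Now()},
		{Name: "b", AccessCount: 1, ATime: time.Now().Add(-time.Hour)},
		{Name: "c", AccessCount: 1, ATime: time.Now()},
	}
	evictionOrder(files)
	fmt.Println(files[0].Name) // "b": least accessed and least recently used
}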

View File

@@ -149,15 +149,6 @@ func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
 }

 func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
-    m.mu.RLock()
-    if m.capacity > 0 {
-        if m.size+size > m.capacity {
-            m.mu.RUnlock()
-            return nil, vfserror.ErrDiskFull
-        }
-    }
-    m.mu.RUnlock()
-
     keyMu := m.getKeyLock(key)
     keyMu.Lock()
     defer keyMu.Unlock()
@@ -169,18 +160,19 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
         onClose: func() error {
             data := buf.Bytes()
             m.mu.Lock()
+            defer m.mu.Unlock()
             if f, exists := m.files[key]; exists {
                 m.size -= int64(len(f.data))
                 m.LRU.Remove(key)
             }
             fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
+            fi.CTime = time.Now() // Set creation time
             m.files[key] = &file{
+                fileinfo: fi,
                 data:     data,
-                fileinfo: fi,
             }
             m.LRU.Add(key, fi)
             m.size += int64(len(data))
-            m.mu.Unlock()

             memoryWriteBytes.Add(float64(len(data)))
             memorySizeBytes.Set(float64(m.Size()))
@@ -232,6 +224,7 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
         return nil, vfserror.ErrNotFound
     }
     f.fileinfo.ATime = time.Now()
+    f.fileinfo.AccessCount++ // Increment access count
     m.LRU.MoveToFront(key)
     dataCopy := make([]byte, len(f.data))
     copy(dataCopy, f.data)
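
In the onClose rewrite above, the manual m.mu.Unlock() near the end is replaced by defer m.mu.Unlock() immediately after Lock(), so the mutex is released on every return path (and on panic) rather than only at the one explicit unlock site; it also means the metrics calls at the end of onClose now run with the lock held. A tiny generic sketch of that pattern, using hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu sync.Mutex
	n  int
}

// inc takes the lock and immediately defers the unlock, mirroring the
// onClose rewrite: every return path releases the mutex, at the cost of
// holding it for the rest of the function body.
func (c *counter) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

func main() {
	c := &counter{}
	c.inc()
	c.inc()
	fmt.Println(c.n) // 2
}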