3 Commits

30e804709f Enhance FileInfo structure and DiskFS functionality
- Added CTime (creation time) and AccessCount fields to FileInfo struct for better file metadata tracking.
- Updated NewFileInfo and NewFileInfoFromOS functions to initialize new fields.
- Enhanced DiskFS to maintain access counts and file metadata, including flushing to JSON files.
- Modified Open and Create methods to increment access counts and set creation times appropriately.
- Updated garbage collection logic to utilize real access counts for files.
2025-07-19 05:29:18 -05:00
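
The flush-to-JSON approach this commit describes is small enough to sketch. The following is a minimal, hypothetical rendering of the idea, assuming a metadata map keyed by cache filename; the fileMeta shape and the filemeta.json path match the diff below, but error handling is simplified and this is not the committed code:

    package disk

    import (
        "encoding/json"
        "os"
        "path/filepath"
    )

    // fileMeta mirrors the per-file metadata the commit tracks.
    type fileMeta struct {
        AccessCount int64
        CTime       int64 // creation time, Unix seconds
    }

    // flushFileMeta persists the metadata map to <root>/filemeta.json,
    // deleting the file when no entries remain.
    func flushFileMeta(root string, meta map[string]fileMeta) error {
        path := filepath.Join(root, "filemeta.json")
        if len(meta) == 0 {
            _ = os.Remove(path) // ignore error if the file never existed
            return nil
        }
        data, err := json.MarshalIndent(meta, "", "  ")
        if err != nil {
            return err
        }
        return os.WriteFile(path, data, 0644)
    }

Note that, as the diff below shows, the committed version rewrites the whole JSON file on every Open/Create/Delete, which is simple but turns every cache read into a disk write.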
56bb1ddc12 Add hop-by-hop header handling in ServeHTTP method
- Introduced a map for hop-by-hop headers to be removed from responses.
- Enhanced cache serving logic to read and filter HTTP responses, ensuring only relevant headers are forwarded.
- Updated cache writing to handle full HTTP responses, improving cache integrity and performance.
2025-07-19 05:07:36 -05:00
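
For reference, Connection, Keep-Alive, Proxy-Authenticate, Proxy-Authorization, TE, Trailer, Transfer-Encoding, and Upgrade are the classic hop-by-hop headers (RFC 2616 §13.5.1); the commit additionally strips Date and Server, which are not hop-by-hop but would otherwise be replayed stale from the cache. A minimal sketch of the serving path, assuming the cache stores the complete raw response (status line, headers, body) so it can be re-parsed with http.ReadResponse; serveCached is an illustrative name, not the committed code:

    package main

    import (
        "bufio"
        "io"
        "net/http"
        "os"
    )

    var hopByHop = map[string]struct{}{
        "Connection": {}, "Keep-Alive": {}, "Proxy-Authenticate": {},
        "Proxy-Authorization": {}, "TE": {}, "Trailer": {},
        "Transfer-Encoding": {}, "Upgrade": {}, "Date": {}, "Server": {},
    }

    // serveCached replays a cached raw HTTP response, forwarding only
    // end-to-end headers to the client.
    func serveCached(w http.ResponseWriter, path string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()
        resp, err := http.ReadResponse(bufio.NewReader(f), nil)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        for k, vv := range resp.Header {
            if _, skip := hopByHop[http.CanonicalHeaderKey(k)]; skip {
                continue // hop-by-hop: never forwarded
            }
            for _, v := range vv {
                w.Header().Add(k, v)
            }
        }
        w.WriteHeader(resp.StatusCode)
        _, err = io.Copy(w, resp.Body)
        return err
    }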
9c65cdb156 Fix HTTP status code for root path in ServeHTTP method to ensure correct response for upstream verification
2025-07-19 04:42:20 -05:00
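
The verification code itself is not part of this diff; the hypothetical probe below only illustrates why a 302 Found from the root path would break a startup check that expects 200 OK (verifyUpstream and its behavior are assumptions for illustration):

    package main

    import (
        "fmt"
        "net/http"
    )

    // verifyUpstream probes the upstream root path at startup and treats
    // anything other than 200 OK as a failed check. Redirects are not
    // followed, so the raw status code is inspected.
    func verifyUpstream(upstream string) error {
        client := &http.Client{
            CheckRedirect: func(req *http.Request, via []*http.Request) error {
                return http.ErrUseLastResponse
            },
        }
        resp, err := client.Get(upstream + "/")
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return fmt.Errorf("upstream check failed: got %d, want %d",
                resp.StatusCode, http.StatusOK)
        }
        return nil
    }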
5 changed files with 157 additions and 63 deletions

View File

@@ -2,6 +2,7 @@
 package steamcache
 
 import (
+	"bufio"
 	"context"
 	"crypto/sha1"
 	"encoding/hex"
@@ -24,6 +25,8 @@ import (
 	"sync"
 	"time"
 
+	"bytes"
+
 	"github.com/docker/go-units"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -146,6 +149,19 @@ func verifyResponseHash(resp *http.Response, bodyData []byte, expectedHash string) bool {
 	return strings.EqualFold(actualHash, expectedHash)
 }
 
+var hopByHopHeaders = map[string]struct{}{
+	"Connection":          {},
+	"Keep-Alive":          {},
+	"Proxy-Authenticate":  {},
+	"Proxy-Authorization": {},
+	"TE":                  {},
+	"Trailer":             {},
+	"Transfer-Encoding":   {},
+	"Upgrade":             {},
+	"Date":                {},
+	"Server":              {},
+}
+
 type SteamCache struct {
 	address  string
 	upstream string
@@ -323,7 +339,7 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if r.URL.Path == "/" {
-		w.WriteHeader(http.StatusFound) // this is used by steamcache2's upstream verification at startup
+		w.WriteHeader(http.StatusOK) // this is used by steamcache2's upstream verification at startup
 		return
 	}
@@ -357,25 +373,41 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
 
-	reader, err := sc.vfs.Open(cacheKey)
+	cachePath := cacheKey // You may want to add a .http or .cache extension for clarity
+	// Try to serve from cache
+	file, err := sc.vfs.Open(cachePath)
 	if err == nil {
-		defer reader.Close()
-		w.Header().Add("X-LanCache-Status", "HIT")
-
-		io.Copy(w, reader)
-
-		logger.Logger.Info().
-			Str("key", cacheKey).
-			Str("host", r.Host).
-			Str("status", "HIT").
-			Dur("duration", time.Since(tstart)).
-			Msg("request")
-
-		requestsTotal.WithLabelValues(r.Method, "200").Inc()
-		cacheStatusTotal.WithLabelValues("HIT").Inc()
-		responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
-
-		return
+		defer file.Close()
+		buf := bufio.NewReader(file)
+		resp, err := http.ReadResponse(buf, nil)
+		if err == nil {
+			// Remove hop-by-hop and server-specific headers
+			for k, vv := range resp.Header {
+				if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
+					continue
+				}
+				for _, v := range vv {
+					w.Header().Add(k, v)
+				}
+			}
+			// Add our own headers
+			w.Header().Set("X-LanCache-Status", "HIT")
+			w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
+			w.WriteHeader(resp.StatusCode)
+			io.Copy(w, resp.Body)
+			resp.Body.Close()
+
+			logger.Logger.Info().
+				Str("key", cacheKey).
+				Str("host", r.Host).
+				Str("status", "HIT").
+				Dur("duration", time.Since(tstart)).
+				Msg("request")
+
+			requestsTotal.WithLabelValues(r.Method, "200").Inc()
+			cacheStatusTotal.WithLabelValues("HIT").Inc()
+			responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
+
+			return
+		}
 	}
 
 	var req *http.Request
@@ -448,8 +480,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	defer resp.Body.Close()
 
-	size := resp.ContentLength
-
 	// Read the entire response body into memory for hash verification
 	bodyData, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -500,15 +530,28 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Write to response (always serve the file)
-	w.Header().Add("X-LanCache-Status", "MISS")
+	// Remove hop-by-hop and server-specific headers
+	for k, vv := range resp.Header {
+		if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
+			continue
+		}
+		for _, v := range vv {
+			w.Header().Add(k, v)
+		}
+	}
+	// Add our own headers
+	w.Header().Set("X-LanCache-Status", "MISS")
+	w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
 	w.Write(bodyData)
 
 	// Only cache the file if hash verification passed (or no hash was present)
 	if hashVerified {
-		writer, _ := sc.vfs.Create(cacheKey, size)
+		writer, _ := sc.vfs.Create(cachePath, int64(0)) // size is not known in advance
 		if writer != nil {
 			defer writer.Close()
-			writer.Write(bodyData)
+			// Write the full HTTP response to cache
+			resp.Body = io.NopCloser(bytes.NewReader(bodyData)) // Reset body for writing
+			resp.Write(writer)
 		}
 	} else {
 		logger.Logger.Warn().

View File

@@ -3,8 +3,10 @@ package disk
 
 import (
 	"container/list"
+	"encoding/json"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"s1d3sw1ped/SteamCache2/steamcache/logger"
@@ -56,12 +58,17 @@ var _ vfs.VFS = (*DiskFS)(nil)
 
 type DiskFS struct {
 	root     string
 	info     map[string]*vfs.FileInfo
 	capacity int64
 	size     int64
 	mu       sync.RWMutex
 	keyLocks sync.Map // map[string]*sync.RWMutex
 	LRU      *lruList
+	accessCounts map[string]int64 // key: filename, value: access count
+	fileMeta     map[string]struct {
+		AccessCount int64
+		CTime       int64
+	} // key: filename
 }
 
 // lruList for LRU eviction
@@ -129,12 +136,17 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
 	}
 
 	dfs := &DiskFS{
 		root:     root,
 		info:     make(map[string]*vfs.FileInfo),
 		capacity: capacity,
 		mu:       sync.RWMutex{},
 		keyLocks: sync.Map{},
 		LRU:      newLruList(),
+		accessCounts: make(map[string]int64),
+		fileMeta: make(map[string]struct {
+			AccessCount int64
+			CTime       int64
+		}),
 	}
 	os.MkdirAll(dfs.root, 0755)
@@ -282,6 +294,15 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
 			diskWriteBytes.Add(float64(n))
 			diskSizeBytes.Set(float64(d.Size()))
 
+			// On new file, set access count to 1
+			finfo.AccessCount = 1
+			finfo.CTime = time.Now()
+			d.fileMeta[key] = struct {
+				AccessCount int64
+				CTime       int64
+			}{1, finfo.CTime.Unix()}
+			flushFileMeta(d.root, d.fileMeta)
+
 			return nil
 		},
 		key: key,
@@ -351,6 +372,10 @@ func (d *DiskFS) Delete(key string) error {
 	diskSizeBytes.Set(float64(d.Size()))
 
+	delete(d.accessCounts, key)
+	delete(d.fileMeta, key)
+	flushFileMeta(d.root, d.fileMeta)
+
 	return nil
 }
@@ -381,9 +406,17 @@ func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
 		return nil, vfserror.ErrNotFound
 	}
 	fi.ATime = time.Now()
+	fi.AccessCount++ // Increment access count
 	d.LRU.MoveToFront(key)
 	d.mu.Unlock()
 
+	fi.AccessCount++
+	d.fileMeta[key] = struct {
+		AccessCount int64
+		CTime       int64
+	}{fi.AccessCount, fi.CTime.Unix()}
+	flushFileMeta(d.root, d.fileMeta)
+
 	path := filepath.Join(d.root, key)
 	path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
 	file, err := os.Open(path)
@@ -461,3 +494,22 @@ func (d *DiskFS) StatAll() []*vfs.FileInfo {
 	return files
 }
+
+func flushAccessCounts(root string, counts map[string]int64) {
+	path := filepath.Join(root, "access_counts.json")
+	data, _ := json.MarshalIndent(counts, "", "  ")
+	_ = ioutil.WriteFile(path, data, 0644)
+}
+
+func flushFileMeta(root string, meta map[string]struct {
+	AccessCount int64
+	CTime       int64
+}) {
+	path := filepath.Join(root, "filemeta.json")
+	if len(meta) == 0 {
+		_ = os.Remove(path)
+		return
+	}
+	data, _ := json.MarshalIndent(meta, "", "  ")
+	_ = ioutil.WriteFile(path, data, 0644)
+}

View File

@@ -7,27 +7,35 @@ import (
 )
 
 type FileInfo struct {
 	name  string
 	size  int64
 	MTime time.Time
 	ATime time.Time
+	CTime time.Time // Creation time
+	AccessCount int64
 }
 
 func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
+	now := time.Now()
 	return &FileInfo{
 		name:  key,
 		size:  size,
 		MTime: modTime,
-		ATime: time.Now(),
+		ATime: now,
+		CTime: now,
+		AccessCount: 0,
 	}
 }
 
 func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
+	now := time.Now()
 	return &FileInfo{
 		name:  key,
 		size:  f.Size(),
 		MTime: f.ModTime(),
-		ATime: time.Now(),
+		ATime: now,
+		CTime: now, // Will be overwritten if loaded from disk
+		AccessCount: 0,
 	}
 }

View File

@@ -308,25 +308,23 @@ func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
 	case *disk.DiskFS:
 		allFiles := fs.StatAll()
 		for _, fi := range allFiles {
-			// For disk, we can't easily track access count, so we'll use 1 as default
 			files = append(files, fileInfoWithMetadata{
 				Name:        fi.Name(),
 				Size:        fi.Size(),
 				MTime:       fi.ModTime(),
 				ATime:       fi.AccessTime(),
-				AccessCount: 1,
+				AccessCount: fi.AccessCount, // Use real access count
 			})
 		}
 	case *memory.MemoryFS:
 		allFiles := fs.StatAll()
 		for _, fi := range allFiles {
-			// For memory, we can't easily track access count, so we'll use 1 as default
 			files = append(files, fileInfoWithMetadata{
 				Name:        fi.Name(),
 				Size:        fi.Size(),
 				MTime:       fi.ModTime(),
 				ATime:       fi.AccessTime(),
-				AccessCount: 1,
+				AccessCount: fi.AccessCount, // Use real access count
 			})
 		}
 	}

View File

@@ -149,15 +149,6 @@ func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
 }
 
 func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
-	m.mu.RLock()
-	if m.capacity > 0 {
-		if m.size+size > m.capacity {
-			m.mu.RUnlock()
-			return nil, vfserror.ErrDiskFull
-		}
-	}
-	m.mu.RUnlock()
-
 	keyMu := m.getKeyLock(key)
 	keyMu.Lock()
 	defer keyMu.Unlock()
@@ -169,18 +160,19 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
 		onClose: func() error {
 			data := buf.Bytes()
 			m.mu.Lock()
+			defer m.mu.Unlock()
 			if f, exists := m.files[key]; exists {
 				m.size -= int64(len(f.data))
 				m.LRU.Remove(key)
 			}
 			fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
+			fi.CTime = time.Now() // Set creation time
 			m.files[key] = &file{
+				fileinfo: fi,
 				data:     data,
-				fileinfo: fi,
 			}
 			m.LRU.Add(key, fi)
 			m.size += int64(len(data))
-			m.mu.Unlock()
 
 			memoryWriteBytes.Add(float64(len(data)))
 			memorySizeBytes.Set(float64(m.Size()))
@@ -232,6 +224,7 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
 		return nil, vfserror.ErrNotFound
 	}
 	f.fileinfo.ATime = time.Now()
+	f.fileinfo.AccessCount++ // Increment access count
 	m.LRU.MoveToFront(key)
 	dataCopy := make([]byte, len(f.data))
 	copy(dataCopy, f.data)