6 Commits

Author SHA1 Message Date
30e804709f Enhance FileInfo structure and DiskFS functionality
- Added CTime (creation time) and AccessCount fields to FileInfo struct for better file metadata tracking.
- Updated NewFileInfo and NewFileInfoFromOS functions to initialize new fields.
- Enhanced DiskFS to maintain access counts and file metadata, including flushing to JSON files.
- Modified Open and Create methods to increment access counts and set creation times appropriately.
- Updated garbage collection logic to utilize real access counts for files.
2025-07-19 05:29:18 -05:00
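For illustration, a minimal sketch (not the project's actual code; type, field, and file names are illustrative) of the bookkeeping this commit describes: each open increments a per-file access count, creation time is recorded once, and the metadata map is flushed to a JSON file.

package cache

import (
	"encoding/json"
	"os"
	"path/filepath"
	"sync"
	"time"
)

// meta mirrors the per-file fields the commit adds: creation time and access count.
type meta struct {
	CTime       int64 `json:"ctime"` // Unix seconds, set once when the file is created
	AccessCount int64 `json:"access_count"`
}

type store struct {
	mu    sync.Mutex
	root  string
	files map[string]*meta
}

// touch records one access (the Open path) and persists the whole metadata map.
func (s *store) touch(key string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	m, ok := s.files[key]
	if !ok {
		m = &meta{CTime: time.Now().Unix()} // the Create path would normally set this
		s.files[key] = m
	}
	m.AccessCount++
	data, _ := json.MarshalIndent(s.files, "", "  ")
	_ = os.WriteFile(filepath.Join(s.root, "filemeta.json"), data, 0644)
}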
56bb1ddc12 Add hop-by-hop header handling in ServeHTTP method
- Introduced a map for hop-by-hop headers to be removed from responses.
- Enhanced cache serving logic to read and filter HTTP responses, ensuring only relevant headers are forwarded.
- Updated cache writing to handle full HTTP responses, improving cache integrity and performance.
2025-07-19 05:07:36 -05:00
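As a hedged sketch of the approach this commit describes (standalone example, not the project's code): a cached raw HTTP response is re-parsed with http.ReadResponse and only end-to-end headers are copied to the client.

package cache

import (
	"bufio"
	"io"
	"net/http"
	"os"
)

// hopByHop lists headers that must not be forwarded from a cached response.
var hopByHop = map[string]struct{}{
	"Connection": {}, "Keep-Alive": {}, "Proxy-Authenticate": {}, "Proxy-Authorization": {},
	"TE": {}, "Trailer": {}, "Transfer-Encoding": {}, "Upgrade": {},
}

// serveCached replays a response that was previously stored with (*http.Response).Write.
func serveCached(w http.ResponseWriter, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	resp, err := http.ReadResponse(bufio.NewReader(f), nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	for k, vv := range resp.Header {
		if _, skip := hopByHop[http.CanonicalHeaderKey(k)]; skip {
			continue // drop hop-by-hop headers
		}
		for _, v := range vv {
			w.Header().Add(k, v)
		}
	}
	w.WriteHeader(resp.StatusCode)
	_, err = io.Copy(w, resp.Body)
	return err
}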
9c65cdb156 Fix HTTP status code for root path in ServeHTTP method to ensure correct response for upstream verification
2025-07-19 04:42:20 -05:00
ae013f9a3b Enhance SteamCache configuration and HTTP client settings
- Added upstream server configuration to launch.json for improved connectivity.
- Increased HTTP client timeout from 60s to 120s for better handling of slow responses.
- Updated server timeouts in steamcache.go: increased ReadTimeout to 30s and WriteTimeout to 60s.
- Introduced ReadHeaderTimeout to mitigate slow-header (Slowloris-style) attacks and set MaxHeaderBytes to 1 MB.
- Improved error logging in the Run method to include HTTP status codes for better debugging.
- Adjusted ServeHTTP method to handle root path and metrics endpoint correctly.
2025-07-19 04:40:05 -05:00
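For reference, a hedged sketch of the server-side settings itemized above (standalone example with an illustrative address and handler, not the project's exact wiring):

package main

import (
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              ":8080",
		ReadTimeout:       30 * time.Second,  // full request read deadline
		WriteTimeout:      60 * time.Second,  // response write deadline
		IdleTimeout:       120 * time.Second, // keep-alive idle limit
		ReadHeaderTimeout: 10 * time.Second,  // bounds slow-header clients
		MaxHeaderBytes:    1 << 20,           // 1 MB
		Handler:           http.NotFoundHandler(),
	}
	_ = srv.ListenAndServe()
}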
d94b53c395 Merge pull request 'Update .goreleaser.yaml and enhance HTTP client settings in steamcache.go' (#10) from fix/connection-pooling into main
Reviewed-on: s1d3sw1ped/SteamCache2#10
2025-07-19 09:13:37 +00:00
847931ed43 Update .goreleaser.yaml and enhance HTTP client settings in steamcache.go
- Removed copyright footer from .goreleaser.yaml.
- Increased HTTP client connection settings in steamcache.go for improved performance:
  - MaxIdleConns from 100 to 200
  - MaxIdleConnsPerHost from 10 to 50
  - IdleConnTimeout from 90s to 120s
  - TLSHandshakeTimeout from 10s to 15s
  - ResponseHeaderTimeout from 10s to 30s
  - ExpectContinueTimeout from 1s to 5s
  - Added DisableCompression and ForceAttemptHTTP2 options.
- Removed debug logging for manifest files in ServeHTTP method.
2025-07-19 04:12:56 -05:00
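The transport changes listed above, shown as a standalone hedged sketch (values mirror the bullet list; not necessarily the final code):

package main

import (
	"net"
	"net/http"
	"time"
)

func main() {
	transport := &http.Transport{
		MaxIdleConns:        200,
		MaxIdleConnsPerHost: 50,
		IdleConnTimeout:     120 * time.Second,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout:   15 * time.Second,
		ResponseHeaderTimeout: 30 * time.Second,
		ExpectContinueTimeout: 5 * time.Second,
		DisableCompression:    true, // Steam content is not compressed
		ForceAttemptHTTP2:     true,
	}
	client := &http.Client{Transport: transport, Timeout: 120 * time.Second}
	_ = client
}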
7 changed files with 190 additions and 94 deletions

.goreleaser.yaml

@@ -48,7 +48,6 @@ changelog:
release:
name_template: "{{ .ProjectName }}-{{ .Version }}"
footer: "Copyright (c) 2025 s1d3sw1ped"
gitea_urls:
api: https://git.s1d3sw1ped.com/api/v1

.vscode/launch.json

@@ -23,6 +23,8 @@
"lru",
"--log-level",
"debug",
"--upstream",
"http://192.168.2.5:80",
],
},
{
@@ -40,6 +42,8 @@
"hybrid",
"--log-level",
"debug",
"--upstream",
"http://192.168.2.5:80",
],
},
{
@@ -55,6 +59,8 @@
"lfu",
"--log-level",
"debug",
"--upstream",
"http://192.168.2.5:80",
],
}
]

steamcache.go

@@ -2,6 +2,7 @@
package steamcache
import (
"bufio"
"context"
"crypto/sha1"
"encoding/hex"
@@ -24,6 +25,8 @@ import (
"sync"
"time"
"bytes"
"github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -146,6 +149,19 @@ func verifyResponseHash(resp *http.Response, bodyData []byte, expectedHash strin
return strings.EqualFold(actualHash, expectedHash)
}
var hopByHopHeaders = map[string]struct{}{
"Connection": {},
"Keep-Alive": {},
"Proxy-Authenticate": {},
"Proxy-Authorization": {},
"TE": {},
"Trailer": {},
"Transfer-Encoding": {},
"Upgrade": {},
"Date": {},
"Server": {},
}
type SteamCache struct {
address string
upstream string
@@ -222,21 +238,23 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
}
transport := &http.Transport{
MaxIdleConns: 100,
MaxIdleConnsPerHost: 10,
IdleConnTimeout: 90 * time.Second,
MaxIdleConns: 200, // Increased from 100
MaxIdleConnsPerHost: 50, // Increased from 10
IdleConnTimeout: 120 * time.Second, // Increased from 90s
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSHandshakeTimeout: 15 * time.Second, // Increased from 10s
ResponseHeaderTimeout: 30 * time.Second, // Increased from 10s
ExpectContinueTimeout: 5 * time.Second, // Increased from 1s
DisableCompression: true, // Steam doesn't use compression
ForceAttemptHTTP2: true, // Enable HTTP/2 if available
}
client := &http.Client{
Transport: transport,
Timeout: 60 * time.Second,
Timeout: 120 * time.Second, // Increased from 60s
}
sc := &SteamCache{
@@ -249,10 +267,12 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
diskgc: dgc,
client: client,
server: &http.Server{
Addr: address,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 120 * time.Second,
Addr: address,
ReadTimeout: 30 * time.Second, // Increased
WriteTimeout: 60 * time.Second, // Increased
IdleTimeout: 120 * time.Second, // Good for keep-alive
ReadHeaderTimeout: 10 * time.Second, // New, for header attacks
MaxHeaderBytes: 1 << 20, // 1MB, optional
},
}
@@ -266,7 +286,8 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
if d != nil {
if d.Size() > d.Capacity() {
gc.LRUGC(d, uint(d.Size()-d.Capacity()))
gcHandler := gc.GetGCAlgorithm(gc.GCAlgorithm(diskGC))
gcHandler(d, uint(d.Size()-d.Capacity()))
}
}
@@ -277,7 +298,7 @@ func (sc *SteamCache) Run() {
if sc.upstream != "" {
resp, err := sc.client.Get(sc.upstream)
if err != nil || resp.StatusCode != http.StatusOK {
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
logger.Logger.Error().Err(err).Int("status_code", resp.StatusCode).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
os.Exit(1)
}
resp.Body.Close()
@@ -310,11 +331,6 @@ func (sc *SteamCache) Shutdown() {
}
func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/metrics" {
promhttp.Handler().ServeHTTP(w, r)
return
}
if r.Method != http.MethodGet {
requestsTotal.WithLabelValues(r.Method, "405").Inc()
logger.Logger.Warn().Str("method", r.Method).Msg("Only GET method is supported")
@@ -322,6 +338,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if r.URL.Path == "/" {
w.WriteHeader(http.StatusOK) // this is used by SteamCache2's upstream verification at startup
return
}
if r.URL.String() == "/lancache-heartbeat" {
w.Header().Add("X-LanCache-Processed-By", "SteamCache2")
w.WriteHeader(http.StatusNoContent)
@@ -329,6 +350,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if r.URL.Path == "/metrics" {
promhttp.Handler().ServeHTTP(w, r)
return
}
if strings.HasPrefix(r.URL.String(), "/depot/") {
// trim the query parameters from the URL path
// this is necessary because the cache key should not include query parameters
@@ -347,25 +373,41 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
reader, err := sc.vfs.Open(cacheKey)
cachePath := cacheKey // You may want to add a .http or .cache extension for clarity
// Try to serve from cache
file, err := sc.vfs.Open(cachePath)
if err == nil {
defer reader.Close()
w.Header().Add("X-LanCache-Status", "HIT")
io.Copy(w, reader)
logger.Logger.Info().
Str("key", cacheKey).
Str("host", r.Host).
Str("status", "HIT").
Dur("duration", time.Since(tstart)).
Msg("request")
requestsTotal.WithLabelValues(r.Method, "200").Inc()
cacheStatusTotal.WithLabelValues("HIT").Inc()
responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
return
defer file.Close()
buf := bufio.NewReader(file)
resp, err := http.ReadResponse(buf, nil)
if err == nil {
// Remove hop-by-hop and server-specific headers
for k, vv := range resp.Header {
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
continue
}
for _, v := range vv {
w.Header().Add(k, v)
}
}
// Add our own headers
w.Header().Set("X-LanCache-Status", "HIT")
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
w.WriteHeader(resp.StatusCode)
io.Copy(w, resp.Body)
resp.Body.Close()
logger.Logger.Info().
Str("key", cacheKey).
Str("host", r.Host).
Str("status", "HIT").
Dur("duration", time.Since(tstart)).
Msg("request")
requestsTotal.WithLabelValues(r.Method, "200").Inc()
cacheStatusTotal.WithLabelValues("HIT").Inc()
responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
return
}
}
var req *http.Request
@@ -438,8 +480,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
defer resp.Body.Close()
size := resp.ContentLength
// Read the entire response body into memory for hash verification
bodyData, err := io.ReadAll(resp.Body)
if err != nil {
@@ -453,18 +493,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
filename := filepath.Base(cacheKey)
expectedHash, hasHash := extractHashFromFilename(filename)
// Debug logging for manifest files
if strings.Contains(cacheKey, "manifest") {
logger.Logger.Debug().
Str("key", cacheKey).
Str("filename", filename).
Bool("hasHash", hasHash).
Str("expectedHash", expectedHash).
Int64("content_length_header", resp.ContentLength).
Int("actual_content_length", len(bodyData)).
Msg("Manifest file hash verification debug")
}
// Hash verification using Steam's X-Content-Sha header and content length verification
hashVerified := true
if hasHash {
@@ -502,15 +530,28 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// Write to response (always serve the file)
w.Header().Add("X-LanCache-Status", "MISS")
// Remove hop-by-hop and server-specific headers
for k, vv := range resp.Header {
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
continue
}
for _, v := range vv {
w.Header().Add(k, v)
}
}
// Add our own headers
w.Header().Set("X-LanCache-Status", "MISS")
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
w.Write(bodyData)
// Only cache the file if hash verification passed (or no hash was present)
if hashVerified {
writer, _ := sc.vfs.Create(cacheKey, size)
writer, _ := sc.vfs.Create(cachePath, int64(0)) // size is not known in advance
if writer != nil {
defer writer.Close()
writer.Write(bodyData)
// Write the full HTTP response to cache
resp.Body = io.NopCloser(bytes.NewReader(bodyData)) // Reset body for writing
resp.Write(writer)
}
} else {
logger.Logger.Warn().
@@ -522,7 +563,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Str("key", cacheKey).
Str("host", r.Host).
Str("status", "MISS").
Bool("hash_verified", hasHash).
Dur("duration", time.Since(tstart)).
Msg("request")

package disk (DiskFS)

@@ -3,8 +3,10 @@ package disk
import (
"container/list"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"s1d3sw1ped/SteamCache2/steamcache/logger"
@@ -56,12 +58,17 @@ var _ vfs.VFS = (*DiskFS)(nil)
type DiskFS struct {
root string
info map[string]*vfs.FileInfo
capacity int64
size int64
mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
LRU *lruList
info map[string]*vfs.FileInfo
capacity int64
size int64
mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
LRU *lruList
accessCounts map[string]int64 // key: filename, value: access count
fileMeta map[string]struct {
AccessCount int64
CTime int64
} // key: filename
}
// lruList for LRU eviction
@@ -129,12 +136,17 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
}
dfs := &DiskFS{
root: root,
info: make(map[string]*vfs.FileInfo),
capacity: capacity,
mu: sync.RWMutex{},
keyLocks: sync.Map{},
LRU: newLruList(),
root: root,
info: make(map[string]*vfs.FileInfo),
capacity: capacity,
mu: sync.RWMutex{},
keyLocks: sync.Map{},
LRU: newLruList(),
accessCounts: make(map[string]int64),
fileMeta: make(map[string]struct {
AccessCount int64
CTime int64
}),
}
os.MkdirAll(dfs.root, 0755)
@@ -282,6 +294,15 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
diskWriteBytes.Add(float64(n))
diskSizeBytes.Set(float64(d.Size()))
// On new file, set access count to 1
finfo.AccessCount = 1
finfo.CTime = time.Now()
d.fileMeta[key] = struct {
AccessCount int64
CTime int64
}{1, finfo.CTime.Unix()}
flushFileMeta(d.root, d.fileMeta)
return nil
},
key: key,
@@ -351,6 +372,10 @@ func (d *DiskFS) Delete(key string) error {
diskSizeBytes.Set(float64(d.Size()))
delete(d.accessCounts, key)
delete(d.fileMeta, key)
flushFileMeta(d.root, d.fileMeta)
return nil
}
@@ -381,9 +406,17 @@ func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
return nil, vfserror.ErrNotFound
}
fi.ATime = time.Now()
fi.AccessCount++ // Increment access count
d.LRU.MoveToFront(key)
d.mu.Unlock()
fi.AccessCount++
d.fileMeta[key] = struct {
AccessCount int64
CTime int64
}{fi.AccessCount, fi.CTime.Unix()}
flushFileMeta(d.root, d.fileMeta)
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
file, err := os.Open(path)
@@ -461,3 +494,22 @@ func (d *DiskFS) StatAll() []*vfs.FileInfo {
return files
}
func flushAccessCounts(root string, counts map[string]int64) {
path := filepath.Join(root, "access_counts.json")
data, _ := json.MarshalIndent(counts, "", " ")
_ = ioutil.WriteFile(path, data, 0644)
}
func flushFileMeta(root string, meta map[string]struct {
AccessCount int64
CTime int64
}) {
path := filepath.Join(root, "filemeta.json")
if len(meta) == 0 {
_ = os.Remove(path)
return
}
data, _ := json.MarshalIndent(meta, "", " ")
_ = ioutil.WriteFile(path, data, 0644)
}
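The diff above only shows the flush path; a possible counterpart that reloads filemeta.json at startup could look like the sketch below. loadFileMeta is a hypothetical name, not part of the diff, and it assumes the same imports as the file above (encoding/json, io/ioutil, os, path/filepath).

// loadFileMeta is a hypothetical counterpart to flushFileMeta: it reads
// <root>/filemeta.json back into the same map shape. A missing file is not an error.
func loadFileMeta(root string) (map[string]struct {
	AccessCount int64
	CTime       int64
}, error) {
	meta := make(map[string]struct {
		AccessCount int64
		CTime       int64
	})
	data, err := ioutil.ReadFile(filepath.Join(root, "filemeta.json"))
	if err != nil {
		if os.IsNotExist(err) {
			return meta, nil
		}
		return nil, err
	}
	if err := json.Unmarshal(data, &meta); err != nil {
		return nil, err
	}
	return meta, nil
}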

package vfs (FileInfo)

@@ -7,27 +7,35 @@ import (
)
type FileInfo struct {
name string
size int64
MTime time.Time
ATime time.Time
name string
size int64
MTime time.Time
ATime time.Time
CTime time.Time // Creation time
AccessCount int64
}
func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
now := time.Now()
return &FileInfo{
name: key,
size: size,
MTime: modTime,
ATime: time.Now(),
name: key,
size: size,
MTime: modTime,
ATime: now,
CTime: now,
AccessCount: 0,
}
}
func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
now := time.Now()
return &FileInfo{
name: key,
size: f.Size(),
MTime: f.ModTime(),
ATime: time.Now(),
name: key,
size: f.Size(),
MTime: f.ModTime(),
ATime: now,
CTime: now, // Will be overwritten if loaded from disk
AccessCount: 0,
}
}

package gc (getAllFiles)

@@ -308,25 +308,23 @@ func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
case *disk.DiskFS:
allFiles := fs.StatAll()
for _, fi := range allFiles {
// For disk, we can't easily track access count, so we'll use 1 as default
files = append(files, fileInfoWithMetadata{
Name: fi.Name(),
Size: fi.Size(),
MTime: fi.ModTime(),
ATime: fi.AccessTime(),
AccessCount: 1,
AccessCount: fi.AccessCount, // Use real access count
})
}
case *memory.MemoryFS:
allFiles := fs.StatAll()
for _, fi := range allFiles {
// For memory, we can't easily track access count, so we'll use 1 as default
files = append(files, fileInfoWithMetadata{
Name: fi.Name(),
Size: fi.Size(),
MTime: fi.ModTime(),
ATime: fi.AccessTime(),
AccessCount: 1,
AccessCount: fi.AccessCount, // Use real access count
})
}
}
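As an illustration of how the real access counts can drive eviction, a hedged LFU-style sketch (types and names are illustrative, not the project's gc package):

package gcsketch

import "sort"

// candidate is an illustrative stand-in for a file plus its collected metadata.
type candidate struct {
	Name        string
	Size        int64
	AccessCount int64
}

// pickLFUVictims returns files to evict, least-accessed first, until at least
// need bytes would be freed.
func pickLFUVictims(files []candidate, need uint64) []candidate {
	sort.Slice(files, func(i, j int) bool {
		return files[i].AccessCount < files[j].AccessCount
	})
	var victims []candidate
	var freed uint64
	for _, f := range files {
		if freed >= need {
			break
		}
		victims = append(victims, f)
		freed += uint64(f.Size)
	}
	return victims
}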

package memory (MemoryFS)

@@ -149,15 +149,6 @@ func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
}
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
m.mu.RLock()
if m.capacity > 0 {
if m.size+size > m.capacity {
m.mu.RUnlock()
return nil, vfserror.ErrDiskFull
}
}
m.mu.RUnlock()
keyMu := m.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
@@ -169,18 +160,19 @@ func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
onClose: func() error {
data := buf.Bytes()
m.mu.Lock()
defer m.mu.Unlock()
if f, exists := m.files[key]; exists {
m.size -= int64(len(f.data))
m.LRU.Remove(key)
}
fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
fi.CTime = time.Now() // Set creation time
m.files[key] = &file{
fileinfo: fi,
data: data,
fileinfo: fi,
}
m.LRU.Add(key, fi)
m.size += int64(len(data))
m.mu.Unlock()
memoryWriteBytes.Add(float64(len(data)))
memorySizeBytes.Set(float64(m.Size()))
@@ -232,6 +224,7 @@ func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
return nil, vfserror.ErrNotFound
}
f.fileinfo.ATime = time.Now()
f.fileinfo.AccessCount++ // Increment access count
m.LRU.MoveToFront(key)
dataCopy := make([]byte, len(f.data))
copy(dataCopy, f.data)