3 Commits

56bb1ddc12 Add hop-by-hop header handling in ServeHTTP method
- Introduced a map for hop-by-hop headers to be removed from responses.
- Enhanced cache serving logic to read and filter HTTP responses, ensuring only relevant headers are forwarded.
- Updated cache writing to handle full HTTP responses, improving cache integrity and performance.
2025-07-19 05:07:36 -05:00
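
The caching scheme this commit describes (storing the full HTTP response and filtering hop-by-hop headers on replay) can be illustrated with a small standalone Go sketch; the names and values below are illustrative only and are not the project's code:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// Headers that must not be copied from a stored response to a client.
var hopByHop = map[string]struct{}{
	"Connection":        {},
	"Keep-Alive":        {},
	"Transfer-Encoding": {},
	"Upgrade":           {},
}

func main() {
	// Pretend this response came from the upstream server.
	orig := &http.Response{
		StatusCode: http.StatusOK,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header: http.Header{
			"Content-Type": {"application/octet-stream"},
			"Connection":   {"keep-alive"}, // hop-by-hop, dropped on replay
		},
		Body:          io.NopCloser(strings.NewReader("chunk data")),
		ContentLength: int64(len("chunk data")),
	}

	// "Write to cache": serialize the status line, headers and body.
	var cached bytes.Buffer
	if err := orig.Write(&cached); err != nil {
		panic(err)
	}

	// "Serve from cache": parse the stored bytes back into a response.
	replay, err := http.ReadResponse(bufio.NewReader(&cached), nil)
	if err != nil {
		panic(err)
	}
	defer replay.Body.Close()

	// Forward only end-to-end headers.
	out := http.Header{}
	for k, vv := range replay.Header {
		if _, skip := hopByHop[http.CanonicalHeaderKey(k)]; skip {
			continue
		}
		for _, v := range vv {
			out.Add(k, v)
		}
	}

	body, _ := io.ReadAll(replay.Body)
	fmt.Println(replay.StatusCode, out, string(body))
}

Replaying the stored status line and headers is what lets a cache hit preserve metadata such as Content-Type, which a body-only cache would lose.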
9c65cdb156 Fix HTTP status code for root path in ServeHTTP method to ensure correct response for upstream verification
2025-07-19 04:42:20 -05:00
ae013f9a3b Enhance SteamCache configuration and HTTP client settings
- Added upstream server configuration to launch.json for improved connectivity.
- Increased HTTP client timeout from 60s to 120s for better handling of slow responses.
- Updated server timeouts in steamcache.go: increased ReadTimeout to 30s and WriteTimeout to 60s.
- Introduced ReadHeaderTimeout to mitigate slow-header (Slowloris-style) attacks and set MaxHeaderBytes to 1MB.
- Improved error logging in the Run method to include HTTP status codes for better debugging.
- Adjusted ServeHTTP method to handle root path and metrics endpoint correctly.
2025-07-19 04:40:05 -05:00
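
Taken together, the timeout changes in this commit amount to roughly the following configuration (an illustrative sketch with a placeholder listen address, not the project's code):

package main

import (
	"net/http"
	"time"
)

func main() {
	// Upstream fetches: generous overall deadline for slow responses.
	client := &http.Client{
		Timeout: 120 * time.Second,
	}
	_ = client

	server := &http.Server{
		Addr:              ":8080",           // placeholder listen address
		ReadTimeout:       30 * time.Second,  // whole request, including body
		WriteTimeout:      60 * time.Second,  // time allowed to write a response
		IdleTimeout:       120 * time.Second, // keep-alive connections
		ReadHeaderTimeout: 10 * time.Second,  // limits slow-header (Slowloris-style) clients
		MaxHeaderBytes:    1 << 20,           // 1 MB cap on request headers
	}
	_ = server.ListenAndServe()
}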
2 changed files with 92 additions and 35 deletions

.vscode/launch.json vendored

@@ -23,6 +23,8 @@
 				"lru",
 				"--log-level",
 				"debug",
+				"--upstream",
+				"http://192.168.2.5:80",
 			],
 		},
 		{
@@ -40,6 +42,8 @@
 				"hybrid",
 				"--log-level",
 				"debug",
+				"--upstream",
+				"http://192.168.2.5:80",
 			],
 		},
 		{
@@ -55,6 +59,8 @@
 				"lfu",
 				"--log-level",
 				"debug",
+				"--upstream",
+				"http://192.168.2.5:80",
 			],
 		}
 	]

steamcache.go

@@ -2,6 +2,7 @@
 package steamcache
 
 import (
+	"bufio"
 	"context"
 	"crypto/sha1"
 	"encoding/hex"
@@ -24,6 +25,8 @@ import
 	"sync"
 	"time"
 
+	"bytes"
+
 	"github.com/docker/go-units"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -146,6 +149,19 @@ func verifyResponseHash(resp *http.Response, bodyData []byte, expectedHash strin
 	return strings.EqualFold(actualHash, expectedHash)
 }
 
+var hopByHopHeaders = map[string]struct{}{
+	"Connection":          {},
+	"Keep-Alive":          {},
+	"Proxy-Authenticate":  {},
+	"Proxy-Authorization": {},
+	"TE":                  {},
+	"Trailer":             {},
+	"Transfer-Encoding":   {},
+	"Upgrade":             {},
+	"Date":                {},
+	"Server":              {},
+}
+
 type SteamCache struct {
 	address  string
 	upstream string
@@ -238,7 +254,7 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
 	client := &http.Client{
 		Transport: transport,
-		Timeout:   60 * time.Second,
+		Timeout:   120 * time.Second, // Increased from 60s
 	}
 
 	sc := &SteamCache{
@@ -251,10 +267,12 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
 		diskgc: dgc,
 		client: client,
 		server: &http.Server{
-			Addr:         address,
-			ReadTimeout:  5 * time.Second,
-			WriteTimeout: 10 * time.Second,
-			IdleTimeout:  120 * time.Second,
+			Addr:              address,
+			ReadTimeout:       30 * time.Second,  // Increased
+			WriteTimeout:      60 * time.Second,  // Increased
+			IdleTimeout:       120 * time.Second, // Good for keep-alive
+			ReadHeaderTimeout: 10 * time.Second,  // New, for header attacks
+			MaxHeaderBytes:    1 << 20,           // 1MB, optional
 		},
 	}
@@ -268,7 +286,8 @@ func New(address string, memorySize string, diskSize string, diskPath, upstream,
 	if d != nil {
 		if d.Size() > d.Capacity() {
-			gc.LRUGC(d, uint(d.Size()-d.Capacity()))
+			gcHandler := gc.GetGCAlgorithm(gc.GCAlgorithm(diskGC))
+			gcHandler(d, uint(d.Size()-d.Capacity()))
 		}
 	}
@@ -279,7 +298,7 @@ func (sc *SteamCache) Run() {
 	if sc.upstream != "" {
 		resp, err := sc.client.Get(sc.upstream)
 		if err != nil || resp.StatusCode != http.StatusOK {
-			logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
+			logger.Logger.Error().Err(err).Int("status_code", resp.StatusCode).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
 			os.Exit(1)
 		}
 		resp.Body.Close()
@@ -312,11 +331,6 @@ func (sc *SteamCache) Shutdown() {
 }
 
 func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	if r.URL.Path == "/metrics" {
-		promhttp.Handler().ServeHTTP(w, r)
-		return
-	}
-
 	if r.Method != http.MethodGet {
 		requestsTotal.WithLabelValues(r.Method, "405").Inc()
 		logger.Logger.Warn().Str("method", r.Method).Msg("Only GET method is supported")
@@ -324,6 +338,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	if r.URL.Path == "/" {
+		w.WriteHeader(http.StatusOK) // this is used by steamcache2's upstream verification at startup
+		return
+	}
+
 	if r.URL.String() == "/lancache-heartbeat" {
 		w.Header().Add("X-LanCache-Processed-By", "SteamCache2")
 		w.WriteHeader(http.StatusNoContent)
@@ -331,6 +350,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	if r.URL.Path == "/metrics" {
+		promhttp.Handler().ServeHTTP(w, r)
+		return
+	}
+
 	if strings.HasPrefix(r.URL.String(), "/depot/") {
 		// trim the query parameters from the URL path
 		// this is necessary because the cache key should not include query parameters
@@ -349,25 +373,41 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
 
-	reader, err := sc.vfs.Open(cacheKey)
+	cachePath := cacheKey // You may want to add a .http or .cache extension for clarity
+	// Try to serve from cache
+	file, err := sc.vfs.Open(cachePath)
 	if err == nil {
-		defer reader.Close()
-		w.Header().Add("X-LanCache-Status", "HIT")
-		io.Copy(w, reader)
-		logger.Logger.Info().
-			Str("key", cacheKey).
-			Str("host", r.Host).
-			Str("status", "HIT").
-			Dur("duration", time.Since(tstart)).
-			Msg("request")
-		requestsTotal.WithLabelValues(r.Method, "200").Inc()
-		cacheStatusTotal.WithLabelValues("HIT").Inc()
-		responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
-		return
+		defer file.Close()
+		buf := bufio.NewReader(file)
+		resp, err := http.ReadResponse(buf, nil)
+		if err == nil {
+			// Remove hop-by-hop and server-specific headers
+			for k, vv := range resp.Header {
+				if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
+					continue
+				}
+				for _, v := range vv {
+					w.Header().Add(k, v)
+				}
+			}
+			// Add our own headers
+			w.Header().Set("X-LanCache-Status", "HIT")
+			w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
+			w.WriteHeader(resp.StatusCode)
+			io.Copy(w, resp.Body)
+			resp.Body.Close()
+			logger.Logger.Info().
+				Str("key", cacheKey).
+				Str("host", r.Host).
+				Str("status", "HIT").
+				Dur("duration", time.Since(tstart)).
+				Msg("request")
+			requestsTotal.WithLabelValues(r.Method, "200").Inc()
+			cacheStatusTotal.WithLabelValues("HIT").Inc()
+			responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())
+			return
+		}
 	}
 
 	var req *http.Request
@@ -440,8 +480,6 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	defer resp.Body.Close()
 
-	size := resp.ContentLength
-
 	// Read the entire response body into memory for hash verification
 	bodyData, err := io.ReadAll(resp.Body)
 	if err != nil {
@@ -492,15 +530,28 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Write to response (always serve the file)
-	w.Header().Add("X-LanCache-Status", "MISS")
+	// Remove hop-by-hop and server-specific headers
+	for k, vv := range resp.Header {
+		if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
+			continue
+		}
+		for _, v := range vv {
+			w.Header().Add(k, v)
+		}
+	}
+	// Add our own headers
+	w.Header().Set("X-LanCache-Status", "MISS")
+	w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
 	w.Write(bodyData)
 
 	// Only cache the file if hash verification passed (or no hash was present)
 	if hashVerified {
-		writer, _ := sc.vfs.Create(cacheKey, size)
+		writer, _ := sc.vfs.Create(cachePath, int64(0)) // size is not known in advance
 		if writer != nil {
 			defer writer.Close()
-			writer.Write(bodyData)
+			// Write the full HTTP response to cache
+			resp.Body = io.NopCloser(bytes.NewReader(bodyData)) // Reset body for writing
+			resp.Write(writer)
 		}
 	} else {
 		logger.Logger.Warn().