feat: update dependencies and improve caching mechanism

- Added Prometheus client library for metrics collection.
- Refactored garbage collection strategy from random deletion to LRU (Least Recently Used) deletion.
- Introduced per-key locking in cache to prevent race conditions.
- Enhanced logging with structured log messages for cache hits and misses.
- Implemented a retry mechanism for upstream requests with exponential backoff.
- Updated Go modules and indirect dependencies for better compatibility and performance.
- Removed unused sync filesystem implementation.
- Added version initialization to ensure a default version string.
2025-07-12 06:43:00 -05:00
parent 8c1bb695b8
commit f378d0e81f
13 changed files with 326 additions and 219 deletions

.gitignore

@@ -1,3 +1,5 @@
dist/
tmp/
__*.exe
.smashed.txt
.smashignore


@@ -3,6 +3,8 @@ package cmd
import (
"os"
"s1d3sw1ped/SteamCache2/steamcache"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/version"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
@@ -16,8 +18,9 @@ var (
diskpath string
upstream string
pprof bool
verbose bool
pprof bool
logLevel string
logFormat string
)
var rootCmd = &cobra.Command{
@@ -29,9 +32,27 @@ var rootCmd = &cobra.Command{
By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
significantly improving download times and reducing the load on the internet connection.`,
Run: func(cmd *cobra.Command, args []string) {
if verbose {
// Configure logging
switch logLevel {
case "debug":
zerolog.SetGlobalLevel(zerolog.DebugLevel)
case "error":
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
case "info":
zerolog.SetGlobalLevel(zerolog.InfoLevel)
default:
zerolog.SetGlobalLevel(zerolog.InfoLevel) // default to info for unrecognized levels
}
if logFormat == "json" {
logger.Logger = zerolog.New(os.Stderr).With().Timestamp().Logger() // zerolog emits structured JSON by default
} else {
logger.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
}
logger.Logger.Info().
Msg("starting SteamCache2 " + version.Version)
sc := steamcache.New(
":80",
@@ -43,7 +64,14 @@ var rootCmd = &cobra.Command{
upstream,
pprof,
)
logger.Logger.Info().
Msg("starting SteamCache2 on port 80")
sc.Run()
logger.Logger.Info().Msg("SteamCache2 stopped")
os.Exit(0)
},
}
@@ -67,5 +95,7 @@ func init() {
rootCmd.Flags().BoolVarP(&pprof, "pprof", "P", false, "Enable pprof")
rootCmd.Flags().MarkHidden("pprof")
rootCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging")
rootCmd.Flags().StringVarP(&logLevel, "log-level", "l", "info", "Logging level: debug, info, error")
rootCmd.Flags().StringVarP(&logFormat, "log-format", "f", "console", "Logging format: json, console")
}
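
Usage note: log verbosity and output format are now runtime flags (for example, --log-level debug --log-format json); the boolean --verbose switch they replace is removed in the same hunk.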

go.mod

@@ -4,15 +4,23 @@ go 1.23.0
require (
github.com/docker/go-units v0.5.0
github.com/prometheus/client_golang v1.22.0
github.com/rs/zerolog v1.33.0
github.com/spf13/cobra v1.8.1
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/sys v0.12.0 // indirect
golang.org/x/sys v0.30.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
)

go.sum

@@ -1,16 +1,40 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -19,11 +43,17 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -4,9 +4,8 @@ import (
"runtime/debug"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"sort"
"time"
"golang.org/x/exp/rand"
)
func init() {
@@ -14,34 +13,30 @@ func init() {
debug.SetGCPercent(50)
}
// RandomGC randomly deletes files until we've reclaimed enough space.
func randomgc(vfss vfs.VFS, size uint) (uint, uint) {
// Randomly delete files until we've reclaimed enough space.
random := func(vfss vfs.VFS, stats []*vfs.FileInfo) int64 {
randfile := stats[rand.Intn(len(stats))]
sz := randfile.Size()
err := vfss.Delete(randfile.Name())
if err != nil {
return 0
}
return sz
}
// lruGC deletes files in LRU order until enough space is reclaimed.
func lruGC(vfss vfs.VFS, size uint) (uint, uint) {
deletions := 0
targetreclaim := int64(size)
var reclaimed int64
var reclaimed uint
stats := vfss.StatAll()
for {
if reclaimed >= targetreclaim {
sort.Slice(stats, func(i, j int) bool {
return stats[i].AccessTime().Before(stats[j].AccessTime())
})
for _, s := range stats {
sz := uint(s.Size())
err := vfss.Delete(s.Name())
if err != nil {
continue
}
reclaimed += sz
deletions++
if reclaimed >= size {
break
}
reclaimed += random(vfss, stats)
deletions++
}
return uint(reclaimed), uint(deletions)
return reclaimed, uint(deletions)
}
func cachehandler(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
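
For reference, a minimal self-contained sketch of the LRU policy introduced here (fileInfo and lruReclaim are illustrative stand-ins; the real lruGC works on vfs.FileInfo from StatAll and actually deletes files):

package main

import (
    "fmt"
    "sort"
    "time"
)

type fileInfo struct {
    name  string
    size  uint
    atime time.Time
}

// lruReclaim sorts by access time, oldest first, and "deletes" until the
// target number of bytes has been reclaimed.
func lruReclaim(files []fileInfo, target uint) (reclaimed, deleted uint) {
    sort.Slice(files, func(i, j int) bool {
        return files[i].atime.Before(files[j].atime)
    })
    for _, f := range files {
        reclaimed += f.size
        deleted++
        if reclaimed >= target {
            break
        }
    }
    return
}

func main() {
    now := time.Now()
    files := []fileInfo{
        {"a", 100, now.Add(-3 * time.Hour)},
        {"b", 200, now.Add(-1 * time.Hour)},
        {"c", 300, now.Add(-2 * time.Hour)},
    }
    r, d := lruReclaim(files, 250)
    fmt.Println(r, d) // 400 2: oldest files "a" and "c" go first; "b" survives
}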


@@ -1,13 +1,7 @@
package logger
import (
"os"
"github.com/rs/zerolog"
)
func init() {
zerolog.SetGlobalLevel(zerolog.InfoLevel)
}
var Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
var Logger zerolog.Logger
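
Note: the logger package no longer configures anything at import time; it only declares the shared Logger variable, and the root command is now responsible for setting the level, writer, and timestamping before anything logs through it.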


@@ -1,29 +1,45 @@
package steamcache
import (
"fmt"
"io"
"net/http"
"net/url"
"os"
"runtime"
"s1d3sw1ped/SteamCache2/steamcache/avgcachestate"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/version"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cache"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/disk"
"s1d3sw1ped/SteamCache2/vfs/gc"
"s1d3sw1ped/SteamCache2/vfs/memory"
syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
// syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
"strings"
"sync"
"time"
pprof "net/http/pprof"
"github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
requestsTotal = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "http_requests_total",
Help: "Total number of HTTP requests",
},
[]string{"method", "status"},
)
cacheHitRate = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "cache_hit_rate",
Help: "Cache hit rate",
},
)
)
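
For reference, a minimal sketch of how promauto collectors like the ones above end up on /metrics (metric name and port are illustrative):

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// promauto registers collectors with the default registry at declaration
// time; promhttp.Handler() then serves everything in that registry.
var demoRequests = promauto.NewCounterVec(
    prometheus.CounterOpts{Name: "demo_requests_total", Help: "Total requests."},
    []string{"method", "status"},
)

func main() {
    demoRequests.WithLabelValues("GET", "200").Inc()
    http.Handle("/metrics", promhttp.Handler())
    http.ListenAndServe(":2112", nil)
}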
type SteamCache struct {
@@ -40,9 +56,6 @@ type SteamCache struct {
diskgc *gc.GCFS
hits *avgcachestate.AvgCacheState
dirty bool
mu sync.Mutex
}
func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath, upstream string, pprof bool) *SteamCache {
@@ -64,14 +77,14 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
var mgc *gc.GCFS
if memorysize > 0 {
m = memory.New(memorysize)
mgc = gc.New(m, memoryMultiplier, randomgc)
mgc = gc.New(m, memoryMultiplier, lruGC)
}
var d *disk.DiskFS
var dgc *gc.GCFS
if disksize > 0 {
d = disk.New(diskPath, disksize)
dgc = gc.New(d, diskMultiplier, randomgc)
dgc = gc.New(d, diskMultiplier, lruGC)
}
// configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes
@@ -98,7 +111,8 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
pprof: pprof,
upstream: upstream,
address: address,
vfs: syncfs.New(c),
// vfs: syncfs.New(c),
vfs: c,
memory: m,
disk: d,
@@ -111,7 +125,7 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
if d != nil {
if d.Size() > d.Capacity() {
randomgc(d, uint(d.Size()-d.Capacity()))
lruGC(d, uint(d.Size()-d.Capacity()))
}
}
@@ -127,18 +141,6 @@ func (sc *SteamCache) Run() {
}
}
sc.mu.Lock()
sc.dirty = true
sc.mu.Unlock()
sc.LogStats()
t := time.NewTicker(1 * time.Second)
go func() {
for range t.C {
sc.LogStats()
}
}()
err := http.ListenAndServe(sc.address, sc)
if err != nil {
if err == http.ErrServerClosed {
@@ -150,71 +152,6 @@ func (sc *SteamCache) Run() {
}
}
func (sc *SteamCache) LogStats() {
sc.mu.Lock()
defer sc.mu.Unlock()
if sc.dirty {
up := sc.upstream
if up == "" {
up = "{host in request}"
}
logger.Logger.Info().Str("address", sc.address).Str("version", version.Version).Str("upstream", up).Msg("listening")
if sc.memory != nil { // only log memory if memory is enabled
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.memorygc.Stats()
logger.Logger.Info().
Str("size", units.HumanSize(float64(sc.memory.Size()))).
Str("capacity", units.HumanSize(float64(sc.memory.Capacity()))).
Str("files", fmt.Sprintf("%d", len(sc.memory.StatAll()))).
Msg("memory")
logger.Logger.Info().
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
Uint("files_total", lifetimeFiles).
Str("data", units.HumanSize(float64(reclaimedBytes))).
Uint("files", deletedFiles).
Str("gc_time", gcTime.String()).
Msg("memory_gc")
}
if sc.disk != nil { // only log disk if disk is enabled
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.diskgc.Stats()
logger.Logger.Info().
Str("size", units.HumanSize(float64(sc.disk.Size()))).
Str("capacity", units.HumanSize(float64(sc.disk.Capacity()))).
Str("files", fmt.Sprintf("%d", len(sc.disk.StatAll()))).
Msg("disk")
logger.Logger.Info().
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
Uint("files_total", lifetimeFiles).
Str("data", units.HumanSize(float64(reclaimedBytes))).
Uint("files", deletedFiles).
Str("gc_time", gcTime.String()).
Msg("disk_gc")
}
// log golang Garbage Collection stats
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Logger.Info().
Str("alloc", units.HumanSize(float64(m.Alloc))).
Str("sys", units.HumanSize(float64(m.Sys))).
Msg("app_gc")
logger.Logger.Info().
Str("hitrate", fmt.Sprintf("%.2f%%", sc.hits.Avg()*100)).
Msg("cache")
logger.Logger.Info().Msg("") // empty line to separate log entries for better readability
sc.dirty = false
}
}
func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if sc.pprof && r.URL.Path == "/debug/pprof/" {
pprof.Index(w, r)
@@ -223,8 +160,13 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
pprof.Handler(strings.TrimPrefix(r.URL.Path, "/debug/pprof/")).ServeHTTP(w, r)
return
}
if r.URL.Path == "/metrics" {
promhttp.Handler().ServeHTTP(w, r)
return
}
if r.Method != http.MethodGet {
requestsTotal.WithLabelValues(r.Method, "405").Inc()
http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
return
}
@@ -236,24 +178,33 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
sc.mu.Lock()
sc.dirty = true
sc.mu.Unlock()
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to detect cache processing; Steam itself may too
tstart := time.Now()
cacheKey := strings.ReplaceAll(r.URL.String()[1:], "\\", "/") // normalize backslashes to forward slashes; shouldn't be necessary, but just in case
if cacheKey == "" {
requestsTotal.WithLabelValues(r.Method, "400").Inc()
http.Error(w, "Invalid URL", http.StatusBadRequest)
return
}
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to detect cache processing; Steam itself may too
data, err := sc.vfs.Get(cacheKey)
if err == nil {
sc.hits.Add(cachestate.CacheStateHit)
w.Header().Add("X-LanCache-Status", "HIT")
requestsTotal.WithLabelValues(r.Method, "200").Inc()
cacheHitRate.Set(sc.hits.Avg())
w.Write(data)
logger.Logger.Debug().Str("key", r.URL.String()).Msg("cache")
logger.Logger.Info().
Str("key", cacheKey).
Str("host", r.Host).
Str("status", "HIT").
Int64("size", int64(len(data))).
Dur("duration", time.Since(tstart)).
Msg("request")
return
}
@@ -261,17 +212,18 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
ur, err := url.JoinPath(sc.upstream, r.URL.String())
if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
return
}
req, err = http.NewRequest(http.MethodGet, ur, nil)
if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
http.Error(w, "Failed to create request", http.StatusInternalServerError)
return
}
req.Host = r.Host
logger.Logger.Debug().Str("key", cacheKey).Str("host", sc.upstream).Msg("upstream")
} else { // if no upstream server is configured, proxy the request to the host specified in the request
host := r.Host
if r.Header.Get("X-Sls-Https") == "enable" {
@@ -282,35 +234,51 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ur, err := url.JoinPath(host, r.URL.String())
if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
return
}
req, err = http.NewRequest(http.MethodGet, ur, nil)
if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
http.Error(w, "Failed to create request", http.StatusInternalServerError)
return
}
logger.Logger.Debug().Str("key", cacheKey).Str("host", host).Msg("forward")
}
req.Header.Add("X-Sls-Https", r.Header.Get("X-Sls-Https"))
req.Header.Add("User-Agent", r.Header.Get("User-Agent"))
resp, err := http.DefaultClient.Do(req)
if err != nil {
// Copy headers from the original request to the new request
for key, values := range r.Header {
for _, value := range values {
req.Header.Add(key, value)
}
}
// req.Header.Add("X-Sls-Https", r.Header.Get("X-Sls-Https"))
// req.Header.Add("User-Agent", r.Header.Get("User-Agent"))
// Retry the upstream fetch on error or non-200, backing off between attempts
backoffSchedule := []time.Duration{1 * time.Second, 3 * time.Second, 10 * time.Second}
var resp *http.Response
for i, backoff := range backoffSchedule {
resp, err = http.DefaultClient.Do(req)
if err == nil && resp.StatusCode == http.StatusOK {
break
}
if err == nil {
resp.Body.Close() // close failed responses so the connection can be reused
}
if i < len(backoffSchedule)-1 {
time.Sleep(backoff)
}
}
if err != nil || resp.StatusCode != http.StatusOK {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
http.Error(w, "Failed to fetch the requested URL", resp.StatusCode)
return
}
body, err := io.ReadAll(resp.Body)
if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
http.Error(w, "Failed to read response body", http.StatusInternalServerError)
return
}
@@ -318,5 +286,16 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
sc.vfs.Set(cacheKey, body)
sc.hits.Add(cachestate.CacheStateMiss)
w.Header().Add("X-LanCache-Status", "MISS")
requestsTotal.WithLabelValues(r.Method, "200").Inc()
cacheHitRate.Set(sc.hits.Avg())
w.Write(body)
logger.Logger.Info().
Str("key", cacheKey).
Str("host", r.Host).
Str("status", "MISS").
Int64("size", int64(len(body))).
Dur("duration", time.Since(tstart)).
Msg("request")
}
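
The retry loop in isolation, as a hedged sketch (fetchWithBackoff is an invented helper name; the 1s/3s/10s schedule mirrors the handler's, a coarse exponential backoff without jitter):

package main

import (
    "fmt"
    "net/http"
    "time"
)

// fetchWithBackoff retries a GET on error or non-200, sleeping per a fixed
// schedule between attempts.
func fetchWithBackoff(url string, schedule []time.Duration) (*http.Response, error) {
    var lastErr error
    for i, backoff := range schedule {
        resp, err := http.Get(url)
        if err == nil && resp.StatusCode == http.StatusOK {
            return resp, nil
        }
        if err == nil {
            resp.Body.Close() // close failed responses so connections are reused
            lastErr = fmt.Errorf("unexpected status: %s", resp.Status)
        } else {
            lastErr = err
        }
        if i < len(schedule)-1 {
            time.Sleep(backoff)
        }
    }
    return nil, fmt.Errorf("all %d attempts failed: %w", len(schedule), lastErr)
}

func main() {
    resp, err := fetchWithBackoff("http://example.com/", []time.Duration{time.Second, 3 * time.Second, 10 * time.Second})
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}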


@@ -15,9 +15,6 @@ func TestCaching(t *testing.T) {
sc := New("localhost:8080", "1GB", 10, "1GB", 100, td, "", false)
sc.dirty = true
sc.LogStats()
if err := sc.vfs.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
}
@@ -25,9 +22,6 @@ func TestCaching(t *testing.T) {
t.Errorf("Set failed: %v", err)
}
sc.dirty = true
sc.LogStats()
if sc.diskgc.Size() != 17 {
t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
}


@@ -1,3 +1,9 @@
package version
var Version string
func init() {
if Version == "" {
Version = "0.0.0-dev"
}
}
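
The empty default means local builds report 0.0.0-dev, while release builds can inject a real version at link time with Go's standard -X linker flag, along the lines of (version string illustrative; the import path matches the module seen above):

go build -ldflags "-X s1d3sw1ped/SteamCache2/version.Version=v1.2.3"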

vfs/cache/cache.go

@@ -5,6 +5,7 @@ import (
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"sync"
)
// Ensure CacheFS implements VFS.
@@ -16,6 +17,8 @@ type CacheFS struct {
slow vfs.VFS
cacheHandler CacheHandler
keyLocks sync.Map // map[string]*sync.RWMutex for per-key locks
}
type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
@@ -24,6 +27,7 @@ type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
func New(cacheHandler CacheHandler) *CacheFS {
return &CacheFS{
cacheHandler: cacheHandler,
keyLocks: sync.Map{},
}
}
@@ -39,6 +43,12 @@ func (c *CacheFS) SetFast(vfs vfs.VFS) {
c.fast = vfs
}
// getKeyLock returns a RWMutex for the given key, creating it if necessary.
func (c *CacheFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := c.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
}
// cacheState returns the state of the file at key.
func (c *CacheFS) cacheState(key string) cachestate.CacheState {
if c.fast != nil {
@@ -65,6 +75,10 @@ func (c *CacheFS) Size() int64 {
// Set sets the file at key to src. If the file is already in the cache, it is replaced.
func (c *CacheFS) Set(key string, src []byte) error {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
state := c.cacheState(key)
switch state {
@@ -82,6 +96,10 @@ func (c *CacheFS) Set(key string, src []byte) error {
// Delete deletes the file at key from the cache.
func (c *CacheFS) Delete(key string) error {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
if c.fast != nil {
c.fast.Delete(key)
}
@@ -96,6 +114,10 @@ func (c *CacheFS) Get(key string) ([]byte, error) {
// GetS returns the file at key. If the file is not in the cache, it is fetched from the storage. It also returns the cache state.
func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
mu := c.getKeyLock(key)
mu.RLock()
defer mu.RUnlock()
state := c.cacheState(key)
switch state {
@@ -130,6 +152,10 @@ func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
// Stat returns information about the file at key.
// Warning: This will return information about the file in the fastest storage its in.
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
mu := c.getKeyLock(key)
mu.RLock()
defer mu.RUnlock()
state := c.cacheState(key)
switch state {
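
A compressed sketch of the per-key locking pattern (withKeyLock is an illustrative helper, not in the codebase). One caveat visible in these hunks: nothing ever deletes from keyLocks, so the lock map grows with the number of distinct keys seen.

package main

import (
    "fmt"
    "sync"
)

var keyLocks sync.Map

// withKeyLock relies on LoadOrStore returning the existing mutex when one is
// already stored, so all goroutines touching a key share a single lock.
func withKeyLock(key string, fn func()) {
    mu, _ := keyLocks.LoadOrStore(key, &sync.RWMutex{})
    lock := mu.(*sync.RWMutex)
    lock.Lock()
    defer lock.Unlock()
    fn()
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            withKeyLock("depot/123/chunk0", func() { fmt.Println("writer", n) })
        }(i)
    }
    wg.Wait()
}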


@@ -25,6 +25,8 @@ type DiskFS struct {
capacity int64
mu sync.Mutex
sg sync.WaitGroup
bytePool sync.Pool // Pool for []byte slices
}
// New creates a new DiskFS.
@@ -42,6 +44,11 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
if !os.IsNotExist(err) {
panic(err) // panic if the error is something other than not found
}
os.Mkdir(root, 0755) // create the root directory if it does not exist
fi, err = os.Stat(root) // re-stat to get the file info
if err != nil {
panic(err) // panic if the re-stat fails
}
}
if !fi.IsDir() {
panic("disk root must be a directory") // panic if the root is not a directory
@@ -53,6 +60,9 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
capacity: capacity,
mu: sync.Mutex{},
sg: sync.WaitGroup{},
bytePool: sync.Pool{
New: func() interface{} { return make([]byte, 0) }, // start empty; buffers returned via Put keep their grown capacity
},
}
os.MkdirAll(dfs.root, 0755)
@@ -73,8 +83,6 @@ func NewSkipInit(root string, capacity int64) *DiskFS {
}
func (d *DiskFS) init() {
// logger.Logger.Info().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Msg("init")
tstart := time.Now()
d.walk(d.root)
@@ -110,11 +118,9 @@ func (d *DiskFS) walk(path string) {
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
logger.Logger.Debug().Str("name", k).Str("root", d.root).Msg("walk")
d.info[k] = vfs.NewFileInfoFromOS(info, k)
d.mu.Unlock()
// logger.Logger.Debug().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Str("path", npath).Msg("init")
return nil
})
}()
@@ -153,10 +159,7 @@ func (d *DiskFS) Set(key string, src []byte) error {
}
}
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("set")
if _, err := d.Stat(key); err == nil {
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("delete")
d.Delete(key)
}
@@ -224,7 +227,16 @@ func (d *DiskFS) Get(key string) ([]byte, error) {
return nil, err
}
return data, nil
// Copy into a pooled buffer; os.ReadFile always allocates, so pooling only helps if callers eventually Put buffers back
dst := d.bytePool.Get().([]byte)
if cap(dst) < len(data) {
dst = make([]byte, len(data)) // pooled slice too small; allocate fresh
} else {
dst = dst[:len(data)] // reuse pooled capacity, resized to fit
}
copy(dst, data)
return dst, nil
}
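
The same buffer-pool pattern appears in both DiskFS and MemoryFS; here is its contract in miniature (names illustrative). A sync.Pool only pays off when buffers are eventually Put back: in the hunks shown, MemoryFS recycles on Delete, while DiskFS hands pooled slices to callers with no return path, so its Get pool rarely helps.

package main

import (
    "fmt"
    "sync"
)

// bufPool hands out byte slices; Get falls back to New when the pool is empty.
var bufPool = sync.Pool{
    New: func() interface{} { return make([]byte, 0) },
}

// copyToPooled copies src into a pooled buffer, allocating only when the
// pooled buffer is too small.
func copyToPooled(src []byte) []byte {
    buf := bufPool.Get().([]byte)
    if cap(buf) < len(src) {
        buf = make([]byte, len(src))
    } else {
        buf = buf[:len(src)]
    }
    copy(buf, src)
    return buf
}

func main() {
    b := copyToPooled([]byte("hello"))
    fmt.Println(string(b))
    bufPool.Put(b[:0]) // recycle: length reset, capacity kept
}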
// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
@@ -236,8 +248,6 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
return nil, vfserror.ErrInvalidKey
}
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("stat")
d.mu.Lock()
defer d.mu.Unlock()


@@ -1,10 +1,13 @@
package memory
import (
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/vfserror"
"sync"
"time"
"github.com/docker/go-units"
)
// Ensure MemoryFS implements VFS.
@@ -21,6 +24,8 @@ type MemoryFS struct {
files map[string]*file
capacity int64
mu sync.Mutex
bytePool sync.Pool // Pool for []byte slices
}
// New creates a new MemoryFS.
@@ -29,10 +34,18 @@ func New(capacity int64) *MemoryFS {
panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
}
logger.Logger.Info().
Str("name", "MemoryFS").
Str("capacity", units.HumanSize(float64(capacity))).
Msg("init")
return &MemoryFS{
files: make(map[string]*file),
capacity: capacity,
mu: sync.Mutex{},
bytePool: sync.Pool{
New: func() interface{} { return make([]byte, 0) }, // start empty; buffers returned via Put keep their grown capacity
},
}
}
@@ -67,13 +80,22 @@ func (m *MemoryFS) Set(key string, src []byte) error {
m.mu.Lock()
defer m.mu.Unlock()
// Use pooled slice
data := m.bytePool.Get().([]byte)
if cap(data) < len(src) {
data = make([]byte, len(src)) // pooled slice too small; allocate fresh
} else {
data = data[:len(src)] // reuse the pool slice, but resize it to fit
}
copy(data, src)
m.files[key] = &file{
fileinfo: vfs.NewFileInfo(
key,
int64(len(src)),
time.Now(),
),
data: src,
data: data,
}
return nil
@@ -88,6 +110,11 @@ func (m *MemoryFS) Delete(key string) error {
m.mu.Lock()
defer m.mu.Unlock()
// Return data to pool
if f, ok := m.files[key]; ok {
m.bytePool.Put(f.data)
}
delete(m.files, key)
return nil
@@ -106,6 +133,12 @@ func (m *MemoryFS) Get(key string) ([]byte, error) {
dst := make([]byte, len(m.files[key].data))
copy(dst, m.files[key].data)
logger.Logger.Debug().
Str("name", key).
Str("status", "GET").
Int64("size", int64(len(dst))).
Msg("get file from memory")
return dst, nil
}
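
Note: Get still returns a freshly allocated defensive copy rather than a pooled buffer, so callers can never mutate cached bytes in place; only Set draws from the pool and only Delete returns to it.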


@@ -1,76 +1,76 @@
package sync
import (
"fmt"
"s1d3sw1ped/SteamCache2/vfs"
"sync"
)
// import (
// "fmt"
// "s1d3sw1ped/SteamCache2/vfs"
// "sync"
// )
// Ensure SyncFS implements VFS.
var _ vfs.VFS = (*SyncFS)(nil)
// // Ensure SyncFS implements VFS.
// var _ vfs.VFS = (*SyncFS)(nil)
type SyncFS struct {
vfs vfs.VFS
mu sync.RWMutex
}
// type SyncFS struct {
// vfs vfs.VFS
// mu sync.RWMutex
// }
func New(vfs vfs.VFS) *SyncFS {
return &SyncFS{
vfs: vfs,
mu: sync.RWMutex{},
}
}
// func New(vfs vfs.VFS) *SyncFS {
// return &SyncFS{
// vfs: vfs,
// mu: sync.RWMutex{},
// }
// }
// Name returns the name of the file system.
func (sfs *SyncFS) Name() string {
return fmt.Sprintf("SyncFS(%s)", sfs.vfs.Name())
}
// // Name returns the name of the file system.
// func (sfs *SyncFS) Name() string {
// return fmt.Sprintf("SyncFS(%s)", sfs.vfs.Name())
// }
// Size returns the total size of all files in the file system.
func (sfs *SyncFS) Size() int64 {
sfs.mu.RLock()
defer sfs.mu.RUnlock()
// // Size returns the total size of all files in the file system.
// func (sfs *SyncFS) Size() int64 {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
return sfs.vfs.Size()
}
// return sfs.vfs.Size()
// }
// Set sets the value of key as src.
// Setting the same key multiple times, the last set call takes effect.
func (sfs *SyncFS) Set(key string, src []byte) error {
sfs.mu.Lock()
defer sfs.mu.Unlock()
// // Set sets the value of key as src.
// // Setting the same key multiple times, the last set call takes effect.
// func (sfs *SyncFS) Set(key string, src []byte) error {
// sfs.mu.Lock()
// defer sfs.mu.Unlock()
return sfs.vfs.Set(key, src)
}
// return sfs.vfs.Set(key, src)
// }
// Delete deletes the value of key.
func (sfs *SyncFS) Delete(key string) error {
sfs.mu.Lock()
defer sfs.mu.Unlock()
// // Delete deletes the value of key.
// func (sfs *SyncFS) Delete(key string) error {
// sfs.mu.Lock()
// defer sfs.mu.Unlock()
return sfs.vfs.Delete(key)
}
// return sfs.vfs.Delete(key)
// }
// Get gets the value of key to dst, and returns dst no matter whether or not there is an error.
func (sfs *SyncFS) Get(key string) ([]byte, error) {
sfs.mu.RLock()
defer sfs.mu.RUnlock()
// // Get gets the value of key to dst, and returns dst no matter whether or not there is an error.
// func (sfs *SyncFS) Get(key string) ([]byte, error) {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
return sfs.vfs.Get(key)
}
// return sfs.vfs.Get(key)
// }
// Stat returns the FileInfo of key.
func (sfs *SyncFS) Stat(key string) (*vfs.FileInfo, error) {
sfs.mu.RLock()
defer sfs.mu.RUnlock()
// // Stat returns the FileInfo of key.
// func (sfs *SyncFS) Stat(key string) (*vfs.FileInfo, error) {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
return sfs.vfs.Stat(key)
}
// return sfs.vfs.Stat(key)
// }
// StatAll returns the FileInfo of all keys.
func (sfs *SyncFS) StatAll() []*vfs.FileInfo {
sfs.mu.RLock()
defer sfs.mu.RUnlock()
// // StatAll returns the FileInfo of all keys.
// func (sfs *SyncFS) StatAll() []*vfs.FileInfo {
// sfs.mu.RLock()
// defer sfs.mu.RUnlock()
return sfs.vfs.StatAll()
}
// return sfs.vfs.StatAll()
// }
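
With per-key locking now living inside CacheFS, the coarse whole-filesystem RWMutex that SyncFS provided is redundant; the commit comments the file out wholesale (and drops the syncfs wrapper in steamcache.go) rather than deleting it outright.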