18 Commits
1.0.2 ... 1.0.7

Author SHA1 Message Date
00792d87a5 Merge pull request 'fix: gc was being stupid allowing another thread to take the space it made before it could not anymore' (#5) from fix/gc-breaking-downloads into main
All checks were successful
Release Tag / release (push) Successful in 13s
Reviewed-on: s1d3sw1ped/SteamCache2#5
2025-07-13 12:51:17 +00:00
3427b8f5bc fix: gc was being stupid allowing another thread to take the space it made before it could not anymore
All checks were successful
PR Check / check-and-test (pull_request) Successful in 12s
2025-07-13 07:50:22 -05:00
7f744d04b0 Merge pull request 'fix: trim query parameters from URL path in ServeHTTP to ensure cache key correctness' (#4) from fix/query-params into main
All checks were successful
Release Tag / release (push) Successful in 17s
Reviewed-on: s1d3sw1ped/SteamCache2#4
2025-07-13 10:43:21 +00:00
6c98d03ae7 fix: trim query parameters from URL path in ServeHTTP to ensure cache key correctness
All checks were successful
PR Check / check-and-test (pull_request) Successful in 16s
2025-07-13 05:42:07 -05:00
17ff507c89 Merge pull request 'fix: redo the whole caching functionality to make it really 420 blaze it fast' (#3) from fix/blazing-sun-speed into main
All checks were successful
Release Tag / release (push) Successful in 26s
Reviewed-on: s1d3sw1ped/SteamCache2#3
2025-07-13 10:21:19 +00:00
539f14e8ec refactor: moved the GC stuff around and corrected all tests
All checks were successful
PR Check / check-and-test (pull_request) Successful in 30s
2025-07-13 04:20:12 -05:00
1673e9554a Refactor VFS implementation to use Create and Open methods
Some checks failed
PR Check / check-and-test (pull_request) Failing after 11m4s
- Updated disk_test.go to replace Set and Get with Create and Open methods for better clarity and functionality.
- Modified fileinfo.go to include package comment.
- Refactored gc.go to streamline garbage collection handling and removed unused statistics.
- Updated gc_test.go to comment out large random tests for future implementation.
- Enhanced memory.go to implement LRU caching and metrics for memory usage.
- Updated memory_test.go to replace Set and Get with Create and Open methods.
- Removed sync.go as it was redundant and not utilized.
- Updated vfs.go to reflect changes in the VFS interface, replacing Set and Get with Create and Open.
- Added package comments to vfserror.go for consistency.
2025-07-13 03:17:22 -05:00
b83836f914 fix: update log message for server startup and improve request handling in ServeHTTP
All checks were successful
PR Check / check-and-test (pull_request) Successful in 1m6s
2025-07-12 09:48:06 -05:00
745856f0f4 fix: correct format key to formats in .goreleaser.yaml
All checks were successful
PR Check / check-and-test (pull_request) Successful in 1m4s
2025-07-12 09:21:56 -05:00
b4d2b1305e fix: add logging for unsupported methods and error handling in ServeHTTP
All checks were successful
PR Check / check-and-test (pull_request) Successful in 1m6s
2025-07-12 08:50:34 -05:00
0d263be2ca Merge pull request 'feat: update dependencies and improve caching mechanism' (#2) from feature/blazing-sun-speed into main
All checks were successful
Release Tag / release (push) Successful in 1m17s
Reviewed-on: s1d3sw1ped/SteamCache2#2
2025-07-12 13:18:53 +00:00
63a1c21861 fix: update action versions to use main branch for consistency
All checks were successful
PR Check / check-and-test (pull_request) Successful in 1m13s
2025-07-12 07:32:35 -05:00
0a73e46f90 fix: remove golangci-lint-action from PR workflow
All checks were successful
PR Check / check-and-test (pull_request) Successful in 1m42s
2025-07-12 07:16:48 -05:00
6f1158edeb fix: update action versions to use main branch for consistency
Some checks failed
PR Check / check-and-test (pull_request) Failing after 1m4s
2025-07-12 07:10:14 -05:00
93b682cfa5 chore: update action versions to latest in CI workflow
Some checks failed
PR Check / check-and-test (pull_request) Failing after 15s
2025-07-12 07:08:25 -05:00
f378d0e81f feat: update dependencies and improve caching mechanism
Some checks failed
PR Check / check-and-test (pull_request) Failing after 2m11s
- Added Prometheus client library for metrics collection.
- Refactored garbage collection strategy from random deletion to LRU (Least Recently Used) deletion.
- Introduced per-key locking in cache to prevent race conditions.
- Enhanced logging with structured log messages for cache hits and misses.
- Implemented a retry mechanism for upstream requests with exponential backoff.
- Updated Go modules and indirect dependencies for better compatibility and performance.
- Removed unused sync filesystem implementation.
- Added version initialization to ensure a default version string.
2025-07-12 06:43:00 -05:00
8c1bb695b8 fix: enhance logging to handle empty upstream values
All checks were successful
Release Tag / release (push) Successful in 10s
2025-01-23 11:31:28 -06:00
f58951fd92 fix: removed specific to my dev setup config 2025-01-23 11:31:13 -06:00
29 changed files with 1379 additions and 950 deletions

View File

@@ -8,14 +8,14 @@ jobs:
release: release:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@main
with: with:
fetch-depth: 0 fetch-depth: 0
- run: git fetch --force --tags - run: git fetch --force --tags
- uses: actions/setup-go@v5 - uses: actions/setup-go@main
with: with:
go-version-file: 'go.mod' go-version-file: 'go.mod'
- uses: goreleaser/goreleaser-action@v6 - uses: goreleaser/goreleaser-action@master
with: with:
distribution: goreleaser distribution: goreleaser
version: 'latest' version: 'latest'

View File

@@ -6,14 +6,10 @@ jobs:
check-and-test: check-and-test:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@main
- uses: actions/setup-go@v5 - uses: actions/setup-go@main
with: with:
go-version-file: 'go.mod' go-version-file: 'go.mod'
- run: go mod tidy - run: go mod tidy
- uses: golangci/golangci-lint-action@v3
with:
args: -D errcheck
version: latest
- run: go build ./... - run: go build ./...
- run: go test -race -v -shuffle=on ./... - run: go test -race -v -shuffle=on ./...

2
.gitignore vendored
View File

@@ -1,3 +1,5 @@
dist/ dist/
tmp/ tmp/
__*.exe __*.exe
.smashed.txt
.smashignore

View File

@@ -16,7 +16,7 @@ builds:
- amd64 - amd64
archives: archives:
- format: tar.gz - formats: tar.gz
name_template: >- name_template: >-
{{ .ProjectName }}_ {{ .ProjectName }}_
{{- title .Os }}_ {{- title .Os }}_
@@ -26,7 +26,7 @@ archives:
{{- if .Arm }}v{{ .Arm }}{{ end }} {{- if .Arm }}v{{ .Arm }}{{ end }}
format_overrides: format_overrides:
- goos: windows - goos: windows
format: zip formats: zip
changelog: changelog:
sort: asc sort: asc

15
.vscode/launch.json vendored
View File

@@ -17,9 +17,8 @@
"10G", "10G",
"--disk-path", "--disk-path",
"tmp/disk", "tmp/disk",
"--upstream", "--log-level",
"http://192.168.2.88:80", "debug",
"--verbose",
], ],
}, },
{ {
@@ -33,9 +32,8 @@
"10G", "10G",
"--disk-path", "--disk-path",
"tmp/disk", "tmp/disk",
"--upstream", "--log-level",
"http://192.168.2.88:80", "debug",
"--verbose",
], ],
}, },
{ {
@@ -47,9 +45,8 @@
"args": [ "args": [
"--memory", "--memory",
"1G", "1G",
"--upstream", "--log-level",
"http://192.168.2.88:80", "debug",
"--verbose",
], ],
} }
] ]

View File

@@ -1,23 +1,27 @@
// cmd/root.go
package cmd package cmd
import ( import (
"os" "os"
"runtime"
"s1d3sw1ped/SteamCache2/steamcache" "s1d3sw1ped/SteamCache2/steamcache"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/version"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
var ( var (
threads int
memory string memory string
memorymultiplier int
disk string disk string
diskmultiplier int
diskpath string diskpath string
upstream string upstream string
pprof bool logLevel string
verbose bool logFormat string
) )
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
@@ -29,21 +33,52 @@ var rootCmd = &cobra.Command{
By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache, By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
significantly improving download times and reducing the load on the internet connection.`, significantly improving download times and reducing the load on the internet connection.`,
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
if verbose { // Configure logging
switch logLevel {
case "debug":
zerolog.SetGlobalLevel(zerolog.DebugLevel) zerolog.SetGlobalLevel(zerolog.DebugLevel)
case "error":
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
case "info":
zerolog.SetGlobalLevel(zerolog.InfoLevel)
default:
zerolog.SetGlobalLevel(zerolog.InfoLevel) // Default to info level if not specified
}
var writer zerolog.ConsoleWriter
if logFormat == "json" {
writer = zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true}
} else {
writer = zerolog.ConsoleWriter{Out: os.Stderr}
}
logger.Logger = zerolog.New(writer).With().Timestamp().Logger()
logger.Logger.Info().
Msg("SteamCache2 " + version.Version + " starting...")
address := ":80"
if runtime.GOMAXPROCS(-1) != threads {
runtime.GOMAXPROCS(threads)
logger.Logger.Info().
Int("threads", threads).
Msg("Maximum number of threads set")
} }
sc := steamcache.New( sc := steamcache.New(
":80", address,
memory, memory,
memorymultiplier,
disk, disk,
diskmultiplier,
diskpath, diskpath,
upstream, upstream,
pprof,
) )
logger.Logger.Info().
Msg("SteamCache2 " + version.Version + " started on " + address)
sc.Run() sc.Run()
logger.Logger.Info().Msg("SteamCache2 stopped")
os.Exit(0)
}, },
} }
@@ -57,15 +92,14 @@ func Execute() {
} }
func init() { func init() {
rootCmd.Flags().IntVarP(&threads, "threads", "t", runtime.GOMAXPROCS(-1), "Number of worker threads to use for processing requests")
rootCmd.Flags().StringVarP(&memory, "memory", "m", "0", "The size of the memory cache") rootCmd.Flags().StringVarP(&memory, "memory", "m", "0", "The size of the memory cache")
rootCmd.Flags().IntVarP(&memorymultiplier, "memory-gc", "M", 10, "The gc value for the memory cache")
rootCmd.Flags().StringVarP(&disk, "disk", "d", "0", "The size of the disk cache") rootCmd.Flags().StringVarP(&disk, "disk", "d", "0", "The size of the disk cache")
rootCmd.Flags().IntVarP(&diskmultiplier, "disk-gc", "D", 100, "The gc value for the disk cache")
rootCmd.Flags().StringVarP(&diskpath, "disk-path", "p", "", "The path to the disk cache") rootCmd.Flags().StringVarP(&diskpath, "disk-path", "p", "", "The path to the disk cache")
rootCmd.Flags().StringVarP(&upstream, "upstream", "u", "", "The upstream server to proxy requests overrides the host header from the client but forwards the original host header to the upstream server") rootCmd.Flags().StringVarP(&upstream, "upstream", "u", "", "The upstream server to proxy requests overrides the host header from the client but forwards the original host header to the upstream server")
rootCmd.Flags().BoolVarP(&pprof, "pprof", "P", false, "Enable pprof") rootCmd.Flags().StringVarP(&logLevel, "log-level", "l", "info", "Logging level: debug, info, error")
rootCmd.Flags().MarkHidden("pprof") rootCmd.Flags().StringVarP(&logFormat, "log-format", "f", "console", "Logging format: json, console")
rootCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging")
} }

View File

@@ -1,3 +1,4 @@
// cmd/version.go
package cmd package cmd
import ( import (

11
go.mod
View File

@@ -4,15 +4,22 @@ go 1.23.0
require ( require (
github.com/docker/go-units v0.5.0 github.com/docker/go-units v0.5.0
github.com/prometheus/client_golang v1.22.0
github.com/rs/zerolog v1.33.0 github.com/rs/zerolog v1.33.0
github.com/spf13/cobra v1.8.1 github.com/spf13/cobra v1.8.1
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
) )
require ( require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-isatty v0.0.19 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/sys v0.12.0 // indirect golang.org/x/sys v0.30.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
) )

34
go.sum
View File

@@ -1,16 +1,40 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -19,11 +43,15 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,3 +1,4 @@
// main.go
package main package main
import ( import (

View File

@@ -1,63 +0,0 @@
package avgcachestate
import (
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"sync"
)
// AvgCacheState is a cache state that averages the last N cache states.
type AvgCacheState struct {
size int
avgs []cachestate.CacheState
mu sync.Mutex
}
// New creates a new average cache state with the given size.
func New(size int) *AvgCacheState {
a := &AvgCacheState{
size: size,
avgs: make([]cachestate.CacheState, size),
mu: sync.Mutex{},
}
a.Clear()
return a
}
// Clear resets the average cache state to zero.
func (a *AvgCacheState) Clear() {
a.mu.Lock()
defer a.mu.Unlock()
for i := 0; i < len(a.avgs); i++ {
a.avgs[i] = cachestate.CacheStateMiss
}
}
// Add adds a cache state to the average cache state.
func (a *AvgCacheState) Add(cs cachestate.CacheState) {
a.mu.Lock()
defer a.mu.Unlock()
a.avgs = append(a.avgs, cs)
if len(a.avgs) > a.size {
a.avgs = a.avgs[1:]
}
}
// Avg returns the average cache state.
func (a *AvgCacheState) Avg() float64 {
a.mu.Lock()
defer a.mu.Unlock()
var hits int
for _, cs := range a.avgs {
if cs == cachestate.CacheStateHit {
hits++
}
}
return float64(hits) / float64(len(a.avgs))
}

View File

@@ -1,49 +0,0 @@
package steamcache
import (
"runtime/debug"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"time"
"golang.org/x/exp/rand"
)
func init() {
// Set the GC percentage to 50%. This is a good balance between performance and memory usage.
debug.SetGCPercent(50)
}
// RandomGC randomly deletes files until we've reclaimed enough space.
func randomgc(vfss vfs.VFS, size uint) (uint, uint) {
// Randomly delete files until we've reclaimed enough space.
random := func(vfss vfs.VFS, stats []*vfs.FileInfo) int64 {
randfile := stats[rand.Intn(len(stats))]
sz := randfile.Size()
err := vfss.Delete(randfile.Name())
if err != nil {
return 0
}
return sz
}
deletions := 0
targetreclaim := int64(size)
var reclaimed int64
stats := vfss.StatAll()
for {
if reclaimed >= targetreclaim {
break
}
reclaimed += random(vfss, stats)
deletions++
}
return uint(reclaimed), uint(deletions)
}
func cachehandler(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
return time.Since(fi.AccessTime()) < time.Second*10 // Put hot files in the fast vfs if equipped
}

View File

@@ -1,13 +1,8 @@
// steamcache/logger/logger.go
package logger package logger
import ( import (
"os"
"github.com/rs/zerolog" "github.com/rs/zerolog"
) )
func init() { var Logger zerolog.Logger
zerolog.SetGlobalLevel(zerolog.InfoLevel)
}
var Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()

View File

@@ -1,33 +1,56 @@
// steamcache/steamcache.go
package steamcache package steamcache
import ( import (
"fmt" "context"
"io" "io"
"net"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
"runtime"
"s1d3sw1ped/SteamCache2/steamcache/avgcachestate"
"s1d3sw1ped/SteamCache2/steamcache/logger" "s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/version"
"s1d3sw1ped/SteamCache2/vfs" "s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cache" "s1d3sw1ped/SteamCache2/vfs/cache"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/disk" "s1d3sw1ped/SteamCache2/vfs/disk"
"s1d3sw1ped/SteamCache2/vfs/gc" "s1d3sw1ped/SteamCache2/vfs/gc"
"s1d3sw1ped/SteamCache2/vfs/memory" "s1d3sw1ped/SteamCache2/vfs/memory"
syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
"strings" "strings"
"sync" "sync"
"time" "time"
pprof "net/http/pprof"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
requestsTotal = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "http_requests_total",
Help: "Total number of HTTP requests",
},
[]string{"method", "status"},
)
cacheStatusTotal = promauto.NewCounterVec(
prometheus.CounterOpts{
Name: "cache_status_total",
Help: "Total cache status counts",
},
[]string{"status"},
)
responseTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "response_time_seconds",
Help: "Response time in seconds",
Buckets: prometheus.DefBuckets,
},
)
) )
type SteamCache struct { type SteamCache struct {
pprof bool
address string address string
upstream string upstream string
@@ -39,13 +62,13 @@ type SteamCache struct {
memorygc *gc.GCFS memorygc *gc.GCFS
diskgc *gc.GCFS diskgc *gc.GCFS
hits *avgcachestate.AvgCacheState server *http.Server
client *http.Client
dirty bool cancel context.CancelFunc
mu sync.Mutex wg sync.WaitGroup
} }
func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath, upstream string, pprof bool) *SteamCache { func New(address string, memorySize string, diskSize string, diskPath, upstream string) *SteamCache {
memorysize, err := units.FromHumanSize(memorySize) memorysize, err := units.FromHumanSize(memorySize)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -57,21 +80,21 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
} }
c := cache.New( c := cache.New(
cachehandler, gc.PromotionDecider,
) )
var m *memory.MemoryFS var m *memory.MemoryFS
var mgc *gc.GCFS var mgc *gc.GCFS
if memorysize > 0 { if memorysize > 0 {
m = memory.New(memorysize) m = memory.New(memorysize)
mgc = gc.New(m, memoryMultiplier, randomgc) mgc = gc.New(m, gc.LRUGC)
} }
var d *disk.DiskFS var d *disk.DiskFS
var dgc *gc.GCFS var dgc *gc.GCFS
if disksize > 0 { if disksize > 0 {
d = disk.New(diskPath, disksize) d = disk.New(diskPath, disksize)
dgc = gc.New(d, diskMultiplier, randomgc) dgc = gc.New(d, gc.LRUGC)
} }
// configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes // configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes
@@ -94,24 +117,44 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
os.Exit(1) os.Exit(1)
} }
transport := &http.Transport{
MaxIdleConns: 100,
MaxIdleConnsPerHost: 10,
IdleConnTimeout: 90 * time.Second,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
client := &http.Client{
Transport: transport,
Timeout: 60 * time.Second,
}
sc := &SteamCache{ sc := &SteamCache{
pprof: pprof,
upstream: upstream, upstream: upstream,
address: address, address: address,
vfs: syncfs.New(c), vfs: c,
memory: m, memory: m,
disk: d, disk: d,
memorygc: mgc, memorygc: mgc,
diskgc: dgc, diskgc: dgc,
client: client,
hits: avgcachestate.New(100), server: &http.Server{
Addr: address,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 120 * time.Second,
},
} }
if d != nil { if d != nil {
if d.Size() > d.Capacity() { if d.Size() > d.Capacity() {
randomgc(d, uint(d.Size()-d.Capacity())) gc.LRUGC(d, uint(d.Size()-d.Capacity()))
} }
} }
@@ -120,106 +163,49 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
func (sc *SteamCache) Run() { func (sc *SteamCache) Run() {
if sc.upstream != "" { if sc.upstream != "" {
_, err := http.Get(sc.upstream) resp, err := sc.client.Get(sc.upstream)
if err != nil { if err != nil || resp.StatusCode != http.StatusOK {
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server") logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
os.Exit(1) os.Exit(1)
} }
resp.Body.Close()
} }
sc.mu.Lock() sc.server.Handler = sc
sc.dirty = true ctx, cancel := context.WithCancel(context.Background())
sc.mu.Unlock() sc.cancel = cancel
sc.LogStats() sc.wg.Add(1)
t := time.NewTicker(1 * time.Second)
go func() { go func() {
for range t.C { defer sc.wg.Done()
sc.LogStats() err := sc.server.ListenAndServe()
} if err != nil && err != http.ErrServerClosed {
}()
err := http.ListenAndServe(sc.address, sc)
if err != nil {
if err == http.ErrServerClosed {
logger.Logger.Info().Msg("shutdown")
return
}
logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2") logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
os.Exit(1) os.Exit(1)
} }
}()
<-ctx.Done()
sc.server.Shutdown(ctx)
sc.wg.Wait()
} }
func (sc *SteamCache) LogStats() { func (sc *SteamCache) Shutdown() {
sc.mu.Lock() if sc.cancel != nil {
defer sc.mu.Unlock() sc.cancel()
if sc.dirty {
logger.Logger.Info().Str("address", sc.address).Str("version", version.Version).Str("upstream", sc.upstream).Msg("listening")
if sc.memory != nil { // only log memory if memory is enabled
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.memorygc.Stats()
logger.Logger.Info().
Str("size", units.HumanSize(float64(sc.memory.Size()))).
Str("capacity", units.HumanSize(float64(sc.memory.Capacity()))).
Str("files", fmt.Sprintf("%d", len(sc.memory.StatAll()))).
Msg("memory")
logger.Logger.Info().
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
Uint("files_total", lifetimeFiles).
Str("data", units.HumanSize(float64(reclaimedBytes))).
Uint("files", deletedFiles).
Str("gc_time", gcTime.String()).
Msg("memory_gc")
}
if sc.disk != nil { // only log disk if disk is enabled
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.diskgc.Stats()
logger.Logger.Info().
Str("size", units.HumanSize(float64(sc.disk.Size()))).
Str("capacity", units.HumanSize(float64(sc.disk.Capacity()))).
Str("files", fmt.Sprintf("%d", len(sc.disk.StatAll()))).
Msg("disk")
logger.Logger.Info().
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
Uint("files_total", lifetimeFiles).
Str("data", units.HumanSize(float64(reclaimedBytes))).
Uint("files", deletedFiles).
Str("gc_time", gcTime.String()).
Msg("disk_gc")
}
// log golang Garbage Collection stats
var m runtime.MemStats
runtime.ReadMemStats(&m)
logger.Logger.Info().
Str("alloc", units.HumanSize(float64(m.Alloc))).
Str("sys", units.HumanSize(float64(m.Sys))).
Msg("app_gc")
logger.Logger.Info().
Str("hitrate", fmt.Sprintf("%.2f%%", sc.hits.Avg()*100)).
Msg("cache")
logger.Logger.Info().Msg("") // empty line to separate log entries for better readability
sc.dirty = false
} }
sc.wg.Wait()
} }
func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if sc.pprof && r.URL.Path == "/debug/pprof/" { if r.URL.Path == "/metrics" {
pprof.Index(w, r) promhttp.Handler().ServeHTTP(w, r)
return
} else if sc.pprof && strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
pprof.Handler(strings.TrimPrefix(r.URL.Path, "/debug/pprof/")).ServeHTTP(w, r)
return return
} }
if r.Method != http.MethodGet { if r.Method != http.MethodGet {
requestsTotal.WithLabelValues(r.Method, "405").Inc()
logger.Logger.Warn().Str("method", r.Method).Msg("Only GET method is supported")
http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed) http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
return return
} }
@@ -231,42 +217,63 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
sc.mu.Lock() if strings.HasPrefix(r.URL.String(), "/depot/") {
sc.dirty = true // trim the query parameters from the URL path
sc.mu.Unlock() // this is necessary because the cache key should not include query parameters
path := strings.Split(r.URL.String(), "?")[0]
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too tstart := time.Now()
defer func() { responseTime.Observe(time.Since(tstart).Seconds()) }()
cacheKey := strings.ReplaceAll(path[1:], "\\", "/") // replace all backslashes with forward slashes shouldn't be necessary but just in case
cacheKey := strings.ReplaceAll(r.URL.String()[1:], "\\", "/") // replace all backslashes with forward slashes shouldn't be necessary but just in case
if cacheKey == "" { if cacheKey == "" {
requestsTotal.WithLabelValues(r.Method, "400").Inc()
logger.Logger.Warn().Str("url", path).Msg("Invalid URL")
http.Error(w, "Invalid URL", http.StatusBadRequest) http.Error(w, "Invalid URL", http.StatusBadRequest)
return return
} }
data, err := sc.vfs.Get(cacheKey) w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
reader, err := sc.vfs.Open(cacheKey)
if err == nil { if err == nil {
sc.hits.Add(cachestate.CacheStateHit) defer reader.Close()
w.Header().Add("X-LanCache-Status", "HIT") w.Header().Add("X-LanCache-Status", "HIT")
w.Write(data)
logger.Logger.Debug().Str("key", r.URL.String()).Msg("cache") io.Copy(w, reader)
logger.Logger.Info().
Str("key", cacheKey).
Str("host", r.Host).
Str("status", "HIT").
Dur("duration", time.Since(tstart)).
Msg("request")
requestsTotal.WithLabelValues(r.Method, "200").Inc()
cacheStatusTotal.WithLabelValues("HIT").Inc()
return return
} }
var req *http.Request var req *http.Request
if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
ur, err := url.JoinPath(sc.upstream, r.URL.String()) ur, err := url.JoinPath(sc.upstream, path)
if err != nil { if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to join URL path")
http.Error(w, "Failed to join URL path", http.StatusInternalServerError) http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
return return
} }
req, err = http.NewRequest(http.MethodGet, ur, nil) req, err = http.NewRequest(http.MethodGet, ur, nil)
if err != nil { if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to create request")
http.Error(w, "Failed to create request", http.StatusInternalServerError) http.Error(w, "Failed to create request", http.StatusInternalServerError)
return return
} }
req.Host = r.Host req.Host = r.Host
logger.Logger.Debug().Str("key", cacheKey).Str("host", sc.upstream).Msg("upstream")
} else { // if no upstream server is configured, proxy the request to the host specified in the request } else { // if no upstream server is configured, proxy the request to the host specified in the request
host := r.Host host := r.Host
if r.Header.Get("X-Sls-Https") == "enable" { if r.Header.Get("X-Sls-Https") == "enable" {
@@ -275,43 +282,90 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
host = "http://" + host host = "http://" + host
} }
ur, err := url.JoinPath(host, r.URL.String()) ur, err := url.JoinPath(host, path)
if err != nil { if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to join URL path")
http.Error(w, "Failed to join URL path", http.StatusInternalServerError) http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
return return
} }
req, err = http.NewRequest(http.MethodGet, ur, nil) req, err = http.NewRequest(http.MethodGet, ur, nil)
if err != nil { if err != nil {
requestsTotal.WithLabelValues(r.Method, "500").Inc()
logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to create request")
http.Error(w, "Failed to create request", http.StatusInternalServerError) http.Error(w, "Failed to create request", http.StatusInternalServerError)
return return
} }
logger.Logger.Debug().Str("key", cacheKey).Str("host", host).Msg("forward")
} }
req.Header.Add("X-Sls-Https", r.Header.Get("X-Sls-Https")) // Copy headers from the original request to the new request
req.Header.Add("User-Agent", r.Header.Get("User-Agent")) for key, values := range r.Header {
resp, err := http.DefaultClient.Do(req) for _, value := range values {
if err != nil { req.Header.Add(key, value)
}
}
// Retry logic
backoffSchedule := []time.Duration{1 * time.Second, 3 * time.Second, 10 * time.Second}
var resp *http.Response
for i, backoff := range backoffSchedule {
resp, err = sc.client.Do(req)
if err == nil && resp.StatusCode == http.StatusOK {
break
}
if i < len(backoffSchedule)-1 {
time.Sleep(backoff)
}
}
if err != nil || resp.StatusCode != http.StatusOK {
requestsTotal.WithLabelValues(r.Method, "500 upstream host "+r.Host).Inc()
logger.Logger.Error().Err(err).Str("url", req.URL.String()).Msg("Failed to fetch the requested URL")
http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError) http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
return return
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { size := resp.ContentLength
http.Error(w, "Failed to fetch the requested URL", resp.StatusCode)
return // this is sortof not needed as we should always be able to get a writer from the cache as long as the gc is able to reclaim enough space aka the file is not bigger than the disk can handle
ww := w.(io.Writer) // default writer to write to the response writer
writer, _ := sc.vfs.Create(cacheKey, size) // create a writer to write to the cache
if writer != nil { // if the writer is not nil, it means the cache is writable
defer writer.Close() // close the writer when done
ww = io.MultiWriter(w, writer) // write to both the response writer and the cache writer
} }
body, err := io.ReadAll(resp.Body)
if err != nil {
http.Error(w, "Failed to read response body", http.StatusInternalServerError)
return
}
sc.vfs.Set(cacheKey, body)
sc.hits.Add(cachestate.CacheStateMiss)
w.Header().Add("X-LanCache-Status", "MISS") w.Header().Add("X-LanCache-Status", "MISS")
w.Write(body)
io.Copy(ww, resp.Body)
logger.Logger.Info().
Str("key", cacheKey).
Str("host", r.Host).
Str("status", "MISS").
Dur("duration", time.Since(tstart)).
Msg("request")
requestsTotal.WithLabelValues(r.Method, "200").Inc()
cacheStatusTotal.WithLabelValues("MISS").Inc()
return
}
if r.URL.Path == "/favicon.ico" {
w.WriteHeader(http.StatusNoContent)
return
}
if r.URL.Path == "/robots.txt" {
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte("User-agent: *\nDisallow: /\n"))
return
}
requestsTotal.WithLabelValues(r.Method, "404").Inc()
logger.Logger.Warn().Str("url", r.URL.String()).Msg("Not found")
http.Error(w, "Not found", http.StatusNotFound)
} }

View File

@@ -1,32 +1,33 @@
// steamcache/steamcache_test.go
package steamcache package steamcache
import ( import (
"io"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
) )
func TestCaching(t *testing.T) { func TestCaching(t *testing.T) {
t.Parallel()
td := t.TempDir() td := t.TempDir()
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644) os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
sc := New("localhost:8080", "1GB", 10, "1GB", 100, td, "", false) sc := New("localhost:8080", "1G", "1G", td, "")
sc.dirty = true w, err := sc.vfs.Create("key", 5)
sc.LogStats() if err != nil {
t.Errorf("Create failed: %v", err)
if err := sc.vfs.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
}
if err := sc.vfs.Set("key1", []byte("value1")); err != nil {
t.Errorf("Set failed: %v", err)
} }
w.Write([]byte("value"))
w.Close()
sc.dirty = true w, err = sc.vfs.Create("key1", 6)
sc.LogStats() if err != nil {
t.Errorf("Create failed: %v", err)
}
w.Write([]byte("value1"))
w.Close()
if sc.diskgc.Size() != 17 { if sc.diskgc.Size() != 17 {
t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17) t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
@@ -36,21 +37,33 @@ func TestCaching(t *testing.T) {
t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17) t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
} }
if d, err := sc.vfs.Get("key"); err != nil { rc, err := sc.vfs.Open("key")
t.Errorf("Get failed: %v", err) if err != nil {
} else if string(d) != "value" { t.Errorf("Open failed: %v", err)
}
d, _ := io.ReadAll(rc)
rc.Close()
if string(d) != "value" {
t.Errorf("Get failed: got %s, want %s", d, "value") t.Errorf("Get failed: got %s, want %s", d, "value")
} }
if d, err := sc.vfs.Get("key1"); err != nil { rc, err = sc.vfs.Open("key1")
t.Errorf("Get failed: %v", err) if err != nil {
} else if string(d) != "value1" { t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1") t.Errorf("Get failed: got %s, want %s", d, "value1")
} }
if d, err := sc.vfs.Get("key2"); err != nil { rc, err = sc.vfs.Open("key2")
t.Errorf("Get failed: %v", err) if err != nil {
} else if string(d) != "value2" { t.Errorf("Open failed: %v", err)
}
d, _ = io.ReadAll(rc)
rc.Close()
if string(d) != "value2" {
t.Errorf("Get failed: got %s, want %s", d, "value2") t.Errorf("Get failed: got %s, want %s", d, "value2")
} }
@@ -65,7 +78,33 @@ func TestCaching(t *testing.T) {
sc.memory.Delete("key2") sc.memory.Delete("key2")
os.Remove(filepath.Join(td, "key2")) os.Remove(filepath.Join(td, "key2"))
if _, err := sc.vfs.Get("key2"); err == nil { if _, err := sc.vfs.Open("key2"); err == nil {
t.Errorf("Get failed: got nil, want error") t.Errorf("Open failed: got nil, want error")
}
}
func TestCacheMissAndHit(t *testing.T) {
sc := New("localhost:8080", "0", "1G", t.TempDir(), "")
key := "testkey"
value := []byte("testvalue")
// Simulate miss: but since no upstream, skip full ServeHTTP, test VFS
w, err := sc.vfs.Create(key, int64(len(value)))
if err != nil {
t.Fatal(err)
}
w.Write(value)
w.Close()
rc, err := sc.vfs.Open(key)
if err != nil {
t.Fatal(err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) {
t.Errorf("expected %s, got %s", value, got)
} }
} }

View File

@@ -1,3 +1,10 @@
// version/version.go
package version package version
var Version string var Version string
func init() {
if Version == "" {
Version = "0.0.0-dev"
}
}

104
vfs/cache/cache.go vendored
View File

@@ -1,10 +1,13 @@
// vfs/cache/cache.go
package cache package cache
import ( import (
"fmt" "fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs" "s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate" "s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/vfserror" "s1d3sw1ped/SteamCache2/vfs/vfserror"
"sync"
) )
// Ensure CacheFS implements VFS. // Ensure CacheFS implements VFS.
@@ -16,6 +19,8 @@ type CacheFS struct {
slow vfs.VFS slow vfs.VFS
cacheHandler CacheHandler cacheHandler CacheHandler
keyLocks sync.Map // map[string]*sync.RWMutex for per-key locks
} }
type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
@@ -24,6 +29,7 @@ type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
func New(cacheHandler CacheHandler) *CacheFS { func New(cacheHandler CacheHandler) *CacheFS {
return &CacheFS{ return &CacheFS{
cacheHandler: cacheHandler, cacheHandler: cacheHandler,
keyLocks: sync.Map{},
} }
} }
@@ -39,6 +45,12 @@ func (c *CacheFS) SetFast(vfs vfs.VFS) {
c.fast = vfs c.fast = vfs
} }
// getKeyLock returns a RWMutex for the given key, creating it if necessary.
func (c *CacheFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := c.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
}
// cacheState returns the state of the file at key. // cacheState returns the state of the file at key.
func (c *CacheFS) cacheState(key string) cachestate.CacheState { func (c *CacheFS) cacheState(key string) cachestate.CacheState {
if c.fast != nil { if c.fast != nil {
@@ -63,65 +75,74 @@ func (c *CacheFS) Size() int64 {
return c.slow.Size() return c.slow.Size()
} }
// Set sets the file at key to src. If the file is already in the cache, it is replaced.
func (c *CacheFS) Set(key string, src []byte) error {
state := c.cacheState(key)
switch state {
case cachestate.CacheStateHit:
if c.fast != nil {
c.fast.Delete(key)
}
return c.slow.Set(key, src)
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
return c.slow.Set(key, src)
}
panic(vfserror.ErrUnreachable)
}
// Delete deletes the file at key from the cache. // Delete deletes the file at key from the cache.
func (c *CacheFS) Delete(key string) error { func (c *CacheFS) Delete(key string) error {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
if c.fast != nil { if c.fast != nil {
c.fast.Delete(key) c.fast.Delete(key)
} }
return c.slow.Delete(key) return c.slow.Delete(key)
} }
// Get returns the file at key. If the file is not in the cache, it is fetched from the storage. // Open returns the file at key. If the file is not in the cache, it is fetched from the storage.
func (c *CacheFS) Get(key string) ([]byte, error) { func (c *CacheFS) Open(key string) (io.ReadCloser, error) {
src, _, err := c.GetS(key) mu := c.getKeyLock(key)
return src, err mu.RLock()
} defer mu.RUnlock()
// GetS returns the file at key. If the file is not in the cache, it is fetched from the storage. It also returns the cache state.
func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
state := c.cacheState(key) state := c.cacheState(key)
switch state { switch state {
case cachestate.CacheStateHit: case cachestate.CacheStateHit:
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check // if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
src, err := c.fast.Get(key) return c.fast.Open(key)
return src, state, err
case cachestate.CacheStateMiss: case cachestate.CacheStateMiss:
src, err := c.slow.Get(key) slowReader, err := c.slow.Open(key)
if err != nil { if err != nil {
return nil, state, err return nil, err
} }
sstat, _ := c.slow.Stat(key) sstat, _ := c.slow.Stat(key)
if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
// We are accessing the file from the slow storage, and the file has been accessed less then a minute ago so it popular, so we should update the fast storage with the latest file. // We are accessing the file from the slow storage, and the file has been accessed less then a minute ago so it popular, so we should update the fast storage with the latest file.
if c.cacheHandler != nil && c.cacheHandler(sstat, state) { if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
if err := c.fast.Set(key, src); err != nil { fastWriter, err := c.fast.Create(key, sstat.Size())
return nil, state, err if err == nil {
return &teeReadCloser{
Reader: io.TeeReader(slowReader, fastWriter),
closers: []io.Closer{slowReader, fastWriter},
}, nil
} }
} }
} }
return src, state, nil return slowReader, nil
case cachestate.CacheStateNotFound: case cachestate.CacheStateNotFound:
return nil, state, vfserror.ErrNotFound return nil, vfserror.ErrNotFound
}
panic(vfserror.ErrUnreachable)
}
// Create creates a new file at key. If the file is already in the cache, it is replaced.
func (c *CacheFS) Create(key string, size int64) (io.WriteCloser, error) {
mu := c.getKeyLock(key)
mu.Lock()
defer mu.Unlock()
state := c.cacheState(key)
switch state {
case cachestate.CacheStateHit:
if c.fast != nil {
c.fast.Delete(key)
}
return c.slow.Create(key, size)
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
return c.slow.Create(key, size)
} }
panic(vfserror.ErrUnreachable) panic(vfserror.ErrUnreachable)
@@ -130,6 +151,10 @@ func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
// Stat returns information about the file at key. // Stat returns information about the file at key.
// Warning: This will return information about the file in the fastest storage its in. // Warning: This will return information about the file in the fastest storage its in.
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) { func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
mu := c.getKeyLock(key)
mu.RLock()
defer mu.RUnlock()
state := c.cacheState(key) state := c.cacheState(key)
switch state { switch state {
@@ -150,3 +175,18 @@ func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
func (c *CacheFS) StatAll() []*vfs.FileInfo { func (c *CacheFS) StatAll() []*vfs.FileInfo {
return c.slow.StatAll() return c.slow.StatAll()
} }
type teeReadCloser struct {
io.Reader
closers []io.Closer
}
func (t *teeReadCloser) Close() error {
var err error
for _, c := range t.closers {
if e := c.Close(); e != nil {
err = e
}
}
return err
}

View File

@@ -1,7 +1,9 @@
// vfs/cache/cache_test.go
package cache package cache
import ( import (
"errors" "errors"
"io"
"testing" "testing"
"s1d3sw1ped/SteamCache2/vfs" "s1d3sw1ped/SteamCache2/vfs"
@@ -15,8 +17,6 @@ func testMemory() vfs.VFS {
} }
func TestNew(t *testing.T) { func TestNew(t *testing.T) {
t.Parallel()
fast := testMemory() fast := testMemory()
slow := testMemory() slow := testMemory()
@@ -29,8 +29,6 @@ func TestNew(t *testing.T) {
} }
func TestNewPanics(t *testing.T) { func TestNewPanics(t *testing.T) {
t.Parallel()
defer func() { defer func() {
if r := recover(); r == nil { if r := recover(); r == nil {
t.Fatal("expected panic but did not get one") t.Fatal("expected panic but did not get one")
@@ -42,9 +40,7 @@ func TestNewPanics(t *testing.T) {
cache.SetSlow(nil) cache.SetSlow(nil)
} }
func TestSetAndGet(t *testing.T) { func TestCreateAndOpen(t *testing.T) {
t.Parallel()
fast := testMemory() fast := testMemory()
slow := testMemory() slow := testMemory()
cache := New(nil) cache := New(nil)
@@ -54,23 +50,26 @@ func TestSetAndGet(t *testing.T) {
key := "test" key := "test"
value := []byte("value") value := []byte("value")
if err := cache.Set(key, value); err != nil { w, err := cache.Create(key, int64(len(value)))
t.Fatalf("unexpected error: %v", err)
}
got, err := cache.Get(key)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
w.Write(value)
w.Close()
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) { if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got) t.Fatalf("expected %s, got %s", value, got)
} }
} }
func TestSetAndGetNoFast(t *testing.T) { func TestCreateAndOpenNoFast(t *testing.T) {
t.Parallel()
slow := testMemory() slow := testMemory()
cache := New(nil) cache := New(nil)
cache.SetSlow(slow) cache.SetSlow(slow)
@@ -78,22 +77,26 @@ func TestSetAndGetNoFast(t *testing.T) {
key := "test" key := "test"
value := []byte("value") value := []byte("value")
if err := cache.Set(key, value); err != nil { w, err := cache.Create(key, int64(len(value)))
t.Fatalf("unexpected error: %v", err)
}
got, err := cache.Get(key)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
w.Write(value)
w.Close()
rc, err := cache.Open(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value) { if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got) t.Fatalf("expected %s, got %s", value, got)
} }
} }
func TestCaching(t *testing.T) {
t.Parallel()
func TestCachingPromotion(t *testing.T) {
fast := testMemory() fast := testMemory()
slow := testMemory() slow := testMemory()
cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool { cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
@@ -105,71 +108,42 @@ func TestCaching(t *testing.T) {
key := "test" key := "test"
value := []byte("value") value := []byte("value")
if err := fast.Set(key, value); err != nil { ws, _ := slow.Create(key, int64(len(value)))
t.Fatalf("unexpected error: %v", err) ws.Write(value)
} ws.Close()
if err := slow.Set(key, value); err != nil { rc, err := cache.Open(key)
t.Fatalf("unexpected error: %v", err)
}
_, state, err := cache.GetS(key)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
if state != cachestate.CacheStateHit { got, _ := io.ReadAll(rc)
t.Fatalf("expected %v, got %v", cachestate.CacheStateHit, state) rc.Close()
}
err = fast.Delete(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
got, state, err := cache.GetS(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if state != cachestate.CacheStateMiss {
t.Fatalf("expected %v, got %v", cachestate.CacheStateMiss, state)
}
if string(got) != string(value) { if string(got) != string(value) {
t.Fatalf("expected %s, got %s", value, got) t.Fatalf("expected %s, got %s", value, got)
} }
err = cache.Delete(key) // Check if promoted to fast
_, err = fast.Open(key)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Error("Expected promotion to fast cache")
}
_, state, err = cache.GetS(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
if state != cachestate.CacheStateNotFound {
t.Fatalf("expected %v, got %v", cachestate.CacheStateNotFound, state)
} }
} }
func TestGetNotFound(t *testing.T) { func TestOpenNotFound(t *testing.T) {
t.Parallel()
fast := testMemory() fast := testMemory()
slow := testMemory() slow := testMemory()
cache := New(nil) cache := New(nil)
cache.SetFast(fast) cache.SetFast(fast)
cache.SetSlow(slow) cache.SetSlow(slow)
_, err := cache.Get("nonexistent") _, err := cache.Open("nonexistent")
if !errors.Is(err, vfserror.ErrNotFound) { if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err) t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
} }
} }
func TestDelete(t *testing.T) { func TestDelete(t *testing.T) {
t.Parallel()
fast := testMemory() fast := testMemory()
slow := testMemory() slow := testMemory()
cache := New(nil) cache := New(nil)
@@ -179,23 +153,24 @@ func TestDelete(t *testing.T) {
key := "test" key := "test"
value := []byte("value") value := []byte("value")
if err := cache.Set(key, value); err != nil { w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
w.Write(value)
w.Close()
if err := cache.Delete(key); err != nil { if err := cache.Delete(key); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
_, err := cache.Get(key) _, err = cache.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) { if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err) t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
} }
} }
func TestStat(t *testing.T) { func TestStat(t *testing.T) {
t.Parallel()
fast := testMemory() fast := testMemory()
slow := testMemory() slow := testMemory()
cache := New(nil) cache := New(nil)
@@ -205,9 +180,12 @@ func TestStat(t *testing.T) {
key := "test" key := "test"
value := []byte("value") value := []byte("value")
if err := cache.Set(key, value); err != nil { w, err := cache.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
w.Write(value)
w.Close()
info, err := cache.Stat(key) info, err := cache.Stat(key)
if err != nil { if err != nil {
@@ -217,4 +195,7 @@ func TestStat(t *testing.T) {
if info == nil { if info == nil {
t.Fatal("expected file info to be non-nil") t.Fatal("expected file info to be non-nil")
} }
if info.Size() != int64(len(value)) {
t.Errorf("expected size %d, got %d", len(value), info.Size())
}
} }

View File

@@ -1,3 +1,4 @@
// vfs/cachestate/cachestate.go
package cachestate package cachestate
import "s1d3sw1ped/SteamCache2/vfs/vfserror" import "s1d3sw1ped/SteamCache2/vfs/vfserror"

View File

@@ -1,7 +1,10 @@
// vfs/disk/disk.go
package disk package disk
import ( import (
"container/list"
"fmt" "fmt"
"io"
"os" "os"
"path/filepath" "path/filepath"
"s1d3sw1ped/SteamCache2/steamcache/logger" "s1d3sw1ped/SteamCache2/steamcache/logger"
@@ -12,6 +15,38 @@ import (
"time" "time"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
diskCapacityBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_capacity_bytes",
Help: "Total capacity of the disk cache in bytes",
},
)
diskSizeBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "disk_cache_size_bytes",
Help: "Total size of the disk cache in bytes",
},
)
diskReadBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_read_bytes_total",
Help: "Total number of bytes read from the disk cache",
},
)
diskWriteBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "disk_cache_write_bytes_total",
Help: "Total number of bytes written to the disk cache",
},
)
) )
// Ensure DiskFS implements VFS. // Ensure DiskFS implements VFS.
@@ -23,8 +58,49 @@ type DiskFS struct {
info map[string]*vfs.FileInfo info map[string]*vfs.FileInfo
capacity int64 capacity int64
mu sync.Mutex size int64
sg sync.WaitGroup mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
LRU *lruList
}
// lruList for LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
}
func newLruList() *lruList {
return &lruList{
list: list.New(),
elem: make(map[string]*list.Element),
}
}
func (l *lruList) MoveToFront(key string) {
if e, ok := l.elem[key]; ok {
l.list.MoveToFront(e)
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
e := l.list.PushFront(fi)
l.elem[key] = e
return e
}
func (l *lruList) Remove(key string) {
if e, ok := l.elem[key]; ok {
l.list.Remove(e)
delete(l.elem, key)
}
}
func (l *lruList) Back() *vfs.FileInfo {
if e := l.list.Back(); e != nil {
return e.Value.(*vfs.FileInfo)
}
return nil
} }
// New creates a new DiskFS. // New creates a new DiskFS.
@@ -42,6 +118,11 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
panic(err) // panic if the error is something other than not found panic(err) // panic if the error is something other than not found
} }
os.Mkdir(root, 0755) // create the root directory if it does not exist
fi, err = os.Stat(root) // re-stat to get the file info
if err != nil {
panic(err) // panic if the re-stat fails
}
} }
if !fi.IsDir() { if !fi.IsDir() {
panic("disk root must be a directory") // panic if the root is not a directory panic("disk root must be a directory") // panic if the root is not a directory
@@ -51,14 +132,18 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
root: root, root: root,
info: make(map[string]*vfs.FileInfo), info: make(map[string]*vfs.FileInfo),
capacity: capacity, capacity: capacity,
mu: sync.Mutex{}, mu: sync.RWMutex{},
sg: sync.WaitGroup{}, keyLocks: sync.Map{},
LRU: newLruList(),
} }
os.MkdirAll(dfs.root, 0755) os.MkdirAll(dfs.root, 0755)
diskCapacityBytes.Set(float64(dfs.capacity))
if !skipinit { if !skipinit {
dfs.init() dfs.init()
diskSizeBytes.Set(float64(dfs.Size()))
} }
return dfs return dfs
@@ -73,12 +158,30 @@ func NewSkipInit(root string, capacity int64) *DiskFS {
} }
func (d *DiskFS) init() { func (d *DiskFS) init() {
// logger.Logger.Info().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Msg("init")
tstart := time.Now() tstart := time.Now()
d.walk(d.root) err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
d.sg.Wait() if err != nil {
return err
}
if info.IsDir() {
return nil
}
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
fi := vfs.NewFileInfoFromOS(info, k)
d.info[k] = fi
d.LRU.Add(k, fi)
d.size += info.Size()
d.mu.Unlock()
return nil
})
if err != nil {
logger.Logger.Error().Err(err).Msg("Walk failed")
}
logger.Logger.Info(). logger.Logger.Info().
Str("name", d.Name()). Str("name", d.Name()).
@@ -90,36 +193,6 @@ func (d *DiskFS) init() {
Msg("init") Msg("init")
} }
func (d *DiskFS) walk(path string) {
d.sg.Add(1)
go func() {
defer d.sg.Done()
filepath.Walk(path, func(npath string, info os.FileInfo, err error) error {
if path == npath {
return nil
}
if err != nil {
return err
}
if info.IsDir() {
d.walk(npath)
return filepath.SkipDir
}
d.mu.Lock()
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
logger.Logger.Debug().Str("name", k).Str("root", d.root).Msg("walk")
d.info[k] = vfs.NewFileInfoFromOS(info, k)
d.mu.Unlock()
// logger.Logger.Debug().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Str("path", npath).Msg("init")
return nil
})
}()
}
func (d *DiskFS) Capacity() int64 { func (d *DiskFS) Capacity() int64 {
return d.capacity return d.capacity
} }
@@ -129,52 +202,114 @@ func (d *DiskFS) Name() string {
} }
func (d *DiskFS) Size() int64 { func (d *DiskFS) Size() int64 {
d.mu.Lock() d.mu.RLock()
defer d.mu.Unlock() defer d.mu.RUnlock()
return d.size
var size int64
for _, v := range d.info {
size += v.Size()
}
return size
} }
func (d *DiskFS) Set(key string, src []byte) error { func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
}
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
if key == "" { if key == "" {
return vfserror.ErrInvalidKey return nil, vfserror.ErrInvalidKey
} }
if key[0] == '/' { if key[0] == '/' {
return vfserror.ErrInvalidKey return nil, vfserror.ErrInvalidKey
} }
// Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
d.mu.RLock()
if d.capacity > 0 { if d.capacity > 0 {
if size := d.Size() + int64(len(src)); size > d.capacity { if d.size+size > d.capacity {
return vfserror.ErrDiskFull d.mu.RUnlock()
return nil, vfserror.ErrDiskFull
} }
} }
d.mu.RUnlock()
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("set") keyMu := d.getKeyLock(key)
keyMu.Lock()
if _, err := d.Stat(key); err == nil { defer keyMu.Unlock()
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("delete")
d.Delete(key)
}
// Check again after lock
d.mu.Lock() d.mu.Lock()
defer d.mu.Unlock() if fi, exists := d.info[key]; exists {
os.MkdirAll(d.root+"/"+filepath.Dir(key), 0755) d.size -= fi.Size()
if err := os.WriteFile(d.root+"/"+key, src, 0644); err != nil { d.LRU.Remove(key)
delete(d.info, key)
path := filepath.Join(d.root, key)
os.Remove(path) // Ignore error, as file might not exist or other issues
}
d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
}
file, err := os.Create(path)
if err != nil {
return nil, err
}
return &diskWriteCloser{
Writer: file,
onClose: func(n int64) error {
fi, err := os.Stat(path)
if err != nil {
os.Remove(path)
return err return err
} }
fi, err := os.Stat(d.root + "/" + key) d.mu.Lock()
if err != nil { finfo := vfs.NewFileInfoFromOS(fi, key)
panic(err) d.info[key] = finfo
} d.LRU.Add(key, finfo)
d.size += n
d.mu.Unlock()
d.info[key] = vfs.NewFileInfoFromOS(fi, key) diskWriteBytes.Add(float64(n))
diskSizeBytes.Set(float64(d.Size()))
return nil return nil
},
key: key,
file: file,
}, nil
}
type diskWriteCloser struct {
io.Writer
onClose func(int64) error
n int64
key string
file *os.File
}
func (wc *diskWriteCloser) Write(p []byte) (int, error) {
n, err := wc.Writer.Write(p)
wc.n += int64(n)
return n, err
}
func (wc *diskWriteCloser) Close() error {
err := wc.file.Close()
if e := wc.onClose(wc.n); e != nil {
os.Remove(wc.file.Name())
return e
}
return err
} }
// Delete deletes the value of key. // Delete deletes the value of key.
@@ -186,24 +321,41 @@ func (d *DiskFS) Delete(key string) error {
return vfserror.ErrInvalidKey return vfserror.ErrInvalidKey
} }
_, err := d.Stat(key) // Sanitize key to prevent path traversal
if err != nil { key = filepath.Clean(key)
return err key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return vfserror.ErrInvalidKey
} }
keyMu := d.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
d.mu.Lock() d.mu.Lock()
defer d.mu.Unlock() fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return vfserror.ErrNotFound
}
d.size -= fi.Size()
d.LRU.Remove(key)
delete(d.info, key) delete(d.info, key)
if err := os.Remove(filepath.Join(d.root, key)); err != nil { d.mu.Unlock()
path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
if err := os.Remove(path); err != nil {
return err return err
} }
diskSizeBytes.Set(float64(d.Size()))
return nil return nil
} }
// Get gets the value of key and returns it. // Open opens the file at key and returns it.
func (d *DiskFS) Get(key string) ([]byte, error) { func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
if key == "" { if key == "" {
return nil, vfserror.ErrInvalidKey return nil, vfserror.ErrInvalidKey
} }
@@ -211,20 +363,59 @@ func (d *DiskFS) Get(key string) ([]byte, error) {
return nil, vfserror.ErrInvalidKey return nil, vfserror.ErrInvalidKey
} }
_, err := d.Stat(key) // Sanitize key to prevent path traversal
if err != nil { key = filepath.Clean(key)
return nil, err key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
} }
keyMu := d.getKeyLock(key)
keyMu.RLock()
defer keyMu.RUnlock()
d.mu.Lock() d.mu.Lock()
defer d.mu.Unlock() fi, exists := d.info[key]
if !exists {
d.mu.Unlock()
return nil, vfserror.ErrNotFound
}
fi.ATime = time.Now()
d.LRU.MoveToFront(key)
d.mu.Unlock()
data, err := os.ReadFile(filepath.Join(d.root, key)) path := filepath.Join(d.root, key)
path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
file, err := os.Open(path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return data, nil // Update metrics on close
return &readCloser{
ReadCloser: file,
onClose: func(n int64) {
diskReadBytes.Add(float64(n))
},
}, nil
}
type readCloser struct {
io.ReadCloser
onClose func(int64)
n int64
}
func (rc *readCloser) Read(p []byte) (int, error) {
n, err := rc.ReadCloser.Read(p)
rc.n += int64(n)
return n, err
}
func (rc *readCloser) Close() error {
err := rc.ReadCloser.Close()
rc.onClose(rc.n)
return err
} }
// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound. // Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
@@ -236,10 +427,19 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
return nil, vfserror.ErrInvalidKey return nil, vfserror.ErrInvalidKey
} }
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("stat") // Sanitize key to prevent path traversal
key = filepath.Clean(key)
key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
if strings.Contains(key, "..") {
return nil, vfserror.ErrInvalidKey
}
d.mu.Lock() keyMu := d.getKeyLock(key)
defer d.mu.Unlock() keyMu.RLock()
defer keyMu.RUnlock()
d.mu.RLock()
defer d.mu.RUnlock()
if fi, ok := d.info[key]; !ok { if fi, ok := d.info[key]; !ok {
return nil, vfserror.ErrNotFound return nil, vfserror.ErrNotFound
@@ -248,13 +448,13 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
} }
} }
func (m *DiskFS) StatAll() []*vfs.FileInfo { func (d *DiskFS) StatAll() []*vfs.FileInfo {
m.mu.Lock() d.mu.RLock()
defer m.mu.Unlock() defer d.mu.RUnlock()
// hard copy the file info to prevent modification of the original file info or the other way around // hard copy the file info to prevent modification of the original file info or the other way around
files := make([]*vfs.FileInfo, 0, len(m.info)) files := make([]*vfs.FileInfo, 0, len(d.info))
for _, v := range m.info { for _, v := range d.info {
fi := *v fi := *v
files = append(files, &fi) files = append(files, &fi)
} }

View File

@@ -1,145 +1,180 @@
// vfs/disk/disk_test.go
package disk package disk
import ( import (
"errors"
"fmt" "fmt"
"io"
"os" "os"
"path/filepath" "path/filepath"
"s1d3sw1ped/SteamCache2/vfs/vfserror" "s1d3sw1ped/SteamCache2/vfs/vfserror"
"testing" "testing"
) )
func TestAllDisk(t *testing.T) { func TestCreateAndOpen(t *testing.T) {
t.Parallel()
m := NewSkipInit(t.TempDir(), 1024) m := NewSkipInit(t.TempDir(), 1024)
if err := m.Set("key", []byte("value")); err != nil { key := "key"
t.Errorf("Set failed: %v", err) value := []byte("value")
}
if err := m.Set("key", []byte("value1")); err != nil { w, err := m.Create(key, int64(len(value)))
t.Errorf("Set failed: %v", err) if err != nil {
t.Fatalf("Create failed: %v", err)
} }
w.Write(value)
w.Close()
if d, err := m.Get("key"); err != nil { rc, err := m.Open(key)
t.Errorf("Get failed: %v", err) if err != nil {
} else if string(d) != "value1" { t.Fatalf("Open failed: %v", err)
t.Errorf("Get failed: got %s, want %s", d, "value1")
} }
got, _ := io.ReadAll(rc)
rc.Close()
if err := m.Delete("key"); err != nil { if string(got) != string(value) {
t.Errorf("Delete failed: %v", err) t.Fatalf("expected %s, got %s", value, got)
}
if _, err := m.Get("key"); err == nil {
t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Delete("key"); err == nil {
t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
}
if _, err := m.Stat("key"); err == nil {
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
}
if _, err := m.Stat("key"); err != nil {
t.Errorf("Stat failed: %v", err)
} }
} }
func TestLimited(t *testing.T) { func TestOverwrite(t *testing.T) {
t.Parallel() m := NewSkipInit(t.TempDir(), 1024)
key := "key"
value1 := []byte("value1")
value2 := []byte("value2")
w, err := m.Create(key, int64(len(value1)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value1)
w.Close()
w, err = m.Create(key, int64(len(value2)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value2)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value2) {
t.Fatalf("expected %s, got %s", value2, got)
}
}
func TestDelete(t *testing.T) {
m := NewSkipInit(t.TempDir(), 1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
if err := m.Delete(key); err != nil {
t.Fatalf("Delete failed: %v", err)
}
_, err = m.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestCapacityLimit(t *testing.T) {
m := NewSkipInit(t.TempDir(), 10) m := NewSkipInit(t.TempDir(), 10)
for i := 0; i < 11; i++ { for i := 0; i < 11; i++ {
if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 { w, err := m.Create(fmt.Sprintf("key%d", i), 1)
t.Errorf("Set failed: %v", err) if err != nil && i < 10 {
t.Errorf("Create failed: %v", err)
} else if i == 10 && err == nil { } else if i == 10 && err == nil {
t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull) t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
}
if i < 10 {
w.Write([]byte("1"))
w.Close()
} }
} }
} }
func TestInit(t *testing.T) { func TestInitExistingFiles(t *testing.T) {
t.Parallel()
td := t.TempDir() td := t.TempDir()
path := filepath.Join(td, "test", "key") path := filepath.Join(td, "test", "key")
os.MkdirAll(filepath.Dir(path), 0755) os.MkdirAll(filepath.Dir(path), 0755)
os.WriteFile(path, []byte("value"), 0644) os.WriteFile(path, []byte("value"), 0644)
m := New(td, 10) m := New(td, 10)
if _, err := m.Get("test/key"); err != nil { rc, err := m.Open("test/key")
t.Errorf("Get failed: %v", err) if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != "value" {
t.Errorf("expected value, got %s", got)
} }
s, _ := m.Stat("test/key") s, err := m.Stat("test/key")
if s.Name() != "test/key" { if err != nil {
t.Errorf("Stat failed: got %s, want %s", s.Name(), "key") t.Fatalf("Stat failed: %v", err)
}
if s == nil {
t.Error("Stat returned nil")
}
if s != nil && s.Name() != "test/key" {
t.Errorf("Stat failed: got %s, want %s", s.Name(), "test/key")
} }
} }
func TestDiskSizeDiscrepancy(t *testing.T) { func TestSizeConsistency(t *testing.T) {
t.Parallel()
td := t.TempDir() td := t.TempDir()
assumedSize := int64(6 + 5 + 6) // 6 + 5 + 6 bytes for key, key1, key2
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644) os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
m := New(td, 1024) m := New(td, 1024)
if 6 != m.Size() { if m.Size() != 6 {
t.Errorf("Size failed: got %d, want %d", m.Size(), 6) t.Errorf("Size failed: got %d, want 6", m.Size())
} }
if err := m.Set("key", []byte("value")); err != nil { w, err := m.Create("key", 5)
t.Errorf("Set failed: %v", err) if err != nil {
t.Errorf("Create failed: %v", err)
} }
w.Write([]byte("value"))
w.Close()
if err := m.Set("key1", []byte("value1")); err != nil { w, err = m.Create("key1", 6)
t.Errorf("Set failed: %v", err) if err != nil {
t.Errorf("Create failed: %v", err)
} }
w.Write([]byte("value1"))
w.Close()
assumedSize := int64(6 + 5 + 6)
if assumedSize != m.Size() { if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize) t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
} }
if d, err := m.Get("key"); err != nil { rc, err := m.Open("key")
t.Errorf("Get failed: %v", err) if err != nil {
} else if string(d) != "value" { t.Errorf("Open failed: %v", err)
t.Errorf("Get failed: got %s, want %s", d, "value")
} }
d, _ := io.ReadAll(rc)
if d, err := m.Get("key1"); err != nil { rc.Close()
t.Errorf("Get failed: %v", err) if string(d) != "value" {
} else if string(d) != "value1" { t.Errorf("Get failed: got %s, want value", d)
t.Errorf("Get failed: got %s, want %s", d, "value1")
} }
m = New(td, 1024) m = New(td, 1024)
if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
}
if d, err := m.Get("key"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value" {
t.Errorf("Get failed: got %s, want %s", d, "value")
}
if d, err := m.Get("key1"); err != nil {
t.Errorf("Get failed: %v", err)
} else if string(d) != "value1" {
t.Errorf("Get failed: got %s, want %s", d, "value1")
}
if assumedSize != m.Size() { if assumedSize != m.Size() {
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize) t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
} }

View File

@@ -1,3 +1,4 @@
// vfs/fileinfo.go
package vfs package vfs
import ( import (

View File

@@ -1,84 +1,108 @@
// vfs/gc/gc.go
package gc package gc
import ( import (
"fmt" "fmt"
"io"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/vfs" "s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/cachestate"
"s1d3sw1ped/SteamCache2/vfs/disk"
"s1d3sw1ped/SteamCache2/vfs/memory"
"s1d3sw1ped/SteamCache2/vfs/vfserror" "s1d3sw1ped/SteamCache2/vfs/vfserror"
"sync"
"time" "time"
) )
var (
// ErrInsufficientSpace is returned when there are no files to delete in the VFS.
ErrInsufficientSpace = fmt.Errorf("no files to delete")
)
// LRUGC deletes files in LRU order until enough space is reclaimed.
func LRUGC(vfss vfs.VFS, size uint) error {
logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LRU GC")
var reclaimed uint // reclaimed space in bytes
for {
switch fs := vfss.(type) {
case *disk.DiskFS:
fi := fs.LRU.Back()
if fi == nil {
return ErrInsufficientSpace // No files to delete
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue // If delete fails, try the next file
}
reclaimed += sz
case *memory.MemoryFS:
fi := fs.LRU.Back()
if fi == nil {
return ErrInsufficientSpace // No files to delete
}
sz := uint(fi.Size())
err := fs.Delete(fi.Name())
if err != nil {
continue // If delete fails, try the next file
}
reclaimed += sz
default:
panic("unreachable: unsupported VFS type for LRU GC") // panic if the VFS is not disk or memory
}
if reclaimed >= size {
logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC")
return nil // stop if enough space is reclaimed
}
}
}
func PromotionDecider(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
return time.Since(fi.AccessTime()) < time.Second*60 // Put hot files in the fast vfs if equipped
}
// Ensure GCFS implements VFS. // Ensure GCFS implements VFS.
var _ vfs.VFS = (*GCFS)(nil) var _ vfs.VFS = (*GCFS)(nil)
// GCFS is a virtual file system that calls a GC handler when the disk is full. The GC handler is responsible for freeing up space on the disk. The GCFS is a wrapper around another VFS. // GCFS is a virtual file system that calls a GC handler when the disk is full. The GC handler is responsible for freeing up space on the disk. The GCFS is a wrapper around another VFS.
type GCFS struct { type GCFS struct {
vfs.VFS vfs.VFS
multiplier int
// protected by mu
gcHanderFunc GCHandlerFunc gcHanderFunc GCHandlerFunc
lifetimeBytes, lifetimeFiles uint
reclaimedBytes, deletedFiles uint
gcTime time.Duration
mu sync.Mutex
} }
// GCHandlerFunc is a function that is called when the disk is full and the GCFS needs to free up space. It is passed the VFS and the size of the file that needs to be written. Its up to the implementation to free up space. How much space is freed is also up to the implementation. // GCHandlerFunc is a function that is called when the disk is full and the GCFS needs to free up space. It is passed the VFS and the size of the file that needs to be written. Its up to the implementation to free up space. How much space is freed is also up to the implementation.
type GCHandlerFunc func(vfs vfs.VFS, size uint) (reclaimedBytes uint, deletedFiles uint) type GCHandlerFunc func(vfs vfs.VFS, size uint) error
func New(vfs vfs.VFS, multiplier int, gcHandlerFunc GCHandlerFunc) *GCFS { func New(vfs vfs.VFS, gcHandlerFunc GCHandlerFunc) *GCFS {
if multiplier <= 0 {
multiplier = 1 // if the multiplier is less than or equal to 0 set it to 1 will be slow but the user can set it to a higher value if they want
}
return &GCFS{ return &GCFS{
VFS: vfs, VFS: vfs,
multiplier: multiplier,
gcHanderFunc: gcHandlerFunc, gcHanderFunc: gcHandlerFunc,
} }
} }
// Stats returns the lifetime bytes, lifetime files, reclaimed bytes and deleted files. // Create overrides the Create method of the VFS interface. It tries to create the key, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error.
// The lifetime bytes and lifetime files are the total bytes and files that have been freed up by the GC handler. func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
// The reclaimed bytes and deleted files are the bytes and files that have been freed up by the GC handler since last call to Stats. w, err := g.VFS.Create(key, size) // try to create the key
// The gc time is the total time spent in the GC handler since last call to Stats. for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
// The reclaimed bytes and deleted files and gc time are reset to 0 after the call to Stats. errr := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
func (g *GCFS) Stats() (lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles uint, gcTime time.Duration) { if errr == ErrInsufficientSpace {
g.mu.Lock() return nil, errr // if the GC handler returns no files to delete, return the error
defer g.mu.Unlock() }
w, err = g.VFS.Create(key, size)
g.lifetimeBytes += g.reclaimedBytes
g.lifetimeFiles += g.deletedFiles
lifetimeBytes = g.lifetimeBytes
lifetimeFiles = g.lifetimeFiles
reclaimedBytes = g.reclaimedBytes
deletedFiles = g.deletedFiles
gcTime = g.gcTime
g.reclaimedBytes = 0
g.deletedFiles = 0
g.gcTime = time.Duration(0)
return
} }
// Set overrides the Set method of the VFS interface. It tries to set the key and src, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error. if err != nil {
func (g *GCFS) Set(key string, src []byte) error { if err == vfserror.ErrDiskFull {
g.mu.Lock() logger.Logger.Error().Str("key", key).Int64("size", size).Msg("Failed to create file due to disk full, even after GC")
defer g.mu.Unlock() } else {
err := g.VFS.Set(key, src) // try to set the key and src logger.Logger.Error().Str("key", key).Int64("size", size).Err(err).Msg("Failed to create file")
}
if err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
tstart := time.Now()
reclaimedBytes, deletedFiles := g.gcHanderFunc(g.VFS, uint(len(src)*g.multiplier)) // call the GC handler
g.gcTime += time.Since(tstart)
g.reclaimedBytes += reclaimedBytes
g.deletedFiles += deletedFiles
err = g.VFS.Set(key, src) // try again after GC if it still fails return the error
} }
return err return w, err
} }
func (g *GCFS) Name() string { func (g *GCFS) Name() string {

View File

@@ -1,105 +1,72 @@
// vfs/gc/gc_test.go
package gc package gc
import ( import (
"errors"
"fmt" "fmt"
"s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/memory" "s1d3sw1ped/SteamCache2/vfs/memory"
"sort"
"testing" "testing"
"golang.org/x/exp/rand"
) )
func TestGCSmallRandom(t *testing.T) { func TestGCOnFull(t *testing.T) {
t.Parallel() m := memory.New(10)
gc := New(m, LRUGC)
m := memory.New(1024 * 1024 * 16) for i := 0; i < 5; i++ {
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) { w, err := gc.Create(fmt.Sprintf("key%d", i), 2)
deletions := 0
var reclaimed uint
t.Logf("GC starting to reclaim %d bytes", size)
stats := vfs.StatAll()
sort.Slice(stats, func(i, j int) bool {
// Sort by access time so we can remove the oldest files first.
return stats[i].AccessTime().Before(stats[j].AccessTime())
})
// Delete the oldest files until we've reclaimed enough space.
for _, s := range stats {
sz := uint(s.Size()) // Get the size of the file
err := vfs.Delete(s.Name())
if err != nil { if err != nil {
panic(err) t.Fatalf("Create failed: %v", err)
} }
reclaimed += sz // Track how much space we've reclaimed w.Write([]byte("ab"))
deletions++ // Track how many files we've deleted w.Close()
// t.Logf("GC deleting %s, %v", s.Name(), s.AccessTime().Format(time.RFC3339Nano))
if reclaimed >= size { // We've reclaimed enough space
break
} }
}
return uint(reclaimed), uint(deletions)
})
for i := 0; i < 10000; i++ { // Cache full at 10 bytes
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024*1, 1024*4)); err != nil { w, err := gc.Create("key5", 2)
t.Errorf("Set failed: %v", err) if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write([]byte("cd"))
w.Close()
if gc.Size() > 10 {
t.Errorf("Size exceeded: %d > 10", gc.Size())
}
// Check if older keys were evicted
_, err = m.Open("key0")
if err == nil {
t.Error("Expected key0 to be evicted")
} }
} }
if gc.Size() > 1024*1024*16 { func TestNoGCNeeded(t *testing.T) {
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size()) m := memory.New(20)
gc := New(m, LRUGC)
for i := 0; i < 5; i++ {
w, err := gc.Create(fmt.Sprintf("key%d", i), 2)
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write([]byte("ab"))
w.Close()
}
if gc.Size() != 10 {
t.Errorf("Size: got %d, want 10", gc.Size())
} }
} }
func genRandomData(min int, max int) []byte { func TestGCInsufficientSpace(t *testing.T) {
data := make([]byte, rand.Intn(max-min)+min) m := memory.New(5)
rand.Read(data) gc := New(m, LRUGC)
return data
}
func TestGCLargeRandom(t *testing.T) { w, err := gc.Create("key0", 10)
t.Parallel() if err == nil {
w.Close()
m := memory.New(1024 * 1024 * 16) // 16MB t.Error("Expected ErrDiskFull")
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) { } else if !errors.Is(err, ErrInsufficientSpace) {
deletions := 0 t.Errorf("Unexpected error: %v", err)
var reclaimed uint
t.Logf("GC starting to reclaim %d bytes", size)
stats := vfs.StatAll()
sort.Slice(stats, func(i, j int) bool {
// Sort by access time so we can remove the oldest files first.
return stats[i].AccessTime().Before(stats[j].AccessTime())
})
// Delete the oldest files until we've reclaimed enough space.
for _, s := range stats {
sz := uint(s.Size()) // Get the size of the file
vfs.Delete(s.Name())
reclaimed += sz // Track how much space we've reclaimed
deletions++ // Track how many files we've deleted
if reclaimed >= size { // We've reclaimed enough space
break
}
}
return uint(reclaimed), uint(deletions)
})
for i := 0; i < 10000; i++ {
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024, 1024*1024)); err != nil {
t.Errorf("Set failed: %v", err)
}
}
if gc.Size() > 1024*1024*16 {
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
} }
} }

View File

@@ -1,10 +1,49 @@
// vfs/memory/memory.go
package memory package memory
import ( import (
"bytes"
"container/list"
"io"
"s1d3sw1ped/SteamCache2/steamcache/logger"
"s1d3sw1ped/SteamCache2/vfs" "s1d3sw1ped/SteamCache2/vfs"
"s1d3sw1ped/SteamCache2/vfs/vfserror" "s1d3sw1ped/SteamCache2/vfs/vfserror"
"sync" "sync"
"time" "time"
"github.com/docker/go-units"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
memoryCapacityBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "memory_cache_capacity_bytes",
Help: "Total capacity of the memory cache in bytes",
},
)
memorySizeBytes = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "memory_cache_size_bytes",
Help: "Total size of the memory cache in bytes",
},
)
memoryReadBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "memory_cache_read_bytes_total",
Help: "Total number of bytes read from the memory cache",
},
)
memoryWriteBytes = promauto.NewCounter(
prometheus.CounterOpts{
Name: "memory_cache_write_bytes_total",
Help: "Total number of bytes written to the memory cache",
},
)
) )
// Ensure MemoryFS implements VFS. // Ensure MemoryFS implements VFS.
@@ -20,7 +59,49 @@ type file struct {
type MemoryFS struct { type MemoryFS struct {
files map[string]*file files map[string]*file
capacity int64 capacity int64
mu sync.Mutex size int64
mu sync.RWMutex
keyLocks sync.Map // map[string]*sync.RWMutex
LRU *lruList
}
// lruList for LRU eviction
type lruList struct {
list *list.List
elem map[string]*list.Element
}
func newLruList() *lruList {
return &lruList{
list: list.New(),
elem: make(map[string]*list.Element),
}
}
func (l *lruList) MoveToFront(key string) {
if e, ok := l.elem[key]; ok {
l.list.MoveToFront(e)
}
}
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
e := l.list.PushFront(fi)
l.elem[key] = e
return e
}
func (l *lruList) Remove(key string) {
if e, ok := l.elem[key]; ok {
l.list.Remove(e)
delete(l.elem, key)
}
}
func (l *lruList) Back() *vfs.FileInfo {
if e := l.list.Back(); e != nil {
return e.Value.(*vfs.FileInfo)
}
return nil
} }
// New creates a new MemoryFS. // New creates a new MemoryFS.
@@ -29,11 +110,23 @@ func New(capacity int64) *MemoryFS {
panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0 panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
} }
return &MemoryFS{ logger.Logger.Info().
Str("name", "MemoryFS").
Str("capacity", units.HumanSize(float64(capacity))).
Msg("init")
mfs := &MemoryFS{
files: make(map[string]*file), files: make(map[string]*file),
capacity: capacity, capacity: capacity,
mu: sync.Mutex{}, mu: sync.RWMutex{},
keyLocks: sync.Map{},
LRU: newLruList(),
} }
memoryCapacityBytes.Set(float64(capacity))
memorySizeBytes.Set(float64(mfs.Size()))
return mfs
} }
func (m *MemoryFS) Capacity() int64 { func (m *MemoryFS) Capacity() int64 {
@@ -45,73 +138,118 @@ func (m *MemoryFS) Name() string {
} }
func (m *MemoryFS) Size() int64 { func (m *MemoryFS) Size() int64 {
var size int64 m.mu.RLock()
defer m.mu.RUnlock()
m.mu.Lock() return m.size
defer m.mu.Unlock()
for _, v := range m.files {
size += int64(len(v.data))
} }
return size func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
mu, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
return mu.(*sync.RWMutex)
} }
func (m *MemoryFS) Set(key string, src []byte) error { func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
m.mu.RLock()
if m.capacity > 0 { if m.capacity > 0 {
if size := m.Size() + int64(len(src)); size > m.capacity { if m.size+size > m.capacity {
return vfserror.ErrDiskFull m.mu.RUnlock()
return nil, vfserror.ErrDiskFull
} }
} }
m.mu.RUnlock()
keyMu := m.getKeyLock(key)
keyMu.Lock()
defer keyMu.Unlock()
buf := &bytes.Buffer{}
return &memWriteCloser{
Writer: buf,
onClose: func() error {
data := buf.Bytes()
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() if f, exists := m.files[key]; exists {
m.size -= int64(len(f.data))
m.files[key] = &file{ m.LRU.Remove(key)
fileinfo: vfs.NewFileInfo(
key,
int64(len(src)),
time.Now(),
),
data: src,
} }
fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
m.files[key] = &file{
fileinfo: fi,
data: data,
}
m.LRU.Add(key, fi)
m.size += int64(len(data))
m.mu.Unlock()
memoryWriteBytes.Add(float64(len(data)))
memorySizeBytes.Set(float64(m.Size()))
return nil return nil
},
}, nil
}
type memWriteCloser struct {
io.Writer
onClose func() error
}
func (wc *memWriteCloser) Close() error {
return wc.onClose()
} }
func (m *MemoryFS) Delete(key string) error { func (m *MemoryFS) Delete(key string) error {
_, err := m.Stat(key) keyMu := m.getKeyLock(key)
if err != nil { keyMu.Lock()
return err defer keyMu.Unlock()
}
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() f, exists := m.files[key]
if !exists {
m.mu.Unlock()
return vfserror.ErrNotFound
}
m.size -= int64(len(f.data))
m.LRU.Remove(key)
delete(m.files, key) delete(m.files, key)
m.mu.Unlock()
memorySizeBytes.Set(float64(m.Size()))
return nil return nil
} }
func (m *MemoryFS) Get(key string) ([]byte, error) { func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
_, err := m.Stat(key) keyMu := m.getKeyLock(key)
if err != nil { keyMu.RLock()
return nil, err defer keyMu.RUnlock()
}
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() f, exists := m.files[key]
if !exists {
m.mu.Unlock()
return nil, vfserror.ErrNotFound
}
f.fileinfo.ATime = time.Now()
m.LRU.MoveToFront(key)
dataCopy := make([]byte, len(f.data))
copy(dataCopy, f.data)
m.mu.Unlock()
m.files[key].fileinfo.ATime = time.Now() memoryReadBytes.Add(float64(len(dataCopy)))
dst := make([]byte, len(m.files[key].data)) memorySizeBytes.Set(float64(m.Size()))
copy(dst, m.files[key].data)
return dst, nil return io.NopCloser(bytes.NewReader(dataCopy)), nil
} }
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) { func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
m.mu.Lock() keyMu := m.getKeyLock(key)
defer m.mu.Unlock() keyMu.RLock()
defer keyMu.RUnlock()
m.mu.RLock()
defer m.mu.RUnlock()
f, ok := m.files[key] f, ok := m.files[key]
if !ok { if !ok {
@@ -122,8 +260,8 @@ func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
} }
func (m *MemoryFS) StatAll() []*vfs.FileInfo { func (m *MemoryFS) StatAll() []*vfs.FileInfo {
m.mu.Lock() m.mu.RLock()
defer m.mu.Unlock() defer m.mu.RUnlock()
// hard copy the file info to prevent modification of the original file info or the other way around // hard copy the file info to prevent modification of the original file info or the other way around
files := make([]*vfs.FileInfo, 0, len(m.files)) files := make([]*vfs.FileInfo, 0, len(m.files))

View File

@@ -1,63 +1,129 @@
// vfs/memory/memory_test.go
package memory package memory
import ( import (
"errors"
"fmt" "fmt"
"io"
"s1d3sw1ped/SteamCache2/vfs/vfserror" "s1d3sw1ped/SteamCache2/vfs/vfserror"
"testing" "testing"
) )
func TestAllMemory(t *testing.T) { func TestCreateAndOpen(t *testing.T) {
t.Parallel()
m := New(1024) m := New(1024)
if err := m.Set("key", []byte("value")); err != nil { key := "key"
t.Errorf("Set failed: %v", err) value := []byte("value")
}
if err := m.Set("key", []byte("value1")); err != nil { w, err := m.Create(key, int64(len(value)))
t.Errorf("Set failed: %v", err) if err != nil {
t.Fatalf("Create failed: %v", err)
} }
w.Write(value)
w.Close()
if d, err := m.Get("key"); err != nil { rc, err := m.Open(key)
t.Errorf("Get failed: %v", err) if err != nil {
} else if string(d) != "value1" { t.Fatalf("Open failed: %v", err)
t.Errorf("Get failed: got %s, want %s", d, "value1")
} }
got, _ := io.ReadAll(rc)
rc.Close()
if err := m.Delete("key"); err != nil { if string(got) != string(value) {
t.Errorf("Delete failed: %v", err) t.Fatalf("expected %s, got %s", value, got)
}
if _, err := m.Get("key"); err == nil {
t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Delete("key"); err == nil {
t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
}
if _, err := m.Stat("key"); err == nil {
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
}
if err := m.Set("key", []byte("value")); err != nil {
t.Errorf("Set failed: %v", err)
}
if _, err := m.Stat("key"); err != nil {
t.Errorf("Stat failed: %v", err)
} }
} }
func TestLimited(t *testing.T) { func TestOverwrite(t *testing.T) {
t.Parallel() m := New(1024)
key := "key"
value1 := []byte("value1")
value2 := []byte("value2")
w, err := m.Create(key, int64(len(value1)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value1)
w.Close()
w, err = m.Create(key, int64(len(value2)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value2)
w.Close()
rc, err := m.Open(key)
if err != nil {
t.Fatalf("Open failed: %v", err)
}
got, _ := io.ReadAll(rc)
rc.Close()
if string(got) != string(value2) {
t.Fatalf("expected %s, got %s", value2, got)
}
}
func TestDelete(t *testing.T) {
m := New(1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
if err := m.Delete(key); err != nil {
t.Fatalf("Delete failed: %v", err)
}
_, err = m.Open(key)
if !errors.Is(err, vfserror.ErrNotFound) {
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
}
}
func TestCapacityLimit(t *testing.T) {
m := New(10) m := New(10)
for i := 0; i < 11; i++ { for i := 0; i < 11; i++ {
if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 { w, err := m.Create(fmt.Sprintf("key%d", i), 1)
t.Errorf("Set failed: %v", err) if err != nil && i < 10 {
t.Errorf("Create failed: %v", err)
} else if i == 10 && err == nil { } else if i == 10 && err == nil {
t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull) t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
}
if i < 10 {
w.Write([]byte("1"))
w.Close()
} }
} }
} }
func TestStat(t *testing.T) {
m := New(1024)
key := "key"
value := []byte("value")
w, err := m.Create(key, int64(len(value)))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
w.Write(value)
w.Close()
info, err := m.Stat(key)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if info == nil {
t.Fatal("expected file info to be non-nil")
}
if info.Size() != int64(len(value)) {
t.Errorf("expected size %d, got %d", len(value), info.Size())
}
}

View File

@@ -1,76 +0,0 @@
package sync
import (
"fmt"
"s1d3sw1ped/SteamCache2/vfs"
"sync"
)
// Ensure SyncFS implements VFS.
var _ vfs.VFS = (*SyncFS)(nil)

// SyncFS is a thread-safety decorator around another vfs.VFS: every
// call delegates to the wrapped file system under a readers-writer
// lock. Mutating calls (Set, Delete) take the exclusive lock; read-only
// calls (Get, Stat, StatAll, Size) share the read lock.
type SyncFS struct {
	vfs vfs.VFS      // underlying file system; all operations delegate here
	mu  sync.RWMutex // guards vfs against concurrent access
}
// New wraps the given file system in a SyncFS so that all access to it
// is serialized through a readers-writer lock.
func New(vfs vfs.VFS) *SyncFS {
	// The zero value of sync.RWMutex is ready to use; only the wrapped
	// file system needs to be set explicitly.
	sfs := &SyncFS{vfs: vfs}
	return sfs
}
// Name returns the name of the file system, tagging the wrapped file
// system's name with the SyncFS decorator.
func (sfs *SyncFS) Name() string {
	inner := sfs.vfs.Name()
	return fmt.Sprintf("SyncFS(%s)", inner)
}
// Size returns the total size of all files in the file system.
// It holds the shared read lock while delegating.
func (sfs *SyncFS) Size() int64 {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()
	total := sfs.vfs.Size()
	return total
}
// Set stores src under key. Setting the same key multiple times, the
// last call takes effect. The exclusive write lock is held for the
// duration of the delegated call.
func (sfs *SyncFS) Set(key string, src []byte) error {
	sfs.mu.Lock()
	defer sfs.mu.Unlock()
	err := sfs.vfs.Set(key, src)
	return err
}
// Delete removes the value stored under key. The exclusive write lock
// is held while delegating to the wrapped file system.
func (sfs *SyncFS) Delete(key string) error {
	sfs.mu.Lock()
	defer sfs.mu.Unlock()
	err := sfs.vfs.Delete(key)
	return err
}
// Get returns the value stored under key, delegating to the wrapped
// file system under the shared read lock. The wrapped implementation's
// result is returned whether or not there is an error.
func (sfs *SyncFS) Get(key string) ([]byte, error) {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()
	data, err := sfs.vfs.Get(key)
	return data, err
}
// Stat returns the FileInfo of key, delegating under the shared read
// lock.
func (sfs *SyncFS) Stat(key string) (*vfs.FileInfo, error) {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()
	info, err := sfs.vfs.Stat(key)
	return info, err
}
// StatAll returns the FileInfo of every key in the wrapped file
// system, delegating under the shared read lock.
func (sfs *SyncFS) StatAll() []*vfs.FileInfo {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()
	infos := sfs.vfs.StatAll()
	return infos
}

View File

@@ -1,5 +1,8 @@
// vfs/vfs.go
package vfs package vfs
import "io"
// VFS is the interface that wraps the basic methods of a virtual file system. // VFS is the interface that wraps the basic methods of a virtual file system.
type VFS interface { type VFS interface {
// Name returns the name of the file system. // Name returns the name of the file system.
@@ -8,15 +11,14 @@ type VFS interface {
// Size returns the total size of all files in the file system. // Size returns the total size of all files in the file system.
Size() int64 Size() int64
// Set sets the value of key as src. // Create creates a new file at key with expected size.
// Setting the same key multiple times, the last set call takes effect. Create(key string, size int64) (io.WriteCloser, error)
Set(key string, src []byte) error
// Delete deletes the value of key. // Delete deletes the value of key.
Delete(key string) error Delete(key string) error
// Get gets the value of key to dst, and returns dst no matter whether or not there is an error. // Open opens the file at key.
Get(key string) ([]byte, error) Open(key string) (io.ReadCloser, error)
// Stat returns the FileInfo of key. // Stat returns the FileInfo of key.
Stat(key string) (*FileInfo, error) Stat(key string) (*FileInfo, error)

View File

@@ -1,3 +1,4 @@
// vfs/vfserror/vfserror.go
package vfserror package vfserror
import "errors" import "errors"