Compare commits

27 commits (author and date columns not captured):

ae013f9a3b, d94b53c395, 847931ed43, 4387236d22, f6ce004922, 8e487876d2, 1be7f5bd20, f237b89ca7, ae07239021, 4876998f5d, 163e64790c, 00792d87a5, 3427b8f5bc, 7f744d04b0, 6c98d03ae7, 17ff507c89, 539f14e8ec, 1673e9554a, b83836f914, 745856f0f4, b4d2b1305e, 0d263be2ca, 63a1c21861, 0a73e46f90, 6f1158edeb, 93b682cfa5, f378d0e81f
Release workflow:

```diff
@@ -8,14 +8,14 @@ jobs:
   release:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@main
         with:
           fetch-depth: 0
       - run: git fetch --force --tags
-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@main
         with:
           go-version-file: 'go.mod'
-      - uses: goreleaser/goreleaser-action@v6
+      - uses: goreleaser/goreleaser-action@master
         with:
           distribution: goreleaser
           version: 'latest'
```
Check-and-test workflow:

```diff
@@ -6,14 +6,10 @@ jobs:
   check-and-test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@main
+      - uses: actions/setup-go@main
         with:
           go-version-file: 'go.mod'
       - run: go mod tidy
-      - uses: golangci/golangci-lint-action@v3
-        with:
-          args: -D errcheck
-          version: latest
       - run: go build ./...
       - run: go test -race -v -shuffle=on ./...
```
.gitignore (4 changes):

```diff
@@ -1,3 +1,5 @@
 dist/
 tmp/
-__*.exe
+*.exe
+.smashed.txt
+.smashignore
```
GoReleaser configuration:

```
@@ -2,11 +2,17 @@ version: 2

before:
  hooks:
    - go mod tidy
    - go mod tidy -v

builds:
  - ldflags:
  - id: default
    binary: steamcache2
    ldflags:
      - -s
      - -w
      - -extldflags "-static"
      - -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
      - -X s1d3sw1ped/SteamCache2/version.Date={{.Date}}
    env:
      - CGO_ENABLED=0
    goos:
@@ -14,19 +20,24 @@ builds:
      - windows
    goarch:
      - amd64
      - arm64
    ignore:
      - goos: windows
        goarch: arm64

checksum:
  name_template: "checksums.txt"

archives:
  - format: tar.gz
    name_template: >-
      {{ .ProjectName }}_
      {{- title .Os }}_
      {{- if eq .Arch "amd64" }}x86_64
      {{- else if eq .Arch "386" }}i386
      {{- else }}{{ .Arch }}{{ end }}
      {{- if .Arm }}v{{ .Arm }}{{ end }}
  - id: default
    name_template: "{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}"
    formats: tar.gz
    format_overrides:
      - goos: windows
        format: zip
        formats: zip
    files:
      - README.md
      - LICENSE

changelog:
  sort: asc
@@ -36,12 +47,7 @@ changelog:
      - "^test:"

release:
  name_template: '{{.ProjectName}}-{{.Version}}'
  footer: >-

    ---

    Released by [GoReleaser](https://github.com/goreleaser/goreleaser).
  name_template: "{{ .ProjectName }}-{{ .Version }}"

gitea_urls:
  api: https://git.s1d3sw1ped.com/api/v1
```
.vscode/launch.json (23 changes):

```
@@ -17,7 +17,14 @@
        "10G",
        "--disk-path",
        "tmp/disk",
        "--verbose",
        "--memory-gc",
        "lfu",
        "--disk-gc",
        "lru",
        "--log-level",
        "debug",
        "--upstream",
        "http://192.168.2.5:80",
      ],
    },
    {
@@ -31,7 +38,12 @@
        "10G",
        "--disk-path",
        "tmp/disk",
        "--verbose",
        "--disk-gc",
        "hybrid",
        "--log-level",
        "debug",
        "--upstream",
        "http://192.168.2.5:80",
      ],
    },
    {
@@ -43,7 +55,12 @@
      "args": [
        "--memory",
        "1G",
        "--verbose",
        "--memory-gc",
        "lfu",
        "--log-level",
        "debug",
        "--upstream",
        "http://192.168.2.5:80",
      ],
    }
  ]
```
README.md (32 changes):

````
@@ -17,6 +17,38 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
```sh
./SteamCache2 --memory 1G --disk 10G --disk-path tmp/disk
```

### Advanced Configuration

#### Garbage Collection Algorithms

SteamCache2 supports multiple garbage collection algorithms for both memory and disk caches:

```sh
# Use LFU for memory cache (good for long-running servers)
./SteamCache2 --memory 4G --memory-gc lfu --disk 100G --disk-gc lru

# Use FIFO for predictable eviction (good for testing)
./SteamCache2 --memory 2G --memory-gc fifo --disk 50G --disk-gc fifo

# Use size-based eviction for disk cache
./SteamCache2 --memory 1G --disk 200G --disk-gc largest
```

**Available GC Algorithms:**

- **`lru`** (default): Least Recently Used - evicts oldest accessed files
- **`lfu`**: Least Frequently Used - evicts least accessed files (good for popular content)
- **`fifo`**: First In, First Out - evicts oldest created files (predictable)
- **`largest`**: Size-based - evicts largest files first (maximizes file count)
- **`smallest`**: Size-based - evicts smallest files first (maximizes cache hit rate)
- **`hybrid`**: Combines access time and file size for optimal eviction

**Use Cases:**
- **LAN Events**: Use `lfu` for memory caches to keep popular games
- **Gaming Cafes**: Use `hybrid` for balanced performance
- **Testing**: Use `fifo` for predictable behavior
- **Large Files**: Use `largest` to prioritize keeping many small files
2. Configure your DNS:
   - If your on Windows and don't want a whole network implementation (THIS)[#windows-hosts-file-override]
````
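The policy list added to the README above is easiest to picture as a choice of comparator over per-file metadata. Below is a minimal illustrative sketch in Go; the `fileMeta` type and `evictionOrder` helper are hypothetical stand-ins for this explanation, not the project's actual `gc` package:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// fileMeta is a hypothetical stand-in for the cache's per-file metadata.
type fileMeta struct {
	name     string
	size     int64
	accessed time.Time // last access time (lru)
	created  time.Time // creation time (fifo)
	hits     int       // access count (lfu)
}

// evictionOrder sorts files so the best eviction candidate comes first.
func evictionOrder(files []fileMeta, policy string) {
	sort.Slice(files, func(i, j int) bool {
		a, b := files[i], files[j]
		switch policy {
		case "lfu":
			return a.hits < b.hits // evict least-accessed first
		case "fifo":
			return a.created.Before(b.created) // evict oldest-created first
		case "largest":
			return a.size > b.size // evict biggest files first
		case "smallest":
			return a.size < b.size // evict smallest files first
		default: // "lru"
			return a.accessed.Before(b.accessed) // evict least-recently-used first
		}
	})
}

func main() {
	files := []fileMeta{
		{name: "a", size: 10, hits: 3, accessed: time.Now().Add(-time.Hour)},
		{name: "b", size: 99, hits: 1, accessed: time.Now()},
	}
	evictionOrder(files, "lfu")
	fmt.Println(files[0].name) // "b": fewest hits, so it goes first
}
```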
cmd/root.go (78 changes):

```
@@ -1,23 +1,30 @@
// cmd/root.go
package cmd

import (
	"os"
	"runtime"
	"s1d3sw1ped/SteamCache2/steamcache"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/version"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
)

var (
	memory           string
	memorymultiplier int
	disk             string
	diskmultiplier   int
	diskpath         string
	upstream         string
	threads          int

	pprof    bool
	verbose  bool
	memory   string
	disk     string
	diskpath string
	upstream string

	memoryGC string
	diskGC   string

	logLevel  string
	logFormat string
)

var rootCmd = &cobra.Command{
@@ -29,21 +36,54 @@ var rootCmd = &cobra.Command{
By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
significantly improving download times and reducing the load on the internet connection.`,
	Run: func(cmd *cobra.Command, args []string) {
		if verbose {
		// Configure logging
		switch logLevel {
		case "debug":
			zerolog.SetGlobalLevel(zerolog.DebugLevel)
		case "error":
			zerolog.SetGlobalLevel(zerolog.ErrorLevel)
		case "info":
			zerolog.SetGlobalLevel(zerolog.InfoLevel)
		default:
			zerolog.SetGlobalLevel(zerolog.InfoLevel) // Default to info level if not specified
		}
		var writer zerolog.ConsoleWriter
		if logFormat == "json" {
			writer = zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true}
		} else {
			writer = zerolog.ConsoleWriter{Out: os.Stderr}
		}
		logger.Logger = zerolog.New(writer).With().Timestamp().Logger()

		logger.Logger.Info().
			Msg("SteamCache2 " + version.Version + " " + version.Date + " starting...")

		address := ":80"

		if runtime.GOMAXPROCS(-1) != threads {
			runtime.GOMAXPROCS(threads)
			logger.Logger.Info().
				Int("threads", threads).
				Msg("Maximum number of threads set")
		}

		sc := steamcache.New(
			":80",
			address,
			memory,
			memorymultiplier,
			disk,
			diskmultiplier,
			diskpath,
			upstream,
			pprof,
			memoryGC,
			diskGC,
		)

		logger.Logger.Info().
			Msg("SteamCache2 " + version.Version + " started on " + address)

		sc.Run()

		logger.Logger.Info().Msg("SteamCache2 stopped")
		os.Exit(0)
	},
}
@@ -57,15 +97,17 @@ func Execute() {
}

func init() {
	rootCmd.Flags().IntVarP(&threads, "threads", "t", runtime.GOMAXPROCS(-1), "Number of worker threads to use for processing requests")

	rootCmd.Flags().StringVarP(&memory, "memory", "m", "0", "The size of the memory cache")
	rootCmd.Flags().IntVarP(&memorymultiplier, "memory-gc", "M", 10, "The gc value for the memory cache")
	rootCmd.Flags().StringVarP(&disk, "disk", "d", "0", "The size of the disk cache")
	rootCmd.Flags().IntVarP(&diskmultiplier, "disk-gc", "D", 100, "The gc value for the disk cache")
	rootCmd.Flags().StringVarP(&diskpath, "disk-path", "p", "", "The path to the disk cache")

	rootCmd.Flags().StringVarP(&upstream, "upstream", "u", "", "The upstream server to proxy requests overrides the host header from the client but forwards the original host header to the upstream server")

	rootCmd.Flags().BoolVarP(&pprof, "pprof", "P", false, "Enable pprof")
	rootCmd.Flags().MarkHidden("pprof")
	rootCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging")
	rootCmd.Flags().StringVarP(&memoryGC, "memory-gc", "", "lru", "Memory cache GC algorithm: lru, lfu, fifo, largest, smallest, hybrid")
	rootCmd.Flags().StringVarP(&diskGC, "disk-gc", "", "lru", "Disk cache GC algorithm: lru, lfu, fifo, largest, smallest, hybrid")

	rootCmd.Flags().StringVarP(&logLevel, "log-level", "l", "info", "Logging level: debug, info, error")
	rootCmd.Flags().StringVarP(&logFormat, "log-format", "f", "console", "Logging format: json, console")
}
```
cmd/version.go:

```
@@ -1,3 +1,4 @@
// cmd/version.go
package cmd

import (
@@ -14,7 +15,7 @@ var versionCmd = &cobra.Command{
	Short: "prints the version of SteamCache2",
	Long:  `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Fprintln(os.Stderr, "SteamCache2", version.Version)
		fmt.Fprintln(os.Stderr, "SteamCache2", version.Version, version.Date)
	},
}
```
go.mod (11 changes):

```
@@ -4,15 +4,22 @@ go 1.23.0

require (
	github.com/docker/go-units v0.5.0
	github.com/prometheus/client_golang v1.22.0
	github.com/rs/zerolog v1.33.0
	github.com/spf13/cobra v1.8.1
	golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
)

require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.62.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	golang.org/x/sys v0.12.0 // indirect
	golang.org/x/sys v0.30.0 // indirect
	google.golang.org/protobuf v1.36.5 // indirect
)
```
go.sum (34 changes):

```
@@ -1,16 +1,40 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -19,11 +43,15 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
```
Deleted file (package avgcachestate):

```
@@ -1,63 +0,0 @@
package avgcachestate

import (
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"sync"
)

// AvgCacheState is a cache state that averages the last N cache states.
type AvgCacheState struct {
	size int
	avgs []cachestate.CacheState
	mu   sync.Mutex
}

// New creates a new average cache state with the given size.
func New(size int) *AvgCacheState {
	a := &AvgCacheState{
		size: size,
		avgs: make([]cachestate.CacheState, size),
		mu:   sync.Mutex{},
	}

	a.Clear()

	return a
}

// Clear resets the average cache state to zero.
func (a *AvgCacheState) Clear() {
	a.mu.Lock()
	defer a.mu.Unlock()

	for i := 0; i < len(a.avgs); i++ {
		a.avgs[i] = cachestate.CacheStateMiss
	}
}

// Add adds a cache state to the average cache state.
func (a *AvgCacheState) Add(cs cachestate.CacheState) {
	a.mu.Lock()
	defer a.mu.Unlock()

	a.avgs = append(a.avgs, cs)
	if len(a.avgs) > a.size {
		a.avgs = a.avgs[1:]
	}
}

// Avg returns the average cache state.
func (a *AvgCacheState) Avg() float64 {
	a.mu.Lock()
	defer a.mu.Unlock()

	var hits int

	for _, cs := range a.avgs {
		if cs == cachestate.CacheStateHit {
			hits++
		}
	}

	return float64(hits) / float64(len(a.avgs))
}
```
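For context, the removed type was a fixed-size sliding window over hit/miss outcomes. A short usage sketch built only from the API shown above (the surrounding function and the project imports, `avgcachestate` and `cachestate`, are assumed):

```go
// trackHitRate illustrates the removed avgcachestate API.
func trackHitRate() {
	hits := avgcachestate.New(100) // window over the last 100 requests; starts all-miss

	hits.Add(cachestate.CacheStateHit)  // record a cache hit
	hits.Add(cachestate.CacheStateMiss) // record a cache miss

	fmt.Printf("hit rate: %.2f%%\n", hits.Avg()*100) // fraction of hits in the window
}
```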
Deleted file (package steamcache GC helpers):

```
@@ -1,49 +0,0 @@
package steamcache

import (
	"runtime/debug"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"time"

	"golang.org/x/exp/rand"
)

func init() {
	// Set the GC percentage to 50%. This is a good balance between performance and memory usage.
	debug.SetGCPercent(50)
}

// RandomGC randomly deletes files until we've reclaimed enough space.
func randomgc(vfss vfs.VFS, size uint) (uint, uint) {
	// Randomly delete files until we've reclaimed enough space.
	random := func(vfss vfs.VFS, stats []*vfs.FileInfo) int64 {
		randfile := stats[rand.Intn(len(stats))]
		sz := randfile.Size()
		err := vfss.Delete(randfile.Name())
		if err != nil {
			return 0
		}

		return sz
	}

	deletions := 0
	targetreclaim := int64(size)
	var reclaimed int64

	stats := vfss.StatAll()
	for {
		if reclaimed >= targetreclaim {
			break
		}
		reclaimed += random(vfss, stats)
		deletions++
	}

	return uint(reclaimed), uint(deletions)
}

func cachehandler(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
	return time.Since(fi.AccessTime()) < time.Second*10 // Put hot files in the fast vfs if equipped
}
```
steamcache/logger/logger.go:

```
@@ -1,13 +1,8 @@
// steamcache/logger/logger.go
package logger

import (
	"os"

	"github.com/rs/zerolog"
)

func init() {
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
}

var Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
var Logger zerolog.Logger
```
steamcache/steamcache.go:

```
@@ -1,33 +1,152 @@
// steamcache/steamcache.go
package steamcache

import (
	"context"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"runtime"
	"s1d3sw1ped/SteamCache2/steamcache/avgcachestate"
	"path/filepath"
	"regexp"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/version"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/cache"
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"s1d3sw1ped/SteamCache2/vfs/disk"
	"s1d3sw1ped/SteamCache2/vfs/gc"
	"s1d3sw1ped/SteamCache2/vfs/memory"
	syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
	"sort"
	"strings"
	"sync"
	"time"

	pprof "net/http/pprof"

	"github.com/docker/go-units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// min returns the minimum of two integers
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

var (
	requestsTotal = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total number of HTTP requests",
		},
		[]string{"method", "status"},
	)

	cacheStatusTotal = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "cache_status_total",
			Help: "Total cache status counts",
		},
		[]string{"status"},
	)

	responseTime = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "response_time_seconds",
			Help:    "Response time in seconds",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"cache_status"},
	)
)

// hashVerificationTotal tracks hash verification attempts
var hashVerificationTotal = promauto.NewCounterVec(
	prometheus.CounterOpts{
		Name: "hash_verification_total",
		Help: "Total hash verification attempts",
	},
	[]string{"result"},
)

// extractHashFromFilename extracts a hash from a filename if present
// Steam depot files often have hashes in their names like: filename_hash.ext
func extractHashFromFilename(filename string) (string, bool) {
	// Common patterns for Steam depot files with hashes
	patterns := []*regexp.Regexp{
		regexp.MustCompile(`^([a-fA-F0-9]{40})$`),  // Standalone SHA1 hash (40 hex chars)
		regexp.MustCompile(`^([a-fA-F0-9]{40})\.`), // SHA1 hash with extension
	}

	for _, pattern := range patterns {
		if matches := pattern.FindStringSubmatch(filename); len(matches) > 1 {
			return strings.ToLower(matches[1]), true
		}
	}

	// Debug: log when we don't find a hash pattern
	if strings.Contains(filename, "manifest") {
		logger.Logger.Debug().
			Str("filename", filename).
			Msg("No hash pattern found in manifest filename")
	}

	return "", false
}

// calculateFileHash calculates the SHA1 hash of the given data
func calculateFileHash(data []byte) string {
	hash := sha1.Sum(data)
	return hex.EncodeToString(hash[:])
}

// calculateResponseHash calculates the SHA1 hash of the full HTTP response
func calculateResponseHash(resp *http.Response, bodyData []byte) string {
	hash := sha1.New()

	// Include status line
	statusLine := fmt.Sprintf("HTTP/1.1 %d %s\n", resp.StatusCode, resp.Status)
	hash.Write([]byte(statusLine))

	// Include headers (sorted for consistency)
	headers := make([]string, 0, len(resp.Header))
	for key, values := range resp.Header {
		for _, value := range values {
			headers = append(headers, fmt.Sprintf("%s: %s\n", key, value))
		}
	}
	sort.Strings(headers)
	for _, header := range headers {
		hash.Write([]byte(header))
	}

	// Include empty line between headers and body
	hash.Write([]byte("\n"))

	// Include body
	hash.Write(bodyData)

	return hex.EncodeToString(hash.Sum(nil))
}

// verifyFileHash verifies that the file content matches the expected hash
func verifyFileHash(data []byte, expectedHash string) bool {
	actualHash := calculateFileHash(data)
	return strings.EqualFold(actualHash, expectedHash)
}

// verifyResponseHash verifies that the full HTTP response matches the expected hash
func verifyResponseHash(resp *http.Response, bodyData []byte, expectedHash string) bool {
	actualHash := calculateResponseHash(resp, bodyData)
	return strings.EqualFold(actualHash, expectedHash)
}

type SteamCache struct {
	pprof    bool
	address  string
	upstream string
```
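The helpers in this hunk compose into a simple verification flow. A small sketch using only the functions defined above (`exampleVerify` itself is hypothetical):

```go
// Sketch: verifying a downloaded chunk against the SHA1 embedded in its name.
func exampleVerify(cacheKey string, body []byte) bool {
	filename := filepath.Base(cacheKey)
	expected, ok := extractHashFromFilename(filename)
	if !ok {
		return true // no hash in the name, nothing to check
	}
	if !verifyFileHash(body, expected) {
		fmt.Printf("corrupt chunk %s: want %s, got %s\n", filename, expected, calculateFileHash(body))
		return false
	}
	return true
}
```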
```
@@ -39,13 +158,13 @@ type SteamCache struct {
	memorygc *gc.GCFS
	diskgc   *gc.GCFS

	hits *avgcachestate.AvgCacheState

	dirty  bool
	mu     sync.Mutex
	server *http.Server
	client *http.Client
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath, upstream string, pprof bool) *SteamCache {
func New(address string, memorySize string, diskSize string, diskPath, upstream, memoryGC, diskGC string) *SteamCache {
	memorysize, err := units.FromHumanSize(memorySize)
	if err != nil {
		panic(err)
```
```
@@ -57,21 +176,29 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
	}

	c := cache.New(
		cachehandler,
		gc.AdaptivePromotionDeciderFunc,
	)

	var m *memory.MemoryFS
	var mgc *gc.GCFS
	if memorysize > 0 {
		m = memory.New(memorysize)
		mgc = gc.New(m, memoryMultiplier, randomgc)
		memoryGCAlgo := gc.GCAlgorithm(memoryGC)
		if memoryGCAlgo == "" {
			memoryGCAlgo = gc.LRU // default to LRU
		}
		mgc = gc.New(m, gc.GetGCAlgorithm(memoryGCAlgo))
	}

	var d *disk.DiskFS
	var dgc *gc.GCFS
	if disksize > 0 {
		d = disk.New(diskPath, disksize)
		dgc = gc.New(d, diskMultiplier, randomgc)
		diskGCAlgo := gc.GCAlgorithm(diskGC)
		if diskGCAlgo == "" {
			diskGCAlgo = gc.LRU // default to LRU
		}
		dgc = gc.New(d, gc.GetGCAlgorithm(diskGCAlgo))
	}

	// configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes
```
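From these call sites, `gc.New` now takes a handler returned by `gc.GetGCAlgorithm`, keyed by a `gc.GCAlgorithm` string. A hedged sketch of that shape follows; the gc package internals are not part of this diff, so the handler signature is inferred from the removed `randomgc` and the `gcHandler(d, ...)` call below, and the stub policies are illustrative only:

```go
package gc

import "s1d3sw1ped/SteamCache2/vfs"

// Hypothetical sketch of the algorithm lookup, inferred from call sites in
// this diff (gc.GCAlgorithm, gc.GetGCAlgorithm, gc.LRU); the real package
// internals are not shown here.
type GCAlgorithm string

const (
	LRU GCAlgorithm = "lru"
	LFU GCAlgorithm = "lfu"
)

// A handler reclaims at least size bytes and returns bytes and files freed,
// matching the shape of the removed randomgc.
type GCHandler func(v vfs.VFS, size uint) (uint, uint)

func GetGCAlgorithm(a GCAlgorithm) GCHandler {
	switch a {
	case LFU:
		return lfuGC // evict least-frequently-used files first
	default:
		return lruGC // mirrors the gc.LRU fallback used above
	}
}

// Stub policies, for illustration only.
func lruGC(v vfs.VFS, size uint) (uint, uint) { return 0, 0 }
func lfuGC(v vfs.VFS, size uint) (uint, uint) { return 0, 0 }
```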
```
@@ -94,24 +221,57 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
		os.Exit(1)
	}

	transport := &http.Transport{
		MaxIdleConns:        200,               // Increased from 100
		MaxIdleConnsPerHost: 50,                // Increased from 10
		IdleConnTimeout:     120 * time.Second, // Increased from 90s
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout:   15 * time.Second, // Increased from 10s
		ResponseHeaderTimeout: 30 * time.Second, // Increased from 10s
		ExpectContinueTimeout: 5 * time.Second,  // Increased from 1s
		DisableCompression:    true,             // Steam doesn't use compression
		ForceAttemptHTTP2:     true,             // Enable HTTP/2 if available
	}

	client := &http.Client{
		Transport: transport,
		Timeout:   120 * time.Second, // Increased from 60s
	}

	sc := &SteamCache{
		pprof:    pprof,
		upstream: upstream,
		address:  address,
		vfs:      syncfs.New(c),

		memory: m,
		disk:   d,

		vfs:      c,
		memory:   m,
		disk:     d,
		memorygc: mgc,
		diskgc:   dgc,
		client:   client,
		server: &http.Server{
			Addr:              address,
			ReadTimeout:       30 * time.Second,  // Increased
			WriteTimeout:      60 * time.Second,  // Increased
			IdleTimeout:       120 * time.Second, // Good for keep-alive
			ReadHeaderTimeout: 10 * time.Second,  // New, for header attacks
			MaxHeaderBytes:    1 << 20,           // 1MB, optional
		},
	}

		hits: avgcachestate.New(100),
	// Log GC algorithm configuration
	if m != nil {
		logger.Logger.Info().Str("memory_gc", memoryGC).Msg("Memory cache GC algorithm configured")
	}
	if d != nil {
		logger.Logger.Info().Str("disk_gc", diskGC).Msg("Disk cache GC algorithm configured")
	}

	if d != nil {
		if d.Size() > d.Capacity() {
			randomgc(d, uint(d.Size()-d.Capacity()))
			gcHandler := gc.GetGCAlgorithm(gc.GCAlgorithm(diskGC))
			gcHandler(d, uint(d.Size()-d.Capacity()))
		}
	}
```
```
@@ -120,112 +280,50 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin

func (sc *SteamCache) Run() {
	if sc.upstream != "" {
		_, err := http.Get(sc.upstream)
		if err != nil {
			logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
		resp, err := sc.client.Get(sc.upstream)
		if err != nil || resp.StatusCode != http.StatusOK {
			logger.Logger.Error().Err(err).Int("status_code", resp.StatusCode).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
			os.Exit(1)
		}
		resp.Body.Close()
	}

	sc.mu.Lock()
	sc.dirty = true
	sc.mu.Unlock()
	sc.server.Handler = sc
	ctx, cancel := context.WithCancel(context.Background())
	sc.cancel = cancel

	sc.LogStats()
	t := time.NewTicker(1 * time.Second)
	sc.wg.Add(1)
	go func() {
		for range t.C {
			sc.LogStats()
		defer sc.wg.Done()
		err := sc.server.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
			os.Exit(1)
		}
	}()

	err := http.ListenAndServe(sc.address, sc)
	if err != nil {
		if err == http.ErrServerClosed {
			logger.Logger.Info().Msg("shutdown")
			return
		}
		logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
		os.Exit(1)
	}
	<-ctx.Done()
	sc.server.Shutdown(ctx)
	sc.wg.Wait()
}

func (sc *SteamCache) LogStats() {
	sc.mu.Lock()
	defer sc.mu.Unlock()
	if sc.dirty {

		up := sc.upstream
		if up == "" {
			up = "{host in request}"
		}

		logger.Logger.Info().Str("address", sc.address).Str("version", version.Version).Str("upstream", up).Msg("listening")
		if sc.memory != nil { // only log memory if memory is enabled
			lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.memorygc.Stats()

			logger.Logger.Info().
				Str("size", units.HumanSize(float64(sc.memory.Size()))).
				Str("capacity", units.HumanSize(float64(sc.memory.Capacity()))).
				Str("files", fmt.Sprintf("%d", len(sc.memory.StatAll()))).
				Msg("memory")

			logger.Logger.Info().
				Str("data_total", units.HumanSize(float64(lifetimeBytes))).
				Uint("files_total", lifetimeFiles).
				Str("data", units.HumanSize(float64(reclaimedBytes))).
				Uint("files", deletedFiles).
				Str("gc_time", gcTime.String()).
				Msg("memory_gc")
		}

		if sc.disk != nil { // only log disk if disk is enabled
			lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.diskgc.Stats()

			logger.Logger.Info().
				Str("size", units.HumanSize(float64(sc.disk.Size()))).
				Str("capacity", units.HumanSize(float64(sc.disk.Capacity()))).
				Str("files", fmt.Sprintf("%d", len(sc.disk.StatAll()))).
				Msg("disk")

			logger.Logger.Info().
				Str("data_total", units.HumanSize(float64(lifetimeBytes))).
				Uint("files_total", lifetimeFiles).
				Str("data", units.HumanSize(float64(reclaimedBytes))).
				Uint("files", deletedFiles).
				Str("gc_time", gcTime.String()).
				Msg("disk_gc")
		}

		// log golang Garbage Collection stats
		var m runtime.MemStats
		runtime.ReadMemStats(&m)

		logger.Logger.Info().
			Str("alloc", units.HumanSize(float64(m.Alloc))).
			Str("sys", units.HumanSize(float64(m.Sys))).
			Msg("app_gc")

		logger.Logger.Info().
			Str("hitrate", fmt.Sprintf("%.2f%%", sc.hits.Avg()*100)).
			Msg("cache")

		logger.Logger.Info().Msg("") // empty line to separate log entries for better readability
		sc.dirty = false
func (sc *SteamCache) Shutdown() {
	if sc.cancel != nil {
		sc.cancel()
	}
	sc.wg.Wait()
}

func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if sc.pprof && r.URL.Path == "/debug/pprof/" {
		pprof.Index(w, r)
		return
	} else if sc.pprof && strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
		pprof.Handler(strings.TrimPrefix(r.URL.Path, "/debug/pprof/")).ServeHTTP(w, r)
	if r.Method != http.MethodGet {
		requestsTotal.WithLabelValues(r.Method, "405").Inc()
		logger.Logger.Warn().Str("method", r.Method).Msg("Only GET method is supported")
		http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
		return
	}

	if r.Method != http.MethodGet {
		http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
	if r.URL.Path == "/" {
		w.WriteHeader(http.StatusFound) // this is used by steamcache2's upstream verification at startup
		return
	}
```
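Run now blocks until its context is cancelled, and the new Shutdown method triggers that cancellation and waits on the WaitGroup. A typical caller would wire this to OS signals; no such caller appears in this diff, so the following program is a hypothetical sketch (the `New(...)` arguments mirror the test file later in this compare):

```go
package main

import (
	"os"
	"os/signal"
	"syscall"

	"s1d3sw1ped/SteamCache2/steamcache"
)

func main() {
	sc := steamcache.New(":80", "1G", "10G", "tmp/disk", "", "lru", "lru")

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)

	go func() {
		<-sigs        // Ctrl-C or SIGTERM
		sc.Shutdown() // cancels Run's context and waits for the server goroutine
	}()

	sc.Run() // blocks until Shutdown is called
}
```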
```
@@ -236,87 +334,215 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		return
	}

	sc.mu.Lock()
	sc.dirty = true
	sc.mu.Unlock()

	w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too

	cacheKey := strings.ReplaceAll(r.URL.String()[1:], "\\", "/") // replace all backslashes with forward slashes shouldn't be necessary but just in case
	if cacheKey == "" {
		http.Error(w, "Invalid URL", http.StatusBadRequest)
	if r.URL.Path == "/metrics" {
		promhttp.Handler().ServeHTTP(w, r)
		return
	}

	data, err := sc.vfs.Get(cacheKey)
	if err == nil {
		sc.hits.Add(cachestate.CacheStateHit)
		w.Header().Add("X-LanCache-Status", "HIT")
		w.Write(data)
		logger.Logger.Debug().Str("key", r.URL.String()).Msg("cache")
		return
	}
	if strings.HasPrefix(r.URL.String(), "/depot/") {
		// trim the query parameters from the URL path
		// this is necessary because the cache key should not include query parameters
		path := strings.Split(r.URL.String(), "?")[0]

		var req *http.Request
		if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
			ur, err := url.JoinPath(sc.upstream, r.URL.String())
			if err != nil {
				http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
		tstart := time.Now()

		cacheKey := strings.ReplaceAll(path[1:], "\\", "/") // replace all backslashes with forward slashes shouldn't be necessary but just in case

		if cacheKey == "" {
			requestsTotal.WithLabelValues(r.Method, "400").Inc()
			logger.Logger.Warn().Str("url", path).Msg("Invalid URL")
			http.Error(w, "Invalid URL", http.StatusBadRequest)
			return
		}

			req, err = http.NewRequest(http.MethodGet, ur, nil)
			if err != nil {
				http.Error(w, "Failed to create request", http.StatusInternalServerError)
		w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too

		reader, err := sc.vfs.Open(cacheKey)
		if err == nil {
			defer reader.Close()
			w.Header().Add("X-LanCache-Status", "HIT")

			io.Copy(w, reader)

			logger.Logger.Info().
				Str("key", cacheKey).
				Str("host", r.Host).
				Str("status", "HIT").
				Dur("duration", time.Since(tstart)).
				Msg("request")

			requestsTotal.WithLabelValues(r.Method, "200").Inc()
			cacheStatusTotal.WithLabelValues("HIT").Inc()
			responseTime.WithLabelValues("HIT").Observe(time.Since(tstart).Seconds())

			return
		}
			req.Host = r.Host
			logger.Logger.Debug().Str("key", cacheKey).Str("host", sc.upstream).Msg("upstream")
		} else { // if no upstream server is configured, proxy the request to the host specified in the request
			host := r.Host
			if r.Header.Get("X-Sls-Https") == "enable" {
				host = "https://" + host

		var req *http.Request
		if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
			ur, err := url.JoinPath(sc.upstream, path)
			if err != nil {
				requestsTotal.WithLabelValues(r.Method, "500").Inc()
				logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to join URL path")
				http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
				return
			}

			req, err = http.NewRequest(http.MethodGet, ur, nil)
			if err != nil {
				requestsTotal.WithLabelValues(r.Method, "500").Inc()
				logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to create request")
				http.Error(w, "Failed to create request", http.StatusInternalServerError)
				return
			}
			req.Host = r.Host
		} else { // if no upstream server is configured, proxy the request to the host specified in the request
			host := r.Host
			if r.Header.Get("X-Sls-Https") == "enable" {
				host = "https://" + host
			} else {
				host = "http://" + host
			}

			ur, err := url.JoinPath(host, path)
			if err != nil {
				requestsTotal.WithLabelValues(r.Method, "500").Inc()
				logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to join URL path")
				http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
				return
			}

			req, err = http.NewRequest(http.MethodGet, ur, nil)
			if err != nil {
				requestsTotal.WithLabelValues(r.Method, "500").Inc()
				logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to create request")
				http.Error(w, "Failed to create request", http.StatusInternalServerError)
				return
			}
		}

		// Copy headers from the original request to the new request
		for key, values := range r.Header {
			for _, value := range values {
				req.Header.Add(key, value)
			}
		}

		// Retry logic
		backoffSchedule := []time.Duration{1 * time.Second, 3 * time.Second, 10 * time.Second}
		var resp *http.Response
		for i, backoff := range backoffSchedule {
			resp, err = sc.client.Do(req)
			if err == nil && resp.StatusCode == http.StatusOK {
				break
			}
			if i < len(backoffSchedule)-1 {
				time.Sleep(backoff)
			}
		}
		if err != nil || resp.StatusCode != http.StatusOK {
			requestsTotal.WithLabelValues(r.Method, "500 upstream host "+r.Host).Inc()
			logger.Logger.Error().Err(err).Str("url", req.URL.String()).Msg("Failed to fetch the requested URL")
			http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()

		size := resp.ContentLength

		// Read the entire response body into memory for hash verification
		bodyData, err := io.ReadAll(resp.Body)
		if err != nil {
			requestsTotal.WithLabelValues(r.Method, "500").Inc()
			logger.Logger.Error().Err(err).Str("url", req.URL.String()).Msg("Failed to read response body")
			http.Error(w, "Failed to read response body", http.StatusInternalServerError)
			return
		}

		// Extract filename from cache key for hash verification
		filename := filepath.Base(cacheKey)
		expectedHash, hasHash := extractHashFromFilename(filename)

		// Hash verification using Steam's X-Content-Sha header and content length verification
		hashVerified := true
		if hasHash {
			// Get the hash from Steam's X-Content-Sha header
			steamHash := resp.Header.Get("X-Content-Sha")

			// Verify using Steam's hash
			if strings.EqualFold(steamHash, expectedHash) {
				hashVerificationTotal.WithLabelValues("success").Inc()
			} else {
				hashVerificationTotal.WithLabelValues("failed").Inc()
				logger.Logger.Error().
					Str("key", cacheKey).
					Str("expected_hash", expectedHash).
					Str("steam_hash", steamHash).
					Int("content_length", len(bodyData)).
					Msg("Steam hash verification failed - Steam's hash doesn't match filename")
				hashVerified = false
			}
		} else {
				host = "http://" + host
			hashVerificationTotal.WithLabelValues("no_hash").Inc()
		}

			ur, err := url.JoinPath(host, r.URL.String())
			if err != nil {
				http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
				return
		// Always verify content length as an additional safety check
		if resp.ContentLength > 0 && int64(len(bodyData)) != resp.ContentLength {
			hashVerificationTotal.WithLabelValues("content_length_failed").Inc()
			logger.Logger.Error().
				Str("key", cacheKey).
				Int("actual_content_length", len(bodyData)).
				Int64("expected_content_length", resp.ContentLength).
				Msg("Content length verification failed")
			hashVerified = false
		} else if resp.ContentLength > 0 {
			hashVerificationTotal.WithLabelValues("content_length_success").Inc()
		}

			req, err = http.NewRequest(http.MethodGet, ur, nil)
			if err != nil {
				http.Error(w, "Failed to create request", http.StatusInternalServerError)
				return
		// Write to response (always serve the file)
		w.Header().Add("X-LanCache-Status", "MISS")
		w.Write(bodyData)

		// Only cache the file if hash verification passed (or no hash was present)
		if hashVerified {
			writer, _ := sc.vfs.Create(cacheKey, size)
			if writer != nil {
				defer writer.Close()
				writer.Write(bodyData)
			}
		} else {
			logger.Logger.Warn().
				Str("key", cacheKey).
				Msg("File served but not cached due to hash verification failure")
		}

			logger.Logger.Debug().Str("key", cacheKey).Str("host", host).Msg("forward")
		}
		logger.Logger.Info().
			Str("key", cacheKey).
			Str("host", r.Host).
			Str("status", "MISS").
			Dur("duration", time.Since(tstart)).
			Msg("request")

		req.Header.Add("X-Sls-Https", r.Header.Get("X-Sls-Https"))
		req.Header.Add("User-Agent", r.Header.Get("User-Agent"))
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()
		requestsTotal.WithLabelValues(r.Method, "200").Inc()
		cacheStatusTotal.WithLabelValues("MISS").Inc()
		responseTime.WithLabelValues("MISS").Observe(time.Since(tstart).Seconds())

		if resp.StatusCode != http.StatusOK {
			http.Error(w, "Failed to fetch the requested URL", resp.StatusCode)
			return
		}

		body, err := io.ReadAll(resp.Body)
		if err != nil {
			http.Error(w, "Failed to read response body", http.StatusInternalServerError)
	if r.URL.Path == "/favicon.ico" {
		w.WriteHeader(http.StatusNoContent)
		return
	}

		sc.vfs.Set(cacheKey, body)
		sc.hits.Add(cachestate.CacheStateMiss)
		w.Header().Add("X-LanCache-Status", "MISS")
		w.Write(body)
	if r.URL.Path == "/robots.txt" {
		w.Header().Set("Content-Type", "text/plain")
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("User-agent: *\nDisallow: /\n"))
		return
	}

	requestsTotal.WithLabelValues(r.Method, "404").Inc()
	logger.Logger.Warn().Str("url", r.URL.String()).Msg("Not found")
	http.Error(w, "Not found", http.StatusNotFound)
}
```
steamcache/steamcache_test.go:

```
@@ -1,32 +1,34 @@
// steamcache/steamcache_test.go
package steamcache

import (
	"io"
	"net/http"
	"os"
	"path/filepath"
	"testing"
)

func TestCaching(t *testing.T) {
	t.Parallel()

	td := t.TempDir()

	os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)

	sc := New("localhost:8080", "1GB", 10, "1GB", 100, td, "", false)
	sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru")

	sc.dirty = true
	sc.LogStats()

	if err := sc.vfs.Set("key", []byte("value")); err != nil {
		t.Errorf("Set failed: %v", err)
	}
	if err := sc.vfs.Set("key1", []byte("value1")); err != nil {
		t.Errorf("Set failed: %v", err)
	w, err := sc.vfs.Create("key", 5)
	if err != nil {
		t.Errorf("Create failed: %v", err)
	}
	w.Write([]byte("value"))
	w.Close()

	sc.dirty = true
	sc.LogStats()
	w, err = sc.vfs.Create("key1", 6)
	if err != nil {
		t.Errorf("Create failed: %v", err)
	}
	w.Write([]byte("value1"))
	w.Close()

	if sc.diskgc.Size() != 17 {
		t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
@@ -36,21 +38,33 @@ func TestCaching(t *testing.T) {
		t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
	}

	if d, err := sc.vfs.Get("key"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value" {
	rc, err := sc.vfs.Open("key")
	if err != nil {
		t.Errorf("Open failed: %v", err)
	}
	d, _ := io.ReadAll(rc)
	rc.Close()
	if string(d) != "value" {
		t.Errorf("Get failed: got %s, want %s", d, "value")
	}

	if d, err := sc.vfs.Get("key1"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value1" {
	rc, err = sc.vfs.Open("key1")
	if err != nil {
		t.Errorf("Open failed: %v", err)
	}
	d, _ = io.ReadAll(rc)
	rc.Close()
	if string(d) != "value1" {
		t.Errorf("Get failed: got %s, want %s", d, "value1")
	}

	if d, err := sc.vfs.Get("key2"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value2" {
	rc, err = sc.vfs.Open("key2")
	if err != nil {
		t.Errorf("Open failed: %v", err)
	}
	d, _ = io.ReadAll(rc)
	rc.Close()
	if string(d) != "value2" {
		t.Errorf("Get failed: got %s, want %s", d, "value2")
	}

@@ -65,7 +79,167 @@ func TestCaching(t *testing.T) {
	sc.memory.Delete("key2")
	os.Remove(filepath.Join(td, "key2"))

	if _, err := sc.vfs.Get("key2"); err == nil {
		t.Errorf("Get failed: got nil, want error")
	if _, err := sc.vfs.Open("key2"); err == nil {
		t.Errorf("Open failed: got nil, want error")
	}
}

func TestCacheMissAndHit(t *testing.T) {
	sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru")

	key := "testkey"
	value := []byte("testvalue")

	// Simulate miss: but since no upstream, skip full ServeHTTP, test VFS
	w, err := sc.vfs.Create(key, int64(len(value)))
	if err != nil {
		t.Fatal(err)
	}
	w.Write(value)
	w.Close()

	rc, err := sc.vfs.Open(key)
	if err != nil {
		t.Fatal(err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Errorf("expected %s, got %s", value, got)
	}
}

func TestHashExtraction(t *testing.T) {
	// Test the specific key from the user's issue
	testCases := []struct {
		filename       string
		expectedHash   string
		shouldHaveHash bool
	}{
		{
			filename:       "e89c81a1a926eb4732e146bc806491da8a7d89ca",
			expectedHash:   "e89c81a1a926eb4732e146bc806491da8a7d89ca",
			shouldHaveHash: true, // Now it should work with the new standalone hash pattern
		},
		{
			filename:       "chunk_e89c81a1a926eb4732e146bc806491da8a7d89ca",
			expectedHash:   "",
			shouldHaveHash: false, // No longer supported with simplified patterns
		},
		{
			filename:       "file.e89c81a1a926eb4732e146bc806491da8a7d89ca.chunk",
			expectedHash:   "",
			shouldHaveHash: false, // No longer supported with simplified patterns
		},
		{
			filename:       "chunk_abc123def456",
			expectedHash:   "",
			shouldHaveHash: false, // Not 40 chars
		},
	}

	for _, tc := range testCases {
		hash, hasHash := extractHashFromFilename(tc.filename)
		if hasHash != tc.shouldHaveHash {
			t.Errorf("filename: %s, expected hasHash: %v, got: %v", tc.filename, tc.shouldHaveHash, hasHash)
		}
		if hasHash && hash != tc.expectedHash {
			t.Errorf("filename: %s, expected hash: %s, got: %s", tc.filename, tc.expectedHash, hash)
		}
	}
}

func TestHashCalculation(t *testing.T) {
	// Test data
	testData := []byte("Hello, World!")

	// Calculate hash
	hash := calculateFileHash(testData)

	// Expected SHA1 hash of "Hello, World!"
	expectedHash := "0a0a9f2a6772942557ab5355d76af442f8f65e01"

	if hash != expectedHash {
		t.Errorf("Hash calculation failed: expected %s, got %s", expectedHash, hash)
	}

	// Test verification
	if !verifyFileHash(testData, expectedHash) {
		t.Error("Hash verification failed for correct hash")
	}

	if verifyFileHash(testData, "wronghash") {
		t.Error("Hash verification passed for wrong hash")
	}
}

func TestHashVerificationWithRealData(t *testing.T) {
	// Test with some real data to ensure our hash calculation is correct
	testCases := []struct {
		data     string
		expected string
	}{
		{"", "da39a3ee5e6b4b0d3255bfef95601890afd80709"},              // SHA1 of empty string
		{"test", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"},          // SHA1 of "test"
		{"Hello, World!", "0a0a9f2a6772942557ab5355d76af442f8f65e01"}, // SHA1 of "Hello, World!"
	}

	for _, tc := range testCases {
		data := []byte(tc.data)
		hash := calculateFileHash(data)
		if hash != tc.expected {
			t.Errorf("Hash calculation failed for '%s': expected %s, got %s", tc.data, tc.expected, hash)
		}

		if !verifyFileHash(data, tc.expected) {
			t.Errorf("Hash verification failed for '%s'", tc.data)
		}
	}
}

func TestResponseHashCalculation(t *testing.T) {
	// Create a mock HTTP response
	resp := &http.Response{
		StatusCode: 200,
		Status:     "200 OK",
		Header: http.Header{
			"Content-Type":   []string{"application/octet-stream"},
			"Content-Length": []string{"13"},
			"Cache-Control":  []string{"public, max-age=3600"},
		},
	}

	bodyData := []byte("Hello, World!")

	// Calculate response hash
	responseHash := calculateResponseHash(resp, bodyData)

	// The hash should be different from just the body hash
	bodyHash := calculateFileHash(bodyData)

	if responseHash == bodyHash {
		t.Error("Response hash should be different from body hash when headers are present")
	}

	// Test that the same response produces the same hash
	responseHash2 := calculateResponseHash(resp, bodyData)
	if responseHash != responseHash2 {
		t.Error("Response hash should be consistent for the same response")
	}

	// Test with different headers
	resp2 := &http.Response{
		StatusCode: 200,
		Status:     "200 OK",
		Header: http.Header{
			"Content-Type":   []string{"text/plain"},
			"Content-Length": []string{"13"},
		},
	}

	responseHash3 := calculateResponseHash(resp2, bodyData)
	if responseHash == responseHash3 {
		t.Error("Response hash should be different for different headers")
	}
}
```
version/version.go:

```
@@ -1,3 +1,16 @@
// version/version.go
package version

import "time"

var Version string
var Date string

func init() {
	if Version == "" {
		Version = "0.0.0-dev"
	}
	if Date == "" {
		Date = time.Now().Format("2006-01-02 15:04:05")
	}
}
```
147
vfs/cache/cache.go
vendored
147
vfs/cache/cache.go
vendored
@@ -1,10 +1,14 @@
|
||||
// vfs/cache/cache.go
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
||||
"s1d3sw1ped/SteamCache2/vfs/gc"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Ensure CacheFS implements VFS.
|
||||
@@ -16,6 +20,8 @@ type CacheFS struct {
|
||||
slow vfs.VFS
|
||||
|
||||
cacheHandler CacheHandler
|
||||
|
||||
keyLocks sync.Map // map[string]*sync.RWMutex for per-key locks
|
||||
}
|
||||
|
||||
type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
|
||||
@@ -24,6 +30,7 @@ type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
|
||||
func New(cacheHandler CacheHandler) *CacheFS {
|
||||
return &CacheFS{
|
||||
cacheHandler: cacheHandler,
|
||||
keyLocks: sync.Map{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,6 +46,12 @@ func (c *CacheFS) SetFast(vfs vfs.VFS) {
|
||||
c.fast = vfs
|
||||
}
|
||||
|
||||
// getKeyLock returns a RWMutex for the given key, creating it if necessary.
|
||||
func (c *CacheFS) getKeyLock(key string) *sync.RWMutex {
|
||||
mu, _ := c.keyLocks.LoadOrStore(key, &sync.RWMutex{})
|
||||
return mu.(*sync.RWMutex)
|
||||
}

// cacheState returns the state of the file at key.
func (c *CacheFS) cacheState(key string) cachestate.CacheState {
	if c.fast != nil {
@@ -63,8 +76,68 @@ func (c *CacheFS) Size() int64 {
	return c.slow.Size()
}

// Set sets the file at key to src. If the file is already in the cache, it is replaced.
func (c *CacheFS) Set(key string, src []byte) error {
// Delete deletes the file at key from the cache.
func (c *CacheFS) Delete(key string) error {
	mu := c.getKeyLock(key)
	mu.Lock()
	defer mu.Unlock()

	if c.fast != nil {
		c.fast.Delete(key)
	}
	return c.slow.Delete(key)
}

// Open returns the file at key. If the file is not in the cache, it is fetched from the storage.
func (c *CacheFS) Open(key string) (io.ReadCloser, error) {
	mu := c.getKeyLock(key)
	mu.RLock()
	defer mu.RUnlock()

	state := c.cacheState(key)

	switch state {
	case cachestate.CacheStateHit:
		// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
		// Record fast storage access for adaptive promotion
		if c.fast != nil {
			gc.RecordFastStorageAccess()
		}
		return c.fast.Open(key)
	case cachestate.CacheStateMiss:
		slowReader, err := c.slow.Open(key)
		if err != nil {
			return nil, err
		}

		sstat, _ := c.slow.Stat(key)
		if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
			// We are reading the file from the slow storage and it was accessed less than a minute ago, so it is popular and we should copy it into the fast storage.
			if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
				fastWriter, err := c.fast.Create(key, sstat.Size())
				if err == nil {
					return &teeReadCloser{
						Reader:  io.TeeReader(slowReader, fastWriter),
						closers: []io.Closer{slowReader, fastWriter},
					}, nil
				}
			}
		}

		return slowReader, nil
	case cachestate.CacheStateNotFound:
		return nil, vfserror.ErrNotFound
	}

	panic(vfserror.ErrUnreachable)
}

// Create creates a new file at key. If the file is already in the cache, it is replaced.
func (c *CacheFS) Create(key string, size int64) (io.WriteCloser, error) {
	mu := c.getKeyLock(key)
	mu.Lock()
	defer mu.Unlock()

	state := c.cacheState(key)

	switch state {
@@ -72,56 +145,9 @@ func (c *CacheFS) Set(key string, src []byte) error {
		if c.fast != nil {
			c.fast.Delete(key)
		}
		return c.slow.Set(key, src)
		return c.slow.Create(key, size)
	case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
		return c.slow.Set(key, src)
	}

	panic(vfserror.ErrUnreachable)
}

// Delete deletes the file at key from the cache.
func (c *CacheFS) Delete(key string) error {
	if c.fast != nil {
		c.fast.Delete(key)
	}
	return c.slow.Delete(key)
}

// Get returns the file at key. If the file is not in the cache, it is fetched from the storage.
func (c *CacheFS) Get(key string) ([]byte, error) {
	src, _, err := c.GetS(key)
	return src, err
}

// GetS returns the file at key. If the file is not in the cache, it is fetched from the storage. It also returns the cache state.
func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
	state := c.cacheState(key)

	switch state {
	case cachestate.CacheStateHit:
		// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
		src, err := c.fast.Get(key)
		return src, state, err
	case cachestate.CacheStateMiss:
		src, err := c.slow.Get(key)
		if err != nil {
			return nil, state, err
		}

		sstat, _ := c.slow.Stat(key)
		if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
			// We are reading the file from the slow storage and it was accessed less than a minute ago, so it is popular and we should copy it into the fast storage.
			if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
				if err := c.fast.Set(key, src); err != nil {
					return nil, state, err
				}
			}
		}

		return src, state, nil
	case cachestate.CacheStateNotFound:
		return nil, state, vfserror.ErrNotFound
		return c.slow.Create(key, size)
	}

	panic(vfserror.ErrUnreachable)
@@ -130,6 +156,10 @@ func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
// Stat returns information about the file at key.
// Warning: This will return information about the file in the fastest storage it is in.
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
	mu := c.getKeyLock(key)
	mu.RLock()
	defer mu.RUnlock()

	state := c.cacheState(key)

	switch state {
@@ -150,3 +180,18 @@ func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
func (c *CacheFS) StatAll() []*vfs.FileInfo {
	return c.slow.StatAll()
}

type teeReadCloser struct {
	io.Reader
	closers []io.Closer
}

func (t *teeReadCloser) Close() error {
	var err error
	for _, c := range t.closers {
		if e := c.Close(); e != nil {
			err = e
		}
	}
	return err
}
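The teeReadCloser is what makes promotion free on the read path: the miss branch of Open wires the slow reader through io.TeeReader into a fast-tier writer, so bytes land in fast storage as the client streams them, with no second read of the slow tier. A hedged usage sketch (serveChunk and the key are hypothetical; assumes imports of "io" and the cache package above):

	// Sketch: one read through CacheFS both serves the client and warms the fast tier.
	func serveChunk(c *cache.CacheFS, w io.Writer, key string) error {
		rc, err := c.Open(key) // on a slow-tier hit this returns a teeReadCloser
		if err != nil {
			return err
		}
		defer rc.Close() // closes the slow reader and, when promoting, the fast writer

		_, err = io.Copy(w, rc) // every byte copied is also written into fast storage
		return err
	}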
115 vfs/cache/cache_test.go vendored
@@ -1,7 +1,9 @@
// vfs/cache/cache_test.go
package cache

import (
	"errors"
	"io"
	"testing"

	"s1d3sw1ped/SteamCache2/vfs"
@@ -15,8 +17,6 @@ func testMemory() vfs.VFS {
}

func TestNew(t *testing.T) {
	t.Parallel()

	fast := testMemory()
	slow := testMemory()

@@ -29,8 +29,6 @@ func TestNew(t *testing.T) {
}

func TestNewPanics(t *testing.T) {
	t.Parallel()

	defer func() {
		if r := recover(); r == nil {
			t.Fatal("expected panic but did not get one")
@@ -42,9 +40,7 @@ func TestNewPanics(t *testing.T) {
	cache.SetSlow(nil)
}

func TestSetAndGet(t *testing.T) {
	t.Parallel()

func TestCreateAndOpen(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
@@ -54,23 +50,26 @@ func TestSetAndGet(t *testing.T) {
	key := "test"
	value := []byte("value")

	if err := cache.Set(key, value); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	got, err := cache.Get(key)
	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	rc, err := cache.Open(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}

func TestSetAndGetNoFast(t *testing.T) {
	t.Parallel()

func TestCreateAndOpenNoFast(t *testing.T) {
	slow := testMemory()
	cache := New(nil)
	cache.SetSlow(slow)
@@ -78,22 +77,26 @@ func TestSetAndGetNoFast(t *testing.T) {
	key := "test"
	value := []byte("value")

	if err := cache.Set(key, value); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	got, err := cache.Get(key)
	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	rc, err := cache.Open(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}
func TestCaching(t *testing.T) {
	t.Parallel()

func TestCachingPromotion(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
@@ -105,71 +108,42 @@ func TestCaching(t *testing.T) {
	key := "test"
	value := []byte("value")

	if err := fast.Set(key, value); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	ws, _ := slow.Create(key, int64(len(value)))
	ws.Write(value)
	ws.Close()

	if err := slow.Set(key, value); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, state, err := cache.GetS(key)
	rc, err := cache.Open(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if state != cachestate.CacheStateHit {
		t.Fatalf("expected %v, got %v", cachestate.CacheStateHit, state)
	}

	err = fast.Delete(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	got, state, err := cache.GetS(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if state != cachestate.CacheStateMiss {
		t.Fatalf("expected %v, got %v", cachestate.CacheStateMiss, state)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}

	err = cache.Delete(key)
	// Check if promoted to fast
	_, err = fast.Open(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, state, err = cache.GetS(key)
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
	if state != cachestate.CacheStateNotFound {
		t.Fatalf("expected %v, got %v", cachestate.CacheStateNotFound, state)
		t.Error("Expected promotion to fast cache")
	}
}

func TestGetNotFound(t *testing.T) {
	t.Parallel()

func TestOpenNotFound(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
	cache.SetFast(fast)
	cache.SetSlow(slow)

	_, err := cache.Get("nonexistent")
	_, err := cache.Open("nonexistent")
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestDelete(t *testing.T) {
	t.Parallel()

	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
@@ -179,23 +153,24 @@ func TestDelete(t *testing.T) {
	key := "test"
	value := []byte("value")

	if err := cache.Set(key, value); err != nil {
	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	if err := cache.Delete(key); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err := cache.Get(key)
	_, err = cache.Open(key)
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestStat(t *testing.T) {
	t.Parallel()

	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
@@ -205,9 +180,12 @@ func TestStat(t *testing.T) {
	key := "test"
	value := []byte("value")

	if err := cache.Set(key, value); err != nil {
	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	info, err := cache.Stat(key)
	if err != nil {
@@ -217,4 +195,7 @@ func TestStat(t *testing.T) {
	if info == nil {
		t.Fatal("expected file info to be non-nil")
	}
	if info.Size() != int64(len(value)) {
		t.Errorf("expected size %d, got %d", len(value), info.Size())
	}
}
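These tests exercise the streaming API single-threaded; the per-key locks only earn their keep under concurrency. A hedged sketch of the kind of test that would cover that path (not part of this diff; it would also need "sync" in the imports):

	func TestConcurrentCreate(t *testing.T) {
		slow := testMemory()
		c := New(nil)
		c.SetSlow(slow)

		var wg sync.WaitGroup
		for i := 0; i < 8; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				w, err := c.Create("test", 5) // writers to the same key serialize on its lock
				if err != nil {
					return
				}
				w.Write([]byte("value"))
				w.Close()
			}()
		}
		wg.Wait() // run with -race to catch unlocked access
	}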

@@ -1,3 +1,4 @@
// vfs/cachestate/cachestate.go
package cachestate

import "s1d3sw1ped/SteamCache2/vfs/vfserror"

374 vfs/disk/disk.go
@@ -1,7 +1,10 @@
// vfs/disk/disk.go
package disk

import (
	"container/list"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
@@ -12,6 +15,38 @@ import (
	"time"

	"github.com/docker/go-units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	diskCapacityBytes = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "disk_cache_capacity_bytes",
			Help: "Total capacity of the disk cache in bytes",
		},
	)

	diskSizeBytes = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "disk_cache_size_bytes",
			Help: "Total size of the disk cache in bytes",
		},
	)

	diskReadBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "disk_cache_read_bytes_total",
			Help: "Total number of bytes read from the disk cache",
		},
	)

	diskWriteBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "disk_cache_write_bytes_total",
			Help: "Total number of bytes written to the disk cache",
		},
	)
)

// Ensure DiskFS implements VFS.
@@ -23,8 +58,49 @@ type DiskFS struct {

	info     map[string]*vfs.FileInfo
	capacity int64
	mu       sync.Mutex
	sg       sync.WaitGroup
	size     int64
	mu       sync.RWMutex
	keyLocks sync.Map // map[string]*sync.RWMutex
	LRU      *lruList
}

// lruList for LRU eviction
type lruList struct {
	list *list.List
	elem map[string]*list.Element
}

func newLruList() *lruList {
	return &lruList{
		list: list.New(),
		elem: make(map[string]*list.Element),
	}
}

func (l *lruList) MoveToFront(key string) {
	if e, ok := l.elem[key]; ok {
		l.list.MoveToFront(e)
	}
}

func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
	e := l.list.PushFront(fi)
	l.elem[key] = e
	return e
}

func (l *lruList) Remove(key string) {
	if e, ok := l.elem[key]; ok {
		l.list.Remove(e)
		delete(l.elem, key)
	}
}

func (l *lruList) Back() *vfs.FileInfo {
	if e := l.list.Back(); e != nil {
		return e.Value.(*vfs.FileInfo)
	}
	return nil
}
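The lruList pairs a doubly linked list (most recently used at the front) with a key-to-element index, so touch, add, remove, and eviction-candidate lookup are all O(1). A hedged standalone sketch of the same structure over plain strings:

	// Illustrative LRU skeleton; names are mine, not the project's.
	package main

	import (
		"container/list"
		"fmt"
	)

	type lru struct {
		l    *list.List
		elem map[string]*list.Element
	}

	func newLRU() *lru { return &lru{l: list.New(), elem: map[string]*list.Element{}} }

	// touch marks a key as most recently used, inserting it if new.
	func (c *lru) touch(k string) {
		if e, ok := c.elem[k]; ok {
			c.l.MoveToFront(e)
		} else {
			c.elem[k] = c.l.PushFront(k)
		}
	}

	func main() {
		c := newLRU()
		c.touch("a")
		c.touch("b")
		c.touch("a")                  // "a" is hot again
		fmt.Println(c.l.Back().Value) // "b": the eviction candidate
	}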

// New creates a new DiskFS.
@@ -42,6 +118,11 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
		if !os.IsNotExist(err) {
			panic(err) // panic if the error is something other than not found
		}
		os.Mkdir(root, 0755) // create the root directory if it does not exist
		fi, err = os.Stat(root) // re-stat to get the file info
		if err != nil {
			panic(err) // panic if the re-stat fails
		}
	}
	if !fi.IsDir() {
		panic("disk root must be a directory") // panic if the root is not a directory
@@ -51,14 +132,18 @@ func new(root string, capacity int64, skipinit bool) *DiskFS {
		root:     root,
		info:     make(map[string]*vfs.FileInfo),
		capacity: capacity,
		mu:       sync.Mutex{},
		sg:       sync.WaitGroup{},
		mu:       sync.RWMutex{},
		keyLocks: sync.Map{},
		LRU:      newLruList(),
	}

	os.MkdirAll(dfs.root, 0755)

	diskCapacityBytes.Set(float64(dfs.capacity))

	if !skipinit {
		dfs.init()
		diskSizeBytes.Set(float64(dfs.Size()))
	}

	return dfs
@@ -73,12 +158,30 @@ func NewSkipInit(root string, capacity int64) *DiskFS {
}

func (d *DiskFS) init() {
	// logger.Logger.Info().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Msg("init")

	tstart := time.Now()

	d.walk(d.root)
	d.sg.Wait()
	err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			return nil
		}

		d.mu.Lock()
		k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
		fi := vfs.NewFileInfoFromOS(info, k)
		d.info[k] = fi
		d.LRU.Add(k, fi)
		d.size += info.Size()
		d.mu.Unlock()

		return nil
	})
	if err != nil {
		logger.Logger.Error().Err(err).Msg("Walk failed")
	}

	logger.Logger.Info().
		Str("name", d.Name()).
@@ -90,36 +193,6 @@ func (d *DiskFS) init() {
		Msg("init")
}

func (d *DiskFS) walk(path string) {
	d.sg.Add(1)
	go func() {
		defer d.sg.Done()
		filepath.Walk(path, func(npath string, info os.FileInfo, err error) error {
			if path == npath {
				return nil
			}

			if err != nil {
				return err
			}

			if info.IsDir() {
				d.walk(npath)
				return filepath.SkipDir
			}

			d.mu.Lock()
			k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
			logger.Logger.Debug().Str("name", k).Str("root", d.root).Msg("walk")
			d.info[k] = vfs.NewFileInfoFromOS(info, k)
			d.mu.Unlock()

			// logger.Logger.Debug().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Str("path", npath).Msg("init")
			return nil
		})
	}()
}

func (d *DiskFS) Capacity() int64 {
	return d.capacity
}
@@ -129,52 +202,114 @@ func (d *DiskFS) Name() string {
}

func (d *DiskFS) Size() int64 {
	d.mu.Lock()
	defer d.mu.Unlock()

	var size int64
	for _, v := range d.info {
		size += v.Size()
	}
	return size
	d.mu.RLock()
	defer d.mu.RUnlock()
	return d.size
}

func (d *DiskFS) Set(key string, src []byte) error {
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
	mu, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
	return mu.(*sync.RWMutex)
}

func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
	if key == "" {
		return vfserror.ErrInvalidKey
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return vfserror.ErrInvalidKey
		return nil, vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	d.mu.RLock()
	if d.capacity > 0 {
		if size := d.Size() + int64(len(src)); size > d.capacity {
			return vfserror.ErrDiskFull
		if d.size+size > d.capacity {
			d.mu.RUnlock()
			return nil, vfserror.ErrDiskFull
		}
	}
	d.mu.RUnlock()

	logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("set")

	if _, err := d.Stat(key); err == nil {
		logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("delete")
		d.Delete(key)
	}
	keyMu := d.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	// Check again after lock
	d.mu.Lock()
	defer d.mu.Unlock()
	os.MkdirAll(d.root+"/"+filepath.Dir(key), 0755)
	if err := os.WriteFile(d.root+"/"+key, src, 0644); err != nil {
		return err
	if fi, exists := d.info[key]; exists {
		d.size -= fi.Size()
		d.LRU.Remove(key)
		delete(d.info, key)
		path := filepath.Join(d.root, key)
		os.Remove(path) // Ignore error, as file might not exist or other issues
	}
	d.mu.Unlock()

	path := filepath.Join(d.root, key)
	path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, err
	}

	fi, err := os.Stat(d.root + "/" + key)
	file, err := os.Create(path)
	if err != nil {
		panic(err)
		return nil, err
	}

	d.info[key] = vfs.NewFileInfoFromOS(fi, key)
	return &diskWriteCloser{
		Writer: file,
		onClose: func(n int64) error {
			fi, err := os.Stat(path)
			if err != nil {
				os.Remove(path)
				return err
			}

			return nil
			d.mu.Lock()
			finfo := vfs.NewFileInfoFromOS(fi, key)
			d.info[key] = finfo
			d.LRU.Add(key, finfo)
			d.size += n
			d.mu.Unlock()

			diskWriteBytes.Add(float64(n))
			diskSizeBytes.Set(float64(d.Size()))

			return nil
		},
		key:  key,
		file: file,
	}, nil
}

type diskWriteCloser struct {
	io.Writer
	onClose func(int64) error
	n       int64
	key     string
	file    *os.File
}

func (wc *diskWriteCloser) Write(p []byte) (int, error) {
	n, err := wc.Writer.Write(p)
	wc.n += int64(n)
	return n, err
}

func (wc *diskWriteCloser) Close() error {
	err := wc.file.Close()
	if e := wc.onClose(wc.n); e != nil {
		os.Remove(wc.file.Name())
		return e
	}
	return err
}

// Delete deletes the value of key.
@@ -186,24 +321,41 @@ func (d *DiskFS) Delete(key string) error {
		return vfserror.ErrInvalidKey
	}

	_, err := d.Stat(key)
	if err != nil {
		return err
	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	if strings.Contains(key, "..") {
		return vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	d.mu.Lock()
	defer d.mu.Unlock()

	fi, exists := d.info[key]
	if !exists {
		d.mu.Unlock()
		return vfserror.ErrNotFound
	}
	d.size -= fi.Size()
	d.LRU.Remove(key)
	delete(d.info, key)
	if err := os.Remove(filepath.Join(d.root, key)); err != nil {
	d.mu.Unlock()

	path := filepath.Join(d.root, key)
	path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
	if err := os.Remove(path); err != nil {
		return err
	}

	diskSizeBytes.Set(float64(d.Size()))

	return nil
}

// Get gets the value of key and returns it.
func (d *DiskFS) Get(key string) ([]byte, error) {
// Open opens the file at key and returns it.
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
@@ -211,20 +363,59 @@ func (d *DiskFS) Get(key string) ([]byte, error) {
		return nil, vfserror.ErrInvalidKey
	}

	_, err := d.Stat(key)
	if err != nil {
		return nil, err
	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	d.mu.Lock()
	defer d.mu.Unlock()
	fi, exists := d.info[key]
	if !exists {
		d.mu.Unlock()
		return nil, vfserror.ErrNotFound
	}
	fi.ATime = time.Now()
	d.LRU.MoveToFront(key)
	d.mu.Unlock()

	data, err := os.ReadFile(filepath.Join(d.root, key))
	path := filepath.Join(d.root, key)
	path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	return data, nil
	// Update metrics on close
	return &readCloser{
		ReadCloser: file,
		onClose: func(n int64) {
			diskReadBytes.Add(float64(n))
		},
	}, nil
}

type readCloser struct {
	io.ReadCloser
	onClose func(int64)
	n       int64
}

func (rc *readCloser) Read(p []byte) (int, error) {
	n, err := rc.ReadCloser.Read(p)
	rc.n += int64(n)
	return n, err
}

func (rc *readCloser) Close() error {
	err := rc.ReadCloser.Close()
	rc.onClose(rc.n)
	return err
}

// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
@@ -236,10 +427,19 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
		return nil, vfserror.ErrInvalidKey
	}

	logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("stat")
	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	d.mu.Lock()
	defer d.mu.Unlock()
	keyMu := d.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	d.mu.RLock()
	defer d.mu.RUnlock()

	if fi, ok := d.info[key]; !ok {
		return nil, vfserror.ErrNotFound
@@ -248,13 +448,13 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
	}
}

func (m *DiskFS) StatAll() []*vfs.FileInfo {
	m.mu.Lock()
	defer m.mu.Unlock()
func (d *DiskFS) StatAll() []*vfs.FileInfo {
	d.mu.RLock()
	defer d.mu.RUnlock()

	// hard copy the file info to prevent modification of the original file info or the other way around
	files := make([]*vfs.FileInfo, 0, len(m.info))
	for _, v := range m.info {
	files := make([]*vfs.FileInfo, 0, len(d.info))
	for _, v := range d.info {
		fi := *v
		files = append(files, &fi)
	}

@@ -1,145 +1,180 @@
// vfs/disk/disk_test.go
package disk

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"testing"
)

func TestAllDisk(t *testing.T) {
	t.Parallel()

func TestCreateAndOpen(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 1024)
	if err := m.Set("key", []byte("value")); err != nil {
		t.Errorf("Set failed: %v", err)
	}
	key := "key"
	value := []byte("value")

	if err := m.Set("key", []byte("value1")); err != nil {
		t.Errorf("Set failed: %v", err)
	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	if d, err := m.Get("key"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value1" {
		t.Errorf("Get failed: got %s, want %s", d, "value1")
	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if err := m.Delete("key"); err != nil {
		t.Errorf("Delete failed: %v", err)
	}

	if _, err := m.Get("key"); err == nil {
		t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
	}

	if err := m.Delete("key"); err == nil {
		t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
	}

	if _, err := m.Stat("key"); err == nil {
		t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
	}

	if err := m.Set("key", []byte("value")); err != nil {
		t.Errorf("Set failed: %v", err)
	}

	if _, err := m.Stat("key"); err != nil {
		t.Errorf("Stat failed: %v", err)
	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}

func TestLimited(t *testing.T) {
	t.Parallel()
func TestOverwrite(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 1024)
	key := "key"
	value1 := []byte("value1")
	value2 := []byte("value2")

	w, err := m.Create(key, int64(len(value1)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value1)
	w.Close()

	w, err = m.Create(key, int64(len(value2)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value2)
	w.Close()

	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value2) {
		t.Fatalf("expected %s, got %s", value2, got)
	}
}

func TestDelete(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	if err := m.Delete(key); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}

	_, err = m.Open(key)
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestCapacityLimit(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 10)
	for i := 0; i < 11; i++ {
		if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 {
			t.Errorf("Set failed: %v", err)
		w, err := m.Create(fmt.Sprintf("key%d", i), 1)
		if err != nil && i < 10 {
			t.Errorf("Create failed: %v", err)
		} else if i == 10 && err == nil {
			t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull)
			t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
		}
		if i < 10 {
			w.Write([]byte("1"))
			w.Close()
		}
	}
}

func TestInit(t *testing.T) {
	t.Parallel()

func TestInitExistingFiles(t *testing.T) {
	td := t.TempDir()

	path := filepath.Join(td, "test", "key")

	os.MkdirAll(filepath.Dir(path), 0755)

	os.WriteFile(path, []byte("value"), 0644)

	m := New(td, 10)
	if _, err := m.Get("test/key"); err != nil {
		t.Errorf("Get failed: %v", err)
	rc, err := m.Open("test/key")
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != "value" {
		t.Errorf("expected value, got %s", got)
	}

	s, _ := m.Stat("test/key")
	if s.Name() != "test/key" {
		t.Errorf("Stat failed: got %s, want %s", s.Name(), "key")
	s, err := m.Stat("test/key")
	if err != nil {
		t.Fatalf("Stat failed: %v", err)
	}
	if s == nil {
		t.Error("Stat returned nil")
	}
	if s != nil && s.Name() != "test/key" {
		t.Errorf("Stat failed: got %s, want %s", s.Name(), "test/key")
	}
}

func TestDiskSizeDiscrepancy(t *testing.T) {
	t.Parallel()
func TestSizeConsistency(t *testing.T) {
	td := t.TempDir()

	assumedSize := int64(6 + 5 + 6) // 6 + 5 + 6 bytes for key, key1, key2
	os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)

	m := New(td, 1024)
	if 6 != m.Size() {
		t.Errorf("Size failed: got %d, want %d", m.Size(), 6)
	if m.Size() != 6 {
		t.Errorf("Size failed: got %d, want 6", m.Size())
	}

	if err := m.Set("key", []byte("value")); err != nil {
		t.Errorf("Set failed: %v", err)
	w, err := m.Create("key", 5)
	if err != nil {
		t.Errorf("Create failed: %v", err)
	}
	w.Write([]byte("value"))
	w.Close()

	if err := m.Set("key1", []byte("value1")); err != nil {
		t.Errorf("Set failed: %v", err)
	w, err = m.Create("key1", 6)
	if err != nil {
		t.Errorf("Create failed: %v", err)
	}
	w.Write([]byte("value1"))
	w.Close()

	assumedSize := int64(6 + 5 + 6)
	if assumedSize != m.Size() {
		t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
	}

	if d, err := m.Get("key"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value" {
		t.Errorf("Get failed: got %s, want %s", d, "value")
	rc, err := m.Open("key")
	if err != nil {
		t.Errorf("Open failed: %v", err)
	}

	if d, err := m.Get("key1"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value1" {
		t.Errorf("Get failed: got %s, want %s", d, "value1")
	d, _ := io.ReadAll(rc)
	rc.Close()
	if string(d) != "value" {
		t.Errorf("Get failed: got %s, want value", d)
	}

	m = New(td, 1024)

	if assumedSize != m.Size() {
		t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
	}

	if d, err := m.Get("key"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value" {
		t.Errorf("Get failed: got %s, want %s", d, "value")
	}

	if d, err := m.Get("key1"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value1" {
		t.Errorf("Get failed: got %s, want %s", d, "value1")
	}

	if assumedSize != m.Size() {
		t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
	}

@@ -1,3 +1,4 @@
// vfs/fileinfo.go
package vfs

import (

748 vfs/gc/gc.go
@@ -1,84 +1,728 @@
// vfs/gc/gc.go
package gc

import (
	"fmt"
	"io"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"s1d3sw1ped/SteamCache2/vfs/disk"
	"s1d3sw1ped/SteamCache2/vfs/memory"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"sort"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	// ErrInsufficientSpace is returned when there are no files to delete in the VFS.
	ErrInsufficientSpace = fmt.Errorf("no files to delete")
)

// Prometheus metrics for adaptive promotion
var (
	promotionThresholds = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "promotion_thresholds_bytes",
			Help: "Current promotion thresholds in bytes",
		},
		[]string{"threshold_type"},
	)

	promotionWindows = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "promotion_windows_seconds",
			Help: "Current promotion time windows in seconds",
		},
		[]string{"window_type"},
	)

	promotionStats = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "promotion_stats",
			Help: "Promotion statistics",
		},
		[]string{"metric_type"},
	)

	promotionAdaptations = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "promotion_adaptations_total",
			Help: "Total number of promotion threshold adaptations",
		},
		[]string{"direction"},
	)
)

// GCAlgorithm represents different garbage collection strategies
type GCAlgorithm string

const (
	LRU      GCAlgorithm = "lru"
	LFU      GCAlgorithm = "lfu"
	FIFO     GCAlgorithm = "fifo"
	Largest  GCAlgorithm = "largest"
	Smallest GCAlgorithm = "smallest"
	Hybrid   GCAlgorithm = "hybrid"
)

// LRUGC deletes files in LRU order until enough space is reclaimed.
func LRUGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LRU GC")

	var reclaimed uint // reclaimed space in bytes

	for {
		switch fs := vfss.(type) {
		case *disk.DiskFS:
			fi := fs.LRU.Back()
			if fi == nil {
				return ErrInsufficientSpace // No files to delete
			}
			sz := uint(fi.Size())
			err := fs.Delete(fi.Name())
			if err != nil {
				continue // If delete fails, try the next file
			}
			reclaimed += sz
		case *memory.MemoryFS:
			fi := fs.LRU.Back()
			if fi == nil {
				return ErrInsufficientSpace // No files to delete
			}
			sz := uint(fi.Size())
			err := fs.Delete(fi.Name())
			if err != nil {
				continue // If delete fails, try the next file
			}
			reclaimed += sz
		default:
			panic("unreachable: unsupported VFS type for LRU GC") // panic if the VFS is not disk or memory
		}

		if reclaimed >= size {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC")
			return nil // stop if enough space is reclaimed
		}
	}
}

// LFUGC deletes files in LFU (Least Frequently Used) order until enough space is reclaimed.
func LFUGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LFU GC")

	// Get all files and sort by access count (frequency)
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by access count (ascending - least frequently used first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].AccessCount < files[j].AccessCount
	})

	var reclaimed uint
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC")
		return nil
	}
	return ErrInsufficientSpace
}

// FIFOGC deletes files in FIFO (First In, First Out) order until enough space is reclaimed.
func FIFOGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using FIFO GC")

	// Get all files and sort by modification time (oldest first), used here as a proxy for creation time
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by modification time (ascending - oldest first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].MTime.Before(files[j].MTime)
	})

	var reclaimed uint
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC")
		return nil
	}
	return ErrInsufficientSpace
}

// LargestGC deletes the largest files first until enough space is reclaimed.
func LargestGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Largest GC")

	// Get all files and sort by size (largest first)
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by size (descending - largest first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].Size > files[j].Size
	})

	var reclaimed uint
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC")
		return nil
	}
	return ErrInsufficientSpace
}

// SmallestGC deletes the smallest files first until enough space is reclaimed.
func SmallestGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Smallest GC")

	// Get all files and sort by size (smallest first)
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Sort by size (ascending - smallest first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].Size < files[j].Size
	})

	var reclaimed uint
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC")
		return nil
	}
	return ErrInsufficientSpace
}

// HybridGC combines LRU and size-based eviction with a scoring system.
func HybridGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Hybrid GC")

	// Get all files and calculate hybrid scores
	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	// Calculate hybrid scores (lower score = more likely to be evicted)
	// Score = (time since last access in seconds) * (file size in MB)
	now := time.Now()
	for i := range files {
		timeSinceAccess := now.Sub(files[i].ATime).Seconds()
		sizeMB := float64(files[i].Size) / (1024 * 1024)
		files[i].HybridScore = timeSinceAccess * sizeMB
	}

	// Sort by hybrid score (ascending - lowest scores first)
	sort.Slice(files, func(i, j int) bool {
		return files[i].HybridScore < files[j].HybridScore
	})

	var reclaimed uint
	for _, fi := range files {
		if reclaimed >= size {
			break
		}
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
	}

	if reclaimed >= size {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC")
		return nil
	}
	return ErrInsufficientSpace
}
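Concretely, with Score = seconds-since-access × size-in-MB and lower scores evicted first: a 1 MB file idle for an hour scores 3600 × 1 = 3600, while a 500 MB file idle for only a minute scores 60 × 500 = 30000, so the small stale file goes first and large recently-used files are protected. A hedged sketch of that arithmetic:

	// Hybrid score demo (illustrative values only).
	package main

	import (
		"fmt"
		"time"
	)

	func hybridScore(idle time.Duration, sizeBytes int64) float64 {
		return idle.Seconds() * float64(sizeBytes) / (1024 * 1024)
	}

	func main() {
		small := hybridScore(time.Hour, 1<<20)     // 1 MB idle for an hour  -> 3600
		large := hybridScore(time.Minute, 500<<20) // 500 MB idle a minute   -> 30000
		fmt.Println(small < large)                 // true: the stale small file is evicted first
	}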
|
||||
|
||||
// fileInfoWithMetadata extends FileInfo with additional metadata for GC algorithms
|
||||
type fileInfoWithMetadata struct {
|
||||
Name string
|
||||
Size int64
|
||||
MTime time.Time
|
||||
ATime time.Time
|
||||
AccessCount int64
|
||||
HybridScore float64
|
||||
}
|
||||
|
||||
// getAllFiles retrieves all files from the VFS with additional metadata
|
||||
func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
|
||||
var files []fileInfoWithMetadata
|
||||
|
||||
switch fs := vfss.(type) {
|
||||
case *disk.DiskFS:
|
||||
allFiles := fs.StatAll()
|
||||
for _, fi := range allFiles {
|
||||
// For disk, we can't easily track access count, so we'll use 1 as default
|
||||
files = append(files, fileInfoWithMetadata{
|
||||
Name: fi.Name(),
|
||||
Size: fi.Size(),
|
||||
MTime: fi.ModTime(),
|
||||
ATime: fi.AccessTime(),
|
||||
AccessCount: 1,
|
||||
})
|
||||
}
|
||||
case *memory.MemoryFS:
|
||||
allFiles := fs.StatAll()
|
||||
for _, fi := range allFiles {
|
||||
// For memory, we can't easily track access count, so we'll use 1 as default
|
||||
files = append(files, fileInfoWithMetadata{
|
||||
Name: fi.Name(),
|
||||
Size: fi.Size(),
|
||||
MTime: fi.ModTime(),
|
||||
ATime: fi.AccessTime(),
|
||||
AccessCount: 1,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
// GetGCAlgorithm returns the appropriate GC function based on the algorithm name
|
||||
func GetGCAlgorithm(algorithm GCAlgorithm) GCHandlerFunc {
|
||||
switch algorithm {
|
||||
case LRU:
|
||||
return LRUGC
|
||||
case LFU:
|
||||
return LFUGC
|
||||
case FIFO:
|
||||
return FIFOGC
|
||||
case Largest:
|
||||
return LargestGC
|
||||
case Smallest:
|
||||
return SmallestGC
|
||||
case Hybrid:
|
||||
return HybridGC
|
||||
default:
|
||||
logger.Logger.Warn().Str("algorithm", string(algorithm)).Msg("Unknown GC algorithm, falling back to LRU")
|
||||
return LRUGC
|
||||
}
|
||||
}
|
||||
|
||||
func PromotionDecider(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
|
||||
return time.Since(fi.AccessTime()) < time.Second*60 // Put hot files in the fast vfs if equipped
|
||||
}
|
||||
|
||||
// AdaptivePromotionDecider automatically adjusts promotion thresholds based on usage patterns
|
||||
type AdaptivePromotionDecider struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
// Current thresholds
|
||||
smallFileThreshold int64 // Size threshold for small files
|
||||
mediumFileThreshold int64 // Size threshold for medium files
|
||||
largeFileThreshold int64 // Size threshold for large files
|
||||
smallFileWindow time.Duration // Time window for small files
|
||||
mediumFileWindow time.Duration // Time window for medium files
|
||||
largeFileWindow time.Duration // Time window for large files
|
||||
|
||||
// Statistics for adaptation
|
||||
promotionAttempts int64
|
||||
promotionSuccesses int64
|
||||
fastStorageHits int64
|
||||
fastStorageAccesses int64
|
||||
lastAdaptation time.Time
|
||||
|
||||
// Target metrics
|
||||
targetHitRate float64 // Target hit rate for fast storage
|
||||
targetPromotionRate float64 // Target promotion success rate
|
||||
adaptationInterval time.Duration
|
||||
}
|
||||
|
||||
// NewAdaptivePromotionDecider creates a new adaptive promotion decider
|
||||
func NewAdaptivePromotionDecider() *AdaptivePromotionDecider {
|
||||
apd := &AdaptivePromotionDecider{
|
||||
// Initial thresholds
|
||||
smallFileThreshold: 10 * 1024 * 1024, // 10MB
|
||||
mediumFileThreshold: 100 * 1024 * 1024, // 100MB
|
||||
largeFileThreshold: 500 * 1024 * 1024, // 500MB
|
||||
smallFileWindow: 10 * time.Minute,
|
||||
mediumFileWindow: 2 * time.Minute,
|
||||
largeFileWindow: 30 * time.Second,
|
||||
|
||||
// Target metrics
|
||||
targetHitRate: 0.8, // 80% hit rate
|
||||
targetPromotionRate: 0.7, // 70% promotion success rate
|
||||
adaptationInterval: 5 * time.Minute,
|
||||
}
|
||||
|
||||
// Initialize Prometheus metrics
|
||||
apd.updatePrometheusMetrics()
|
||||
|
||||
return apd
|
||||
}
|
||||
|
||||
// ShouldPromote determines if a file should be promoted based on adaptive thresholds
|
||||
func (apd *AdaptivePromotionDecider) ShouldPromote(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
|
||||
apd.mu.Lock()
|
||||
defer apd.mu.Unlock()
|
||||
|
||||
// Check if it's time to adapt thresholds
|
||||
if time.Since(apd.lastAdaptation) > apd.adaptationInterval {
|
||||
apd.adaptThresholds()
|
||||
}
|
||||
|
||||
size := fi.Size()
|
||||
timeSinceAccess := time.Since(fi.AccessTime())
|
||||
|
||||
// Record promotion attempt
|
||||
apd.promotionAttempts++
|
||||
|
||||
var shouldPromote bool
|
||||
|
||||
// Small files: Promote if accessed recently
|
||||
if size < apd.smallFileThreshold {
|
||||
shouldPromote = timeSinceAccess < apd.smallFileWindow
|
||||
} else if size < apd.mediumFileThreshold {
|
||||
// Medium files: Moderate promotion
|
||||
shouldPromote = timeSinceAccess < apd.mediumFileWindow
|
||||
} else if size < apd.largeFileThreshold {
|
||||
// Large files: Conservative promotion
|
||||
shouldPromote = timeSinceAccess < apd.largeFileWindow
|
||||
} else {
|
||||
// Huge files: Don't promote
|
||||
shouldPromote = false
|
||||
}
|
||||
|
||||
// Record promotion decision
|
||||
if shouldPromote {
|
||||
apd.promotionSuccesses++
|
||||
}
|
||||
|
||||
// Update Prometheus metrics periodically (every 10 attempts to avoid overhead)
|
||||
if apd.promotionAttempts%10 == 0 {
|
||||
apd.updatePrometheusMetrics()
|
||||
}
|
||||
|
||||
return shouldPromote
|
||||
}
|
||||
|
||||
// RecordFastStorageAccess records when fast storage is accessed
|
||||
func (apd *AdaptivePromotionDecider) RecordFastStorageAccess() {
|
||||
apd.mu.Lock()
|
||||
defer apd.mu.Unlock()
|
||||
apd.fastStorageAccesses++
|
||||
|
||||
// Update Prometheus metrics periodically
|
||||
if apd.fastStorageAccesses%10 == 0 {
|
||||
apd.updatePrometheusMetrics()
|
||||
}
|
||||
}
|
||||
|
||||
// RecordFastStorageHit records when fast storage has a hit
|
||||
func (apd *AdaptivePromotionDecider) RecordFastStorageHit() {
|
||||
apd.mu.Lock()
|
||||
defer apd.mu.Unlock()
|
||||
apd.fastStorageHits++
|
||||
|
||||
// Update Prometheus metrics periodically
|
||||
if apd.fastStorageHits%10 == 0 {
|
||||
apd.updatePrometheusMetrics()
|
||||
}
|
||||
}
|
||||
|
||||
// adaptThresholds adjusts thresholds based on current performance
|
||||
func (apd *AdaptivePromotionDecider) adaptThresholds() {
|
||||
if apd.promotionAttempts < 10 || apd.fastStorageAccesses < 10 {
|
||||
// Not enough data to adapt
|
||||
return
|
||||
}
|
||||
|
||||
currentHitRate := float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
|
||||
currentPromotionRate := float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
|
||||
|
||||
logger.Logger.Debug().
|
||||
Float64("hit_rate", currentHitRate).
|
||||
Float64("promotion_rate", currentPromotionRate).
|
||||
Float64("target_hit_rate", apd.targetHitRate).
|
||||
Float64("target_promotion_rate", apd.targetPromotionRate).
|
||||
Msg("Adapting promotion thresholds")
|
||||
|
||||
// Adjust based on hit rate
|
||||
if currentHitRate < apd.targetHitRate {
|
||||
// Hit rate too low - be more aggressive with promotion
|
||||
apd.adjustThresholdsMoreAggressive()
|
||||
} else if currentHitRate > apd.targetHitRate+0.1 {
|
||||
// Hit rate too high - be more conservative
|
||||
apd.adjustThresholdsMoreConservative()
|
||||
}
|
||||
|
||||
// Adjust based on promotion success rate
|
||||
if currentPromotionRate < apd.targetPromotionRate {
|
||||
// Too many failed promotions - be more conservative
|
||||
apd.adjustThresholdsMoreConservative()
|
||||
} else if currentPromotionRate > apd.targetPromotionRate+0.1 {
|
||||
// High promotion success - can be more aggressive
|
||||
apd.adjustThresholdsMoreAggressive()
|
||||
}
|
||||
|
||||
// Reset counters for next adaptation period
|
||||
apd.promotionAttempts = 0
|
||||
apd.promotionSuccesses = 0
|
||||
apd.fastStorageHits = 0
|
||||
apd.fastStorageAccesses = 0
|
||||
apd.lastAdaptation = time.Now()
|
||||
|
||||
logger.Logger.Info().
|
||||
Int64("small_threshold_mb", apd.smallFileThreshold/(1024*1024)).
|
||||
Int64("medium_threshold_mb", apd.mediumFileThreshold/(1024*1024)).
|
||||
Int64("large_threshold_mb", apd.largeFileThreshold/(1024*1024)).
|
||||
Dur("small_window", apd.smallFileWindow).
|
||||
Dur("medium_window", apd.mediumFileWindow).
|
||||
Dur("large_window", apd.largeFileWindow).
|
||||
Msg("Updated promotion thresholds")
|
||||
}
|
||||
|
||||
// updatePrometheusMetrics updates all Prometheus metrics with current values
|
||||
func (apd *AdaptivePromotionDecider) updatePrometheusMetrics() {
|
||||
// Update threshold metrics
|
||||
promotionThresholds.WithLabelValues("small").Set(float64(apd.smallFileThreshold))
|
||||
promotionThresholds.WithLabelValues("medium").Set(float64(apd.mediumFileThreshold))
|
||||
promotionThresholds.WithLabelValues("large").Set(float64(apd.largeFileThreshold))
|
||||
|
||||
// Update window metrics
|
||||
promotionWindows.WithLabelValues("small").Set(apd.smallFileWindow.Seconds())
|
||||
promotionWindows.WithLabelValues("medium").Set(apd.mediumFileWindow.Seconds())
|
||||
promotionWindows.WithLabelValues("large").Set(apd.largeFileWindow.Seconds())
|
||||
|
||||
// Update statistics metrics
|
||||
hitRate := 0.0
|
||||
if apd.fastStorageAccesses > 0 {
|
||||
hitRate = float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
|
||||
}
|
||||
promotionRate := 0.0
|
||||
if apd.promotionAttempts > 0 {
|
||||
promotionRate = float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
|
||||
}
|
||||
|
||||
promotionStats.WithLabelValues("hit_rate").Set(hitRate)
|
||||
promotionStats.WithLabelValues("promotion_rate").Set(promotionRate)
|
||||
promotionStats.WithLabelValues("promotion_attempts").Set(float64(apd.promotionAttempts))
|
||||
promotionStats.WithLabelValues("promotion_successes").Set(float64(apd.promotionSuccesses))
|
||||
promotionStats.WithLabelValues("fast_storage_accesses").Set(float64(apd.fastStorageAccesses))
|
||||
promotionStats.WithLabelValues("fast_storage_hits").Set(float64(apd.fastStorageHits))
|
||||
}
|
||||
|
||||
// adjustThresholdsMoreAggressive makes promotion more aggressive
|
||||
func (apd *AdaptivePromotionDecider) adjustThresholdsMoreAggressive() {
|
||||
// Increase size thresholds (promote larger files)
|
||||
apd.smallFileThreshold = minInt64(apd.smallFileThreshold*11/10, 50*1024*1024) // Max 50MB
|
||||
apd.mediumFileThreshold = minInt64(apd.mediumFileThreshold*11/10, 200*1024*1024) // Max 200MB
|
||||
apd.largeFileThreshold = minInt64(apd.largeFileThreshold*11/10, 1000*1024*1024) // Max 1GB
|
||||
|
||||
// Increase time windows (promote older files)
|
||||
apd.smallFileWindow = minDuration(apd.smallFileWindow*11/10, 20*time.Minute)
|
||||
apd.mediumFileWindow = minDuration(apd.mediumFileWindow*11/10, 5*time.Minute)
|
||||
apd.largeFileWindow = minDuration(apd.largeFileWindow*11/10, 2*time.Minute)
|
||||
|
||||
// Record adaptation in Prometheus
|
||||
promotionAdaptations.WithLabelValues("aggressive").Inc()
|
||||
|
||||
// Update Prometheus metrics
|
||||
apd.updatePrometheusMetrics()
|
||||
}
|
||||
|
||||
// adjustThresholdsMoreConservative makes promotion more conservative
|
||||
func (apd *AdaptivePromotionDecider) adjustThresholdsMoreConservative() {
|
||||
// Decrease size thresholds (promote smaller files)
|
||||
apd.smallFileThreshold = maxInt64(apd.smallFileThreshold*9/10, 5*1024*1024) // Min 5MB
|
||||
apd.mediumFileThreshold = maxInt64(apd.mediumFileThreshold*9/10, 50*1024*1024) // Min 50MB
|
||||
apd.largeFileThreshold = maxInt64(apd.largeFileThreshold*9/10, 200*1024*1024) // Min 200MB
|
||||
|
||||
// Decrease time windows (promote only recent files)
|
||||
apd.smallFileWindow = maxDuration(apd.smallFileWindow*9/10, 5*time.Minute)
|
||||
apd.mediumFileWindow = maxDuration(apd.mediumFileWindow*9/10, 1*time.Minute)
|
||||
apd.largeFileWindow = maxDuration(apd.largeFileWindow*9/10, 15*time.Second)
|
||||
|
||||
// Record adaptation in Prometheus
|
||||
promotionAdaptations.WithLabelValues("conservative").Inc()
|
||||
|
||||
// Update Prometheus metrics
|
||||
apd.updatePrometheusMetrics()
|
||||
}
|
||||
|
||||
// GetStats returns current statistics for monitoring.
func (apd *AdaptivePromotionDecider) GetStats() map[string]interface{} {
	apd.mu.RLock()
	defer apd.mu.RUnlock()

	hitRate := 0.0
	if apd.fastStorageAccesses > 0 {
		hitRate = float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
	}

	promotionRate := 0.0
	if apd.promotionAttempts > 0 {
		promotionRate = float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
	}

	return map[string]interface{}{
		"small_file_threshold_mb":    apd.smallFileThreshold / (1024 * 1024),
		"medium_file_threshold_mb":   apd.mediumFileThreshold / (1024 * 1024),
		"large_file_threshold_mb":    apd.largeFileThreshold / (1024 * 1024),
		"small_file_window_minutes":  apd.smallFileWindow.Minutes(),
		"medium_file_window_minutes": apd.mediumFileWindow.Minutes(),
		"large_file_window_seconds":  apd.largeFileWindow.Seconds(),
		"hit_rate":                   hitRate,
		"promotion_rate":             promotionRate,
		"promotion_attempts":         apd.promotionAttempts,
		"promotion_successes":        apd.promotionSuccesses,
		"fast_storage_accesses":      apd.fastStorageAccesses,
		"fast_storage_hits":          apd.fastStorageHits,
	}
}
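Since GetStats returns plain data, it is easy to surface on a debug endpoint alongside the Prometheus metrics. A minimal sketch using only encoding/json and net/http; the /debug/promotion route and the wiring are illustrative assumptions, not part of this change:

// servePromotionStats exposes the decider's statistics as JSON on a
// hypothetical debug route. The stats function is injected so the sketch
// stays decoupled from the package above.
func servePromotionStats(mux *http.ServeMux, stats func() map[string]interface{}) {
	mux.HandleFunc("/debug/promotion", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(stats())
	})
}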
// Global adaptive promotion decider instance.
var adaptivePromotionDecider *AdaptivePromotionDecider

func init() {
	adaptivePromotionDecider = NewAdaptivePromotionDecider()
}

// AdaptivePromotionDeciderFunc returns the adaptive promotion decision for the given file.
func AdaptivePromotionDeciderFunc(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
	return adaptivePromotionDecider.ShouldPromote(fi, cs)
}

// RecordFastStorageAccess records a fast storage access for adaptation.
func RecordFastStorageAccess() {
	adaptivePromotionDecider.RecordFastStorageAccess()
}

// RecordFastStorageHit records a fast storage hit for adaptation.
func RecordFastStorageHit() {
	adaptivePromotionDecider.RecordFastStorageHit()
}

// GetPromotionStats returns promotion statistics for monitoring.
func GetPromotionStats() map[string]interface{} {
	return adaptivePromotionDecider.GetStats()
}
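For context, this is roughly how the hooks are meant to compose in a two-tier lookup path; the tier interface and lookup function below are hypothetical stand-ins, not code from this change:

// tier is a hypothetical read interface for one storage layer.
type tier interface {
	get(key string) ([]byte, bool)
}

// lookup probes the fast tier first, recording every probe and every hit so
// the adaptive decider can tune its promotion thresholds over time.
func lookup(fast, slow tier, key string) ([]byte, bool) {
	RecordFastStorageAccess()
	if data, ok := fast.get(key); ok {
		RecordFastStorageHit()
		return data, true
	}
	return slow.get(key)
}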
// Helper functions for min/max operations.
func minInt64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func maxInt64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func minDuration(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func maxDuration(a, b time.Duration) time.Duration {
	if a > b {
		return a
	}
	return b
}
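On Go 1.21 or newer, the built-in generic min and max cover all four helpers; a sketch of an equivalent clamp, should the module ever raise its minimum Go version (an assumption, not part of this change):

// clamp keeps v within [lo, hi] using the Go 1.21+ built-ins; the type
// union covers both uses above (byte sizes and time windows).
func clamp[T int64 | time.Duration](v, lo, hi T) T {
	return max(lo, min(v, hi))
}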
// Ensure GCFS implements VFS.
var _ vfs.VFS = (*GCFS)(nil)

// GCFS is a virtual file system that calls a GC handler when the disk is full.
// The GC handler is responsible for freeing up space on the disk. GCFS is a
// wrapper around another VFS.
type GCFS struct {
	vfs.VFS

	// protected by mu
	gcHanderFunc                 GCHandlerFunc
	lifetimeBytes, lifetimeFiles uint
	reclaimedBytes, deletedFiles uint
	gcTime                       time.Duration
	mu                           sync.Mutex
}

// GCHandlerFunc is a function that is called when the disk is full and the
// GCFS needs to free up space. It is passed the VFS and the size of the file
// that needs to be written. How space is freed, and how much, is up to the
// implementation.
type GCHandlerFunc func(vfs vfs.VFS, size uint) error

func New(vfs vfs.VFS, gcHandlerFunc GCHandlerFunc) *GCFS {
	return &GCFS{
		VFS:          vfs,
		gcHanderFunc: gcHandlerFunc,
	}
}
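For reference, a handler satisfying the new error-returning signature could sweep least-recently-accessed files first. This sketch is modeled on the test handler this change removes; it assumes ErrInsufficientSpace is the package's sentinel for a sweep that cannot free enough space, and uses only VFS methods that appear elsewhere in this diff (StatAll, Delete, AccessTime):

// lruSweep deletes the least-recently-accessed files until at least `size`
// bytes have been reclaimed, returning ErrInsufficientSpace if it runs out
// of files first. Illustrative only; not part of this change.
func lruSweep(v vfs.VFS, size uint) error {
	stats := v.StatAll()
	sort.Slice(stats, func(i, j int) bool {
		// Sort by access time so the oldest files are removed first.
		return stats[i].AccessTime().Before(stats[j].AccessTime())
	})

	var reclaimed uint
	for _, s := range stats {
		sz := uint(s.Size())
		if err := v.Delete(s.Name()); err != nil {
			return err
		}
		reclaimed += sz
		if reclaimed >= size {
			return nil // freed enough space
		}
	}
	return ErrInsufficientSpace // exhausted all files without freeing enough
}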
// Stats returns the lifetime bytes, lifetime files, reclaimed bytes, and deleted files.
// The lifetime bytes and lifetime files are the total bytes and files that have been freed up by the GC handler.
// The reclaimed bytes and deleted files are the bytes and files that have been freed up by the GC handler since the last call to Stats.
// The gc time is the total time spent in the GC handler since the last call to Stats.
// The reclaimed bytes, deleted files, and gc time are reset to 0 after the call to Stats.
func (g *GCFS) Stats() (lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles uint, gcTime time.Duration) {
	g.mu.Lock()
	defer g.mu.Unlock()

	g.lifetimeBytes += g.reclaimedBytes
	g.lifetimeFiles += g.deletedFiles

	lifetimeBytes = g.lifetimeBytes
	lifetimeFiles = g.lifetimeFiles
	reclaimedBytes = g.reclaimedBytes
	deletedFiles = g.deletedFiles
	gcTime = g.gcTime

	g.reclaimedBytes = 0
	g.deletedFiles = 0
	g.gcTime = time.Duration(0)

	return
}
// Create overrides the Create method of the VFS interface. It tries to create
// the key; if that fails with a disk full error, it calls the GC handler and
// tries again. If it still fails, it returns the error.
func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
	w, err := g.VFS.Create(key, size)                           // try to create the key
	for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // while the error is disk full and there is a GC handler
		errr := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
		if errr == ErrInsufficientSpace {
			return nil, errr // if the GC handler cannot free enough space, return the error
		}
		w, err = g.VFS.Create(key, size) // try again after GC
	}

	if err != nil {
		if err == vfserror.ErrDiskFull {
			logger.Logger.Error().Str("key", key).Int64("size", size).Msg("Failed to create file due to disk full, even after GC")
		} else {
			logger.Logger.Error().Str("key", key).Int64("size", size).Err(err).Msg("Failed to create file")
		}
	}

	return w, err
}
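Putting the wrapper together with a backing store, under the same assumptions (the hypothetical lruSweep sketched above, and the memory package from later in this diff):

// writeThroughGC writes one blob through a GC-managed in-memory cache.
// Hypothetical usage, not part of this change.
func writeThroughGC(key string, payload []byte) error {
	gcfs := New(memory.New(16*1024*1024), lruSweep) // 16 MiB cache

	w, err := gcfs.Create(key, int64(len(payload)))
	if err != nil {
		return err // ErrDiskFull or ErrInsufficientSpace: GC could not make room
	}
	if _, err := w.Write(payload); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}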
func (g *GCFS) Name() string {

@@ -1,105 +1,42 @@
// vfs/gc/gc_test.go
package gc

import (
	"testing"
)

func TestGetGCAlgorithm(t *testing.T) {
	tests := []struct {
		name      string
		algorithm GCAlgorithm
		expected  bool // true if we expect a non-nil function
	}{
		{"LRU", LRU, true},
		{"LFU", LFU, true},
		{"FIFO", FIFO, true},
		{"Largest", Largest, true},
		{"Smallest", Smallest, true},
		{"Hybrid", Hybrid, true},
		{"Unknown", "unknown", true}, // should fall back to LRU
		{"Empty", "", true},          // should fall back to LRU
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fn := GetGCAlgorithm(tt.algorithm)
			if fn == nil {
				t.Errorf("GetGCAlgorithm(%s) returned nil, expected non-nil function", tt.algorithm)
			}
		})
	}
}

func TestGCAlgorithmConstants(t *testing.T) {
	expectedAlgorithms := []GCAlgorithm{LRU, LFU, FIFO, Largest, Smallest, Hybrid}

	for _, algo := range expectedAlgorithms {
		if algo == "" {
			t.Errorf("GC algorithm constant is empty")
		}
	}
}
@@ -1,10 +1,49 @@
// vfs/memory/memory.go
package memory

import (
	"bytes"
	"container/list"
	"io"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"sync"
	"time"

	"github.com/docker/go-units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	memoryCapacityBytes = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "memory_cache_capacity_bytes",
			Help: "Total capacity of the memory cache in bytes",
		},
	)

	memorySizeBytes = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "memory_cache_size_bytes",
			Help: "Total size of the memory cache in bytes",
		},
	)

	memoryReadBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "memory_cache_read_bytes_total",
			Help: "Total number of bytes read from the memory cache",
		},
	)

	memoryWriteBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "memory_cache_write_bytes_total",
			Help: "Total number of bytes written to the memory cache",
		},
	)
)

// Ensure MemoryFS implements VFS.
@@ -20,7 +59,49 @@ type file struct {
type MemoryFS struct {
	files    map[string]*file
	capacity int64
	size     int64
	mu       sync.RWMutex
	keyLocks sync.Map // map[string]*sync.RWMutex
	LRU      *lruList
}

// lruList tracks access order for LRU eviction.
type lruList struct {
	list *list.List
	elem map[string]*list.Element
}

func newLruList() *lruList {
	return &lruList{
		list: list.New(),
		elem: make(map[string]*list.Element),
	}
}

func (l *lruList) MoveToFront(key string) {
	if e, ok := l.elem[key]; ok {
		l.list.MoveToFront(e)
	}
}

func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
	e := l.list.PushFront(fi)
	l.elem[key] = e
	return e
}

func (l *lruList) Remove(key string) {
	if e, ok := l.elem[key]; ok {
		l.list.Remove(e)
		delete(l.elem, key)
	}
}

func (l *lruList) Back() *vfs.FileInfo {
	if e := l.list.Back(); e != nil {
		return e.Value.(*vfs.FileInfo)
	}
	return nil
}
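The map-plus-list combination gives O(1) Add, MoveToFront, and Remove, plus O(1) access to the eviction candidate at the back. A sketch of how an in-package evictor could use it; evictOne is a hypothetical helper, not part of this change:

// evictOne drops the least-recently-used entry, returning false if the
// cache is empty. Hypothetical helper; assumes the caller holds m.mu.
func (m *MemoryFS) evictOne() bool {
	fi := m.LRU.Back() // least-recently-used FileInfo, or nil if empty
	if fi == nil {
		return false
	}
	if f, ok := m.files[fi.Name()]; ok {
		m.size -= int64(len(f.data))
	}
	m.LRU.Remove(fi.Name())
	delete(m.files, fi.Name())
	return true
}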
// New creates a new MemoryFS.
@@ -29,11 +110,23 @@ func New(capacity int64) *MemoryFS {
		panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
	}

	logger.Logger.Info().
		Str("name", "MemoryFS").
		Str("capacity", units.HumanSize(float64(capacity))).
		Msg("init")

	mfs := &MemoryFS{
		files:    make(map[string]*file),
		capacity: capacity,
		mu:       sync.RWMutex{},
		keyLocks: sync.Map{},
		LRU:      newLruList(),
	}

	memoryCapacityBytes.Set(float64(capacity))
	memorySizeBytes.Set(float64(mfs.Size()))

	return mfs
}
func (m *MemoryFS) Capacity() int64 {

@@ -45,73 +138,118 @@ func (m *MemoryFS) Name() string {
}

func (m *MemoryFS) Size() int64 {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.size
}
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
	mu, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
	return mu.(*sync.RWMutex)
}

func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
	m.mu.RLock()
	if m.capacity > 0 {
		if m.size+size > m.capacity {
			m.mu.RUnlock()
			return nil, vfserror.ErrDiskFull
		}
	}
	m.mu.RUnlock()

	keyMu := m.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	buf := &bytes.Buffer{}

	return &memWriteCloser{
		Writer: buf,
		onClose: func() error {
			data := buf.Bytes()
			m.mu.Lock()
			if f, exists := m.files[key]; exists {
				m.size -= int64(len(f.data))
				m.LRU.Remove(key)
			}
			fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
			m.files[key] = &file{
				fileinfo: fi,
				data:     data,
			}
			m.LRU.Add(key, fi)
			m.size += int64(len(data))
			m.mu.Unlock()

			memoryWriteBytes.Add(float64(len(data)))
			memorySizeBytes.Set(float64(m.Size()))

			return nil
		},
	}, nil
}

type memWriteCloser struct {
	io.Writer
	onClose func() error
}

func (wc *memWriteCloser) Close() error {
	return wc.onClose()
}
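A consequence of buffering writes and only publishing under the lock in onClose is that a key becomes visible to Open atomically at Close time, so readers see either the old blob or the new one, never a partial write. A test-style sketch of that observable behavior, assuming this package plus fmt, io, and vfserror (illustrative, not part of this change):

func ExampleMemoryFS_commitOnClose() {
	m := New(1024)

	w, _ := m.Create("key", 5)
	w.Write([]byte("hello"))

	// Not visible yet: the buffer is only published on Close.
	if _, err := m.Open("key"); err == vfserror.ErrNotFound {
		fmt.Println("not committed yet")
	}

	w.Close() // commits data, FileInfo, LRU position, and size under the lock

	rc, _ := m.Open("key")
	data, _ := io.ReadAll(rc)
	rc.Close()
	fmt.Println(string(data))
	// Output:
	// not committed yet
	// hello
}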
func (m *MemoryFS) Delete(key string) error {
	keyMu := m.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	m.mu.Lock()
	f, exists := m.files[key]
	if !exists {
		m.mu.Unlock()
		return vfserror.ErrNotFound
	}
	m.size -= int64(len(f.data))
	m.LRU.Remove(key)
	delete(m.files, key)
	m.mu.Unlock()

	memorySizeBytes.Set(float64(m.Size()))

	return nil
}
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
	keyMu := m.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	m.mu.Lock()
	f, exists := m.files[key]
	if !exists {
		m.mu.Unlock()
		return nil, vfserror.ErrNotFound
	}
	f.fileinfo.ATime = time.Now()
	m.LRU.MoveToFront(key)
	dataCopy := make([]byte, len(f.data))
	copy(dataCopy, f.data)
	m.mu.Unlock()

	memoryReadBytes.Add(float64(len(dataCopy)))
	memorySizeBytes.Set(float64(m.Size()))

	return io.NopCloser(bytes.NewReader(dataCopy)), nil
}
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
	keyMu := m.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	m.mu.RLock()
	defer m.mu.RUnlock()

	f, ok := m.files[key]
	if !ok {

@@ -122,8 +260,8 @@ func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
}

func (m *MemoryFS) StatAll() []*vfs.FileInfo {
	m.mu.RLock()
	defer m.mu.RUnlock()

	// hard copy the file info to prevent modification of the original file info or the other way around
	files := make([]*vfs.FileInfo, 0, len(m.files))
@@ -1,63 +1,129 @@
// vfs/memory/memory_test.go
package memory

import (
	"errors"
	"fmt"
	"io"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"testing"
)

func TestCreateAndOpen(t *testing.T) {
	m := New(1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}

func TestOverwrite(t *testing.T) {
	m := New(1024)
	key := "key"
	value1 := []byte("value1")
	value2 := []byte("value2")

	w, err := m.Create(key, int64(len(value1)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value1)
	w.Close()

	w, err = m.Create(key, int64(len(value2)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value2)
	w.Close()

	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value2) {
		t.Fatalf("expected %s, got %s", value2, got)
	}
}

func TestDelete(t *testing.T) {
	m := New(1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	if err := m.Delete(key); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}

	_, err = m.Open(key)
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestCapacityLimit(t *testing.T) {
	m := New(10)
	for i := 0; i < 11; i++ {
		w, err := m.Create(fmt.Sprintf("key%d", i), 1)
		if err != nil && i < 10 {
			t.Errorf("Create failed: %v", err)
		} else if i == 10 && err == nil {
			t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
		}
		if i < 10 {
			w.Write([]byte("1"))
			w.Close()
		}
	}
}

func TestStat(t *testing.T) {
	m := New(1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	info, err := m.Stat(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if info == nil {
		t.Fatal("expected file info to be non-nil")
	}
	if info.Size() != int64(len(value)) {
		t.Errorf("expected size %d, got %d", len(value), info.Size())
	}
}
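Given the new per-key locks plus the global RWMutex, a concurrency test would exercise what the unit tests above cannot. A sketch (not part of this change) meant to run under -race, assuming an additional sync import in this test file:

func TestConcurrentCreateAndOpen(t *testing.T) {
	m := New(1024 * 1024)

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			key := fmt.Sprintf("key%d", i)
			for j := 0; j < 100; j++ {
				w, err := m.Create(key, 1)
				if err != nil {
					t.Errorf("Create failed: %v", err)
					return
				}
				w.Write([]byte("x"))
				w.Close()

				if rc, err := m.Open(key); err == nil {
					io.ReadAll(rc)
					rc.Close()
				}
			}
		}(i)
	}
	wg.Wait()
}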
@@ -1,76 +0,0 @@
package sync

import (
	"fmt"
	"s1d3sw1ped/SteamCache2/vfs"
	"sync"
)

// Ensure SyncFS implements VFS.
var _ vfs.VFS = (*SyncFS)(nil)

type SyncFS struct {
	vfs vfs.VFS
	mu  sync.RWMutex
}

func New(vfs vfs.VFS) *SyncFS {
	return &SyncFS{
		vfs: vfs,
		mu:  sync.RWMutex{},
	}
}

// Name returns the name of the file system.
func (sfs *SyncFS) Name() string {
	return fmt.Sprintf("SyncFS(%s)", sfs.vfs.Name())
}

// Size returns the total size of all files in the file system.
func (sfs *SyncFS) Size() int64 {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.Size()
}

// Set sets the value of key as src.
// If the same key is set multiple times, the last Set call takes effect.
func (sfs *SyncFS) Set(key string, src []byte) error {
	sfs.mu.Lock()
	defer sfs.mu.Unlock()

	return sfs.vfs.Set(key, src)
}

// Delete deletes the value of key.
func (sfs *SyncFS) Delete(key string) error {
	sfs.mu.Lock()
	defer sfs.mu.Unlock()

	return sfs.vfs.Delete(key)
}

// Get gets the value of key and returns it, whether or not there is an error.
func (sfs *SyncFS) Get(key string) ([]byte, error) {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.Get(key)
}

// Stat returns the FileInfo of key.
func (sfs *SyncFS) Stat(key string) (*vfs.FileInfo, error) {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.Stat(key)
}

// StatAll returns the FileInfo of all keys.
func (sfs *SyncFS) StatAll() []*vfs.FileInfo {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.StatAll()
}
@@ -1,5 +1,8 @@
// vfs/vfs.go
package vfs

import "io"

// VFS is the interface that wraps the basic methods of a virtual file system.
type VFS interface {
	// Name returns the name of the file system.
@@ -8,15 +11,14 @@ type VFS interface {
	// Size returns the total size of all files in the file system.
	Size() int64

	// Create creates a new file at key with the expected size.
	Create(key string, size int64) (io.WriteCloser, error)

	// Delete deletes the value of key.
	Delete(key string) error

	// Open opens the file at key for reading.
	Open(key string) (io.ReadCloser, error)

	// Stat returns the FileInfo of key.
	Stat(key string) (*FileInfo, error)
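The switch from Set/Get over []byte to Create/Open over streams means callers no longer need a whole file in memory to move it between layers. A sketch of a streaming copy in package vfs terms; promote is a hypothetical helper, not part of this change:

// promote streams one key from src to dst using only the interface above.
func promote(src, dst VFS, key string) error {
	fi, err := src.Stat(key)
	if err != nil {
		return err
	}

	r, err := src.Open(key)
	if err != nil {
		return err
	}
	defer r.Close()

	w, err := dst.Create(key, fi.Size())
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, r); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}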
@@ -1,3 +1,4 @@
// vfs/vfserror/vfserror.go
package vfserror

import "errors"