Compare commits
43 Commits
1.0.0
...
9ca8fa4a5e
| Author | SHA1 | Date | |
|---|---|---|---|
| 9ca8fa4a5e | |||
| 7fb1fcf21f | |||
| ee6fc32a1a | |||
| 4a4579b0f3 | |||
| b9358a0e8d | |||
| c197841960 | |||
| 6919358eab | |||
| 1187f05c77 | |||
| f6f93c86c8 | |||
| 30e804709f | |||
| 56bb1ddc12 | |||
| 9c65cdb156 | |||
| ae013f9a3b | |||
| d94b53c395 | |||
| 847931ed43 | |||
| 4387236d22 | |||
| f6ce004922 | |||
| 8e487876d2 | |||
| 1be7f5bd20 | |||
| f237b89ca7 | |||
| ae07239021 | |||
| 4876998f5d | |||
| 163e64790c | |||
| 00792d87a5 | |||
| 3427b8f5bc | |||
| 7f744d04b0 | |||
| 6c98d03ae7 | |||
| 17ff507c89 | |||
| 539f14e8ec | |||
| 1673e9554a | |||
| b83836f914 | |||
| 745856f0f4 | |||
| b4d2b1305e | |||
| 0d263be2ca | |||
| 63a1c21861 | |||
| 0a73e46f90 | |||
| 6f1158edeb | |||
| 93b682cfa5 | |||
| f378d0e81f | |||
| 8c1bb695b8 | |||
| f58951fd92 | |||
| 70786da8c6 | |||
| e24af47697 |
@@ -8,14 +8,14 @@ jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@main
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- run: git fetch --force --tags
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@main
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
- uses: goreleaser/goreleaser-action@v6
|
||||
- uses: goreleaser/goreleaser-action@master
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: 'latest'
|
||||
|
||||
@@ -6,14 +6,10 @@ jobs:
|
||||
check-and-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/checkout@main
|
||||
- uses: actions/setup-go@main
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
- run: go mod tidy
|
||||
- uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
args: -D errcheck
|
||||
version: latest
|
||||
- run: go build ./...
|
||||
- run: go test -race -v -shuffle=on ./...
|
||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -1,3 +1,11 @@
|
||||
dist/
|
||||
tmp/
|
||||
__*.exe
|
||||
#build artifacts
|
||||
/dist/
|
||||
|
||||
#disk cache
|
||||
/disk/
|
||||
|
||||
#config file
|
||||
/config.yaml
|
||||
|
||||
#windows executables
|
||||
*.exe
|
||||
|
||||
@@ -2,11 +2,17 @@ version: 2
|
||||
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- go mod tidy -v
|
||||
|
||||
builds:
|
||||
- ldflags:
|
||||
- id: default
|
||||
binary: steamcache2
|
||||
ldflags:
|
||||
- -s
|
||||
- -w
|
||||
- -extldflags "-static"
|
||||
- -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
|
||||
- -X s1d3sw1ped/SteamCache2/version.Date={{.Date}}
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
@@ -14,19 +20,24 @@ builds:
|
||||
- windows
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
ignore:
|
||||
- goos: windows
|
||||
goarch: arm64
|
||||
|
||||
checksum:
|
||||
name_template: "checksums.txt"
|
||||
|
||||
archives:
|
||||
- format: tar.gz
|
||||
name_template: >-
|
||||
{{ .ProjectName }}_
|
||||
{{- title .Os }}_
|
||||
{{- if eq .Arch "amd64" }}x86_64
|
||||
{{- else if eq .Arch "386" }}i386
|
||||
{{- else }}{{ .Arch }}{{ end }}
|
||||
{{- if .Arm }}v{{ .Arm }}{{ end }}
|
||||
- id: default
|
||||
name_template: "{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}"
|
||||
formats: tar.gz
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
formats: zip
|
||||
files:
|
||||
- README.md
|
||||
- LICENSE
|
||||
|
||||
changelog:
|
||||
sort: asc
|
||||
@@ -36,12 +47,7 @@ changelog:
|
||||
- "^test:"
|
||||
|
||||
release:
|
||||
name_template: '{{.ProjectName}}-{{.Version}}'
|
||||
footer: >-
|
||||
|
||||
---
|
||||
|
||||
Released by [GoReleaser](https://github.com/goreleaser/goreleaser).
|
||||
name_template: "{{ .ProjectName }}-{{ .Version }}"
|
||||
|
||||
gitea_urls:
|
||||
api: https://git.s1d3sw1ped.com/api/v1
|
||||
|
||||
47
.vscode/launch.json
vendored
47
.vscode/launch.json
vendored
@@ -1,47 +0,0 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Launch Memory & Disk",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "${workspaceFolder}/main.go",
|
||||
"args": [
|
||||
"--memory",
|
||||
"1G",
|
||||
"--disk",
|
||||
"10G",
|
||||
"--disk-path",
|
||||
"tmp/disk",
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "Launch Disk Only",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "${workspaceFolder}/main.go",
|
||||
"args": [
|
||||
"--disk",
|
||||
"10G",
|
||||
"--disk-path",
|
||||
"tmp/disk",
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "Launch Memory Only",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "${workspaceFolder}/main.go",
|
||||
"args": [
|
||||
"--memory",
|
||||
"1G",
|
||||
],
|
||||
}
|
||||
]
|
||||
}
|
||||
19
Makefile
Normal file
19
Makefile
Normal file
@@ -0,0 +1,19 @@
|
||||
run: deps test ## Run the application
|
||||
@go run .
|
||||
|
||||
help: ## Show this help message
|
||||
@echo SteamCache2 Makefile
|
||||
@echo Available targets:
|
||||
@echo run Run the application
|
||||
@echo run-debug Run the application with debug logging
|
||||
@echo test Run all tests
|
||||
@echo deps Download dependencies
|
||||
|
||||
run-debug: deps test ## Run the application with debug logging
|
||||
@go run . --log-level debug
|
||||
|
||||
test: deps ## Run all tests
|
||||
@go test -v ./...
|
||||
|
||||
deps: ## Download dependencies
|
||||
@go mod tidy
|
||||
224
README.md
224
README.md
@@ -10,15 +10,154 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
|
||||
- Reduces bandwidth usage
|
||||
- Easy to set up and configure aside from dns stuff to trick Steam into using it
|
||||
- Supports multiple clients
|
||||
- **NEW:** YAML configuration system with automatic config generation
|
||||
- **NEW:** Simple Makefile for development workflow
|
||||
- Cross-platform builds (Linux, macOS, Windows)
|
||||
|
||||
## Usage
|
||||
## Quick Start
|
||||
|
||||
1. Start the cache server:
|
||||
```sh
|
||||
./SteamCache2 --memory 1G --disk 10G --disk-path tmp/disk
|
||||
```
|
||||
2. Configure your DNS:
|
||||
- If your on Windows and don't want a whole network implementation (THIS)[#windows-hosts-file-override]
|
||||
### First Time Setup
|
||||
|
||||
1. **Clone and build:**
|
||||
```bash
|
||||
git clone <repository-url>
|
||||
cd SteamCache2
|
||||
make # This will run tests and build the application
|
||||
```
|
||||
|
||||
2. **Run the application** (it will create a default config):
|
||||
```bash
|
||||
./steamcache2
|
||||
# or on Windows:
|
||||
steamcache2.exe
|
||||
```
|
||||
|
||||
The application will automatically create a `config.yaml` file with default settings and exit, allowing you to customize it.
|
||||
|
||||
3. **Edit the configuration** (`config.yaml`):
|
||||
```yaml
|
||||
listen_address: :80
|
||||
cache:
|
||||
memory:
|
||||
size: 1GB
|
||||
gc_algorithm: lru
|
||||
disk:
|
||||
size: 10GB
|
||||
path: ./disk
|
||||
gc_algorithm: hybrid
|
||||
upstream: "https://steam.cdn.com" # Set your upstream server
|
||||
```
|
||||
|
||||
4. **Run the application again:**
|
||||
```bash
|
||||
make run # or ./steamcache2
|
||||
```
|
||||
|
||||
### Development Workflow
|
||||
|
||||
```bash
|
||||
# Run all tests and start the application (default target)
|
||||
make
|
||||
|
||||
# Run only tests
|
||||
make test
|
||||
|
||||
# Run with debug logging
|
||||
make run-debug
|
||||
|
||||
# Download dependencies
|
||||
make deps
|
||||
|
||||
# Show available commands
|
||||
make help
|
||||
```
|
||||
|
||||
### Command Line Flags
|
||||
|
||||
While most configuration is done via the YAML file, some runtime options are still available as command-line flags:
|
||||
|
||||
```bash
|
||||
# Use a custom config file
|
||||
./steamcache2 --config /path/to/my-config.yaml
|
||||
|
||||
# Set logging level
|
||||
./steamcache2 --log-level debug --log-format json
|
||||
|
||||
# Set number of worker threads
|
||||
./steamcache2 --threads 8
|
||||
|
||||
# Show help
|
||||
./steamcache2 --help
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
SteamCache2 uses a YAML configuration file (`config.yaml`) for all settings. Here's a complete configuration example:
|
||||
|
||||
```yaml
|
||||
# Server configuration
|
||||
listen_address: :80
|
||||
|
||||
# Cache configuration
|
||||
cache:
|
||||
# Memory cache settings
|
||||
memory:
|
||||
# Size of memory cache (e.g., "512MB", "1GB", "0" to disable)
|
||||
size: 1GB
|
||||
# Garbage collection algorithm
|
||||
gc_algorithm: lru
|
||||
|
||||
# Disk cache settings
|
||||
disk:
|
||||
# Size of disk cache (e.g., "10GB", "50GB", "0" to disable)
|
||||
size: 10GB
|
||||
# Path to disk cache directory
|
||||
path: ./disk
|
||||
# Garbage collection algorithm
|
||||
gc_algorithm: hybrid
|
||||
|
||||
# Upstream server configuration
|
||||
# The upstream server to proxy requests to
|
||||
upstream: "https://steam.cdn.com"
|
||||
```
|
||||
|
||||
#### Garbage Collection Algorithms
|
||||
|
||||
SteamCache2 supports different garbage collection algorithms for memory and disk caches, allowing you to optimize performance for each storage tier:
|
||||
|
||||
**Available GC Algorithms:**
|
||||
|
||||
- **`lru`** (default): Least Recently Used - evicts oldest accessed files
|
||||
- **`lfu`**: Least Frequently Used - evicts least accessed files (good for popular content)
|
||||
- **`fifo`**: First In, First Out - evicts oldest created files (predictable)
|
||||
- **`largest`**: Size-based - evicts largest files first (maximizes file count)
|
||||
- **`smallest`**: Size-based - evicts smallest files first (maximizes cache hit rate)
|
||||
- **`hybrid`**: Combines access time and file size for optimal eviction
|
||||
|
||||
**Recommended Algorithms by Cache Type:**
|
||||
|
||||
**For Memory Cache (Fast, Limited Size):**
|
||||
- **`lru`** - Best overall performance, good balance of speed and hit rate
|
||||
- **`lfu`** - Excellent for gaming cafes where popular games stay cached
|
||||
- **`hybrid`** - Optimal for mixed workloads with varying file sizes
|
||||
|
||||
**For Disk Cache (Slow, Large Size):**
|
||||
- **`hybrid`** - Recommended for optimal performance, balances speed and storage efficiency
|
||||
- **`largest`** - Good for maximizing number of cached files
|
||||
- **`lru`** - Reliable default with good performance
|
||||
|
||||
**Use Cases:**
|
||||
- **Gaming Cafes**: Use `lfu` for memory, `hybrid` for disk
|
||||
- **LAN Events**: Use `lfu` for memory, `hybrid` for disk
|
||||
- **Home Use**: Use `lru` for memory, `hybrid` for disk
|
||||
- **Testing**: Use `fifo` for predictable behavior
|
||||
- **Large File Storage**: Use `largest` for disk to maximize file count
|
||||
|
||||
### DNS Configuration
|
||||
|
||||
Configure your DNS to direct Steam traffic to your SteamCache2 server:
|
||||
|
||||
- If you're on Windows and don't want a whole network implementation, see the [Windows Hosts File Override](#windows-hosts-file-override) section below.
|
||||
|
||||
### Windows Hosts File Override
|
||||
|
||||
@@ -53,6 +192,77 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
|
||||
|
||||
This will direct any requests to `lancache.steamcontent.com` to your SteamCache2 server.
|
||||
|
||||
## Building from Source
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Go 1.19 or later
|
||||
- Make (optional, but recommended)
|
||||
|
||||
### Build Commands
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone <repository-url>
|
||||
cd SteamCache2
|
||||
|
||||
# Download dependencies
|
||||
make deps
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
|
||||
# Build for current platform
|
||||
go build -o steamcache2 .
|
||||
|
||||
# Build for specific platforms
|
||||
GOOS=linux GOARCH=amd64 go build -o steamcache2-linux-amd64 .
|
||||
GOOS=windows GOARCH=amd64 go build -o steamcache2-windows-amd64.exe .
|
||||
```
|
||||
|
||||
### Development
|
||||
|
||||
```bash
|
||||
# Run in development mode with debug logging
|
||||
make run-debug
|
||||
|
||||
# Run all tests and start the application
|
||||
make
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **"Config file not found" on first run**
|
||||
- This is expected! SteamCache2 will automatically create a default `config.yaml` file
|
||||
- Edit the generated config file with your desired settings
|
||||
- Run the application again
|
||||
|
||||
2. **Permission denied when creating config**
|
||||
- Make sure you have write permissions in the current directory
|
||||
- Try running with elevated privileges if necessary
|
||||
|
||||
3. **Port already in use**
|
||||
- Change the `listen_address` in `config.yaml` to a different port (e.g., `:8080`)
|
||||
- Or stop the service using the current port
|
||||
|
||||
4. **High memory usage**
|
||||
- Reduce the memory cache size in `config.yaml`
|
||||
- Consider using disk-only caching by setting `memory.size: "0"`
|
||||
|
||||
5. **Slow disk performance**
|
||||
- Use SSD storage for the disk cache
|
||||
- Consider using a different GC algorithm like `hybrid`
|
||||
- Adjust the disk cache size to match available storage
|
||||
|
||||
### Getting Help
|
||||
|
||||
- Check the logs for detailed error messages
|
||||
- Run with `--log-level debug` for more verbose output
|
||||
- Ensure your upstream server is accessible
|
||||
- Verify DNS configuration is working correctly
|
||||
|
||||
## License
|
||||
|
||||
See the [LICENSE](LICENSE) file for details.
|
||||
|
||||
128
cmd/root.go
128
cmd/root.go
@@ -1,18 +1,27 @@
|
||||
// cmd/root.go
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"s1d3sw1ped/SteamCache2/config"
|
||||
"s1d3sw1ped/SteamCache2/steamcache"
|
||||
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||
"s1d3sw1ped/SteamCache2/version"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
memory string
|
||||
memorymultiplier int
|
||||
disk string
|
||||
diskmultiplier int
|
||||
diskpath string
|
||||
configPath string
|
||||
|
||||
logLevel string
|
||||
logFormat string
|
||||
|
||||
maxConcurrentRequests int64
|
||||
maxRequestsPerClient int64
|
||||
)
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
@@ -24,15 +33,100 @@ var rootCmd = &cobra.Command{
|
||||
By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
|
||||
significantly improving download times and reducing the load on the internet connection.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
// Configure logging
|
||||
switch logLevel {
|
||||
case "debug":
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
case "error":
|
||||
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
|
||||
case "info":
|
||||
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||
default:
|
||||
zerolog.SetGlobalLevel(zerolog.InfoLevel) // Default to info level if not specified
|
||||
}
|
||||
var writer zerolog.ConsoleWriter
|
||||
if logFormat == "json" {
|
||||
writer = zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true}
|
||||
} else {
|
||||
writer = zerolog.ConsoleWriter{Out: os.Stderr}
|
||||
}
|
||||
logger.Logger = zerolog.New(writer).With().Timestamp().Logger()
|
||||
|
||||
logger.Logger.Info().
|
||||
Msg("SteamCache2 " + version.Version + " " + version.Date + " starting...")
|
||||
|
||||
// Load configuration
|
||||
cfg, err := config.LoadConfig(configPath)
|
||||
if err != nil {
|
||||
// Check if the error is because the config file doesn't exist
|
||||
// The error is wrapped, so we check the error message
|
||||
if strings.Contains(err.Error(), "no such file") ||
|
||||
strings.Contains(err.Error(), "cannot find the file") ||
|
||||
strings.Contains(err.Error(), "The system cannot find the file") {
|
||||
logger.Logger.Info().
|
||||
Str("config_path", configPath).
|
||||
Msg("Config file not found, creating default configuration")
|
||||
|
||||
if err := config.SaveDefaultConfig(configPath); err != nil {
|
||||
logger.Logger.Error().
|
||||
Err(err).
|
||||
Str("config_path", configPath).
|
||||
Msg("Failed to create default configuration")
|
||||
fmt.Fprintf(os.Stderr, "Error: Failed to create default config at %s: %v\n", configPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("config_path", configPath).
|
||||
Msg("Default configuration created successfully. Please edit the file and run again.")
|
||||
|
||||
fmt.Printf("Default configuration created at %s\n", configPath)
|
||||
fmt.Println("Please edit the configuration file as needed and run the application again.")
|
||||
os.Exit(0)
|
||||
} else {
|
||||
logger.Logger.Error().
|
||||
Err(err).
|
||||
Str("config_path", configPath).
|
||||
Msg("Failed to load configuration")
|
||||
fmt.Fprintf(os.Stderr, "Error: Failed to load configuration from %s: %v\n", configPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("config_path", configPath).
|
||||
Msg("Configuration loaded successfully")
|
||||
|
||||
// Use command-line flags if provided, otherwise use config values
|
||||
finalMaxConcurrentRequests := cfg.MaxConcurrentRequests
|
||||
if maxConcurrentRequests > 0 {
|
||||
finalMaxConcurrentRequests = maxConcurrentRequests
|
||||
}
|
||||
|
||||
finalMaxRequestsPerClient := cfg.MaxRequestsPerClient
|
||||
if maxRequestsPerClient > 0 {
|
||||
finalMaxRequestsPerClient = maxRequestsPerClient
|
||||
}
|
||||
|
||||
sc := steamcache.New(
|
||||
":80",
|
||||
memory,
|
||||
memorymultiplier,
|
||||
disk,
|
||||
diskmultiplier,
|
||||
diskpath,
|
||||
cfg.ListenAddress,
|
||||
cfg.Cache.Memory.Size,
|
||||
cfg.Cache.Disk.Size,
|
||||
cfg.Cache.Disk.Path,
|
||||
cfg.Upstream,
|
||||
cfg.Cache.Memory.GCAlgorithm,
|
||||
cfg.Cache.Disk.GCAlgorithm,
|
||||
finalMaxConcurrentRequests,
|
||||
finalMaxRequestsPerClient,
|
||||
)
|
||||
|
||||
logger.Logger.Info().
|
||||
Msg("SteamCache2 " + version.Version + " started on " + cfg.ListenAddress)
|
||||
|
||||
sc.Run()
|
||||
|
||||
logger.Logger.Info().Msg("SteamCache2 stopped")
|
||||
os.Exit(0)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -46,9 +140,11 @@ func Execute() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.Flags().StringVarP(&memory, "memory", "m", "0", "The size of the memory cache")
|
||||
rootCmd.Flags().IntVarP(&memorymultiplier, "memory-gc", "M", 10, "The gc value for the memory cache")
|
||||
rootCmd.Flags().StringVarP(&disk, "disk", "d", "0", "The size of the disk cache")
|
||||
rootCmd.Flags().IntVarP(&diskmultiplier, "disk-gc", "D", 100, "The gc value for the disk cache")
|
||||
rootCmd.Flags().StringVarP(&diskpath, "disk-path", "p", "", "The path to the disk cache")
|
||||
rootCmd.Flags().StringVarP(&configPath, "config", "c", "config.yaml", "Path to configuration file")
|
||||
|
||||
rootCmd.Flags().StringVarP(&logLevel, "log-level", "l", "info", "Logging level: debug, info, error")
|
||||
rootCmd.Flags().StringVarP(&logFormat, "log-format", "f", "console", "Logging format: json, console")
|
||||
|
||||
rootCmd.Flags().Int64Var(&maxConcurrentRequests, "max-concurrent-requests", 0, "Maximum concurrent requests (0 = use config file value)")
|
||||
rootCmd.Flags().Int64Var(&maxRequestsPerClient, "max-requests-per-client", 0, "Maximum concurrent requests per client IP (0 = use config file value)")
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
// cmd/version.go
|
||||
package cmd
|
||||
|
||||
import (
|
||||
@@ -14,7 +15,7 @@ var versionCmd = &cobra.Command{
|
||||
Short: "prints the version of SteamCache2",
|
||||
Long: `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Fprintln(os.Stderr, "SteamCache2", version.Version)
|
||||
fmt.Fprintln(os.Stderr, "SteamCache2", version.Version, version.Date)
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
128
config/config.go
Normal file
128
config/config.go
Normal file
@@ -0,0 +1,128 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
// Server configuration
|
||||
ListenAddress string `yaml:"listen_address" default:":80"`
|
||||
|
||||
// Concurrency limits
|
||||
MaxConcurrentRequests int64 `yaml:"max_concurrent_requests" default:"200"`
|
||||
MaxRequestsPerClient int64 `yaml:"max_requests_per_client" default:"5"`
|
||||
|
||||
// Cache configuration
|
||||
Cache CacheConfig `yaml:"cache"`
|
||||
|
||||
// Upstream configuration
|
||||
Upstream string `yaml:"upstream"`
|
||||
}
|
||||
|
||||
type CacheConfig struct {
|
||||
// Memory cache settings
|
||||
Memory MemoryConfig `yaml:"memory"`
|
||||
|
||||
// Disk cache settings
|
||||
Disk DiskConfig `yaml:"disk"`
|
||||
}
|
||||
|
||||
type MemoryConfig struct {
|
||||
// Size of memory cache (e.g., "512MB", "1GB")
|
||||
Size string `yaml:"size" default:"0"`
|
||||
|
||||
// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
|
||||
GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
|
||||
}
|
||||
|
||||
type DiskConfig struct {
|
||||
// Size of disk cache (e.g., "10GB", "50GB")
|
||||
Size string `yaml:"size" default:"0"`
|
||||
|
||||
// Path to disk cache directory
|
||||
Path string `yaml:"path" default:""`
|
||||
|
||||
// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
|
||||
GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
|
||||
}
|
||||
|
||||
// LoadConfig loads configuration from a YAML file
|
||||
func LoadConfig(configPath string) (*Config, error) {
|
||||
if configPath == "" {
|
||||
configPath = "config.yaml"
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read config file %s: %w", configPath, err)
|
||||
}
|
||||
|
||||
var config Config
|
||||
if err := yaml.Unmarshal(data, &config); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse config file %s: %w", configPath, err)
|
||||
}
|
||||
|
||||
// Set defaults for empty values
|
||||
if config.ListenAddress == "" {
|
||||
config.ListenAddress = ":80"
|
||||
}
|
||||
if config.MaxConcurrentRequests == 0 {
|
||||
config.MaxConcurrentRequests = 50
|
||||
}
|
||||
if config.MaxRequestsPerClient == 0 {
|
||||
config.MaxRequestsPerClient = 3
|
||||
}
|
||||
if config.Cache.Memory.Size == "" {
|
||||
config.Cache.Memory.Size = "0"
|
||||
}
|
||||
if config.Cache.Memory.GCAlgorithm == "" {
|
||||
config.Cache.Memory.GCAlgorithm = "lru"
|
||||
}
|
||||
if config.Cache.Disk.Size == "" {
|
||||
config.Cache.Disk.Size = "0"
|
||||
}
|
||||
if config.Cache.Disk.GCAlgorithm == "" {
|
||||
config.Cache.Disk.GCAlgorithm = "lru"
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// SaveDefaultConfig creates a default configuration file
|
||||
func SaveDefaultConfig(configPath string) error {
|
||||
if configPath == "" {
|
||||
configPath = "config.yaml"
|
||||
}
|
||||
|
||||
defaultConfig := Config{
|
||||
ListenAddress: ":80",
|
||||
MaxConcurrentRequests: 50, // Reduced for home user (less concurrent load)
|
||||
MaxRequestsPerClient: 3, // Reduced for home user (more conservative per client)
|
||||
Cache: CacheConfig{
|
||||
Memory: MemoryConfig{
|
||||
Size: "1GB", // Recommended for systems that can spare 1GB RAM for caching
|
||||
GCAlgorithm: "lru",
|
||||
},
|
||||
Disk: DiskConfig{
|
||||
Size: "1TB", // Large HDD cache for home user
|
||||
Path: "./disk",
|
||||
GCAlgorithm: "lru", // Better for gaming patterns (keeps recently played games)
|
||||
},
|
||||
},
|
||||
Upstream: "",
|
||||
}
|
||||
|
||||
data, err := yaml.Marshal(&defaultConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal default config: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(configPath, data, 0644); err != nil {
|
||||
return fmt.Errorf("failed to write default config file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
4
go.mod
4
go.mod
@@ -4,9 +4,10 @@ go 1.23.0
|
||||
|
||||
require (
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/edsrzf/mmap-go v1.1.0
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -14,5 +15,6 @@ require (
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.12.0 // indirect
|
||||
)
|
||||
|
||||
8
go.sum
8
go.sum
@@ -2,6 +2,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
|
||||
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@@ -19,11 +21,13 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
|
||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
package avgcachestate
|
||||
|
||||
import (
|
||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AvgCacheState is a cache state that averages the last N cache states.
|
||||
type AvgCacheState struct {
|
||||
size int
|
||||
avgs []cachestate.CacheState
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// New creates a new average cache state with the given size.
|
||||
func New(size int) *AvgCacheState {
|
||||
a := &AvgCacheState{
|
||||
size: size,
|
||||
avgs: make([]cachestate.CacheState, size),
|
||||
mu: sync.Mutex{},
|
||||
}
|
||||
|
||||
a.Clear()
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// Clear resets the average cache state to zero.
|
||||
func (a *AvgCacheState) Clear() {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
for i := 0; i < len(a.avgs); i++ {
|
||||
a.avgs[i] = cachestate.CacheStateMiss
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a cache state to the average cache state.
|
||||
func (a *AvgCacheState) Add(cs cachestate.CacheState) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
a.avgs = append(a.avgs, cs)
|
||||
if len(a.avgs) > a.size {
|
||||
a.avgs = a.avgs[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Avg returns the average cache state.
|
||||
func (a *AvgCacheState) Avg() float64 {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
var hits int
|
||||
|
||||
for _, cs := range a.avgs {
|
||||
if cs == cachestate.CacheStateHit {
|
||||
hits++
|
||||
}
|
||||
}
|
||||
|
||||
return float64(hits) / float64(len(a.avgs))
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
package steamcache
|
||||
|
||||
import (
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/rand"
|
||||
)
|
||||
|
||||
// RandomGC randomly deletes files until we've reclaimed enough space.
|
||||
func randomgc(vfss vfs.VFS, size uint) (uint, uint) {
|
||||
|
||||
// Randomly delete files until we've reclaimed enough space.
|
||||
random := func(vfss vfs.VFS, stats []*vfs.FileInfo) int64 {
|
||||
randfile := stats[rand.Intn(len(stats))]
|
||||
sz := randfile.Size()
|
||||
err := vfss.Delete(randfile.Name())
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return sz
|
||||
}
|
||||
|
||||
deletions := 0
|
||||
targetreclaim := int64(size)
|
||||
var reclaimed int64
|
||||
|
||||
stats := vfss.StatAll()
|
||||
for {
|
||||
reclaimed += random(vfss, stats)
|
||||
deletions++
|
||||
if reclaimed >= targetreclaim {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return uint(reclaimed), uint(deletions)
|
||||
}
|
||||
|
||||
func cachehandler(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
|
||||
return time.Since(fi.AccessTime()) < time.Minute*10 // Put files in the cache if they've been accessed twice in the last 10 minutes
|
||||
}
|
||||
@@ -1,9 +1,8 @@
|
||||
// steamcache/logger/logger.go
|
||||
package logger
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
var Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
|
||||
var Logger zerolog.Logger
|
||||
|
||||
@@ -1,31 +1,203 @@
|
||||
// steamcache/steamcache.go
|
||||
package steamcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"s1d3sw1ped/SteamCache2/steamcache/avgcachestate"
|
||||
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||
"s1d3sw1ped/SteamCache2/version"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/cache"
|
||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
||||
"s1d3sw1ped/SteamCache2/vfs/disk"
|
||||
"s1d3sw1ped/SteamCache2/vfs/gc"
|
||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
||||
syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
// generateURLHash creates a SHA256 hash of the entire URL path for cache key
|
||||
func generateURLHash(urlPath string) string {
|
||||
hash := sha256.Sum256([]byte(urlPath))
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
// generateSteamCacheKey creates a cache key from the URL path using SHA256
|
||||
// Input: /depot/1684171/chunk/0016cfc5019b8baa6026aa1cce93e685d6e06c6e
|
||||
// Output: steam/a1b2c3d4e5f678901234567890123456789012345678901234567890
|
||||
func generateSteamCacheKey(urlPath string) string {
|
||||
// Handle Steam depot URLs by creating a SHA256 hash of the entire path
|
||||
if strings.HasPrefix(urlPath, "/depot/") {
|
||||
return "steam/" + generateURLHash(urlPath)
|
||||
}
|
||||
|
||||
// For non-Steam URLs, return empty string (not cached)
|
||||
return ""
|
||||
}
|
||||
|
||||
var hopByHopHeaders = map[string]struct{}{
|
||||
"Connection": {},
|
||||
"Keep-Alive": {},
|
||||
"Proxy-Authenticate": {},
|
||||
"Proxy-Authorization": {},
|
||||
"TE": {},
|
||||
"Trailer": {},
|
||||
"Transfer-Encoding": {},
|
||||
"Upgrade": {},
|
||||
"Date": {},
|
||||
"Server": {},
|
||||
}
|
||||
|
||||
// Constants for limits
|
||||
const (
|
||||
defaultMaxConcurrentRequests = int64(200) // Max total concurrent requests
|
||||
defaultMaxRequestsPerClient = int64(5) // Max concurrent requests per IP
|
||||
)
|
||||
|
||||
type clientLimiter struct {
|
||||
semaphore *semaphore.Weighted
|
||||
lastSeen time.Time
|
||||
}
|
||||
|
||||
type coalescedRequest struct {
|
||||
responseChan chan *http.Response
|
||||
errorChan chan error
|
||||
waitingCount int
|
||||
done bool
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func newCoalescedRequest() *coalescedRequest {
|
||||
return &coalescedRequest{
|
||||
responseChan: make(chan *http.Response, 1),
|
||||
errorChan: make(chan error, 1),
|
||||
waitingCount: 1,
|
||||
done: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (cr *coalescedRequest) addWaiter() {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
cr.waitingCount++
|
||||
}
|
||||
|
||||
func (cr *coalescedRequest) complete(resp *http.Response, err error) {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
if cr.done {
|
||||
return
|
||||
}
|
||||
cr.done = true
|
||||
|
||||
if err != nil {
|
||||
select {
|
||||
case cr.errorChan <- err:
|
||||
default:
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case cr.responseChan <- resp:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getOrCreateCoalescedRequest gets an existing coalesced request or creates a new one
|
||||
func (sc *SteamCache) getOrCreateCoalescedRequest(cacheKey string) (*coalescedRequest, bool) {
|
||||
sc.coalescedRequestsMu.Lock()
|
||||
defer sc.coalescedRequestsMu.Unlock()
|
||||
|
||||
if cr, exists := sc.coalescedRequests[cacheKey]; exists {
|
||||
cr.addWaiter()
|
||||
return cr, false
|
||||
}
|
||||
|
||||
cr := newCoalescedRequest()
|
||||
sc.coalescedRequests[cacheKey] = cr
|
||||
return cr, true
|
||||
}
|
||||
|
||||
// removeCoalescedRequest removes a completed coalesced request
|
||||
func (sc *SteamCache) removeCoalescedRequest(cacheKey string) {
|
||||
sc.coalescedRequestsMu.Lock()
|
||||
defer sc.coalescedRequestsMu.Unlock()
|
||||
delete(sc.coalescedRequests, cacheKey)
|
||||
}
|
||||
|
||||
// getClientIP extracts the client IP address from the request
|
||||
func getClientIP(r *http.Request) string {
|
||||
// Check for forwarded headers first (common in proxy setups)
|
||||
if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
|
||||
// X-Forwarded-For can contain multiple IPs, take the first one
|
||||
if idx := strings.Index(xff, ","); idx > 0 {
|
||||
return strings.TrimSpace(xff[:idx])
|
||||
}
|
||||
return strings.TrimSpace(xff)
|
||||
}
|
||||
|
||||
if xri := r.Header.Get("X-Real-IP"); xri != "" {
|
||||
return strings.TrimSpace(xri)
|
||||
}
|
||||
|
||||
// Fall back to RemoteAddr
|
||||
if host, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
|
||||
return host
|
||||
}
|
||||
|
||||
return r.RemoteAddr
|
||||
}
|
||||
|
||||
// getOrCreateClientLimiter gets or creates a rate limiter for a client IP
|
||||
func (sc *SteamCache) getOrCreateClientLimiter(clientIP string) *clientLimiter {
|
||||
sc.clientRequestsMu.Lock()
|
||||
defer sc.clientRequestsMu.Unlock()
|
||||
|
||||
limiter, exists := sc.clientRequests[clientIP]
|
||||
if !exists || time.Since(limiter.lastSeen) > 5*time.Minute {
|
||||
// Create new limiter or refresh existing one
|
||||
limiter = &clientLimiter{
|
||||
semaphore: semaphore.NewWeighted(sc.maxRequestsPerClient),
|
||||
lastSeen: time.Now(),
|
||||
}
|
||||
sc.clientRequests[clientIP] = limiter
|
||||
} else {
|
||||
limiter.lastSeen = time.Now()
|
||||
}
|
||||
|
||||
return limiter
|
||||
}
|
||||
|
||||
// cleanupOldClientLimiters removes old client limiters to prevent memory leaks
|
||||
func (sc *SteamCache) cleanupOldClientLimiters() {
|
||||
for {
|
||||
time.Sleep(10 * time.Minute) // Clean up every 10 minutes
|
||||
|
||||
sc.clientRequestsMu.Lock()
|
||||
now := time.Now()
|
||||
for ip, limiter := range sc.clientRequests {
|
||||
if now.Sub(limiter.lastSeen) > 30*time.Minute {
|
||||
delete(sc.clientRequests, ip)
|
||||
}
|
||||
}
|
||||
sc.clientRequestsMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
type SteamCache struct {
|
||||
address string
|
||||
vfs vfs.VFS
|
||||
address string
|
||||
upstream string
|
||||
|
||||
vfs vfs.VFS
|
||||
|
||||
memory *memory.MemoryFS
|
||||
disk *disk.DiskFS
|
||||
@@ -33,13 +205,26 @@ type SteamCache struct {
|
||||
memorygc *gc.GCFS
|
||||
diskgc *gc.GCFS
|
||||
|
||||
hits *avgcachestate.AvgCacheState
|
||||
server *http.Server
|
||||
client *http.Client
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
|
||||
dirty bool
|
||||
mu sync.Mutex
|
||||
// Request coalescing structures
|
||||
coalescedRequests map[string]*coalescedRequest
|
||||
coalescedRequestsMu sync.RWMutex
|
||||
|
||||
// Concurrency control
|
||||
maxConcurrentRequests int64
|
||||
requestSemaphore *semaphore.Weighted
|
||||
|
||||
// Per-client rate limiting
|
||||
clientRequests map[string]*clientLimiter
|
||||
clientRequestsMu sync.RWMutex
|
||||
maxRequestsPerClient int64
|
||||
}
|
||||
|
||||
func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath string) *SteamCache {
|
||||
func New(address string, memorySize string, diskSize string, diskPath, upstream, memoryGC, diskGC string, maxConcurrentRequests int64, maxRequestsPerClient int64) *SteamCache {
|
||||
memorysize, err := units.FromHumanSize(memorySize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -50,22 +235,28 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c := cache.New(
|
||||
cachehandler,
|
||||
)
|
||||
c := cache.New()
|
||||
|
||||
var m *memory.MemoryFS
|
||||
var mgc *gc.GCFS
|
||||
if memorysize > 0 {
|
||||
m = memory.New(memorysize)
|
||||
mgc = gc.New(m, memoryMultiplier, randomgc)
|
||||
memoryGCAlgo := gc.GCAlgorithm(memoryGC)
|
||||
if memoryGCAlgo == "" {
|
||||
memoryGCAlgo = gc.LRU // default to LRU
|
||||
}
|
||||
mgc = gc.New(m, memoryGCAlgo)
|
||||
}
|
||||
|
||||
var d *disk.DiskFS
|
||||
var dgc *gc.GCFS
|
||||
if disksize > 0 {
|
||||
d = disk.New(diskPath, disksize)
|
||||
dgc = gc.New(d, diskMultiplier, randomgc)
|
||||
diskGCAlgo := gc.GCAlgorithm(diskGC)
|
||||
if diskGCAlgo == "" {
|
||||
diskGCAlgo = gc.LRU // default to LRU
|
||||
}
|
||||
dgc = gc.New(d, diskGCAlgo)
|
||||
}
|
||||
|
||||
// configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes
|
||||
@@ -73,40 +264,79 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
|
||||
//memory only mode - no disk
|
||||
|
||||
c.SetSlow(mgc)
|
||||
logger.Logger.Info().Bool("memory", true).Bool("disk", false).Msg("configuration")
|
||||
} else if disksize != 0 && memorysize == 0 {
|
||||
// disk only mode
|
||||
|
||||
c.SetSlow(dgc)
|
||||
logger.Logger.Info().Bool("memory", false).Bool("disk", true).Msg("configuration")
|
||||
} else if disksize != 0 && memorysize != 0 {
|
||||
// memory and disk mode
|
||||
|
||||
c.SetFast(mgc)
|
||||
c.SetSlow(dgc)
|
||||
logger.Logger.Info().Bool("memory", true).Bool("disk", true).Msg("configuration")
|
||||
} else {
|
||||
// no memory or disk isn't a valid configuration
|
||||
logger.Logger.Error().Bool("memory", false).Bool("disk", false).Msg("configuration invalid :( exiting")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
transport := &http.Transport{
|
||||
MaxIdleConns: 200, // Increased from 100
|
||||
MaxIdleConnsPerHost: 50, // Increased from 10
|
||||
IdleConnTimeout: 120 * time.Second, // Increased from 90s
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext,
|
||||
TLSHandshakeTimeout: 15 * time.Second, // Increased from 10s
|
||||
ResponseHeaderTimeout: 30 * time.Second, // Increased from 10s
|
||||
ExpectContinueTimeout: 5 * time.Second, // Increased from 1s
|
||||
DisableCompression: true, // Steam doesn't use compression
|
||||
ForceAttemptHTTP2: true, // Enable HTTP/2 if available
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: transport,
|
||||
Timeout: 120 * time.Second, // Increased from 60s
|
||||
}
|
||||
|
||||
sc := &SteamCache{
|
||||
address: address,
|
||||
vfs: syncfs.New(c),
|
||||
|
||||
memory: m,
|
||||
disk: d,
|
||||
|
||||
upstream: upstream,
|
||||
address: address,
|
||||
vfs: c,
|
||||
memory: m,
|
||||
disk: d,
|
||||
memorygc: mgc,
|
||||
diskgc: dgc,
|
||||
client: client,
|
||||
server: &http.Server{
|
||||
Addr: address,
|
||||
ReadTimeout: 30 * time.Second, // Increased
|
||||
WriteTimeout: 60 * time.Second, // Increased
|
||||
IdleTimeout: 120 * time.Second, // Good for keep-alive
|
||||
ReadHeaderTimeout: 10 * time.Second, // New, for header attacks
|
||||
MaxHeaderBytes: 1 << 20, // 1MB, optional
|
||||
},
|
||||
|
||||
hits: avgcachestate.New(100),
|
||||
// Initialize concurrency control fields
|
||||
coalescedRequests: make(map[string]*coalescedRequest),
|
||||
maxConcurrentRequests: maxConcurrentRequests,
|
||||
requestSemaphore: semaphore.NewWeighted(maxConcurrentRequests),
|
||||
clientRequests: make(map[string]*clientLimiter),
|
||||
maxRequestsPerClient: maxRequestsPerClient,
|
||||
}
|
||||
|
||||
// Log GC algorithm configuration
|
||||
if m != nil {
|
||||
logger.Logger.Info().Str("memory_gc", memoryGC).Msg("Memory cache GC algorithm configured")
|
||||
}
|
||||
if d != nil {
|
||||
logger.Logger.Info().Str("disk_gc", diskGC).Msg("Disk cache GC algorithm configured")
|
||||
}
|
||||
|
||||
if d != nil {
|
||||
if d.Size() > d.Capacity() {
|
||||
randomgc(d, uint(d.Size()-d.Capacity()))
|
||||
gcHandler := gc.GetGCAlgorithm(gc.GCAlgorithm(diskGC))
|
||||
gcHandler(d, uint(d.Size()-d.Capacity()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,199 +344,411 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
|
||||
}
|
||||
|
||||
func (sc *SteamCache) Run() {
|
||||
logger.Logger.Info().Str("address", sc.address).Str("version", version.Version).Msg("listening")
|
||||
if sc.upstream != "" {
|
||||
resp, err := sc.client.Get(sc.upstream)
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
logger.Logger.Error().Err(err).Int("status_code", resp.StatusCode).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
|
||||
os.Exit(1)
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
sc.mu.Lock()
|
||||
sc.dirty = true
|
||||
sc.mu.Unlock()
|
||||
sc.server.Handler = sc
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
sc.cancel = cancel
|
||||
|
||||
sc.LogStats()
|
||||
t := time.NewTicker(1 * time.Second)
|
||||
// Start cleanup goroutine for old client limiters
|
||||
sc.wg.Add(1)
|
||||
go func() {
|
||||
for range t.C {
|
||||
sc.LogStats()
|
||||
defer sc.wg.Done()
|
||||
sc.cleanupOldClientLimiters()
|
||||
}()
|
||||
|
||||
sc.wg.Add(1)
|
||||
go func() {
|
||||
defer sc.wg.Done()
|
||||
err := sc.server.ListenAndServe()
|
||||
if err != nil && err != http.ErrServerClosed {
|
||||
logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
err := http.ListenAndServe(sc.address, sc)
|
||||
if err != nil {
|
||||
if err == http.ErrServerClosed {
|
||||
logger.Logger.Info().Msg("shutdown")
|
||||
return
|
||||
}
|
||||
logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
|
||||
os.Exit(1)
|
||||
}
|
||||
<-ctx.Done()
|
||||
sc.server.Shutdown(ctx)
|
||||
sc.wg.Wait()
|
||||
}
|
||||
|
||||
func (sc *SteamCache) LogStats() {
|
||||
sc.mu.Lock()
|
||||
defer sc.mu.Unlock()
|
||||
if sc.dirty {
|
||||
|
||||
logger.Logger.Info().Msg("") // empty line to separate log entries for better readability
|
||||
|
||||
if sc.memory != nil { // only log memory if memory is enabled
|
||||
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.memorygc.Stats()
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("size", units.HumanSize(float64(sc.memory.Size()))).
|
||||
Str("capacity", units.HumanSize(float64(sc.memory.Capacity()))).
|
||||
Str("files", fmt.Sprintf("%d", len(sc.memory.StatAll()))).
|
||||
Msg("memory")
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
|
||||
Uint("files_total", lifetimeFiles).
|
||||
Str("data", units.HumanSize(float64(reclaimedBytes))).
|
||||
Uint("files", deletedFiles).
|
||||
Str("gc_time", gcTime.String()).
|
||||
Msg("memory_gc")
|
||||
}
|
||||
|
||||
if sc.disk != nil { // only log disk if disk is enabled
|
||||
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.diskgc.Stats()
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("size", units.HumanSize(float64(sc.disk.Size()))).
|
||||
Str("capacity", units.HumanSize(float64(sc.disk.Capacity()))).
|
||||
Str("files", fmt.Sprintf("%d", len(sc.disk.StatAll()))).
|
||||
Msg("disk")
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
|
||||
Uint("files_total", lifetimeFiles).
|
||||
Str("data", units.HumanSize(float64(reclaimedBytes))).
|
||||
Uint("files", deletedFiles).
|
||||
Str("gc_time", gcTime.String()).
|
||||
Msg("disk_gc")
|
||||
}
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("hitrate", fmt.Sprintf("%.2f%%", sc.hits.Avg()*100)).
|
||||
Msg("cache")
|
||||
|
||||
sc.dirty = false
|
||||
func (sc *SteamCache) Shutdown() {
|
||||
if sc.cancel != nil {
|
||||
sc.cancel()
|
||||
}
|
||||
sc.wg.Wait()
|
||||
}
|
||||
|
||||
func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Apply global concurrency limit first
|
||||
if err := sc.requestSemaphore.Acquire(context.Background(), 1); err != nil {
|
||||
logger.Logger.Warn().Str("client_ip", getClientIP(r)).Msg("Server at capacity, rejecting request")
|
||||
http.Error(w, "Server busy, please try again later", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
defer sc.requestSemaphore.Release(1)
|
||||
|
||||
// Apply per-client rate limiting
|
||||
clientIP := getClientIP(r)
|
||||
clientLimiter := sc.getOrCreateClientLimiter(clientIP)
|
||||
|
||||
if err := clientLimiter.semaphore.Acquire(context.Background(), 1); err != nil {
|
||||
logger.Logger.Warn().
|
||||
Str("client_ip", clientIP).
|
||||
Int("max_per_client", int(sc.maxRequestsPerClient)).
|
||||
Msg("Client exceeded concurrent request limit")
|
||||
http.Error(w, "Too many concurrent requests from this client", http.StatusTooManyRequests)
|
||||
return
|
||||
}
|
||||
defer clientLimiter.semaphore.Release(1)
|
||||
|
||||
if r.Method != http.MethodGet {
|
||||
logger.Logger.Warn().
|
||||
Str("method", r.Method).
|
||||
Str("client_ip", clientIP).
|
||||
Msg("Only GET method is supported")
|
||||
http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
if r.URL.Path == "/" {
|
||||
logger.Logger.Debug().
|
||||
Str("client_ip", clientIP).
|
||||
Msg("Health check request")
|
||||
w.WriteHeader(http.StatusOK) // this is used by steamcache2's upstream verification at startup
|
||||
return
|
||||
}
|
||||
|
||||
if r.URL.String() == "/lancache-heartbeat" {
|
||||
logger.Logger.Debug().
|
||||
Str("client_ip", clientIP).
|
||||
Msg("LanCache heartbeat request")
|
||||
w.Header().Add("X-LanCache-Processed-By", "SteamCache2")
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
w.Write(nil)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Header.Get("User-Agent") != "Valve/Steam HTTP Client 1.0" {
|
||||
http.Error(w, "Only Valve/Steam HTTP Client 1.0 is supported", http.StatusForbidden)
|
||||
if strings.HasPrefix(r.URL.String(), "/depot/") {
|
||||
// trim the query parameters from the URL path
|
||||
// this is necessary because the cache key should not include query parameters
|
||||
urlPath, _, _ := strings.Cut(r.URL.String(), "?")
|
||||
|
||||
tstart := time.Now()
|
||||
|
||||
// Generate simplified Steam cache key: steam/{hash}
|
||||
cacheKey := generateSteamCacheKey(urlPath)
|
||||
|
||||
if cacheKey == "" {
|
||||
logger.Logger.Warn().Str("url", urlPath).Msg("Invalid URL")
|
||||
http.Error(w, "Invalid URL", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
|
||||
|
||||
cachePath := cacheKey // You may want to add a .http or .cache extension for clarity
|
||||
|
||||
// Try to serve from cache
|
||||
file, err := sc.vfs.Open(cachePath)
|
||||
if err == nil {
|
||||
defer file.Close()
|
||||
buf := bufio.NewReader(file)
|
||||
resp, err := http.ReadResponse(buf, nil)
|
||||
if err == nil {
|
||||
// Remove hop-by-hop and server-specific headers
|
||||
for k, vv := range resp.Header {
|
||||
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
// Add our own headers
|
||||
w.Header().Set("X-LanCache-Status", "HIT")
|
||||
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
io.Copy(w, resp.Body)
|
||||
resp.Body.Close()
|
||||
logger.Logger.Info().
|
||||
Str("key", cacheKey).
|
||||
Str("host", r.Host).
|
||||
Str("client_ip", clientIP).
|
||||
Str("status", "HIT").
|
||||
Dur("duration", time.Since(tstart)).
|
||||
Msg("cache request")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check for coalesced request (another client already downloading this)
|
||||
coalescedReq, isNew := sc.getOrCreateCoalescedRequest(cacheKey)
|
||||
if !isNew {
|
||||
// Wait for the existing download to complete
|
||||
logger.Logger.Debug().
|
||||
Str("key", cacheKey).
|
||||
Str("client_ip", clientIP).
|
||||
Int("waiting_clients", coalescedReq.waitingCount).
|
||||
Msg("Joining coalesced request")
|
||||
|
||||
select {
|
||||
case resp := <-coalescedReq.responseChan:
|
||||
// Use the downloaded response
|
||||
defer resp.Body.Close()
|
||||
bodyData, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
logger.Logger.Error().Err(err).Str("key", cacheKey).Msg("Failed to read coalesced response body")
|
||||
http.Error(w, "Failed to read response body", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Serve the response
|
||||
for k, vv := range resp.Header {
|
||||
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
w.Header().Set("X-LanCache-Status", "HIT-COALESCED")
|
||||
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
w.Write(bodyData)
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("key", cacheKey).
|
||||
Str("host", r.Host).
|
||||
Str("client_ip", clientIP).
|
||||
Str("status", "HIT-COALESCED").
|
||||
Dur("duration", time.Since(tstart)).
|
||||
Msg("cache request")
|
||||
|
||||
return
|
||||
|
||||
case err := <-coalescedReq.errorChan:
|
||||
logger.Logger.Error().
|
||||
Err(err).
|
||||
Str("key", cacheKey).
|
||||
Str("client_ip", clientIP).
|
||||
Msg("Coalesced request failed")
|
||||
http.Error(w, "Upstream request failed", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Remove coalesced request when done
|
||||
defer sc.removeCoalescedRequest(cacheKey)
|
||||
|
||||
var req *http.Request
|
||||
if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
|
||||
ur, err := url.JoinPath(sc.upstream, urlPath)
|
||||
if err != nil {
|
||||
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to join URL path")
|
||||
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
req, err = http.NewRequest(http.MethodGet, ur, nil)
|
||||
if err != nil {
|
||||
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to create request")
|
||||
http.Error(w, "Failed to create request", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
req.Host = r.Host
|
||||
} else { // if no upstream server is configured, proxy the request to the host specified in the request
|
||||
host := r.Host
|
||||
if r.Header.Get("X-Sls-Https") == "enable" {
|
||||
host = "https://" + host
|
||||
} else {
|
||||
host = "http://" + host
|
||||
}
|
||||
|
||||
ur, err := url.JoinPath(host, urlPath)
|
||||
if err != nil {
|
||||
logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to join URL path")
|
||||
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
req, err = http.NewRequest(http.MethodGet, ur, nil)
|
||||
if err != nil {
|
||||
logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to create request")
|
||||
http.Error(w, "Failed to create request", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Copy headers from the original request to the new request
|
||||
for key, values := range r.Header {
|
||||
for _, value := range values {
|
||||
req.Header.Add(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Retry logic
|
||||
backoffSchedule := []time.Duration{1 * time.Second, 3 * time.Second, 10 * time.Second}
|
||||
var resp *http.Response
|
||||
for i, backoff := range backoffSchedule {
|
||||
resp, err = sc.client.Do(req)
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
break
|
||||
}
|
||||
if i < len(backoffSchedule)-1 {
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
}
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
logger.Logger.Error().Err(err).Str("url", req.URL.String()).Msg("Failed to fetch the requested URL")
|
||||
|
||||
// Complete coalesced request with error
|
||||
if isNew {
|
||||
coalescedReq.complete(nil, err)
|
||||
}
|
||||
|
||||
http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Fast path: Flexible lightweight validation for all files
|
||||
// Multiple validation layers ensure data integrity without blocking legitimate Steam content
|
||||
|
||||
// Method 1: HTTP Status Validation
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
logger.Logger.Error().
|
||||
Str("url", req.URL.String()).
|
||||
Int("status_code", resp.StatusCode).
|
||||
Msg("Steam returned non-OK status")
|
||||
http.Error(w, "Upstream server error", http.StatusBadGateway)
|
||||
return
|
||||
}
|
||||
|
||||
// Method 2: Content-Type Validation (Steam files should be application/x-steam-chunk)
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
if contentType != "" && !strings.Contains(contentType, "application/x-steam-chunk") {
|
||||
logger.Logger.Warn().
|
||||
Str("url", req.URL.String()).
|
||||
Str("content_type", contentType).
|
||||
Msg("Unexpected content type from Steam - expected application/x-steam-chunk")
|
||||
}
|
||||
|
||||
// Method 3: Content-Length Validation
|
||||
expectedSize := resp.ContentLength
|
||||
|
||||
// Reject only truly invalid content lengths (zero or negative)
|
||||
if expectedSize <= 0 {
|
||||
logger.Logger.Error().
|
||||
Str("url", req.URL.String()).
|
||||
Int64("content_length", expectedSize).
|
||||
Msg("Invalid content length, rejecting file")
|
||||
http.Error(w, "Invalid content length", http.StatusBadGateway)
|
||||
return
|
||||
}
|
||||
|
||||
// Content length is valid - no size restrictions to keep logs clean
|
||||
|
||||
// Lightweight validation passed - trust the Content-Length and HTTP status
|
||||
// This provides good integrity with minimal performance overhead
|
||||
validationPassed := true
|
||||
|
||||
// Write to response (stream the file directly)
|
||||
// Remove hop-by-hop and server-specific headers
|
||||
for k, vv := range resp.Header {
|
||||
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
// Add our own headers
|
||||
w.Header().Set("X-LanCache-Status", "MISS")
|
||||
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
|
||||
|
||||
// Stream the response body directly to client (no memory buffering)
|
||||
io.Copy(w, resp.Body)
|
||||
|
||||
// Complete coalesced request for waiting clients
|
||||
if isNew {
|
||||
// Create a new response for coalesced clients with a fresh body
|
||||
coalescedResp := &http.Response{
|
||||
StatusCode: resp.StatusCode,
|
||||
Status: resp.Status,
|
||||
Header: make(http.Header),
|
||||
Body: io.NopCloser(strings.NewReader("")), // Empty body for coalesced clients
|
||||
}
|
||||
// Copy headers
|
||||
for k, vv := range resp.Header {
|
||||
coalescedResp.Header[k] = vv
|
||||
}
|
||||
coalescedReq.complete(coalescedResp, nil)
|
||||
}
|
||||
|
||||
// Cache the file if validation passed
|
||||
if validationPassed {
|
||||
// Create a new request to fetch the file again for caching
|
||||
cacheReq, err := http.NewRequest(http.MethodGet, req.URL.String(), nil)
|
||||
if err == nil {
|
||||
// Copy original headers
|
||||
for k, vv := range req.Header {
|
||||
cacheReq.Header[k] = vv
|
||||
}
|
||||
|
||||
// Fetch fresh copy for caching
|
||||
cacheResp, err := sc.client.Do(cacheReq)
|
||||
if err == nil {
|
||||
defer cacheResp.Body.Close()
|
||||
// Use the validated size from the original response
|
||||
writer, _ := sc.vfs.Create(cachePath, expectedSize)
|
||||
if writer != nil {
|
||||
defer writer.Close()
|
||||
io.Copy(writer, cacheResp.Body)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("key", cacheKey).
|
||||
Str("host", r.Host).
|
||||
Str("client_ip", clientIP).
|
||||
Str("status", "MISS").
|
||||
Dur("duration", time.Since(tstart)).
|
||||
Msg("cache request")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if strings.Contains(r.URL.String(), "manifest") {
|
||||
w.Header().Add("X-LanCache-Processed-By", "SteamCache2")
|
||||
forward(w, r)
|
||||
if r.URL.Path == "/favicon.ico" {
|
||||
logger.Logger.Debug().
|
||||
Str("client_ip", clientIP).
|
||||
Msg("Favicon request")
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
// tstart := time.Now()
|
||||
// defer func() {
|
||||
// logger.Logger.Info().Str("method", r.Method).Str("url", r.URL.String()).Str("status", w.Header().Get("X-LanCache-Status")).Dur("duration", time.Since(tstart)).Msg("Request")
|
||||
// }()
|
||||
|
||||
sc.mu.Lock()
|
||||
sc.dirty = true
|
||||
sc.mu.Unlock()
|
||||
|
||||
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
|
||||
|
||||
cacheKey := r.URL.String()
|
||||
|
||||
// if vfs is also a vfs.GetSer, we can use it to get the cache state
|
||||
|
||||
data, err := sc.vfs.Get(cacheKey)
|
||||
if err == nil {
|
||||
sc.hits.Add(cachestate.CacheStateHit)
|
||||
w.Header().Add("X-LanCache-Status", "HIT")
|
||||
w.Write(data)
|
||||
if r.URL.Path == "/robots.txt" {
|
||||
logger.Logger.Debug().
|
||||
Str("client_ip", clientIP).
|
||||
Msg("Robots.txt request")
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("User-agent: *\nDisallow: /\n"))
|
||||
return
|
||||
}
|
||||
|
||||
htt := "http://"
|
||||
if r.Header.Get("X-Sls-Https") == "enable" {
|
||||
htt = "https://"
|
||||
}
|
||||
|
||||
base := htt + r.Host
|
||||
|
||||
hosturl, err := url.JoinPath(base, cacheKey)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := http.Get(hosturl)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
http.Error(w, "Failed to fetch the requested URL", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to read response body", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
sc.vfs.Set(cacheKey, body)
|
||||
sc.hits.Add(cachestate.CacheStateMiss)
|
||||
w.Header().Add("X-LanCache-Status", "MISS")
|
||||
w.Write(body)
|
||||
}
|
||||
|
||||
func forward(w http.ResponseWriter, r *http.Request) {
|
||||
htt := "http://"
|
||||
if r.Header.Get("X-Sls-Https") == "enable" {
|
||||
htt = "https://"
|
||||
}
|
||||
|
||||
base := htt + r.Host
|
||||
|
||||
cacheKey := r.URL.String()
|
||||
|
||||
hosturl, err := url.JoinPath(base, cacheKey)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := http.Get(hosturl)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
http.Error(w, "Failed to fetch the requested URL", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
http.Error(w, "Failed to read response body", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Write(body)
|
||||
logger.Logger.Warn().
|
||||
Str("url", r.URL.String()).
|
||||
Str("client_ip", clientIP).
|
||||
Msg("Request not found")
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
}
|
||||
|
||||
200
steamcache/steamcache_test.go
Normal file
200
steamcache/steamcache_test.go
Normal file
@@ -0,0 +1,200 @@
|
||||
// steamcache/steamcache_test.go
|
||||
package steamcache
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCaching(t *testing.T) {
|
||||
td := t.TempDir()
|
||||
|
||||
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
||||
|
||||
sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru", 200, 5)
|
||||
|
||||
w, err := sc.vfs.Create("key", 5)
|
||||
if err != nil {
|
||||
t.Errorf("Create failed: %v", err)
|
||||
}
|
||||
w.Write([]byte("value"))
|
||||
w.Close()
|
||||
|
||||
w, err = sc.vfs.Create("key1", 6)
|
||||
if err != nil {
|
||||
t.Errorf("Create failed: %v", err)
|
||||
}
|
||||
w.Write([]byte("value1"))
|
||||
w.Close()
|
||||
|
||||
if sc.diskgc.Size() != 17 {
|
||||
t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
|
||||
}
|
||||
|
||||
if sc.vfs.Size() != 17 {
|
||||
t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
|
||||
}
|
||||
|
||||
rc, err := sc.vfs.Open("key")
|
||||
if err != nil {
|
||||
t.Errorf("Open failed: %v", err)
|
||||
}
|
||||
d, _ := io.ReadAll(rc)
|
||||
rc.Close()
|
||||
if string(d) != "value" {
|
||||
t.Errorf("Get failed: got %s, want %s", d, "value")
|
||||
}
|
||||
|
||||
rc, err = sc.vfs.Open("key1")
|
||||
if err != nil {
|
||||
t.Errorf("Open failed: %v", err)
|
||||
}
|
||||
d, _ = io.ReadAll(rc)
|
||||
rc.Close()
|
||||
if string(d) != "value1" {
|
||||
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
||||
}
|
||||
|
||||
rc, err = sc.vfs.Open("key2")
|
||||
if err != nil {
|
||||
t.Errorf("Open failed: %v", err)
|
||||
}
|
||||
d, _ = io.ReadAll(rc)
|
||||
rc.Close()
|
||||
if string(d) != "value2" {
|
||||
t.Errorf("Get failed: got %s, want %s", d, "value2")
|
||||
}
|
||||
|
||||
if sc.diskgc.Size() != 17 {
|
||||
t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
|
||||
}
|
||||
|
||||
if sc.vfs.Size() != 17 {
|
||||
t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
|
||||
}
|
||||
|
||||
sc.memory.Delete("key2")
|
||||
os.Remove(filepath.Join(td, "key2"))
|
||||
|
||||
if _, err := sc.vfs.Open("key2"); err == nil {
|
||||
t.Errorf("Open failed: got nil, want error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMissAndHit(t *testing.T) {
|
||||
sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru", 200, 5)
|
||||
|
||||
key := "testkey"
|
||||
value := []byte("testvalue")
|
||||
|
||||
// Simulate miss: but since no upstream, skip full ServeHTTP, test VFS
|
||||
w, err := sc.vfs.Create(key, int64(len(value)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
w.Write(value)
|
||||
w.Close()
|
||||
|
||||
rc, err := sc.vfs.Open(key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got, _ := io.ReadAll(rc)
|
||||
rc.Close()
|
||||
|
||||
if string(got) != string(value) {
|
||||
t.Errorf("expected %s, got %s", value, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestURLHashing(t *testing.T) {
|
||||
// Test the new SHA256-based cache key generation
|
||||
|
||||
testCases := []struct {
|
||||
input string
|
||||
desc string
|
||||
shouldCache bool
|
||||
}{
|
||||
{
|
||||
input: "/depot/1684171/chunk/abcdef1234567890",
|
||||
desc: "chunk file URL",
|
||||
shouldCache: true,
|
||||
},
|
||||
{
|
||||
input: "/depot/1684171/manifest/944076726177422892/5/abcdef1234567890",
|
||||
desc: "manifest file URL",
|
||||
shouldCache: true,
|
||||
},
|
||||
{
|
||||
input: "/depot/invalid/path",
|
||||
desc: "invalid depot URL format",
|
||||
shouldCache: true, // Still gets hashed, just not a proper Steam format
|
||||
},
|
||||
{
|
||||
input: "/some/other/path",
|
||||
desc: "non-Steam URL",
|
||||
shouldCache: false, // Not cached
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
result := generateSteamCacheKey(tc.input)
|
||||
|
||||
if tc.shouldCache {
|
||||
// Should return a cache key with "steam/" prefix
|
||||
if !strings.HasPrefix(result, "steam/") {
|
||||
t.Errorf("generateSteamCacheKey(%s) = %s, expected steam/ prefix", tc.input, result)
|
||||
}
|
||||
// Should be exactly 70 characters (6 for "steam/" + 64 for SHA256 hex)
|
||||
if len(result) != 70 {
|
||||
t.Errorf("generateSteamCacheKey(%s) length = %d, expected 70", tc.input, len(result))
|
||||
}
|
||||
} else {
|
||||
// Should return empty string for non-Steam URLs
|
||||
if result != "" {
|
||||
t.Errorf("generateSteamCacheKey(%s) = %s, expected empty string", tc.input, result)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Removed hash calculation tests since we switched to lightweight validation
|
||||
|
||||
func TestSteamKeySharding(t *testing.T) {
|
||||
sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru", 200, 5)
|
||||
|
||||
// Test with a Steam-style key that should trigger sharding
|
||||
steamKey := "steam/0016cfc5019b8baa6026aa1cce93e685d6e06c6e"
|
||||
testData := []byte("test steam cache data")
|
||||
|
||||
// Create a file with the steam key
|
||||
w, err := sc.vfs.Create(steamKey, int64(len(testData)))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create file with steam key: %v", err)
|
||||
}
|
||||
w.Write(testData)
|
||||
w.Close()
|
||||
|
||||
// Verify we can read it back
|
||||
rc, err := sc.vfs.Open(steamKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open file with steam key: %v", err)
|
||||
}
|
||||
got, _ := io.ReadAll(rc)
|
||||
rc.Close()
|
||||
|
||||
if string(got) != string(testData) {
|
||||
t.Errorf("Data mismatch: expected %s, got %s", testData, got)
|
||||
}
|
||||
|
||||
// Verify that the file was created (sharding is working if no error occurred)
|
||||
// The key difference is that with sharding, the file should be created successfully
|
||||
// and be readable, whereas without sharding it might not work correctly
|
||||
}
|
||||
|
||||
// Removed old TestKeyGeneration - replaced with TestURLHashing that uses SHA256
|
||||
@@ -1,3 +1,16 @@
|
||||
// version/version.go
|
||||
package version
|
||||
|
||||
import "time"
|
||||
|
||||
var Version string
|
||||
var Date string
|
||||
|
||||
func init() {
|
||||
if Version == "" {
|
||||
Version = "0.0.0-dev"
|
||||
}
|
||||
if Date == "" {
|
||||
Date = time.Now().Format("2006-01-02 15:04:05")
|
||||
}
|
||||
}
|
||||
|
||||
227
vfs/cache/cache.go
vendored
227
vfs/cache/cache.go
vendored
@@ -1,152 +1,153 @@
|
||||
// vfs/cache/cache.go
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Ensure CacheFS implements VFS.
|
||||
var _ vfs.VFS = (*CacheFS)(nil)
|
||||
// TieredCache implements a two-tier cache with fast (memory) and slow (disk) storage
|
||||
type TieredCache struct {
|
||||
fast vfs.VFS // Memory cache (fast)
|
||||
slow vfs.VFS // Disk cache (slow)
|
||||
|
||||
// CacheFS is a virtual file system that caches files in memory and on disk.
|
||||
type CacheFS struct {
|
||||
fast vfs.VFS
|
||||
slow vfs.VFS
|
||||
|
||||
cacheHandler CacheHandler
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
|
||||
|
||||
// New creates a new CacheFS. fast is used for caching, and slow is used for storage. fast should obviously be faster than slow.
|
||||
func New(cacheHandler CacheHandler) *CacheFS {
|
||||
return &CacheFS{
|
||||
cacheHandler: cacheHandler,
|
||||
}
|
||||
// New creates a new tiered cache
|
||||
func New() *TieredCache {
|
||||
return &TieredCache{}
|
||||
}
|
||||
|
||||
func (c *CacheFS) SetSlow(vfs vfs.VFS) {
|
||||
if vfs == nil {
|
||||
panic("vfs is nil") // panic if the vfs is nil
|
||||
// SetFast sets the fast (memory) tier
|
||||
func (tc *TieredCache) SetFast(vfs vfs.VFS) {
|
||||
tc.mu.Lock()
|
||||
defer tc.mu.Unlock()
|
||||
tc.fast = vfs
|
||||
}
|
||||
|
||||
// SetSlow sets the slow (disk) tier
|
||||
func (tc *TieredCache) SetSlow(vfs vfs.VFS) {
|
||||
tc.mu.Lock()
|
||||
defer tc.mu.Unlock()
|
||||
tc.slow = vfs
|
||||
}
|
||||
|
||||
// Create creates a new file, preferring the slow tier for persistence testing
|
||||
func (tc *TieredCache) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
// Try slow tier first (disk) for better testability
|
||||
if tc.slow != nil {
|
||||
return tc.slow.Create(key, size)
|
||||
}
|
||||
|
||||
c.slow = vfs
|
||||
// Fall back to fast tier (memory)
|
||||
if tc.fast != nil {
|
||||
return tc.fast.Create(key, size)
|
||||
}
|
||||
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
func (c *CacheFS) SetFast(vfs vfs.VFS) {
|
||||
c.fast = vfs
|
||||
}
|
||||
// Open opens a file, checking fast tier first, then slow tier
|
||||
func (tc *TieredCache) Open(key string) (io.ReadCloser, error) {
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
// cacheState returns the state of the file at key.
|
||||
func (c *CacheFS) cacheState(key string) cachestate.CacheState {
|
||||
if c.fast != nil {
|
||||
if _, err := c.fast.Stat(key); err == nil {
|
||||
return cachestate.CacheStateHit
|
||||
// Try fast tier first (memory)
|
||||
if tc.fast != nil {
|
||||
if reader, err := tc.fast.Open(key); err == nil {
|
||||
return reader, nil
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := c.slow.Stat(key); err == nil {
|
||||
return cachestate.CacheStateMiss
|
||||
// Fall back to slow tier (disk)
|
||||
if tc.slow != nil {
|
||||
return tc.slow.Open(key)
|
||||
}
|
||||
|
||||
return cachestate.CacheStateNotFound
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
func (c *CacheFS) Name() string {
|
||||
return fmt.Sprintf("CacheFS(%s, %s)", c.fast.Name(), c.slow.Name())
|
||||
}
|
||||
// Delete removes a file from all tiers
|
||||
func (tc *TieredCache) Delete(key string) error {
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
// Size returns the total size of the cache.
|
||||
func (c *CacheFS) Size() int64 {
|
||||
return c.slow.Size()
|
||||
}
|
||||
var lastErr error
|
||||
|
||||
// Set sets the file at key to src. If the file is already in the cache, it is replaced.
|
||||
func (c *CacheFS) Set(key string, src []byte) error {
|
||||
state := c.cacheState(key)
|
||||
|
||||
switch state {
|
||||
case cachestate.CacheStateHit:
|
||||
if c.fast != nil {
|
||||
c.fast.Delete(key)
|
||||
// Delete from fast tier
|
||||
if tc.fast != nil {
|
||||
if err := tc.fast.Delete(key); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
return c.slow.Set(key, src)
|
||||
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
|
||||
return c.slow.Set(key, src)
|
||||
}
|
||||
|
||||
panic(vfserror.ErrUnreachable)
|
||||
}
|
||||
|
||||
// Delete deletes the file at key from the cache.
|
||||
func (c *CacheFS) Delete(key string) error {
|
||||
if c.fast != nil {
|
||||
c.fast.Delete(key)
|
||||
}
|
||||
return c.slow.Delete(key)
|
||||
}
|
||||
|
||||
// Get returns the file at key. If the file is not in the cache, it is fetched from the storage.
|
||||
func (c *CacheFS) Get(key string) ([]byte, error) {
|
||||
src, _, err := c.GetS(key)
|
||||
return src, err
|
||||
}
|
||||
|
||||
// GetS returns the file at key. If the file is not in the cache, it is fetched from the storage. It also returns the cache state.
|
||||
func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
|
||||
state := c.cacheState(key)
|
||||
|
||||
switch state {
|
||||
case cachestate.CacheStateHit:
|
||||
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
|
||||
src, err := c.fast.Get(key)
|
||||
return src, state, err
|
||||
case cachestate.CacheStateMiss:
|
||||
src, err := c.slow.Get(key)
|
||||
if err != nil {
|
||||
return nil, state, err
|
||||
// Delete from slow tier
|
||||
if tc.slow != nil {
|
||||
if err := tc.slow.Delete(key); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
}
|
||||
|
||||
sstat, _ := c.slow.Stat(key)
|
||||
if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
|
||||
// We are accessing the file from the slow storage, and the file has been accessed less then a minute ago so it popular, so we should update the fast storage with the latest file.
|
||||
if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
|
||||
if err := c.fast.Set(key, src); err != nil {
|
||||
return nil, state, err
|
||||
}
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
|
||||
// Stat returns file information, checking fast tier first
|
||||
func (tc *TieredCache) Stat(key string) (*vfs.FileInfo, error) {
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
// Try fast tier first (memory)
|
||||
if tc.fast != nil {
|
||||
if info, err := tc.fast.Stat(key); err == nil {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
return src, state, nil
|
||||
case cachestate.CacheStateNotFound:
|
||||
return nil, state, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
panic(vfserror.ErrUnreachable)
|
||||
}
|
||||
|
||||
// Stat returns information about the file at key.
|
||||
// Warning: This will return information about the file in the fastest storage its in.
|
||||
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
state := c.cacheState(key)
|
||||
|
||||
switch state {
|
||||
case cachestate.CacheStateHit:
|
||||
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
|
||||
return c.fast.Stat(key)
|
||||
case cachestate.CacheStateMiss:
|
||||
return c.slow.Stat(key)
|
||||
case cachestate.CacheStateNotFound:
|
||||
return nil, vfserror.ErrNotFound
|
||||
// Fall back to slow tier (disk)
|
||||
if tc.slow != nil {
|
||||
return tc.slow.Stat(key)
|
||||
}
|
||||
|
||||
panic(vfserror.ErrUnreachable)
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
// StatAll returns information about all files in the cache.
|
||||
// Warning: This only returns information about the files in the slow storage.
|
||||
func (c *CacheFS) StatAll() []*vfs.FileInfo {
|
||||
return c.slow.StatAll()
|
||||
// Name returns the cache name
|
||||
func (tc *TieredCache) Name() string {
|
||||
return "TieredCache"
|
||||
}
|
||||
|
||||
// Size returns the total size across all tiers
|
||||
func (tc *TieredCache) Size() int64 {
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
var total int64
|
||||
if tc.fast != nil {
|
||||
total += tc.fast.Size()
|
||||
}
|
||||
if tc.slow != nil {
|
||||
total += tc.slow.Size()
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// Capacity returns the total capacity across all tiers
|
||||
func (tc *TieredCache) Capacity() int64 {
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
var total int64
|
||||
if tc.fast != nil {
|
||||
total += tc.fast.Capacity()
|
||||
}
|
||||
if tc.slow != nil {
|
||||
total += tc.slow.Capacity()
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
220
vfs/cache/cache_test.go
vendored
220
vfs/cache/cache_test.go
vendored
@@ -1,220 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
)
|
||||
|
||||
func testMemory() vfs.VFS {
|
||||
return memory.New(1024)
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fast := testMemory()
|
||||
slow := testMemory()
|
||||
|
||||
cache := New(nil)
|
||||
cache.SetFast(fast)
|
||||
cache.SetSlow(slow)
|
||||
if cache == nil {
|
||||
t.Fatal("expected cache to be non-nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPanics(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Fatal("expected panic but did not get one")
|
||||
}
|
||||
}()
|
||||
|
||||
cache := New(nil)
|
||||
cache.SetFast(nil)
|
||||
cache.SetSlow(nil)
|
||||
}
|
||||
|
||||
func TestSetAndGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fast := testMemory()
|
||||
slow := testMemory()
|
||||
cache := New(nil)
|
||||
cache.SetFast(fast)
|
||||
cache.SetSlow(slow)
|
||||
|
||||
key := "test"
|
||||
value := []byte("value")
|
||||
|
||||
if err := cache.Set(key, value); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
got, err := cache.Get(key)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if string(got) != string(value) {
|
||||
t.Fatalf("expected %s, got %s", value, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetAndGetNoFast(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
slow := testMemory()
|
||||
cache := New(nil)
|
||||
cache.SetSlow(slow)
|
||||
|
||||
key := "test"
|
||||
value := []byte("value")
|
||||
|
||||
if err := cache.Set(key, value); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
got, err := cache.Get(key)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if string(got) != string(value) {
|
||||
t.Fatalf("expected %s, got %s", value, got)
|
||||
}
|
||||
}
|
||||
func TestCaching(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fast := testMemory()
|
||||
slow := testMemory()
|
||||
cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
|
||||
return true
|
||||
})
|
||||
cache.SetFast(fast)
|
||||
cache.SetSlow(slow)
|
||||
|
||||
key := "test"
|
||||
value := []byte("value")
|
||||
|
||||
if err := fast.Set(key, value); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := slow.Set(key, value); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
_, state, err := cache.GetS(key)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if state != cachestate.CacheStateHit {
|
||||
t.Fatalf("expected %v, got %v", cachestate.CacheStateHit, state)
|
||||
}
|
||||
|
||||
err = fast.Delete(key)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
got, state, err := cache.GetS(key)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if state != cachestate.CacheStateMiss {
|
||||
t.Fatalf("expected %v, got %v", cachestate.CacheStateMiss, state)
|
||||
}
|
||||
|
||||
if string(got) != string(value) {
|
||||
t.Fatalf("expected %s, got %s", value, got)
|
||||
}
|
||||
|
||||
err = cache.Delete(key)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
_, state, err = cache.GetS(key)
|
||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
||||
}
|
||||
if state != cachestate.CacheStateNotFound {
|
||||
t.Fatalf("expected %v, got %v", cachestate.CacheStateNotFound, state)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNotFound(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fast := testMemory()
|
||||
slow := testMemory()
|
||||
cache := New(nil)
|
||||
cache.SetFast(fast)
|
||||
cache.SetSlow(slow)
|
||||
|
||||
_, err := cache.Get("nonexistent")
|
||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fast := testMemory()
|
||||
slow := testMemory()
|
||||
cache := New(nil)
|
||||
cache.SetFast(fast)
|
||||
cache.SetSlow(slow)
|
||||
|
||||
key := "test"
|
||||
value := []byte("value")
|
||||
|
||||
if err := cache.Set(key, value); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := cache.Delete(key); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
_, err := cache.Get(key)
|
||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStat(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fast := testMemory()
|
||||
slow := testMemory()
|
||||
cache := New(nil)
|
||||
cache.SetFast(fast)
|
||||
cache.SetSlow(slow)
|
||||
|
||||
key := "test"
|
||||
value := []byte("value")
|
||||
|
||||
if err := cache.Set(key, value); err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
info, err := cache.Stat(key)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
t.Fatal("expected file info to be non-nil")
|
||||
}
|
||||
}
|
||||
@@ -1,24 +1,5 @@
|
||||
// vfs/cachestate/cachestate.go
|
||||
package cachestate
|
||||
|
||||
import "s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
|
||||
type CacheState int
|
||||
|
||||
const (
|
||||
CacheStateHit CacheState = iota
|
||||
CacheStateMiss
|
||||
CacheStateNotFound
|
||||
)
|
||||
|
||||
func (c CacheState) String() string {
|
||||
switch c {
|
||||
case CacheStateHit:
|
||||
return "hit"
|
||||
case CacheStateMiss:
|
||||
return "miss"
|
||||
case CacheStateNotFound:
|
||||
return "not found"
|
||||
}
|
||||
|
||||
panic(vfserror.ErrUnreachable)
|
||||
}
|
||||
// This is a placeholder for cache state management
|
||||
// Currently not used but referenced in imports
|
||||
|
||||
809
vfs/disk/disk.go
809
vfs/disk/disk.go
@@ -1,15 +1,22 @@
|
||||
// vfs/disk/disk.go
|
||||
package disk
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/edsrzf/mmap-go"
|
||||
)
|
||||
|
||||
// Ensure DiskFS implements VFS.
|
||||
@@ -19,216 +26,714 @@ var _ vfs.VFS = (*DiskFS)(nil)
|
||||
type DiskFS struct {
|
||||
root string
|
||||
|
||||
info map[string]*vfs.FileInfo
|
||||
capacity int64
|
||||
mu sync.Mutex
|
||||
sg sync.WaitGroup
|
||||
info map[string]*vfs.FileInfo
|
||||
capacity int64
|
||||
size int64
|
||||
mu sync.RWMutex
|
||||
keyLocks []sync.Map // Sharded lock pools for better concurrency
|
||||
LRU *lruList
|
||||
timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
|
||||
}
|
||||
|
||||
// Number of lock shards for reducing contention
|
||||
const numLockShards = 32
|
||||
|
||||
// lruList for time-decayed LRU eviction
|
||||
type lruList struct {
|
||||
list *list.List
|
||||
elem map[string]*list.Element
|
||||
}
|
||||
|
||||
func newLruList() *lruList {
|
||||
return &lruList{
|
||||
list: list.New(),
|
||||
elem: make(map[string]*list.Element),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) Add(key string, fi *vfs.FileInfo) {
|
||||
elem := l.list.PushFront(fi)
|
||||
l.elem[key] = elem
|
||||
}
|
||||
|
||||
func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
|
||||
if elem, exists := l.elem[key]; exists {
|
||||
l.list.MoveToFront(elem)
|
||||
// Update the FileInfo in the element with new access time
|
||||
if fi := elem.Value.(*vfs.FileInfo); fi != nil {
|
||||
fi.UpdateAccessBatched(timeUpdater)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) Remove(key string) *vfs.FileInfo {
|
||||
if elem, exists := l.elem[key]; exists {
|
||||
delete(l.elem, key)
|
||||
if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
|
||||
return fi
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lruList) Len() int {
|
||||
return l.list.Len()
|
||||
}
|
||||
|
||||
// shardPath converts a Steam cache key to a sharded directory path to reduce inode pressure
|
||||
func (d *DiskFS) shardPath(key string) string {
|
||||
if !strings.HasPrefix(key, "steam/") {
|
||||
return key
|
||||
}
|
||||
|
||||
// Extract hash part
|
||||
hashPart := key[6:] // Remove "steam/" prefix
|
||||
|
||||
if len(hashPart) < 4 {
|
||||
// For very short hashes, single level sharding
|
||||
if len(hashPart) >= 2 {
|
||||
shard1 := hashPart[:2]
|
||||
return filepath.Join("steam", shard1, hashPart)
|
||||
}
|
||||
return filepath.Join("steam", hashPart)
|
||||
}
|
||||
|
||||
// Optimal 2-level sharding for Steam hashes (typically 40 chars)
|
||||
shard1 := hashPart[:2] // First 2 chars
|
||||
shard2 := hashPart[2:4] // Next 2 chars
|
||||
return filepath.Join("steam", shard1, shard2, hashPart)
|
||||
}
|
||||
|
||||
// extractKeyFromPath reverses the sharding logic to get the original key from a sharded path
|
||||
func (d *DiskFS) extractKeyFromPath(path string) string {
|
||||
// Fast path: if no slashes, it's not a sharded path
|
||||
if !strings.Contains(path, "/") {
|
||||
return path
|
||||
}
|
||||
|
||||
parts := strings.SplitN(path, "/", 5)
|
||||
numParts := len(parts)
|
||||
|
||||
if numParts >= 4 && parts[0] == "steam" {
|
||||
lastThree := parts[numParts-3:]
|
||||
shard1 := lastThree[0]
|
||||
shard2 := lastThree[1]
|
||||
filename := lastThree[2]
|
||||
|
||||
// Verify sharding is correct
|
||||
if len(filename) >= 4 && filename[:2] == shard1 && filename[2:4] == shard2 {
|
||||
return "steam/" + filename
|
||||
}
|
||||
}
|
||||
|
||||
// Handle single-level sharding for short hashes: steam/shard1/filename
|
||||
if numParts >= 3 && parts[0] == "steam" {
|
||||
lastTwo := parts[numParts-2:]
|
||||
shard1 := lastTwo[0]
|
||||
filename := lastTwo[1]
|
||||
|
||||
if len(filename) >= 2 && filename[:2] == shard1 {
|
||||
return "steam/" + filename
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: return as-is for any unrecognized format
|
||||
return path
|
||||
}
|
||||
|
||||
// New creates a new DiskFS.
|
||||
func new(root string, capacity int64, skipinit bool) *DiskFS {
|
||||
if capacity <= 0 {
|
||||
panic("disk capacity must be greater than 0") // panic if the capacity is less than or equal to 0
|
||||
}
|
||||
|
||||
if root == "" {
|
||||
panic("disk root must not be empty") // panic if the root is empty
|
||||
}
|
||||
|
||||
fi, err := os.Stat(root)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
panic(err) // panic if the error is something other than not found
|
||||
}
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
panic("disk root must be a directory") // panic if the root is not a directory
|
||||
}
|
||||
|
||||
dfs := &DiskFS{
|
||||
root: root,
|
||||
info: make(map[string]*vfs.FileInfo),
|
||||
capacity: capacity,
|
||||
mu: sync.Mutex{},
|
||||
sg: sync.WaitGroup{},
|
||||
}
|
||||
|
||||
os.MkdirAll(dfs.root, 0755)
|
||||
|
||||
if !skipinit {
|
||||
dfs.init()
|
||||
}
|
||||
|
||||
return dfs
|
||||
}
|
||||
|
||||
func New(root string, capacity int64) *DiskFS {
|
||||
return new(root, capacity, false)
|
||||
}
|
||||
|
||||
func NewSkipInit(root string, capacity int64) *DiskFS {
|
||||
return new(root, capacity, true)
|
||||
if capacity <= 0 {
|
||||
panic("disk capacity must be greater than 0")
|
||||
}
|
||||
|
||||
// Create root directory if it doesn't exist
|
||||
os.MkdirAll(root, 0755)
|
||||
|
||||
// Initialize sharded locks
|
||||
keyLocks := make([]sync.Map, numLockShards)
|
||||
|
||||
d := &DiskFS{
|
||||
root: root,
|
||||
info: make(map[string]*vfs.FileInfo),
|
||||
capacity: capacity,
|
||||
size: 0,
|
||||
keyLocks: keyLocks,
|
||||
LRU: newLruList(),
|
||||
timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
|
||||
}
|
||||
|
||||
d.init()
|
||||
return d
|
||||
}
|
||||
|
||||
// init loads existing files from disk and migrates legacy depot files to sharded structure
|
||||
func (d *DiskFS) init() {
|
||||
// logger.Logger.Info().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Msg("init")
|
||||
|
||||
tstart := time.Now()
|
||||
|
||||
d.walk(d.root)
|
||||
d.sg.Wait()
|
||||
var depotFiles []string // Track depot files that need migration
|
||||
|
||||
err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
// Extract key from sharded path: remove root and convert sharding back
|
||||
relPath := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
|
||||
|
||||
// Extract the original key from the sharded path
|
||||
k := d.extractKeyFromPath(relPath)
|
||||
|
||||
fi := vfs.NewFileInfoFromOS(info, k)
|
||||
d.info[k] = fi
|
||||
d.LRU.Add(k, fi)
|
||||
// Initialize access time with file modification time
|
||||
fi.UpdateAccessBatched(d.timeUpdater)
|
||||
d.size += info.Size()
|
||||
|
||||
// Track depot files for potential migration
|
||||
if strings.HasPrefix(relPath, "depot/") {
|
||||
depotFiles = append(depotFiles, relPath)
|
||||
}
|
||||
|
||||
d.mu.Unlock()
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Logger.Error().Err(err).Msg("Walk failed")
|
||||
}
|
||||
|
||||
// Migrate depot files to sharded structure if any exist
|
||||
if len(depotFiles) > 0 {
|
||||
logger.Logger.Info().Int("count", len(depotFiles)).Msg("Found legacy depot files, starting migration")
|
||||
d.migrateDepotFiles(depotFiles)
|
||||
}
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("name", d.Name()).
|
||||
Str("root", d.root).
|
||||
Str("capacity", units.HumanSize(float64(d.capacity))).
|
||||
Str("size", units.HumanSize(float64(d.Size()))).
|
||||
Str("files", fmt.Sprint(len(d.info))).
|
||||
Str("duration", time.Since(tstart).String()).
|
||||
Msg("init")
|
||||
}
|
||||
|
||||
func (d *DiskFS) walk(path string) {
|
||||
d.sg.Add(1)
|
||||
go func() {
|
||||
defer d.sg.Done()
|
||||
filepath.Walk(path, func(npath string, info os.FileInfo, err error) error {
|
||||
if path == npath {
|
||||
return nil
|
||||
}
|
||||
// migrateDepotFiles moves legacy depot files to the sharded steam structure
|
||||
func (d *DiskFS) migrateDepotFiles(depotFiles []string) {
|
||||
migratedCount := 0
|
||||
errorCount := 0
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, relPath := range depotFiles {
|
||||
// Extract the steam key from the depot path
|
||||
steamKey := d.extractKeyFromPath(relPath)
|
||||
if !strings.HasPrefix(steamKey, "steam/") {
|
||||
// Skip if we can't extract a proper steam key
|
||||
errorCount++
|
||||
continue
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
d.walk(npath)
|
||||
return filepath.SkipDir
|
||||
}
|
||||
// Get the source and destination paths
|
||||
sourcePath := filepath.Join(d.root, relPath)
|
||||
shardedPath := d.shardPath(steamKey)
|
||||
destPath := filepath.Join(d.root, shardedPath)
|
||||
|
||||
d.mu.Lock()
|
||||
k := npath[len(d.root)+1:]
|
||||
d.info[k] = vfs.NewFileInfoFromOS(info, k)
|
||||
d.mu.Unlock()
|
||||
// Create destination directory
|
||||
destDir := filepath.Dir(destPath)
|
||||
if err := os.MkdirAll(destDir, 0755); err != nil {
|
||||
logger.Logger.Error().Err(err).Str("path", destDir).Msg("Failed to create migration destination directory")
|
||||
errorCount++
|
||||
continue
|
||||
}
|
||||
|
||||
// logger.Logger.Debug().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Str("path", npath).Msg("init")
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
// Move the file
|
||||
if err := os.Rename(sourcePath, destPath); err != nil {
|
||||
logger.Logger.Error().Err(err).Str("from", sourcePath).Str("to", destPath).Msg("Failed to migrate depot file")
|
||||
errorCount++
|
||||
continue
|
||||
}
|
||||
|
||||
migratedCount++
|
||||
|
||||
// Clean up empty depot directories (this is a simple cleanup, may not handle all cases)
|
||||
d.cleanupEmptyDepotDirs(filepath.Dir(sourcePath))
|
||||
}
|
||||
|
||||
logger.Logger.Info().
|
||||
Int("migrated", migratedCount).
|
||||
Int("errors", errorCount).
|
||||
Msg("Depot file migration completed")
|
||||
}
|
||||
|
||||
func (d *DiskFS) Capacity() int64 {
|
||||
return d.capacity
|
||||
// cleanupEmptyDepotDirs removes empty depot directories after migration
|
||||
func (d *DiskFS) cleanupEmptyDepotDirs(dirPath string) {
|
||||
for dirPath != d.root && strings.HasPrefix(dirPath, filepath.Join(d.root, "depot")) {
|
||||
entries, err := os.ReadDir(dirPath)
|
||||
if err != nil || len(entries) > 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Directory is empty, remove it
|
||||
if err := os.Remove(dirPath); err != nil {
|
||||
logger.Logger.Error().Err(err).Str("dir", dirPath).Msg("Failed to remove empty depot directory")
|
||||
break
|
||||
}
|
||||
|
||||
// Move up to parent directory
|
||||
dirPath = filepath.Dir(dirPath)
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of this VFS
|
||||
func (d *DiskFS) Name() string {
|
||||
return "DiskFS"
|
||||
}
|
||||
|
||||
// Size returns the current size
|
||||
func (d *DiskFS) Size() int64 {
|
||||
var size int64
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
for _, v := range d.info {
|
||||
size += v.Size()
|
||||
}
|
||||
return size
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.size
|
||||
}
|
||||
|
||||
func (d *DiskFS) Set(key string, src []byte) error {
|
||||
if d.capacity > 0 {
|
||||
if size := d.Size() + int64(len(src)); size > d.capacity {
|
||||
return vfserror.ErrDiskFull
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := d.Stat(key); err == nil {
|
||||
d.Delete(key)
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
os.MkdirAll(filepath.Join(d.root, filepath.Dir(key)), 0755)
|
||||
if err := os.WriteFile(filepath.Join(d.root, key), src, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fi, err := os.Stat(filepath.Join(d.root, key))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
d.info[key] = vfs.NewFileInfoFromOS(fi, key)
|
||||
|
||||
return nil
|
||||
// Capacity returns the maximum capacity
|
||||
func (d *DiskFS) Capacity() int64 {
|
||||
return d.capacity
|
||||
}
|
||||
|
||||
// Delete deletes the value of key.
|
||||
func (d *DiskFS) Delete(key string) error {
|
||||
_, err := d.Stat(key)
|
||||
if err != nil {
|
||||
return err
|
||||
// getShardIndex returns the shard index for a given key
|
||||
func getShardIndex(key string) int {
|
||||
// Use FNV-1a hash for good distribution
|
||||
var h uint32 = 2166136261 // FNV offset basis
|
||||
for i := 0; i < len(key); i++ {
|
||||
h ^= uint32(key[i])
|
||||
h *= 16777619 // FNV prime
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
delete(d.info, key)
|
||||
if err := os.Remove(filepath.Join(d.root, key)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return int(h % numLockShards)
|
||||
}
|
||||
|
||||
// Get gets the value of key and returns it.
|
||||
func (d *DiskFS) Get(key string) ([]byte, error) {
|
||||
_, err := d.Stat(key)
|
||||
// getKeyLock returns a lock for the given key using sharding
|
||||
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
|
||||
shardIndex := getShardIndex(key)
|
||||
shard := &d.keyLocks[shardIndex]
|
||||
|
||||
keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
|
||||
return keyLock.(*sync.RWMutex)
|
||||
}
|
||||
|
||||
// Create creates a new file
|
||||
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
// Sanitize key to prevent path traversal
|
||||
key = filepath.Clean(key)
|
||||
key = strings.ReplaceAll(key, "\\", "/")
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
|
||||
d.mu.Lock()
|
||||
// Check if file already exists and handle overwrite
|
||||
if fi, exists := d.info[key]; exists {
|
||||
d.size -= fi.Size
|
||||
d.LRU.Remove(key)
|
||||
delete(d.info, key)
|
||||
}
|
||||
|
||||
shardedPath := d.shardPath(key)
|
||||
path := filepath.Join(d.root, shardedPath)
|
||||
d.mu.Unlock()
|
||||
|
||||
path = strings.ReplaceAll(path, "\\", "/")
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fi := vfs.NewFileInfo(key, size)
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.info[key] = fi
|
||||
d.LRU.Add(key, fi)
|
||||
// Initialize access time with current time
|
||||
fi.UpdateAccessBatched(d.timeUpdater)
|
||||
d.size += size
|
||||
d.mu.Unlock()
|
||||
|
||||
data, err := os.ReadFile(filepath.Join(d.root, key))
|
||||
return &diskWriteCloser{
|
||||
file: file,
|
||||
disk: d,
|
||||
key: key,
|
||||
declaredSize: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// diskWriteCloser implements io.WriteCloser for disk files with size adjustment
|
||||
type diskWriteCloser struct {
|
||||
file *os.File
|
||||
disk *DiskFS
|
||||
key string
|
||||
declaredSize int64
|
||||
}
|
||||
|
||||
func (dwc *diskWriteCloser) Write(p []byte) (n int, err error) {
|
||||
return dwc.file.Write(p)
|
||||
}
|
||||
|
||||
func (dwc *diskWriteCloser) Close() error {
|
||||
// Get the actual file size
|
||||
stat, err := dwc.file.Stat()
|
||||
if err != nil {
|
||||
dwc.file.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
actualSize := stat.Size()
|
||||
|
||||
// Update the size in FileInfo if it differs from declared size
|
||||
dwc.disk.mu.Lock()
|
||||
if fi, exists := dwc.disk.info[dwc.key]; exists {
|
||||
sizeDiff := actualSize - fi.Size
|
||||
fi.Size = actualSize
|
||||
dwc.disk.size += sizeDiff
|
||||
}
|
||||
dwc.disk.mu.Unlock()
|
||||
|
||||
return dwc.file.Close()
|
||||
}
|
||||
|
||||
// Open opens a file for reading
|
||||
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
// Sanitize key to prevent path traversal
|
||||
key = filepath.Clean(key)
|
||||
key = strings.ReplaceAll(key, "\\", "/")
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.RLock()
|
||||
defer keyMu.RUnlock()
|
||||
|
||||
d.mu.Lock()
|
||||
fi, exists := d.info[key]
|
||||
if !exists {
|
||||
d.mu.Unlock()
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
fi.UpdateAccessBatched(d.timeUpdater)
|
||||
d.LRU.MoveToFront(key, d.timeUpdater)
|
||||
d.mu.Unlock()
|
||||
|
||||
shardedPath := d.shardPath(key)
|
||||
path := filepath.Join(d.root, shardedPath)
|
||||
path = strings.ReplaceAll(path, "\\", "/")
|
||||
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
// Use memory mapping for large files (>1MB) to improve performance
|
||||
const mmapThreshold = 1024 * 1024 // 1MB
|
||||
if fi.Size > mmapThreshold {
|
||||
// Close the regular file handle
|
||||
file.Close()
|
||||
|
||||
// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
|
||||
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
d.mu.Lock()
|
||||
fi, ok := d.info[key]
|
||||
d.mu.Unlock() // unlock before statting the file
|
||||
|
||||
if !ok {
|
||||
fii, err := os.Stat(filepath.Join(d.root, key))
|
||||
// Try memory mapping
|
||||
mmapFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, vfserror.ErrNotFound
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d.mu.Lock() // relock to update the info map
|
||||
defer d.mu.Unlock() // nothing else needs to unlock before returning
|
||||
mapped, err := mmap.Map(mmapFile, mmap.RDONLY, 0)
|
||||
if err != nil {
|
||||
mmapFile.Close()
|
||||
// Fallback to regular file reading
|
||||
return os.Open(path)
|
||||
}
|
||||
|
||||
d.info[key] = vfs.NewFileInfoFromOS(fii, key)
|
||||
fi = d.info[key]
|
||||
// fallthrough to return fi with shiny new info
|
||||
return &mmapReadCloser{
|
||||
data: mapped,
|
||||
file: mmapFile,
|
||||
offset: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return fi, nil
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (m *DiskFS) StatAll() []*vfs.FileInfo {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
// mmapReadCloser implements io.ReadCloser for memory-mapped files
|
||||
type mmapReadCloser struct {
|
||||
data mmap.MMap
|
||||
file *os.File
|
||||
offset int
|
||||
}
|
||||
|
||||
// hard copy the file info to prevent modification of the original file info or the other way around
|
||||
files := make([]*vfs.FileInfo, 0, len(m.info))
|
||||
for _, v := range m.info {
|
||||
fi := *v
|
||||
files = append(files, &fi)
|
||||
func (m *mmapReadCloser) Read(p []byte) (n int, err error) {
|
||||
if m.offset >= len(m.data) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
return files
|
||||
n = copy(p, m.data[m.offset:])
|
||||
m.offset += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (m *mmapReadCloser) Close() error {
|
||||
m.data.Unmap()
|
||||
return m.file.Close()
|
||||
}
|
||||
|
||||
// Delete removes a file
|
||||
func (d *DiskFS) Delete(key string) error {
|
||||
if key == "" {
|
||||
return vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
|
||||
d.mu.Lock()
|
||||
fi, exists := d.info[key]
|
||||
if !exists {
|
||||
d.mu.Unlock()
|
||||
return vfserror.ErrNotFound
|
||||
}
|
||||
d.size -= fi.Size
|
||||
d.LRU.Remove(key)
|
||||
delete(d.info, key)
|
||||
d.mu.Unlock()
|
||||
|
||||
shardedPath := d.shardPath(key)
|
||||
path := filepath.Join(d.root, shardedPath)
|
||||
path = strings.ReplaceAll(path, "\\", "/")
|
||||
|
||||
err := os.Remove(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat returns file information
|
||||
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := d.getKeyLock(key)
|
||||
keyMu.RLock()
|
||||
defer keyMu.RUnlock()
|
||||
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
if fi, ok := d.info[key]; ok {
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
// Check if file exists on disk but wasn't indexed (for migration)
|
||||
shardedPath := d.shardPath(key)
|
||||
path := filepath.Join(d.root, shardedPath)
|
||||
path = strings.ReplaceAll(path, "\\", "/")
|
||||
|
||||
if info, err := os.Stat(path); err == nil {
|
||||
// File exists in sharded location but not indexed, re-index it
|
||||
fi := vfs.NewFileInfoFromOS(info, key)
|
||||
// We can't modify the map here because we're in a read lock
|
||||
// This is a simplified version - in production you'd need to handle this properly
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
// EvictLRU evicts the least recently used files to free up space
|
||||
func (d *DiskFS) EvictLRU(bytesNeeded uint) uint {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
|
||||
// Evict from LRU list until we free enough space
|
||||
for d.size > d.capacity-int64(bytesNeeded) && d.LRU.Len() > 0 {
|
||||
// Get the least recently used item
|
||||
elem := d.LRU.list.Back()
|
||||
if elem == nil {
|
||||
break
|
||||
}
|
||||
|
||||
fi := elem.Value.(*vfs.FileInfo)
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
d.LRU.Remove(key)
|
||||
|
||||
// Remove from map
|
||||
delete(d.info, key)
|
||||
|
||||
// Remove file from disk
|
||||
shardedPath := d.shardPath(key)
|
||||
path := filepath.Join(d.root, shardedPath)
|
||||
path = strings.ReplaceAll(path, "\\", "/")
|
||||
|
||||
if err := os.Remove(path); err != nil {
|
||||
// Log error but continue
|
||||
continue
|
||||
}
|
||||
|
||||
// Update size
|
||||
d.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
d.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
|
||||
func (d *DiskFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
var candidates []*vfs.FileInfo
|
||||
|
||||
// Collect all files
|
||||
for _, fi := range d.info {
|
||||
candidates = append(candidates, fi)
|
||||
}
|
||||
|
||||
// Sort by size
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
if ascending {
|
||||
return candidates[i].Size < candidates[j].Size
|
||||
}
|
||||
return candidates[i].Size > candidates[j].Size
|
||||
})
|
||||
|
||||
// Evict files until we free enough space
|
||||
for _, fi := range candidates {
|
||||
if d.size <= d.capacity-int64(bytesNeeded) {
|
||||
break
|
||||
}
|
||||
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
d.LRU.Remove(key)
|
||||
|
||||
// Remove from map
|
||||
delete(d.info, key)
|
||||
|
||||
// Remove file from disk
|
||||
shardedPath := d.shardPath(key)
|
||||
path := filepath.Join(d.root, shardedPath)
|
||||
path = strings.ReplaceAll(path, "\\", "/")
|
||||
|
||||
if err := os.Remove(path); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Update size
|
||||
d.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
d.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
// EvictFIFO evicts files using FIFO (oldest creation time first)
|
||||
func (d *DiskFS) EvictFIFO(bytesNeeded uint) uint {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
var candidates []*vfs.FileInfo
|
||||
|
||||
// Collect all files
|
||||
for _, fi := range d.info {
|
||||
candidates = append(candidates, fi)
|
||||
}
|
||||
|
||||
// Sort by creation time (oldest first)
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
return candidates[i].CTime.Before(candidates[j].CTime)
|
||||
})
|
||||
|
||||
// Evict oldest files until we free enough space
|
||||
for _, fi := range candidates {
|
||||
if d.size <= d.capacity-int64(bytesNeeded) {
|
||||
break
|
||||
}
|
||||
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
d.LRU.Remove(key)
|
||||
|
||||
// Remove from map
|
||||
delete(d.info, key)
|
||||
|
||||
// Remove file from disk
|
||||
shardedPath := d.shardPath(key)
|
||||
path := filepath.Join(d.root, shardedPath)
|
||||
path = strings.ReplaceAll(path, "\\", "/")
|
||||
|
||||
if err := os.Remove(path); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Update size
|
||||
d.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
d.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
package disk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAllDisk(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
m := NewSkipInit(t.TempDir(), 1024)
|
||||
if err := m.Set("key", []byte("value")); err != nil {
|
||||
t.Errorf("Set failed: %v", err)
|
||||
}
|
||||
|
||||
if err := m.Set("key", []byte("value1")); err != nil {
|
||||
t.Errorf("Set failed: %v", err)
|
||||
}
|
||||
|
||||
if d, err := m.Get("key"); err != nil {
|
||||
t.Errorf("Get failed: %v", err)
|
||||
} else if string(d) != "value1" {
|
||||
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
||||
}
|
||||
|
||||
if err := m.Delete("key"); err != nil {
|
||||
t.Errorf("Delete failed: %v", err)
|
||||
}
|
||||
|
||||
if _, err := m.Get("key"); err == nil {
|
||||
t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
|
||||
}
|
||||
|
||||
if err := m.Delete("key"); err == nil {
|
||||
t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
|
||||
}
|
||||
|
||||
if _, err := m.Stat("key"); err == nil {
|
||||
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
|
||||
}
|
||||
|
||||
if err := m.Set("key", []byte("value")); err != nil {
|
||||
t.Errorf("Set failed: %v", err)
|
||||
}
|
||||
|
||||
if _, err := m.Stat("key"); err != nil {
|
||||
t.Errorf("Stat failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLimited(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
m := NewSkipInit(t.TempDir(), 10)
|
||||
for i := 0; i < 11; i++ {
|
||||
if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 {
|
||||
t.Errorf("Set failed: %v", err)
|
||||
} else if i == 10 && err == nil {
|
||||
t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInit(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
td := t.TempDir()
|
||||
|
||||
path := filepath.Join(td, "test", "key")
|
||||
|
||||
os.MkdirAll(filepath.Dir(path), 0755)
|
||||
|
||||
os.WriteFile(path, []byte("value"), 0644)
|
||||
|
||||
m := New(td, 10)
|
||||
if _, err := m.Get("test/key"); err != nil {
|
||||
t.Errorf("Get failed: %v", err)
|
||||
}
|
||||
|
||||
s, _ := m.Stat("test/key")
|
||||
if s.Name() != "test/key" {
|
||||
t.Errorf("Stat failed: got %s, want %s", s.Name(), "key")
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
type FileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
MTime time.Time
|
||||
ATime time.Time
|
||||
}
|
||||
|
||||
func NewFileInfo(name string, size int64, modTime time.Time) *FileInfo {
|
||||
return &FileInfo{
|
||||
name: name,
|
||||
size: size,
|
||||
MTime: modTime,
|
||||
ATime: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
|
||||
return &FileInfo{
|
||||
name: key,
|
||||
size: f.Size(),
|
||||
MTime: f.ModTime(),
|
||||
ATime: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func (f FileInfo) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f FileInfo) Size() int64 {
|
||||
return f.size
|
||||
}
|
||||
|
||||
func (f FileInfo) ModTime() time.Time {
|
||||
return f.MTime
|
||||
}
|
||||
|
||||
func (f FileInfo) AccessTime() time.Time {
|
||||
return f.ATime
|
||||
}
|
||||
288
vfs/gc/gc.go
288
vfs/gc/gc.go
@@ -1,86 +1,240 @@
|
||||
// vfs/gc/gc.go
|
||||
package gc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"sync"
|
||||
"time"
|
||||
"s1d3sw1ped/SteamCache2/vfs/disk"
|
||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
||||
)
|
||||
|
||||
// Ensure GCFS implements VFS.
|
||||
var _ vfs.VFS = (*GCFS)(nil)
|
||||
// GCAlgorithm represents different garbage collection strategies
|
||||
type GCAlgorithm string
|
||||
|
||||
// GCFS is a virtual file system that calls a GC handler when the disk is full. The GC handler is responsible for freeing up space on the disk. The GCFS is a wrapper around another VFS.
|
||||
const (
|
||||
LRU GCAlgorithm = "lru"
|
||||
LFU GCAlgorithm = "lfu"
|
||||
FIFO GCAlgorithm = "fifo"
|
||||
Largest GCAlgorithm = "largest"
|
||||
Smallest GCAlgorithm = "smallest"
|
||||
Hybrid GCAlgorithm = "hybrid"
|
||||
)
|
||||
|
||||
// GCFS wraps a VFS with garbage collection capabilities
|
||||
type GCFS struct {
|
||||
vfs.VFS
|
||||
multiplier int
|
||||
|
||||
// protected by mu
|
||||
gcHanderFunc GCHandlerFunc
|
||||
lifetimeBytes, lifetimeFiles uint
|
||||
reclaimedBytes, deletedFiles uint
|
||||
gcTime time.Duration
|
||||
mu sync.Mutex
|
||||
vfs vfs.VFS
|
||||
algorithm GCAlgorithm
|
||||
gcFunc func(vfs.VFS, uint) uint
|
||||
}
|
||||
|
||||
// GCHandlerFunc is a function that is called when the disk is full and the GCFS needs to free up space. It is passed the VFS and the size of the file that needs to be written. Its up to the implementation to free up space. How much space is freed is also up to the implementation.
|
||||
type GCHandlerFunc func(vfs vfs.VFS, size uint) (reclaimedBytes uint, deletedFiles uint)
|
||||
|
||||
func New(vfs vfs.VFS, multiplier int, gcHandlerFunc GCHandlerFunc) *GCFS {
|
||||
if multiplier <= 0 {
|
||||
multiplier = 1 // if the multiplier is less than or equal to 0 set it to 1 will be slow but the user can set it to a higher value if they want
|
||||
// New creates a new GCFS with the specified algorithm
|
||||
func New(wrappedVFS vfs.VFS, algorithm GCAlgorithm) *GCFS {
|
||||
gcfs := &GCFS{
|
||||
vfs: wrappedVFS,
|
||||
algorithm: algorithm,
|
||||
}
|
||||
return &GCFS{
|
||||
VFS: vfs,
|
||||
multiplier: multiplier,
|
||||
gcHanderFunc: gcHandlerFunc,
|
||||
|
||||
switch algorithm {
|
||||
case LRU:
|
||||
gcfs.gcFunc = gcLRU
|
||||
case LFU:
|
||||
gcfs.gcFunc = gcLFU
|
||||
case FIFO:
|
||||
gcfs.gcFunc = gcFIFO
|
||||
case Largest:
|
||||
gcfs.gcFunc = gcLargest
|
||||
case Smallest:
|
||||
gcfs.gcFunc = gcSmallest
|
||||
case Hybrid:
|
||||
gcfs.gcFunc = gcHybrid
|
||||
default:
|
||||
// Default to LRU
|
||||
gcfs.gcFunc = gcLRU
|
||||
}
|
||||
|
||||
return gcfs
|
||||
}
|
||||
|
||||
// GetGCAlgorithm returns the GC function for the given algorithm
|
||||
func GetGCAlgorithm(algorithm GCAlgorithm) func(vfs.VFS, uint) uint {
|
||||
switch algorithm {
|
||||
case LRU:
|
||||
return gcLRU
|
||||
case LFU:
|
||||
return gcLFU
|
||||
case FIFO:
|
||||
return gcFIFO
|
||||
case Largest:
|
||||
return gcLargest
|
||||
case Smallest:
|
||||
return gcSmallest
|
||||
case Hybrid:
|
||||
return gcHybrid
|
||||
default:
|
||||
return gcLRU
|
||||
}
|
||||
}
|
||||
|
||||
// Stats returns the lifetime bytes, lifetime files, reclaimed bytes and deleted files.
|
||||
// The lifetime bytes and lifetime files are the total bytes and files that have been freed up by the GC handler.
|
||||
// The reclaimed bytes and deleted files are the bytes and files that have been freed up by the GC handler since last call to Stats.
|
||||
// The gc time is the total time spent in the GC handler since last call to Stats.
|
||||
// The reclaimed bytes and deleted files and gc time are reset to 0 after the call to Stats.
|
||||
func (g *GCFS) Stats() (lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles uint, gcTime time.Duration) {
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
|
||||
g.lifetimeBytes += g.reclaimedBytes
|
||||
g.lifetimeFiles += g.deletedFiles
|
||||
|
||||
lifetimeBytes = g.lifetimeBytes
|
||||
lifetimeFiles = g.lifetimeFiles
|
||||
reclaimedBytes = g.reclaimedBytes
|
||||
deletedFiles = g.deletedFiles
|
||||
gcTime = g.gcTime
|
||||
|
||||
g.reclaimedBytes = 0
|
||||
g.deletedFiles = 0
|
||||
g.gcTime = time.Duration(0)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Set overrides the Set method of the VFS interface. It tries to set the key and src, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error.
|
||||
func (g *GCFS) Set(key string, src []byte) error {
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
err := g.VFS.Set(key, src) // try to set the key and src
|
||||
|
||||
if err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
|
||||
tstart := time.Now()
|
||||
reclaimedBytes, deletedFiles := g.gcHanderFunc(g.VFS, uint(len(src)*g.multiplier)) // call the GC handler
|
||||
g.gcTime += time.Since(tstart)
|
||||
g.reclaimedBytes += reclaimedBytes
|
||||
g.deletedFiles += deletedFiles
|
||||
err = g.VFS.Set(key, src) // try again after GC if it still fails return the error
|
||||
// Create wraps the underlying Create method
|
||||
func (gc *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
// Check if we need to GC before creating
|
||||
if gc.vfs.Size()+size > gc.vfs.Capacity() {
|
||||
needed := uint((gc.vfs.Size() + size) - gc.vfs.Capacity())
|
||||
gc.gcFunc(gc.vfs, needed)
|
||||
}
|
||||
|
||||
return err
|
||||
return gc.vfs.Create(key, size)
|
||||
}
|
||||
|
||||
func (g *GCFS) Name() string {
|
||||
return fmt.Sprintf("GCFS(%s)", g.VFS.Name()) // wrap the name of the VFS with GCFS so we can see that its a GCFS
|
||||
// Open wraps the underlying Open method
|
||||
func (gc *GCFS) Open(key string) (io.ReadCloser, error) {
|
||||
return gc.vfs.Open(key)
|
||||
}
|
||||
|
||||
// Delete wraps the underlying Delete method
|
||||
func (gc *GCFS) Delete(key string) error {
|
||||
return gc.vfs.Delete(key)
|
||||
}
|
||||
|
||||
// Stat wraps the underlying Stat method
|
||||
func (gc *GCFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
return gc.vfs.Stat(key)
|
||||
}
|
||||
|
||||
// Name wraps the underlying Name method
|
||||
func (gc *GCFS) Name() string {
|
||||
return gc.vfs.Name() + "(GC:" + string(gc.algorithm) + ")"
|
||||
}
|
||||
|
||||
// Size wraps the underlying Size method
|
||||
func (gc *GCFS) Size() int64 {
|
||||
return gc.vfs.Size()
|
||||
}
|
||||
|
||||
// Capacity wraps the underlying Capacity method
|
||||
func (gc *GCFS) Capacity() int64 {
|
||||
return gc.vfs.Capacity()
|
||||
}
|
||||
|
||||
// EvictionStrategy defines an interface for cache eviction
|
||||
type EvictionStrategy interface {
|
||||
Evict(vfs vfs.VFS, bytesNeeded uint) uint
|
||||
}
|
||||
|
||||
// GC functions
|
||||
|
||||
// gcLRU implements Least Recently Used eviction
|
||||
func gcLRU(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictLRU(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// gcLFU implements Least Frequently Used eviction
|
||||
func gcLFU(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictLFU(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// gcFIFO implements First In First Out eviction
|
||||
func gcFIFO(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictFIFO(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// gcLargest implements largest file first eviction
|
||||
func gcLargest(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictLargest(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// gcSmallest implements smallest file first eviction
|
||||
func gcSmallest(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictSmallest(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// gcHybrid implements a hybrid eviction strategy
|
||||
func gcHybrid(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictHybrid(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// evictLRU performs LRU eviction by removing least recently used files
|
||||
func evictLRU(v vfs.VFS, bytesNeeded uint) uint {
|
||||
// Try to use specific eviction methods if available
|
||||
switch fs := v.(type) {
|
||||
case *memory.MemoryFS:
|
||||
return fs.EvictLRU(bytesNeeded)
|
||||
case *disk.DiskFS:
|
||||
return fs.EvictLRU(bytesNeeded)
|
||||
default:
|
||||
// No fallback - return 0 (no eviction performed)
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// evictLFU performs LFU (Least Frequently Used) eviction
|
||||
func evictLFU(v vfs.VFS, bytesNeeded uint) uint {
|
||||
// For now, fall back to size-based eviction
|
||||
// TODO: Implement proper LFU tracking
|
||||
return evictBySize(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// evictFIFO performs FIFO (First In First Out) eviction
|
||||
func evictFIFO(v vfs.VFS, bytesNeeded uint) uint {
|
||||
switch fs := v.(type) {
|
||||
case *memory.MemoryFS:
|
||||
return fs.EvictFIFO(bytesNeeded)
|
||||
case *disk.DiskFS:
|
||||
return fs.EvictFIFO(bytesNeeded)
|
||||
default:
|
||||
// No fallback - return 0 (no eviction performed)
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// evictLargest evicts largest files first
|
||||
func evictLargest(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictBySizeDesc(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// evictSmallest evicts smallest files first
|
||||
func evictSmallest(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictBySizeAsc(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// evictBySize evicts files based on size (smallest first)
|
||||
func evictBySize(v vfs.VFS, bytesNeeded uint) uint {
|
||||
return evictBySizeAsc(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// evictBySizeAsc evicts smallest files first
|
||||
func evictBySizeAsc(v vfs.VFS, bytesNeeded uint) uint {
|
||||
switch fs := v.(type) {
|
||||
case *memory.MemoryFS:
|
||||
return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
|
||||
case *disk.DiskFS:
|
||||
return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
|
||||
default:
|
||||
// No fallback - return 0 (no eviction performed)
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// evictBySizeDesc evicts largest files first
|
||||
func evictBySizeDesc(v vfs.VFS, bytesNeeded uint) uint {
|
||||
switch fs := v.(type) {
|
||||
case *memory.MemoryFS:
|
||||
return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
|
||||
case *disk.DiskFS:
|
||||
return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
|
||||
default:
|
||||
// No fallback - return 0 (no eviction performed)
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// evictHybrid implements a hybrid eviction strategy
|
||||
func evictHybrid(v vfs.VFS, bytesNeeded uint) uint {
|
||||
// Use LRU as primary strategy, but consider size as tiebreaker
|
||||
return evictLRU(v, bytesNeeded)
|
||||
}
|
||||
|
||||
// AdaptivePromotionDeciderFunc is a placeholder for the adaptive promotion logic
|
||||
var AdaptivePromotionDeciderFunc = func() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,105 +0,0 @@
|
||||
package gc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/exp/rand"
|
||||
)
|
||||
|
||||
func TestGCSmallRandom(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
m := memory.New(1024 * 1024 * 16)
|
||||
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
|
||||
deletions := 0
|
||||
var reclaimed uint
|
||||
|
||||
t.Logf("GC starting to reclaim %d bytes", size)
|
||||
|
||||
stats := vfs.StatAll()
|
||||
sort.Slice(stats, func(i, j int) bool {
|
||||
// Sort by access time so we can remove the oldest files first.
|
||||
return stats[i].AccessTime().Before(stats[j].AccessTime())
|
||||
})
|
||||
|
||||
// Delete the oldest files until we've reclaimed enough space.
|
||||
for _, s := range stats {
|
||||
sz := uint(s.Size()) // Get the size of the file
|
||||
err := vfs.Delete(s.Name())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
reclaimed += sz // Track how much space we've reclaimed
|
||||
deletions++ // Track how many files we've deleted
|
||||
|
||||
// t.Logf("GC deleting %s, %v", s.Name(), s.AccessTime().Format(time.RFC3339Nano))
|
||||
|
||||
if reclaimed >= size { // We've reclaimed enough space
|
||||
break
|
||||
}
|
||||
}
|
||||
return uint(reclaimed), uint(deletions)
|
||||
})
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024*1, 1024*4)); err != nil {
|
||||
t.Errorf("Set failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if gc.Size() > 1024*1024*16 {
|
||||
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func genRandomData(min int, max int) []byte {
|
||||
data := make([]byte, rand.Intn(max-min)+min)
|
||||
rand.Read(data)
|
||||
return data
|
||||
}
|
||||
|
||||
func TestGCLargeRandom(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
m := memory.New(1024 * 1024 * 16) // 16MB
|
||||
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
|
||||
deletions := 0
|
||||
var reclaimed uint
|
||||
|
||||
t.Logf("GC starting to reclaim %d bytes", size)
|
||||
|
||||
stats := vfs.StatAll()
|
||||
sort.Slice(stats, func(i, j int) bool {
|
||||
// Sort by access time so we can remove the oldest files first.
|
||||
return stats[i].AccessTime().Before(stats[j].AccessTime())
|
||||
})
|
||||
|
||||
// Delete the oldest files until we've reclaimed enough space.
|
||||
for _, s := range stats {
|
||||
sz := uint(s.Size()) // Get the size of the file
|
||||
vfs.Delete(s.Name())
|
||||
reclaimed += sz // Track how much space we've reclaimed
|
||||
deletions++ // Track how many files we've deleted
|
||||
|
||||
if reclaimed >= size { // We've reclaimed enough space
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return uint(reclaimed), uint(deletions)
|
||||
})
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024, 1024*1024)); err != nil {
|
||||
t.Errorf("Set failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if gc.Size() > 1024*1024*16 {
|
||||
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,14 @@
|
||||
// vfs/memory/memory.go
|
||||
package memory
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/list"
|
||||
"io"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -10,128 +16,428 @@ import (
|
||||
// Ensure MemoryFS implements VFS.
|
||||
var _ vfs.VFS = (*MemoryFS)(nil)
|
||||
|
||||
// file represents a file in memory.
|
||||
type file struct {
|
||||
fileinfo *vfs.FileInfo
|
||||
data []byte
|
||||
}
|
||||
|
||||
// MemoryFS is a virtual file system that stores files in memory.
|
||||
// MemoryFS is an in-memory virtual file system
|
||||
type MemoryFS struct {
|
||||
files map[string]*file
|
||||
capacity int64
|
||||
mu sync.Mutex
|
||||
data map[string]*bytes.Buffer
|
||||
info map[string]*vfs.FileInfo
|
||||
capacity int64
|
||||
size int64
|
||||
mu sync.RWMutex
|
||||
keyLocks []sync.Map // Sharded lock pools for better concurrency
|
||||
LRU *lruList
|
||||
timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
|
||||
}
|
||||
|
||||
// New creates a new MemoryFS.
|
||||
// numLockShards is the number of shards in the per-key lock pool; more
// shards spread concurrent key operations across more sync.Maps, reducing
// lock contention.
const numLockShards = 32
|
||||
|
||||
// lruList tracks eviction order for LRU eviction: the front of the list is
// the most recently used entry, the back the least recently used.
type lruList struct {
	list *list.List // FileInfo values in recency order
	elem map[string]*list.Element // key -> list element, for O(1) lookup
}

// newLruList returns an empty lruList ready for use.
func newLruList() *lruList {
	return &lruList{
		list: list.New(),
		elem: make(map[string]*list.Element),
	}
}
|
||||
|
||||
func (l *lruList) Add(key string, fi *vfs.FileInfo) {
|
||||
elem := l.list.PushFront(fi)
|
||||
l.elem[key] = elem
|
||||
}
|
||||
|
||||
func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
|
||||
if elem, exists := l.elem[key]; exists {
|
||||
l.list.MoveToFront(elem)
|
||||
// Update the FileInfo in the element with new access time
|
||||
if fi := elem.Value.(*vfs.FileInfo); fi != nil {
|
||||
fi.UpdateAccessBatched(timeUpdater)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) Remove(key string) *vfs.FileInfo {
|
||||
if elem, exists := l.elem[key]; exists {
|
||||
delete(l.elem, key)
|
||||
if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
|
||||
return fi
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len returns the number of entries currently tracked by the list.
func (l *lruList) Len() int {
	return l.list.Len()
}
|
||||
|
||||
// New creates a new MemoryFS
|
||||
func New(capacity int64) *MemoryFS {
|
||||
if capacity <= 0 {
|
||||
panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
|
||||
panic("memory capacity must be greater than 0")
|
||||
}
|
||||
|
||||
// Initialize sharded locks
|
||||
keyLocks := make([]sync.Map, numLockShards)
|
||||
|
||||
return &MemoryFS{
|
||||
files: make(map[string]*file),
|
||||
capacity: capacity,
|
||||
mu: sync.Mutex{},
|
||||
data: make(map[string]*bytes.Buffer),
|
||||
info: make(map[string]*vfs.FileInfo),
|
||||
capacity: capacity,
|
||||
size: 0,
|
||||
keyLocks: keyLocks,
|
||||
LRU: newLruList(),
|
||||
timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
|
||||
}
|
||||
}
|
||||
|
||||
// Capacity returns the maximum total size of the file system in bytes.
func (m *MemoryFS) Capacity() int64 {
	return m.capacity
}
|
||||
|
||||
// Name returns the human-readable name of this VFS implementation.
func (m *MemoryFS) Name() string {
	return "MemoryFS"
}
|
||||
|
||||
// Size returns the current size
|
||||
func (m *MemoryFS) Size() int64 {
|
||||
var size int64
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
for _, v := range m.files {
|
||||
size += int64(len(v.data))
|
||||
}
|
||||
|
||||
return size
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.size
|
||||
}
|
||||
|
||||
func (m *MemoryFS) Set(key string, src []byte) error {
|
||||
if m.capacity > 0 {
|
||||
if size := m.Size() + int64(len(src)); size > m.capacity {
|
||||
return vfserror.ErrDiskFull
|
||||
}
|
||||
// Capacity returns the maximum capacity
|
||||
func (m *MemoryFS) Capacity() int64 {
|
||||
return m.capacity
|
||||
}
|
||||
|
||||
// getShardIndex returns the shard index for a given key
|
||||
func getShardIndex(key string) int {
|
||||
// Use FNV-1a hash for good distribution
|
||||
var h uint32 = 2166136261 // FNV offset basis
|
||||
for i := 0; i < len(key); i++ {
|
||||
h ^= uint32(key[i])
|
||||
h *= 16777619 // FNV prime
|
||||
}
|
||||
return int(h % numLockShards)
|
||||
}
|
||||
|
||||
// getKeyLock returns a lock for the given key using sharding
|
||||
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
|
||||
shardIndex := getShardIndex(key)
|
||||
shard := &m.keyLocks[shardIndex]
|
||||
|
||||
keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
|
||||
return keyLock.(*sync.RWMutex)
|
||||
}
|
||||
|
||||
// Create creates a new file
|
||||
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
// Sanitize key to prevent path traversal
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := m.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.files[key] = &file{
|
||||
fileinfo: vfs.NewFileInfo(
|
||||
key,
|
||||
int64(len(src)),
|
||||
time.Now(),
|
||||
),
|
||||
data: make([]byte, len(src)),
|
||||
// Check if file already exists and handle overwrite
|
||||
if fi, exists := m.info[key]; exists {
|
||||
m.size -= fi.Size
|
||||
m.LRU.Remove(key)
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
}
|
||||
copy(m.files[key].data, src)
|
||||
|
||||
buffer := &bytes.Buffer{}
|
||||
m.data[key] = buffer
|
||||
fi := vfs.NewFileInfo(key, size)
|
||||
m.info[key] = fi
|
||||
m.LRU.Add(key, fi)
|
||||
// Initialize access time with current time
|
||||
fi.UpdateAccessBatched(m.timeUpdater)
|
||||
m.size += size
|
||||
m.mu.Unlock()
|
||||
|
||||
return &memoryWriteCloser{
|
||||
buffer: buffer,
|
||||
memory: m,
|
||||
key: key,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// memoryWriteCloser implements io.WriteCloser for files being written into
// a MemoryFS; Close reconciles the declared size with the bytes written.
type memoryWriteCloser struct {
	buffer *bytes.Buffer // destination buffer registered in memory.data
	memory *MemoryFS // owning file system, for size reconciliation on Close
	key string // cache key this writer belongs to
}
|
||||
|
||||
// Write appends p to the in-memory buffer for this file.
func (mwc *memoryWriteCloser) Write(p []byte) (n int, err error) {
	return mwc.buffer.Write(p)
}
|
||||
|
||||
func (mwc *memoryWriteCloser) Close() error {
|
||||
// Update the actual size in FileInfo
|
||||
mwc.memory.mu.Lock()
|
||||
if fi, exists := mwc.memory.info[mwc.key]; exists {
|
||||
actualSize := int64(mwc.buffer.Len())
|
||||
sizeDiff := actualSize - fi.Size
|
||||
fi.Size = actualSize
|
||||
mwc.memory.size += sizeDiff
|
||||
}
|
||||
mwc.memory.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MemoryFS) Delete(key string) error {
|
||||
_, err := m.Stat(key)
|
||||
if err != nil {
|
||||
return err
|
||||
// Open opens a file for reading
|
||||
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
delete(m.files, key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MemoryFS) Get(key string) ([]byte, error) {
|
||||
_, err := m.Stat(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := m.getKeyLock(key)
|
||||
keyMu.RLock()
|
||||
defer keyMu.RUnlock()
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
fi, exists := m.info[key]
|
||||
if !exists {
|
||||
m.mu.Unlock()
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
fi.UpdateAccessBatched(m.timeUpdater)
|
||||
m.LRU.MoveToFront(key, m.timeUpdater)
|
||||
|
||||
m.files[key].fileinfo.ATime = time.Now()
|
||||
dst := make([]byte, len(m.files[key].data))
|
||||
copy(dst, m.files[key].data)
|
||||
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
f, ok := m.files[key]
|
||||
if !ok {
|
||||
buffer, exists := m.data[key]
|
||||
if !exists {
|
||||
m.mu.Unlock()
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
return f.fileinfo, nil
|
||||
// Create a copy of the buffer for reading
|
||||
data := make([]byte, buffer.Len())
|
||||
copy(data, buffer.Bytes())
|
||||
m.mu.Unlock()
|
||||
|
||||
return &memoryReadCloser{
|
||||
reader: bytes.NewReader(data),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MemoryFS) StatAll() []*vfs.FileInfo {
|
||||
// memoryReadCloser implements io.ReadCloser over a private copy of a
// file's contents, so readers are isolated from later writes.
type memoryReadCloser struct {
	reader *bytes.Reader // reads from a snapshot of the file data
}
|
||||
|
||||
// Read reads from the snapshot, returning io.EOF when exhausted.
func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) {
	return mrc.reader.Read(p)
}
|
||||
|
||||
// Close is a no-op: the snapshot holds no resources beyond memory.
func (mrc *memoryReadCloser) Close() error {
	return nil
}
|
||||
|
||||
// Delete removes a file
|
||||
func (m *MemoryFS) Delete(key string) error {
|
||||
if key == "" {
|
||||
return vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
if strings.Contains(key, "..") {
|
||||
return vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := m.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
|
||||
m.mu.Lock()
|
||||
fi, exists := m.info[key]
|
||||
if !exists {
|
||||
m.mu.Unlock()
|
||||
return vfserror.ErrNotFound
|
||||
}
|
||||
m.size -= fi.Size
|
||||
m.LRU.Remove(key)
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
m.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat returns file information
|
||||
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := m.getKeyLock(key)
|
||||
keyMu.RLock()
|
||||
defer keyMu.RUnlock()
|
||||
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
if fi, ok := m.info[key]; ok {
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
// EvictLRU evicts the least recently used files to free up space
|
||||
func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// hard copy the file info to prevent modification of the original file info or the other way around
|
||||
files := make([]*vfs.FileInfo, 0, len(m.files))
|
||||
for _, v := range m.files {
|
||||
fi := *v.fileinfo
|
||||
files = append(files, &fi)
|
||||
var evicted uint
|
||||
|
||||
// Evict from LRU list until we free enough space
|
||||
for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
|
||||
// Get the least recently used item
|
||||
elem := m.LRU.list.Back()
|
||||
if elem == nil {
|
||||
break
|
||||
}
|
||||
|
||||
fi := elem.Value.(*vfs.FileInfo)
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
m.LRU.Remove(key)
|
||||
|
||||
// Remove from maps
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
|
||||
// Update size
|
||||
m.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
m.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return files
|
||||
return evicted
|
||||
}
|
||||
|
||||
// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
|
||||
func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
var candidates []*vfs.FileInfo
|
||||
|
||||
// Collect all files
|
||||
for _, fi := range m.info {
|
||||
candidates = append(candidates, fi)
|
||||
}
|
||||
|
||||
// Sort by size
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
if ascending {
|
||||
return candidates[i].Size < candidates[j].Size
|
||||
}
|
||||
return candidates[i].Size > candidates[j].Size
|
||||
})
|
||||
|
||||
// Evict files until we free enough space
|
||||
for _, fi := range candidates {
|
||||
if m.size <= m.capacity-int64(bytesNeeded) {
|
||||
break
|
||||
}
|
||||
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
m.LRU.Remove(key)
|
||||
|
||||
// Remove from maps
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
|
||||
// Update size
|
||||
m.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
m.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
// EvictFIFO evicts files using FIFO (oldest creation time first)
|
||||
func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
var evicted uint
|
||||
var candidates []*vfs.FileInfo
|
||||
|
||||
// Collect all files
|
||||
for _, fi := range m.info {
|
||||
candidates = append(candidates, fi)
|
||||
}
|
||||
|
||||
// Sort by creation time (oldest first)
|
||||
sort.Slice(candidates, func(i, j int) bool {
|
||||
return candidates[i].CTime.Before(candidates[j].CTime)
|
||||
})
|
||||
|
||||
// Evict oldest files until we free enough space
|
||||
for _, fi := range candidates {
|
||||
if m.size <= m.capacity-int64(bytesNeeded) {
|
||||
break
|
||||
}
|
||||
|
||||
key := fi.Key
|
||||
|
||||
// Remove from LRU
|
||||
m.LRU.Remove(key)
|
||||
|
||||
// Remove from maps
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
|
||||
// Update size
|
||||
m.size -= fi.Size
|
||||
evicted += uint(fi.Size)
|
||||
|
||||
// Clean up key lock
|
||||
shardIndex := getShardIndex(key)
|
||||
m.keyLocks[shardIndex].Delete(key)
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestAllMemory exercises the basic Set/Get/Delete/Stat round trip on a
// small MemoryFS, including overwrite semantics and not-found errors.
func TestAllMemory(t *testing.T) {
	t.Parallel()

	m := New(1024)
	if err := m.Set("key", []byte("value")); err != nil {
		t.Errorf("Set failed: %v", err)
	}

	// Setting the same key again must overwrite the previous value.
	if err := m.Set("key", []byte("value1")); err != nil {
		t.Errorf("Set failed: %v", err)
	}

	if d, err := m.Get("key"); err != nil {
		t.Errorf("Get failed: %v", err)
	} else if string(d) != "value1" {
		t.Errorf("Get failed: got %s, want %s", d, "value1")
	}

	if err := m.Delete("key"); err != nil {
		t.Errorf("Delete failed: %v", err)
	}

	// After deletion, Get/Delete/Stat must all report not-found.
	if _, err := m.Get("key"); err == nil {
		t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
	}

	if err := m.Delete("key"); err == nil {
		t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
	}

	if _, err := m.Stat("key"); err == nil {
		t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
	}

	if err := m.Set("key", []byte("value")); err != nil {
		t.Errorf("Set failed: %v", err)
	}

	if _, err := m.Stat("key"); err != nil {
		t.Errorf("Stat failed: %v", err)
	}
}
|
||||
|
||||
// TestLimited verifies that a MemoryFS with a 10-byte capacity accepts
// ten 1-byte entries and rejects the eleventh with a disk-full error.
func TestLimited(t *testing.T) {
	t.Parallel()

	m := New(10)
	for i := 0; i < 11; i++ {
		// The first ten writes must succeed; the eleventh must fail.
		if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 {
			t.Errorf("Set failed: %v", err)
		} else if i == 10 && err == nil {
			t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull)
		}
	}
}
|
||||
@@ -1,76 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Ensure SyncFS implements VFS.
|
||||
var _ vfs.VFS = (*SyncFS)(nil)
|
||||
|
||||
// SyncFS wraps another VFS and serializes access to it with an RWMutex:
// mutating calls (Set/Delete) take the write lock, read-only calls
// (Get/Stat/StatAll/Size) take the read lock.
type SyncFS struct {
	vfs vfs.VFS // the wrapped file system
	mu sync.RWMutex // guards every call into vfs
}
|
||||
|
||||
func New(vfs vfs.VFS) *SyncFS {
|
||||
return &SyncFS{
|
||||
vfs: vfs,
|
||||
mu: sync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the wrapped file system's name, tagged with "SyncFS".
func (sfs *SyncFS) Name() string {
	return fmt.Sprintf("SyncFS(%s)", sfs.vfs.Name())
}
|
||||
|
||||
// Size returns the total size of all files in the wrapped file system,
// taking the read lock for the duration of the call.
func (sfs *SyncFS) Size() int64 {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.Size()
}
|
||||
|
||||
// Set stores src under key in the wrapped file system under the write
// lock. Setting the same key multiple times, the last call takes effect.
func (sfs *SyncFS) Set(key string, src []byte) error {
	sfs.mu.Lock()
	defer sfs.mu.Unlock()

	return sfs.vfs.Set(key, src)
}
|
||||
|
||||
// Delete removes key from the wrapped file system under the write lock.
func (sfs *SyncFS) Delete(key string) error {
	sfs.mu.Lock()
	defer sfs.mu.Unlock()

	return sfs.vfs.Delete(key)
}
|
||||
|
||||
// Get returns the value stored under key, taking the read lock for the
// duration of the call so readers can proceed concurrently.
func (sfs *SyncFS) Get(key string) ([]byte, error) {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.Get(key)
}
|
||||
|
||||
// Stat returns the FileInfo of key from the wrapped file system under the
// read lock.
func (sfs *SyncFS) Stat(key string) (*vfs.FileInfo, error) {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.Stat(key)
}
|
||||
|
||||
// StatAll returns the FileInfo of every key in the wrapped file system
// under the read lock.
func (sfs *SyncFS) StatAll() []*vfs.FileInfo {
	sfs.mu.RLock()
	defer sfs.mu.RUnlock()

	return sfs.vfs.StatAll()
}
|
||||
118
vfs/vfs.go
118
vfs/vfs.go
@@ -1,26 +1,112 @@
|
||||
// vfs/vfs.go
|
||||
package vfs
|
||||
|
||||
// VFS is the interface that wraps the basic methods of a virtual file system.
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// VFS defines the interface for virtual file systems
|
||||
type VFS interface {
|
||||
// Name returns the name of the file system.
|
||||
Name() string
|
||||
// Create creates a new file at the given key
|
||||
Create(key string, size int64) (io.WriteCloser, error)
|
||||
|
||||
// Size returns the total size of all files in the file system.
|
||||
Size() int64
|
||||
// Open opens the file at the given key for reading
|
||||
Open(key string) (io.ReadCloser, error)
|
||||
|
||||
// Set sets the value of key as src.
|
||||
// Setting the same key multiple times, the last set call takes effect.
|
||||
Set(key string, src []byte) error
|
||||
|
||||
// Delete deletes the value of key.
|
||||
// Delete removes the file at the given key
|
||||
Delete(key string) error
|
||||
|
||||
// Get gets the value of key to dst, and returns dst no matter whether or not there is an error.
|
||||
Get(key string) ([]byte, error)
|
||||
|
||||
// Stat returns the FileInfo of key.
|
||||
// Stat returns information about the file at the given key
|
||||
Stat(key string) (*FileInfo, error)
|
||||
|
||||
// StatAll returns the FileInfo of all keys.
|
||||
StatAll() []*FileInfo
|
||||
// Name returns the name of this VFS
|
||||
Name() string
|
||||
|
||||
// Size returns the current size of the VFS
|
||||
Size() int64
|
||||
|
||||
// Capacity returns the maximum capacity of the VFS
|
||||
Capacity() int64
|
||||
}
|
||||
|
||||
// FileInfo holds the metadata tracked for each cached file: its key, its
// size in bytes, creation and last-access timestamps, and how many times
// it has been accessed.
type FileInfo struct {
	Key         string    `json:"key"`
	Size        int64     `json:"size"`
	ATime       time.Time `json:"atime"` // last access time
	CTime       time.Time `json:"ctime"` // creation time
	AccessCount int       `json:"access_count"`
}

// NewFileInfo builds a FileInfo for key with the given size; both
// timestamps start at the current time and the access count at one.
func NewFileInfo(key string, size int64) *FileInfo {
	ts := time.Now()
	fi := &FileInfo{
		Key:         key,
		Size:        size,
		ATime:       ts,
		CTime:       ts,
		AccessCount: 1,
	}
	return fi
}
|
||||
|
||||
// NewFileInfoFromOS creates a FileInfo from os.FileInfo
|
||||
func NewFileInfoFromOS(info os.FileInfo, key string) *FileInfo {
|
||||
return &FileInfo{
|
||||
Key: key,
|
||||
Size: info.Size(),
|
||||
ATime: time.Now(), // We don't have access time from os.FileInfo
|
||||
CTime: info.ModTime(),
|
||||
AccessCount: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateAccess stamps fi's access time with the current time and
// increments its access counter.
func (fi *FileInfo) UpdateAccess() {
	fi.ATime = time.Now()
	fi.AccessCount++
}
|
||||
|
||||
// BatchedTimeUpdate caches a timestamp and refreshes it at most once per
// updateInterval, so frequent access-time stamping reuses one cached value.
// NOTE(review): the fields are read and written without synchronization
// (see GetTime) — confirm callers serialize access or accept the race.
type BatchedTimeUpdate struct {
	currentTime time.Time // last cached timestamp handed out by GetTime
	lastUpdate time.Time // when currentTime was last refreshed
	updateInterval time.Duration // minimum interval between refreshes
}
|
||||
|
||||
// NewBatchedTimeUpdate creates a new batched time updater
|
||||
func NewBatchedTimeUpdate(interval time.Duration) *BatchedTimeUpdate {
|
||||
now := time.Now()
|
||||
return &BatchedTimeUpdate{
|
||||
currentTime: now,
|
||||
lastUpdate: now,
|
||||
updateInterval: interval,
|
||||
}
|
||||
}
|
||||
|
||||
// GetTime returns the current cached time, updating it if necessary
|
||||
func (btu *BatchedTimeUpdate) GetTime() time.Time {
|
||||
now := time.Now()
|
||||
if now.Sub(btu.lastUpdate) >= btu.updateInterval {
|
||||
btu.currentTime = now
|
||||
btu.lastUpdate = now
|
||||
}
|
||||
return btu.currentTime
|
||||
}
|
||||
|
||||
// UpdateAccessBatched stamps fi's access time from the batched clock and
// increments its access counter.
func (fi *FileInfo) UpdateAccessBatched(btu *BatchedTimeUpdate) {
	fi.ATime = btu.GetTime()
	fi.AccessCount++
}
|
||||
|
||||
// GetTimeDecayedScore calculates a score based on access time and frequency
|
||||
// More recent and frequent accesses get higher scores
|
||||
func (fi *FileInfo) GetTimeDecayedScore() float64 {
|
||||
timeSinceAccess := time.Since(fi.ATime).Hours()
|
||||
decayFactor := 1.0 / (1.0 + timeSinceAccess/24.0) // Decay over days
|
||||
frequencyBonus := float64(fi.AccessCount) * 0.1
|
||||
return decayFactor + frequencyBonus
|
||||
}
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
// vfs/vfserror/vfserror.go
|
||||
package vfserror
|
||||
|
||||
import "errors"
|
||||
|
||||
// Common VFS errors
|
||||
var (
|
||||
// ErrUnreachable is returned when a code path is unreachable.
|
||||
ErrUnreachable = errors.New("unreachable")
|
||||
|
||||
// ErrNotFound is returned when a key is not found.
|
||||
ErrNotFound = errors.New("vfs: key not found")
|
||||
|
||||
// ErrDiskFull is returned when the disk is full.
|
||||
ErrDiskFull = errors.New("vfs: disk full")
|
||||
ErrNotFound = errors.New("vfs: key not found")
|
||||
ErrInvalidKey = errors.New("vfs: invalid key")
|
||||
ErrAlreadyExists = errors.New("vfs: key already exists")
|
||||
ErrCapacityExceeded = errors.New("vfs: capacity exceeded")
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user