Compare commits
40 Commits
1.0.1
...
ee6fc32a1a
| Author | SHA1 | Date | |
|---|---|---|---|
| ee6fc32a1a | |||
| 4a4579b0f3 | |||
| b9358a0e8d | |||
| c197841960 | |||
| 6919358eab | |||
| 1187f05c77 | |||
| f6f93c86c8 | |||
| 30e804709f | |||
| 56bb1ddc12 | |||
| 9c65cdb156 | |||
| ae013f9a3b | |||
| d94b53c395 | |||
| 847931ed43 | |||
| 4387236d22 | |||
| f6ce004922 | |||
| 8e487876d2 | |||
| 1be7f5bd20 | |||
| f237b89ca7 | |||
| ae07239021 | |||
| 4876998f5d | |||
| 163e64790c | |||
| 00792d87a5 | |||
| 3427b8f5bc | |||
| 7f744d04b0 | |||
| 6c98d03ae7 | |||
| 17ff507c89 | |||
| 539f14e8ec | |||
| 1673e9554a | |||
| b83836f914 | |||
| 745856f0f4 | |||
| b4d2b1305e | |||
| 0d263be2ca | |||
| 63a1c21861 | |||
| 0a73e46f90 | |||
| 6f1158edeb | |||
| 93b682cfa5 | |||
| f378d0e81f | |||
| 8c1bb695b8 | |||
| f58951fd92 | |||
| 70786da8c6 |
@@ -8,14 +8,14 @@ jobs:
|
|||||||
release:
|
release:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@main
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- run: git fetch --force --tags
|
- run: git fetch --force --tags
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@main
|
||||||
with:
|
with:
|
||||||
go-version-file: 'go.mod'
|
go-version-file: 'go.mod'
|
||||||
- uses: goreleaser/goreleaser-action@v6
|
- uses: goreleaser/goreleaser-action@master
|
||||||
with:
|
with:
|
||||||
distribution: goreleaser
|
distribution: goreleaser
|
||||||
version: 'latest'
|
version: 'latest'
|
||||||
|
|||||||
@@ -6,14 +6,10 @@ jobs:
|
|||||||
check-and-test:
|
check-and-test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@main
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@main
|
||||||
with:
|
with:
|
||||||
go-version-file: 'go.mod'
|
go-version-file: 'go.mod'
|
||||||
- run: go mod tidy
|
- run: go mod tidy
|
||||||
- uses: golangci/golangci-lint-action@v3
|
|
||||||
with:
|
|
||||||
args: -D errcheck
|
|
||||||
version: latest
|
|
||||||
- run: go build ./...
|
- run: go build ./...
|
||||||
- run: go test -race -v -shuffle=on ./...
|
- run: go test -race -v -shuffle=on ./...
|
||||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -1,3 +1,11 @@
|
|||||||
dist/
|
#build artifacts
|
||||||
tmp/
|
/dist/
|
||||||
__*.exe
|
|
||||||
|
#disk cache
|
||||||
|
/disk/
|
||||||
|
|
||||||
|
#config file
|
||||||
|
/config.yaml
|
||||||
|
|
||||||
|
#windows executables
|
||||||
|
*.exe
|
||||||
|
|||||||
@@ -2,11 +2,17 @@ version: 2
|
|||||||
|
|
||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
- go mod tidy
|
- go mod tidy -v
|
||||||
|
|
||||||
builds:
|
builds:
|
||||||
- ldflags:
|
- id: default
|
||||||
|
binary: steamcache2
|
||||||
|
ldflags:
|
||||||
|
- -s
|
||||||
|
- -w
|
||||||
|
- -extldflags "-static"
|
||||||
- -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
|
- -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
|
||||||
|
- -X s1d3sw1ped/SteamCache2/version.Date={{.Date}}
|
||||||
env:
|
env:
|
||||||
- CGO_ENABLED=0
|
- CGO_ENABLED=0
|
||||||
goos:
|
goos:
|
||||||
@@ -14,19 +20,24 @@ builds:
|
|||||||
- windows
|
- windows
|
||||||
goarch:
|
goarch:
|
||||||
- amd64
|
- amd64
|
||||||
|
- arm64
|
||||||
|
ignore:
|
||||||
|
- goos: windows
|
||||||
|
goarch: arm64
|
||||||
|
|
||||||
|
checksum:
|
||||||
|
name_template: "checksums.txt"
|
||||||
|
|
||||||
archives:
|
archives:
|
||||||
- format: tar.gz
|
- id: default
|
||||||
name_template: >-
|
name_template: "{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}"
|
||||||
{{ .ProjectName }}_
|
formats: tar.gz
|
||||||
{{- title .Os }}_
|
|
||||||
{{- if eq .Arch "amd64" }}x86_64
|
|
||||||
{{- else if eq .Arch "386" }}i386
|
|
||||||
{{- else }}{{ .Arch }}{{ end }}
|
|
||||||
{{- if .Arm }}v{{ .Arm }}{{ end }}
|
|
||||||
format_overrides:
|
format_overrides:
|
||||||
- goos: windows
|
- goos: windows
|
||||||
format: zip
|
formats: zip
|
||||||
|
files:
|
||||||
|
- README.md
|
||||||
|
- LICENSE
|
||||||
|
|
||||||
changelog:
|
changelog:
|
||||||
sort: asc
|
sort: asc
|
||||||
@@ -36,12 +47,7 @@ changelog:
|
|||||||
- "^test:"
|
- "^test:"
|
||||||
|
|
||||||
release:
|
release:
|
||||||
name_template: '{{.ProjectName}}-{{.Version}}'
|
name_template: "{{ .ProjectName }}-{{ .Version }}"
|
||||||
footer: >-
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
Released by [GoReleaser](https://github.com/goreleaser/goreleaser).
|
|
||||||
|
|
||||||
gitea_urls:
|
gitea_urls:
|
||||||
api: https://git.s1d3sw1ped.com/api/v1
|
api: https://git.s1d3sw1ped.com/api/v1
|
||||||
|
|||||||
56
.vscode/launch.json
vendored
56
.vscode/launch.json
vendored
@@ -1,56 +0,0 @@
|
|||||||
{
|
|
||||||
// Use IntelliSense to learn about possible attributes.
|
|
||||||
// Hover to view descriptions of existing attributes.
|
|
||||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
|
||||||
"version": "0.2.0",
|
|
||||||
"configurations": [
|
|
||||||
{
|
|
||||||
"name": "Launch Memory & Disk",
|
|
||||||
"type": "go",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "auto",
|
|
||||||
"program": "${workspaceFolder}/main.go",
|
|
||||||
"args": [
|
|
||||||
"--memory",
|
|
||||||
"1G",
|
|
||||||
"--disk",
|
|
||||||
"10G",
|
|
||||||
"--disk-path",
|
|
||||||
"tmp/disk",
|
|
||||||
"--upstream",
|
|
||||||
"http://192.168.2.88:80",
|
|
||||||
"--verbose",
|
|
||||||
],
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "Launch Disk Only",
|
|
||||||
"type": "go",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "auto",
|
|
||||||
"program": "${workspaceFolder}/main.go",
|
|
||||||
"args": [
|
|
||||||
"--disk",
|
|
||||||
"10G",
|
|
||||||
"--disk-path",
|
|
||||||
"tmp/disk",
|
|
||||||
"--upstream",
|
|
||||||
"http://192.168.2.88:80",
|
|
||||||
"--verbose",
|
|
||||||
],
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "Launch Memory Only",
|
|
||||||
"type": "go",
|
|
||||||
"request": "launch",
|
|
||||||
"mode": "auto",
|
|
||||||
"program": "${workspaceFolder}/main.go",
|
|
||||||
"args": [
|
|
||||||
"--memory",
|
|
||||||
"1G",
|
|
||||||
"--upstream",
|
|
||||||
"http://192.168.2.88:80",
|
|
||||||
"--verbose",
|
|
||||||
],
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
19
Makefile
Normal file
19
Makefile
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
run: deps test ## Run the application
|
||||||
|
@go run .
|
||||||
|
|
||||||
|
help: ## Show this help message
|
||||||
|
@echo SteamCache2 Makefile
|
||||||
|
@echo Available targets:
|
||||||
|
@echo run Run the application
|
||||||
|
@echo run-debug Run the application with debug logging
|
||||||
|
@echo test Run all tests
|
||||||
|
@echo deps Download dependencies
|
||||||
|
|
||||||
|
run-debug: deps test ## Run the application with debug logging
|
||||||
|
@go run . --log-level debug
|
||||||
|
|
||||||
|
test: deps ## Run all tests
|
||||||
|
@go test -v ./...
|
||||||
|
|
||||||
|
deps: ## Download dependencies
|
||||||
|
@go mod tidy
|
||||||
222
README.md
222
README.md
@@ -10,15 +10,154 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
|
|||||||
- Reduces bandwidth usage
|
- Reduces bandwidth usage
|
||||||
- Easy to set up and configure aside from dns stuff to trick Steam into using it
|
- Easy to set up and configure aside from dns stuff to trick Steam into using it
|
||||||
- Supports multiple clients
|
- Supports multiple clients
|
||||||
|
- **NEW:** YAML configuration system with automatic config generation
|
||||||
|
- **NEW:** Simple Makefile for development workflow
|
||||||
|
- Cross-platform builds (Linux, macOS, Windows)
|
||||||
|
|
||||||
## Usage
|
## Quick Start
|
||||||
|
|
||||||
1. Start the cache server:
|
### First Time Setup
|
||||||
```sh
|
|
||||||
./SteamCache2 --memory 1G --disk 10G --disk-path tmp/disk
|
1. **Clone and build:**
|
||||||
|
```bash
|
||||||
|
git clone <repository-url>
|
||||||
|
cd SteamCache2
|
||||||
|
make # This will run tests and build the application
|
||||||
```
|
```
|
||||||
2. Configure your DNS:
|
|
||||||
- If your on Windows and don't want a whole network implementation (THIS)[#windows-hosts-file-override]
|
2. **Run the application** (it will create a default config):
|
||||||
|
```bash
|
||||||
|
./steamcache2
|
||||||
|
# or on Windows:
|
||||||
|
steamcache2.exe
|
||||||
|
```
|
||||||
|
|
||||||
|
The application will automatically create a `config.yaml` file with default settings and exit, allowing you to customize it.
|
||||||
|
|
||||||
|
3. **Edit the configuration** (`config.yaml`):
|
||||||
|
```yaml
|
||||||
|
listen_address: :80
|
||||||
|
cache:
|
||||||
|
memory:
|
||||||
|
size: 1GB
|
||||||
|
gc_algorithm: lru
|
||||||
|
disk:
|
||||||
|
size: 10GB
|
||||||
|
path: ./disk
|
||||||
|
gc_algorithm: hybrid
|
||||||
|
upstream: "https://steam.cdn.com" # Set your upstream server
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Run the application again:**
|
||||||
|
```bash
|
||||||
|
make run # or ./steamcache2
|
||||||
|
```
|
||||||
|
|
||||||
|
### Development Workflow
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all tests and start the application (default target)
|
||||||
|
make
|
||||||
|
|
||||||
|
# Run only tests
|
||||||
|
make test
|
||||||
|
|
||||||
|
# Run with debug logging
|
||||||
|
make run-debug
|
||||||
|
|
||||||
|
# Download dependencies
|
||||||
|
make deps
|
||||||
|
|
||||||
|
# Show available commands
|
||||||
|
make help
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Line Flags
|
||||||
|
|
||||||
|
While most configuration is done via the YAML file, some runtime options are still available as command-line flags:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use a custom config file
|
||||||
|
./steamcache2 --config /path/to/my-config.yaml
|
||||||
|
|
||||||
|
# Set logging level
|
||||||
|
./steamcache2 --log-level debug --log-format json
|
||||||
|
|
||||||
|
# Set number of worker threads
|
||||||
|
./steamcache2 --threads 8
|
||||||
|
|
||||||
|
# Show help
|
||||||
|
./steamcache2 --help
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
SteamCache2 uses a YAML configuration file (`config.yaml`) for all settings. Here's a complete configuration example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Server configuration
|
||||||
|
listen_address: :80
|
||||||
|
|
||||||
|
# Cache configuration
|
||||||
|
cache:
|
||||||
|
# Memory cache settings
|
||||||
|
memory:
|
||||||
|
# Size of memory cache (e.g., "512MB", "1GB", "0" to disable)
|
||||||
|
size: 1GB
|
||||||
|
# Garbage collection algorithm
|
||||||
|
gc_algorithm: lru
|
||||||
|
|
||||||
|
# Disk cache settings
|
||||||
|
disk:
|
||||||
|
# Size of disk cache (e.g., "10GB", "50GB", "0" to disable)
|
||||||
|
size: 10GB
|
||||||
|
# Path to disk cache directory
|
||||||
|
path: ./disk
|
||||||
|
# Garbage collection algorithm
|
||||||
|
gc_algorithm: hybrid
|
||||||
|
|
||||||
|
# Upstream server configuration
|
||||||
|
# The upstream server to proxy requests to
|
||||||
|
upstream: "https://steam.cdn.com"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Garbage Collection Algorithms
|
||||||
|
|
||||||
|
SteamCache2 supports different garbage collection algorithms for memory and disk caches, allowing you to optimize performance for each storage tier:
|
||||||
|
|
||||||
|
**Available GC Algorithms:**
|
||||||
|
|
||||||
|
- **`lru`** (default): Least Recently Used - evicts oldest accessed files
|
||||||
|
- **`lfu`**: Least Frequently Used - evicts least accessed files (good for popular content)
|
||||||
|
- **`fifo`**: First In, First Out - evicts oldest created files (predictable)
|
||||||
|
- **`largest`**: Size-based - evicts largest files first (maximizes file count)
|
||||||
|
- **`smallest`**: Size-based - evicts smallest files first (maximizes cache hit rate)
|
||||||
|
- **`hybrid`**: Combines access time and file size for optimal eviction
|
||||||
|
|
||||||
|
**Recommended Algorithms by Cache Type:**
|
||||||
|
|
||||||
|
**For Memory Cache (Fast, Limited Size):**
|
||||||
|
- **`lru`** - Best overall performance, good balance of speed and hit rate
|
||||||
|
- **`lfu`** - Excellent for gaming cafes where popular games stay cached
|
||||||
|
- **`hybrid`** - Optimal for mixed workloads with varying file sizes
|
||||||
|
|
||||||
|
**For Disk Cache (Slow, Large Size):**
|
||||||
|
- **`hybrid`** - Recommended for optimal performance, balances speed and storage efficiency
|
||||||
|
- **`largest`** - Good for maximizing number of cached files
|
||||||
|
- **`lru`** - Reliable default with good performance
|
||||||
|
|
||||||
|
**Use Cases:**
|
||||||
|
- **Gaming Cafes**: Use `lfu` for memory, `hybrid` for disk
|
||||||
|
- **LAN Events**: Use `lfu` for memory, `hybrid` for disk
|
||||||
|
- **Home Use**: Use `lru` for memory, `hybrid` for disk
|
||||||
|
- **Testing**: Use `fifo` for predictable behavior
|
||||||
|
- **Large File Storage**: Use `largest` for disk to maximize file count
|
||||||
|
|
||||||
|
### DNS Configuration
|
||||||
|
|
||||||
|
Configure your DNS to direct Steam traffic to your SteamCache2 server:
|
||||||
|
|
||||||
|
- If you're on Windows and don't want a whole network implementation, see the [Windows Hosts File Override](#windows-hosts-file-override) section below.
|
||||||
|
|
||||||
### Windows Hosts File Override
|
### Windows Hosts File Override
|
||||||
|
|
||||||
@@ -53,6 +192,77 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
|
|||||||
|
|
||||||
This will direct any requests to `lancache.steamcontent.com` to your SteamCache2 server.
|
This will direct any requests to `lancache.steamcontent.com` to your SteamCache2 server.
|
||||||
|
|
||||||
|
## Building from Source
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
- Go 1.19 or later
|
||||||
|
- Make (optional, but recommended)
|
||||||
|
|
||||||
|
### Build Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone the repository
|
||||||
|
git clone <repository-url>
|
||||||
|
cd SteamCache2
|
||||||
|
|
||||||
|
# Download dependencies
|
||||||
|
make deps
|
||||||
|
|
||||||
|
# Run tests
|
||||||
|
make test
|
||||||
|
|
||||||
|
# Build for current platform
|
||||||
|
go build -o steamcache2 .
|
||||||
|
|
||||||
|
# Build for specific platforms
|
||||||
|
GOOS=linux GOARCH=amd64 go build -o steamcache2-linux-amd64 .
|
||||||
|
GOOS=windows GOARCH=amd64 go build -o steamcache2-windows-amd64.exe .
|
||||||
|
```
|
||||||
|
|
||||||
|
### Development
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run in development mode with debug logging
|
||||||
|
make run-debug
|
||||||
|
|
||||||
|
# Run all tests and start the application
|
||||||
|
make
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
1. **"Config file not found" on first run**
|
||||||
|
- This is expected! SteamCache2 will automatically create a default `config.yaml` file
|
||||||
|
- Edit the generated config file with your desired settings
|
||||||
|
- Run the application again
|
||||||
|
|
||||||
|
2. **Permission denied when creating config**
|
||||||
|
- Make sure you have write permissions in the current directory
|
||||||
|
- Try running with elevated privileges if necessary
|
||||||
|
|
||||||
|
3. **Port already in use**
|
||||||
|
- Change the `listen_address` in `config.yaml` to a different port (e.g., `:8080`)
|
||||||
|
- Or stop the service using the current port
|
||||||
|
|
||||||
|
4. **High memory usage**
|
||||||
|
- Reduce the memory cache size in `config.yaml`
|
||||||
|
- Consider using disk-only caching by setting `memory.size: "0"`
|
||||||
|
|
||||||
|
5. **Slow disk performance**
|
||||||
|
- Use SSD storage for the disk cache
|
||||||
|
- Consider using a different GC algorithm like `hybrid`
|
||||||
|
- Adjust the disk cache size to match available storage
|
||||||
|
|
||||||
|
### Getting Help
|
||||||
|
|
||||||
|
- Check the logs for detailed error messages
|
||||||
|
- Run with `--log-level debug` for more verbose output
|
||||||
|
- Ensure your upstream server is accessible
|
||||||
|
- Verify DNS configuration is working correctly
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
See the [LICENSE](LICENSE) file for details.
|
See the [LICENSE](LICENSE) file for details.
|
||||||
|
|||||||
123
cmd/root.go
123
cmd/root.go
@@ -1,23 +1,26 @@
|
|||||||
|
// cmd/root.go
|
||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"s1d3sw1ped/SteamCache2/config"
|
||||||
"s1d3sw1ped/SteamCache2/steamcache"
|
"s1d3sw1ped/SteamCache2/steamcache"
|
||||||
|
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||||
|
"s1d3sw1ped/SteamCache2/version"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
memory string
|
threads int
|
||||||
memorymultiplier int
|
configPath string
|
||||||
disk string
|
|
||||||
diskmultiplier int
|
|
||||||
diskpath string
|
|
||||||
upstream string
|
|
||||||
|
|
||||||
pprof bool
|
logLevel string
|
||||||
verbose bool
|
logFormat string
|
||||||
)
|
)
|
||||||
|
|
||||||
var rootCmd = &cobra.Command{
|
var rootCmd = &cobra.Command{
|
||||||
@@ -29,21 +32,94 @@ var rootCmd = &cobra.Command{
|
|||||||
By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
|
By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
|
||||||
significantly improving download times and reducing the load on the internet connection.`,
|
significantly improving download times and reducing the load on the internet connection.`,
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
if verbose {
|
// Configure logging
|
||||||
|
switch logLevel {
|
||||||
|
case "debug":
|
||||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||||
|
case "error":
|
||||||
|
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
|
||||||
|
case "info":
|
||||||
|
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||||
|
default:
|
||||||
|
zerolog.SetGlobalLevel(zerolog.InfoLevel) // Default to info level if not specified
|
||||||
|
}
|
||||||
|
var writer zerolog.ConsoleWriter
|
||||||
|
if logFormat == "json" {
|
||||||
|
writer = zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true}
|
||||||
|
} else {
|
||||||
|
writer = zerolog.ConsoleWriter{Out: os.Stderr}
|
||||||
|
}
|
||||||
|
logger.Logger = zerolog.New(writer).With().Timestamp().Logger()
|
||||||
|
|
||||||
|
logger.Logger.Info().
|
||||||
|
Msg("SteamCache2 " + version.Version + " " + version.Date + " starting...")
|
||||||
|
|
||||||
|
// Load configuration
|
||||||
|
cfg, err := config.LoadConfig(configPath)
|
||||||
|
if err != nil {
|
||||||
|
// Check if the error is because the config file doesn't exist
|
||||||
|
// The error is wrapped, so we check the error message
|
||||||
|
if strings.Contains(err.Error(), "no such file") ||
|
||||||
|
strings.Contains(err.Error(), "cannot find the file") ||
|
||||||
|
strings.Contains(err.Error(), "The system cannot find the file") {
|
||||||
|
logger.Logger.Info().
|
||||||
|
Str("config_path", configPath).
|
||||||
|
Msg("Config file not found, creating default configuration")
|
||||||
|
|
||||||
|
if err := config.SaveDefaultConfig(configPath); err != nil {
|
||||||
|
logger.Logger.Error().
|
||||||
|
Err(err).
|
||||||
|
Str("config_path", configPath).
|
||||||
|
Msg("Failed to create default configuration")
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: Failed to create default config at %s: %v\n", configPath, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Logger.Info().
|
||||||
|
Str("config_path", configPath).
|
||||||
|
Msg("Default configuration created successfully. Please edit the file and run again.")
|
||||||
|
|
||||||
|
fmt.Printf("Default configuration created at %s\n", configPath)
|
||||||
|
fmt.Println("Please edit the configuration file as needed and run the application again.")
|
||||||
|
os.Exit(0)
|
||||||
|
} else {
|
||||||
|
logger.Logger.Error().
|
||||||
|
Err(err).
|
||||||
|
Str("config_path", configPath).
|
||||||
|
Msg("Failed to load configuration")
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: Failed to load configuration from %s: %v\n", configPath, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Logger.Info().
|
||||||
|
Str("config_path", configPath).
|
||||||
|
Msg("Configuration loaded successfully")
|
||||||
|
|
||||||
|
if runtime.GOMAXPROCS(-1) != threads {
|
||||||
|
runtime.GOMAXPROCS(threads)
|
||||||
|
logger.Logger.Info().
|
||||||
|
Int("threads", threads).
|
||||||
|
Msg("Maximum number of threads set")
|
||||||
}
|
}
|
||||||
|
|
||||||
sc := steamcache.New(
|
sc := steamcache.New(
|
||||||
":80",
|
cfg.ListenAddress,
|
||||||
memory,
|
cfg.Cache.Memory.Size,
|
||||||
memorymultiplier,
|
cfg.Cache.Disk.Size,
|
||||||
disk,
|
cfg.Cache.Disk.Path,
|
||||||
diskmultiplier,
|
cfg.Upstream,
|
||||||
diskpath,
|
cfg.Cache.Memory.GCAlgorithm,
|
||||||
upstream,
|
cfg.Cache.Disk.GCAlgorithm,
|
||||||
pprof,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
logger.Logger.Info().
|
||||||
|
Msg("SteamCache2 " + version.Version + " started on " + cfg.ListenAddress)
|
||||||
|
|
||||||
sc.Run()
|
sc.Run()
|
||||||
|
|
||||||
|
logger.Logger.Info().Msg("SteamCache2 stopped")
|
||||||
|
os.Exit(0)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,15 +133,10 @@ func Execute() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
rootCmd.Flags().StringVarP(&memory, "memory", "m", "0", "The size of the memory cache")
|
rootCmd.Flags().StringVarP(&configPath, "config", "c", "config.yaml", "Path to configuration file")
|
||||||
rootCmd.Flags().IntVarP(&memorymultiplier, "memory-gc", "M", 10, "The gc value for the memory cache")
|
|
||||||
rootCmd.Flags().StringVarP(&disk, "disk", "d", "0", "The size of the disk cache")
|
|
||||||
rootCmd.Flags().IntVarP(&diskmultiplier, "disk-gc", "D", 100, "The gc value for the disk cache")
|
|
||||||
rootCmd.Flags().StringVarP(&diskpath, "disk-path", "p", "", "The path to the disk cache")
|
|
||||||
|
|
||||||
rootCmd.Flags().StringVarP(&upstream, "upstream", "u", "", "The upstream server to proxy requests overrides the host header from the client but forwards the original host header to the upstream server")
|
rootCmd.Flags().IntVarP(&threads, "threads", "t", runtime.GOMAXPROCS(-1), "Number of worker threads to use for processing requests")
|
||||||
|
|
||||||
rootCmd.Flags().BoolVarP(&pprof, "pprof", "P", false, "Enable pprof")
|
rootCmd.Flags().StringVarP(&logLevel, "log-level", "l", "info", "Logging level: debug, info, error")
|
||||||
rootCmd.Flags().MarkHidden("pprof")
|
rootCmd.Flags().StringVarP(&logFormat, "log-format", "f", "console", "Logging format: json, console")
|
||||||
rootCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging")
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
// cmd/version.go
|
||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -14,7 +15,7 @@ var versionCmd = &cobra.Command{
|
|||||||
Short: "prints the version of SteamCache2",
|
Short: "prints the version of SteamCache2",
|
||||||
Long: `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
|
Long: `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
fmt.Fprintln(os.Stderr, "SteamCache2", version.Version)
|
fmt.Fprintln(os.Stderr, "SteamCache2", version.Version, version.Date)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
116
config/config.go
Normal file
116
config/config.go
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v3"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
// Server configuration
|
||||||
|
ListenAddress string `yaml:"listen_address" default:":80"`
|
||||||
|
|
||||||
|
// Cache configuration
|
||||||
|
Cache CacheConfig `yaml:"cache"`
|
||||||
|
|
||||||
|
// Upstream configuration
|
||||||
|
Upstream string `yaml:"upstream"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CacheConfig struct {
|
||||||
|
// Memory cache settings
|
||||||
|
Memory MemoryConfig `yaml:"memory"`
|
||||||
|
|
||||||
|
// Disk cache settings
|
||||||
|
Disk DiskConfig `yaml:"disk"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type MemoryConfig struct {
|
||||||
|
// Size of memory cache (e.g., "512MB", "1GB")
|
||||||
|
Size string `yaml:"size" default:"0"`
|
||||||
|
|
||||||
|
// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
|
||||||
|
GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DiskConfig struct {
|
||||||
|
// Size of disk cache (e.g., "10GB", "50GB")
|
||||||
|
Size string `yaml:"size" default:"0"`
|
||||||
|
|
||||||
|
// Path to disk cache directory
|
||||||
|
Path string `yaml:"path" default:""`
|
||||||
|
|
||||||
|
// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
|
||||||
|
GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadConfig loads configuration from a YAML file
|
||||||
|
func LoadConfig(configPath string) (*Config, error) {
|
||||||
|
if configPath == "" {
|
||||||
|
configPath = "config.yaml"
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := os.ReadFile(configPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read config file %s: %w", configPath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var config Config
|
||||||
|
if err := yaml.Unmarshal(data, &config); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse config file %s: %w", configPath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set defaults for empty values
|
||||||
|
if config.ListenAddress == "" {
|
||||||
|
config.ListenAddress = ":80"
|
||||||
|
}
|
||||||
|
if config.Cache.Memory.Size == "" {
|
||||||
|
config.Cache.Memory.Size = "0"
|
||||||
|
}
|
||||||
|
if config.Cache.Memory.GCAlgorithm == "" {
|
||||||
|
config.Cache.Memory.GCAlgorithm = "lru"
|
||||||
|
}
|
||||||
|
if config.Cache.Disk.Size == "" {
|
||||||
|
config.Cache.Disk.Size = "0"
|
||||||
|
}
|
||||||
|
if config.Cache.Disk.GCAlgorithm == "" {
|
||||||
|
config.Cache.Disk.GCAlgorithm = "lru"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &config, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveDefaultConfig creates a default configuration file
|
||||||
|
func SaveDefaultConfig(configPath string) error {
|
||||||
|
if configPath == "" {
|
||||||
|
configPath = "config.yaml"
|
||||||
|
}
|
||||||
|
|
||||||
|
defaultConfig := Config{
|
||||||
|
ListenAddress: ":80",
|
||||||
|
Cache: CacheConfig{
|
||||||
|
Memory: MemoryConfig{
|
||||||
|
Size: "1GB",
|
||||||
|
GCAlgorithm: "lru",
|
||||||
|
},
|
||||||
|
Disk: DiskConfig{
|
||||||
|
Size: "10GB",
|
||||||
|
Path: "./disk",
|
||||||
|
GCAlgorithm: "hybrid",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Upstream: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := yaml.Marshal(&defaultConfig)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to marshal default config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(configPath, data, 0644); err != nil {
|
||||||
|
return fmt.Errorf("failed to write default config file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
3
go.mod
3
go.mod
@@ -4,9 +4,10 @@ go 1.23.0
|
|||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/docker/go-units v0.5.0
|
github.com/docker/go-units v0.5.0
|
||||||
|
github.com/edsrzf/mmap-go v1.1.0
|
||||||
github.com/rs/zerolog v1.33.0
|
github.com/rs/zerolog v1.33.0
|
||||||
github.com/spf13/cobra v1.8.1
|
github.com/spf13/cobra v1.8.1
|
||||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
|||||||
6
go.sum
6
go.sum
@@ -2,6 +2,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
|
|||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
|
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
|
||||||
|
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
@@ -19,11 +21,11 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
|||||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
|
|
||||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
|
|
||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
|
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
|
||||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
@@ -1,63 +0,0 @@
|
|||||||
package avgcachestate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AvgCacheState is a cache state that averages the last N cache states.
|
|
||||||
type AvgCacheState struct {
|
|
||||||
size int
|
|
||||||
avgs []cachestate.CacheState
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new average cache state with the given size.
|
|
||||||
func New(size int) *AvgCacheState {
|
|
||||||
a := &AvgCacheState{
|
|
||||||
size: size,
|
|
||||||
avgs: make([]cachestate.CacheState, size),
|
|
||||||
mu: sync.Mutex{},
|
|
||||||
}
|
|
||||||
|
|
||||||
a.Clear()
|
|
||||||
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear resets the average cache state to zero.
|
|
||||||
func (a *AvgCacheState) Clear() {
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
|
|
||||||
for i := 0; i < len(a.avgs); i++ {
|
|
||||||
a.avgs[i] = cachestate.CacheStateMiss
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds a cache state to the average cache state.
|
|
||||||
func (a *AvgCacheState) Add(cs cachestate.CacheState) {
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
|
|
||||||
a.avgs = append(a.avgs, cs)
|
|
||||||
if len(a.avgs) > a.size {
|
|
||||||
a.avgs = a.avgs[1:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Avg returns the average cache state.
|
|
||||||
func (a *AvgCacheState) Avg() float64 {
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
|
|
||||||
var hits int
|
|
||||||
|
|
||||||
for _, cs := range a.avgs {
|
|
||||||
if cs == cachestate.CacheStateHit {
|
|
||||||
hits++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return float64(hits) / float64(len(a.avgs))
|
|
||||||
}
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
package steamcache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime/debug"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/exp/rand"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Set the GC percentage to 50%. This is a good balance between performance and memory usage.
|
|
||||||
debug.SetGCPercent(50)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RandomGC randomly deletes files until we've reclaimed enough space.
|
|
||||||
func randomgc(vfss vfs.VFS, size uint) (uint, uint) {
|
|
||||||
// Randomly delete files until we've reclaimed enough space.
|
|
||||||
random := func(vfss vfs.VFS, stats []*vfs.FileInfo) int64 {
|
|
||||||
randfile := stats[rand.Intn(len(stats))]
|
|
||||||
sz := randfile.Size()
|
|
||||||
err := vfss.Delete(randfile.Name())
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return sz
|
|
||||||
}
|
|
||||||
|
|
||||||
deletions := 0
|
|
||||||
targetreclaim := int64(size)
|
|
||||||
var reclaimed int64
|
|
||||||
|
|
||||||
stats := vfss.StatAll()
|
|
||||||
for {
|
|
||||||
if reclaimed >= targetreclaim {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
reclaimed += random(vfss, stats)
|
|
||||||
deletions++
|
|
||||||
}
|
|
||||||
|
|
||||||
return uint(reclaimed), uint(deletions)
|
|
||||||
}
|
|
||||||
|
|
||||||
func cachehandler(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
|
|
||||||
return time.Since(fi.AccessTime()) < time.Second*10 // Put hot files in the fast vfs if equipped
|
|
||||||
}
|
|
||||||
@@ -1,13 +1,8 @@
|
|||||||
|
// steamcache/logger/logger.go
|
||||||
package logger
|
package logger
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
var Logger zerolog.Logger
|
||||||
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
|
||||||
}
|
|
||||||
|
|
||||||
var Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
|
|
||||||
|
|||||||
@@ -1,33 +1,134 @@
|
|||||||
|
// steamcache/steamcache.go
|
||||||
package steamcache
|
package steamcache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
"io"
|
"io"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
|
||||||
"s1d3sw1ped/SteamCache2/steamcache/avgcachestate"
|
|
||||||
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||||
"s1d3sw1ped/SteamCache2/version"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
"s1d3sw1ped/SteamCache2/vfs"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cache"
|
"s1d3sw1ped/SteamCache2/vfs/cache"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/disk"
|
"s1d3sw1ped/SteamCache2/vfs/disk"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/gc"
|
"s1d3sw1ped/SteamCache2/vfs/gc"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
"s1d3sw1ped/SteamCache2/vfs/memory"
|
||||||
syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
pprof "net/http/pprof"
|
|
||||||
|
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// generateURLHash creates a SHA256 hash of the entire URL path for cache key
|
||||||
|
func generateURLHash(urlPath string) string {
|
||||||
|
hash := sha256.Sum256([]byte(urlPath))
|
||||||
|
return hex.EncodeToString(hash[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateSteamCacheKey creates a cache key from the URL path using SHA256
|
||||||
|
// Input: /depot/1684171/chunk/0016cfc5019b8baa6026aa1cce93e685d6e06c6e
|
||||||
|
// Output: steam/a1b2c3d4e5f678901234567890123456789012345678901234567890
|
||||||
|
func generateSteamCacheKey(urlPath string) string {
|
||||||
|
// Handle Steam depot URLs by creating a SHA256 hash of the entire path
|
||||||
|
if strings.HasPrefix(urlPath, "/depot/") {
|
||||||
|
return "steam/" + generateURLHash(urlPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// For non-Steam URLs, return empty string (not cached)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var hopByHopHeaders = map[string]struct{}{
|
||||||
|
"Connection": {},
|
||||||
|
"Keep-Alive": {},
|
||||||
|
"Proxy-Authenticate": {},
|
||||||
|
"Proxy-Authorization": {},
|
||||||
|
"TE": {},
|
||||||
|
"Trailer": {},
|
||||||
|
"Transfer-Encoding": {},
|
||||||
|
"Upgrade": {},
|
||||||
|
"Date": {},
|
||||||
|
"Server": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Request coalescing structures
|
||||||
|
coalescedRequests = make(map[string]*coalescedRequest)
|
||||||
|
coalescedRequestsMu sync.RWMutex
|
||||||
|
)
|
||||||
|
|
||||||
|
type coalescedRequest struct {
|
||||||
|
responseChan chan *http.Response
|
||||||
|
errorChan chan error
|
||||||
|
waitingCount int
|
||||||
|
done bool
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCoalescedRequest() *coalescedRequest {
|
||||||
|
return &coalescedRequest{
|
||||||
|
responseChan: make(chan *http.Response, 1),
|
||||||
|
errorChan: make(chan error, 1),
|
||||||
|
waitingCount: 1,
|
||||||
|
done: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *coalescedRequest) addWaiter() {
|
||||||
|
cr.mu.Lock()
|
||||||
|
defer cr.mu.Unlock()
|
||||||
|
cr.waitingCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *coalescedRequest) complete(resp *http.Response, err error) {
|
||||||
|
cr.mu.Lock()
|
||||||
|
defer cr.mu.Unlock()
|
||||||
|
if cr.done {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
cr.done = true
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
select {
|
||||||
|
case cr.errorChan <- err:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
select {
|
||||||
|
case cr.responseChan <- resp:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getOrCreateCoalescedRequest gets an existing coalesced request or creates a new one
|
||||||
|
func getOrCreateCoalescedRequest(cacheKey string) (*coalescedRequest, bool) {
|
||||||
|
coalescedRequestsMu.Lock()
|
||||||
|
defer coalescedRequestsMu.Unlock()
|
||||||
|
|
||||||
|
if cr, exists := coalescedRequests[cacheKey]; exists {
|
||||||
|
cr.addWaiter()
|
||||||
|
return cr, false
|
||||||
|
}
|
||||||
|
|
||||||
|
cr := newCoalescedRequest()
|
||||||
|
coalescedRequests[cacheKey] = cr
|
||||||
|
return cr, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeCoalescedRequest removes a completed coalesced request
|
||||||
|
func removeCoalescedRequest(cacheKey string) {
|
||||||
|
coalescedRequestsMu.Lock()
|
||||||
|
defer coalescedRequestsMu.Unlock()
|
||||||
|
delete(coalescedRequests, cacheKey)
|
||||||
|
}
|
||||||
|
|
||||||
type SteamCache struct {
|
type SteamCache struct {
|
||||||
pprof bool
|
|
||||||
address string
|
address string
|
||||||
upstream string
|
upstream string
|
||||||
|
|
||||||
@@ -39,13 +140,13 @@ type SteamCache struct {
|
|||||||
memorygc *gc.GCFS
|
memorygc *gc.GCFS
|
||||||
diskgc *gc.GCFS
|
diskgc *gc.GCFS
|
||||||
|
|
||||||
hits *avgcachestate.AvgCacheState
|
server *http.Server
|
||||||
|
client *http.Client
|
||||||
dirty bool
|
cancel context.CancelFunc
|
||||||
mu sync.Mutex
|
wg sync.WaitGroup
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath, upstream string, pprof bool) *SteamCache {
|
func New(address string, memorySize string, diskSize string, diskPath, upstream, memoryGC, diskGC string) *SteamCache {
|
||||||
memorysize, err := units.FromHumanSize(memorySize)
|
memorysize, err := units.FromHumanSize(memorySize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
@@ -56,22 +157,28 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
c := cache.New(
|
c := cache.New()
|
||||||
cachehandler,
|
|
||||||
)
|
|
||||||
|
|
||||||
var m *memory.MemoryFS
|
var m *memory.MemoryFS
|
||||||
var mgc *gc.GCFS
|
var mgc *gc.GCFS
|
||||||
if memorysize > 0 {
|
if memorysize > 0 {
|
||||||
m = memory.New(memorysize)
|
m = memory.New(memorysize)
|
||||||
mgc = gc.New(m, memoryMultiplier, randomgc)
|
memoryGCAlgo := gc.GCAlgorithm(memoryGC)
|
||||||
|
if memoryGCAlgo == "" {
|
||||||
|
memoryGCAlgo = gc.LRU // default to LRU
|
||||||
|
}
|
||||||
|
mgc = gc.New(m, memoryGCAlgo)
|
||||||
}
|
}
|
||||||
|
|
||||||
var d *disk.DiskFS
|
var d *disk.DiskFS
|
||||||
var dgc *gc.GCFS
|
var dgc *gc.GCFS
|
||||||
if disksize > 0 {
|
if disksize > 0 {
|
||||||
d = disk.New(diskPath, disksize)
|
d = disk.New(diskPath, disksize)
|
||||||
dgc = gc.New(d, diskMultiplier, randomgc)
|
diskGCAlgo := gc.GCAlgorithm(diskGC)
|
||||||
|
if diskGCAlgo == "" {
|
||||||
|
diskGCAlgo = gc.LRU // default to LRU
|
||||||
|
}
|
||||||
|
dgc = gc.New(d, diskGCAlgo)
|
||||||
}
|
}
|
||||||
|
|
||||||
// configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes
|
// configure the cache to match the specified mode (memory only, disk only, or memory and disk) based on the provided sizes
|
||||||
@@ -79,42 +186,72 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
|
|||||||
//memory only mode - no disk
|
//memory only mode - no disk
|
||||||
|
|
||||||
c.SetSlow(mgc)
|
c.SetSlow(mgc)
|
||||||
logger.Logger.Info().Bool("memory", true).Bool("disk", false).Msg("configuration")
|
|
||||||
} else if disksize != 0 && memorysize == 0 {
|
} else if disksize != 0 && memorysize == 0 {
|
||||||
// disk only mode
|
// disk only mode
|
||||||
|
|
||||||
c.SetSlow(dgc)
|
c.SetSlow(dgc)
|
||||||
logger.Logger.Info().Bool("memory", false).Bool("disk", true).Msg("configuration")
|
|
||||||
} else if disksize != 0 && memorysize != 0 {
|
} else if disksize != 0 && memorysize != 0 {
|
||||||
// memory and disk mode
|
// memory and disk mode
|
||||||
|
|
||||||
c.SetFast(mgc)
|
c.SetFast(mgc)
|
||||||
c.SetSlow(dgc)
|
c.SetSlow(dgc)
|
||||||
logger.Logger.Info().Bool("memory", true).Bool("disk", true).Msg("configuration")
|
|
||||||
} else {
|
} else {
|
||||||
// no memory or disk isn't a valid configuration
|
// no memory or disk isn't a valid configuration
|
||||||
logger.Logger.Error().Bool("memory", false).Bool("disk", false).Msg("configuration invalid :( exiting")
|
logger.Logger.Error().Bool("memory", false).Bool("disk", false).Msg("configuration invalid :( exiting")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
transport := &http.Transport{
|
||||||
|
MaxIdleConns: 200, // Increased from 100
|
||||||
|
MaxIdleConnsPerHost: 50, // Increased from 10
|
||||||
|
IdleConnTimeout: 120 * time.Second, // Increased from 90s
|
||||||
|
DialContext: (&net.Dialer{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).DialContext,
|
||||||
|
TLSHandshakeTimeout: 15 * time.Second, // Increased from 10s
|
||||||
|
ResponseHeaderTimeout: 30 * time.Second, // Increased from 10s
|
||||||
|
ExpectContinueTimeout: 5 * time.Second, // Increased from 1s
|
||||||
|
DisableCompression: true, // Steam doesn't use compression
|
||||||
|
ForceAttemptHTTP2: true, // Enable HTTP/2 if available
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &http.Client{
|
||||||
|
Transport: transport,
|
||||||
|
Timeout: 120 * time.Second, // Increased from 60s
|
||||||
|
}
|
||||||
|
|
||||||
sc := &SteamCache{
|
sc := &SteamCache{
|
||||||
pprof: pprof,
|
|
||||||
upstream: upstream,
|
upstream: upstream,
|
||||||
address: address,
|
address: address,
|
||||||
vfs: syncfs.New(c),
|
vfs: c,
|
||||||
|
|
||||||
memory: m,
|
memory: m,
|
||||||
disk: d,
|
disk: d,
|
||||||
|
|
||||||
memorygc: mgc,
|
memorygc: mgc,
|
||||||
diskgc: dgc,
|
diskgc: dgc,
|
||||||
|
client: client,
|
||||||
|
server: &http.Server{
|
||||||
|
Addr: address,
|
||||||
|
ReadTimeout: 30 * time.Second, // Increased
|
||||||
|
WriteTimeout: 60 * time.Second, // Increased
|
||||||
|
IdleTimeout: 120 * time.Second, // Good for keep-alive
|
||||||
|
ReadHeaderTimeout: 10 * time.Second, // New, for header attacks
|
||||||
|
MaxHeaderBytes: 1 << 20, // 1MB, optional
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
hits: avgcachestate.New(100),
|
// Log GC algorithm configuration
|
||||||
|
if m != nil {
|
||||||
|
logger.Logger.Info().Str("memory_gc", memoryGC).Msg("Memory cache GC algorithm configured")
|
||||||
|
}
|
||||||
|
if d != nil {
|
||||||
|
logger.Logger.Info().Str("disk_gc", diskGC).Msg("Disk cache GC algorithm configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
if d != nil {
|
if d != nil {
|
||||||
if d.Size() > d.Capacity() {
|
if d.Size() > d.Capacity() {
|
||||||
randomgc(d, uint(d.Size()-d.Capacity()))
|
gcHandler := gc.GetGCAlgorithm(gc.GCAlgorithm(diskGC))
|
||||||
|
gcHandler(d, uint(d.Size()-d.Capacity()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -122,111 +259,50 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (sc *SteamCache) Run() {
|
func (sc *SteamCache) Run() {
|
||||||
logger.Logger.Info().Str("address", sc.address).Str("version", version.Version).Msg("listening")
|
|
||||||
|
|
||||||
if sc.upstream != "" {
|
if sc.upstream != "" {
|
||||||
_, err := http.Get(sc.upstream)
|
resp, err := sc.client.Get(sc.upstream)
|
||||||
if err != nil {
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
|
logger.Logger.Error().Err(err).Int("status_code", resp.StatusCode).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
logger.Logger.Info().Str("upstream", sc.upstream).Msg("connected")
|
resp.Body.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
sc.mu.Lock()
|
sc.server.Handler = sc
|
||||||
sc.dirty = true
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
sc.mu.Unlock()
|
sc.cancel = cancel
|
||||||
|
|
||||||
sc.LogStats()
|
sc.wg.Add(1)
|
||||||
t := time.NewTicker(1 * time.Second)
|
|
||||||
go func() {
|
go func() {
|
||||||
for range t.C {
|
defer sc.wg.Done()
|
||||||
sc.LogStats()
|
err := sc.server.ListenAndServe()
|
||||||
}
|
if err != nil && err != http.ErrServerClosed {
|
||||||
}()
|
|
||||||
|
|
||||||
err := http.ListenAndServe(sc.address, sc)
|
|
||||||
if err != nil {
|
|
||||||
if err == http.ErrServerClosed {
|
|
||||||
logger.Logger.Info().Msg("shutdown")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
|
logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
<-ctx.Done()
|
||||||
|
sc.server.Shutdown(ctx)
|
||||||
|
sc.wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *SteamCache) LogStats() {
|
func (sc *SteamCache) Shutdown() {
|
||||||
sc.mu.Lock()
|
if sc.cancel != nil {
|
||||||
defer sc.mu.Unlock()
|
sc.cancel()
|
||||||
if sc.dirty {
|
|
||||||
|
|
||||||
logger.Logger.Info().Msg("") // empty line to separate log entries for better readability
|
|
||||||
|
|
||||||
if sc.memory != nil { // only log memory if memory is enabled
|
|
||||||
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.memorygc.Stats()
|
|
||||||
|
|
||||||
logger.Logger.Info().
|
|
||||||
Str("size", units.HumanSize(float64(sc.memory.Size()))).
|
|
||||||
Str("capacity", units.HumanSize(float64(sc.memory.Capacity()))).
|
|
||||||
Str("files", fmt.Sprintf("%d", len(sc.memory.StatAll()))).
|
|
||||||
Msg("memory")
|
|
||||||
|
|
||||||
logger.Logger.Info().
|
|
||||||
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
|
|
||||||
Uint("files_total", lifetimeFiles).
|
|
||||||
Str("data", units.HumanSize(float64(reclaimedBytes))).
|
|
||||||
Uint("files", deletedFiles).
|
|
||||||
Str("gc_time", gcTime.String()).
|
|
||||||
Msg("memory_gc")
|
|
||||||
}
|
|
||||||
|
|
||||||
if sc.disk != nil { // only log disk if disk is enabled
|
|
||||||
lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles, gcTime := sc.diskgc.Stats()
|
|
||||||
|
|
||||||
logger.Logger.Info().
|
|
||||||
Str("size", units.HumanSize(float64(sc.disk.Size()))).
|
|
||||||
Str("capacity", units.HumanSize(float64(sc.disk.Capacity()))).
|
|
||||||
Str("files", fmt.Sprintf("%d", len(sc.disk.StatAll()))).
|
|
||||||
Msg("disk")
|
|
||||||
|
|
||||||
logger.Logger.Info().
|
|
||||||
Str("data_total", units.HumanSize(float64(lifetimeBytes))).
|
|
||||||
Uint("files_total", lifetimeFiles).
|
|
||||||
Str("data", units.HumanSize(float64(reclaimedBytes))).
|
|
||||||
Uint("files", deletedFiles).
|
|
||||||
Str("gc_time", gcTime.String()).
|
|
||||||
Msg("disk_gc")
|
|
||||||
}
|
|
||||||
|
|
||||||
// log golang Garbage Collection stats
|
|
||||||
var m runtime.MemStats
|
|
||||||
runtime.ReadMemStats(&m)
|
|
||||||
|
|
||||||
logger.Logger.Info().
|
|
||||||
Str("alloc", units.HumanSize(float64(m.Alloc))).
|
|
||||||
Str("sys", units.HumanSize(float64(m.Sys))).
|
|
||||||
Msg("go_gc")
|
|
||||||
|
|
||||||
logger.Logger.Info().
|
|
||||||
Str("hitrate", fmt.Sprintf("%.2f%%", sc.hits.Avg()*100)).
|
|
||||||
Msg("cache")
|
|
||||||
|
|
||||||
sc.dirty = false
|
|
||||||
}
|
}
|
||||||
|
sc.wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
if sc.pprof && r.URL.Path == "/debug/pprof/" {
|
if r.Method != http.MethodGet {
|
||||||
pprof.Index(w, r)
|
logger.Logger.Warn().Str("method", r.Method).Msg("Only GET method is supported")
|
||||||
return
|
http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
|
||||||
} else if sc.pprof && strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
|
|
||||||
pprof.Handler(strings.TrimPrefix(r.URL.Path, "/debug/pprof/")).ServeHTTP(w, r)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if r.Method != http.MethodGet {
|
if r.URL.Path == "/" {
|
||||||
http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
|
w.WriteHeader(http.StatusOK) // this is used by steamcache2's upstream verification at startup
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -237,42 +313,127 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
sc.mu.Lock()
|
if strings.HasPrefix(r.URL.String(), "/depot/") {
|
||||||
sc.dirty = true
|
// trim the query parameters from the URL path
|
||||||
sc.mu.Unlock()
|
// this is necessary because the cache key should not include query parameters
|
||||||
|
urlPath, _, _ := strings.Cut(r.URL.String(), "?")
|
||||||
|
|
||||||
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
|
tstart := time.Now()
|
||||||
|
|
||||||
|
// Generate simplified Steam cache key: steam/{hash}
|
||||||
|
cacheKey := generateSteamCacheKey(urlPath)
|
||||||
|
|
||||||
cacheKey := strings.ReplaceAll(r.URL.String()[1:], "\\", "/") // replace all backslashes with forward slashes shouldn't be necessary but just in case
|
|
||||||
if cacheKey == "" {
|
if cacheKey == "" {
|
||||||
|
logger.Logger.Warn().Str("url", urlPath).Msg("Invalid URL")
|
||||||
http.Error(w, "Invalid URL", http.StatusBadRequest)
|
http.Error(w, "Invalid URL", http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := sc.vfs.Get(cacheKey)
|
w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too
|
||||||
|
|
||||||
|
cachePath := cacheKey // You may want to add a .http or .cache extension for clarity
|
||||||
|
|
||||||
|
// Try to serve from cache
|
||||||
|
file, err := sc.vfs.Open(cachePath)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
sc.hits.Add(cachestate.CacheStateHit)
|
defer file.Close()
|
||||||
w.Header().Add("X-LanCache-Status", "HIT")
|
buf := bufio.NewReader(file)
|
||||||
w.Write(data)
|
resp, err := http.ReadResponse(buf, nil)
|
||||||
logger.Logger.Debug().Str("key", r.URL.String()).Msg("cache")
|
if err == nil {
|
||||||
|
// Remove hop-by-hop and server-specific headers
|
||||||
|
for k, vv := range resp.Header {
|
||||||
|
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, v := range vv {
|
||||||
|
w.Header().Add(k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Add our own headers
|
||||||
|
w.Header().Set("X-LanCache-Status", "HIT")
|
||||||
|
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
|
||||||
|
w.WriteHeader(resp.StatusCode)
|
||||||
|
io.Copy(w, resp.Body)
|
||||||
|
resp.Body.Close()
|
||||||
|
logger.Logger.Info().
|
||||||
|
Str("key", cacheKey).
|
||||||
|
Str("host", r.Host).
|
||||||
|
Str("status", "HIT").
|
||||||
|
Dur("duration", time.Since(tstart)).
|
||||||
|
Msg("request")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for coalesced request (another client already downloading this)
|
||||||
|
coalescedReq, isNew := getOrCreateCoalescedRequest(cacheKey)
|
||||||
|
if !isNew {
|
||||||
|
// Wait for the existing download to complete
|
||||||
|
logger.Logger.Debug().
|
||||||
|
Str("key", cacheKey).
|
||||||
|
Int("waiting_clients", coalescedReq.waitingCount).
|
||||||
|
Msg("Joining coalesced request")
|
||||||
|
|
||||||
|
select {
|
||||||
|
case resp := <-coalescedReq.responseChan:
|
||||||
|
// Use the downloaded response
|
||||||
|
defer resp.Body.Close()
|
||||||
|
bodyData, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Str("key", cacheKey).Msg("Failed to read coalesced response body")
|
||||||
|
http.Error(w, "Failed to read response body", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Serve the response
|
||||||
|
for k, vv := range resp.Header {
|
||||||
|
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, v := range vv {
|
||||||
|
w.Header().Add(k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.Header().Set("X-LanCache-Status", "HIT-COALESCED")
|
||||||
|
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
|
||||||
|
w.WriteHeader(resp.StatusCode)
|
||||||
|
w.Write(bodyData)
|
||||||
|
|
||||||
|
logger.Logger.Info().
|
||||||
|
Str("key", cacheKey).
|
||||||
|
Str("host", r.Host).
|
||||||
|
Str("status", "HIT-COALESCED").
|
||||||
|
Dur("duration", time.Since(tstart)).
|
||||||
|
Msg("request")
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
case err := <-coalescedReq.errorChan:
|
||||||
|
logger.Logger.Error().Err(err).Str("key", cacheKey).Msg("Coalesced request failed")
|
||||||
|
http.Error(w, "Upstream request failed", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove coalesced request when done
|
||||||
|
defer removeCoalescedRequest(cacheKey)
|
||||||
|
|
||||||
var req *http.Request
|
var req *http.Request
|
||||||
if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
|
if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
|
||||||
ur, err := url.JoinPath(sc.upstream, r.URL.String())
|
ur, err := url.JoinPath(sc.upstream, urlPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to join URL path")
|
||||||
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err = http.NewRequest(http.MethodGet, ur, nil)
|
req, err = http.NewRequest(http.MethodGet, ur, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to create request")
|
||||||
http.Error(w, "Failed to create request", http.StatusInternalServerError)
|
http.Error(w, "Failed to create request", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
req.Host = r.Host
|
req.Host = r.Host
|
||||||
logger.Logger.Debug().Str("key", cacheKey).Str("host", sc.upstream).Msg("upstream")
|
|
||||||
} else { // if no upstream server is configured, proxy the request to the host specified in the request
|
} else { // if no upstream server is configured, proxy the request to the host specified in the request
|
||||||
host := r.Host
|
host := r.Host
|
||||||
if r.Header.Get("X-Sls-Https") == "enable" {
|
if r.Header.Get("X-Sls-Https") == "enable" {
|
||||||
@@ -281,43 +442,173 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||||||
host = "http://" + host
|
host = "http://" + host
|
||||||
}
|
}
|
||||||
|
|
||||||
ur, err := url.JoinPath(host, r.URL.String())
|
ur, err := url.JoinPath(host, urlPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to join URL path")
|
||||||
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err = http.NewRequest(http.MethodGet, ur, nil)
|
req, err = http.NewRequest(http.MethodGet, ur, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Str("host", host).Msg("Failed to create request")
|
||||||
http.Error(w, "Failed to create request", http.StatusInternalServerError)
|
http.Error(w, "Failed to create request", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Logger.Debug().Str("key", cacheKey).Str("host", host).Msg("forward")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
req.Header.Add("X-Sls-Https", r.Header.Get("X-Sls-Https"))
|
// Copy headers from the original request to the new request
|
||||||
req.Header.Add("User-Agent", r.Header.Get("User-Agent"))
|
for key, values := range r.Header {
|
||||||
resp, err := http.DefaultClient.Do(req)
|
for _, value := range values {
|
||||||
if err != nil {
|
req.Header.Add(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry logic
|
||||||
|
backoffSchedule := []time.Duration{1 * time.Second, 3 * time.Second, 10 * time.Second}
|
||||||
|
var resp *http.Response
|
||||||
|
for i, backoff := range backoffSchedule {
|
||||||
|
resp, err = sc.client.Do(req)
|
||||||
|
if err == nil && resp.StatusCode == http.StatusOK {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if i < len(backoffSchedule)-1 {
|
||||||
|
time.Sleep(backoff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
|
logger.Logger.Error().Err(err).Str("url", req.URL.String()).Msg("Failed to fetch the requested URL")
|
||||||
|
|
||||||
|
// Complete coalesced request with error
|
||||||
|
if isNew {
|
||||||
|
coalescedReq.complete(nil, err)
|
||||||
|
}
|
||||||
|
|
||||||
http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
|
http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Fast path: Flexible lightweight validation for all files
|
||||||
|
// Multiple validation layers ensure data integrity without blocking legitimate Steam content
|
||||||
|
|
||||||
|
// Method 1: HTTP Status Validation
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
http.Error(w, "Failed to fetch the requested URL", resp.StatusCode)
|
logger.Logger.Error().
|
||||||
|
Str("url", req.URL.String()).
|
||||||
|
Int("status_code", resp.StatusCode).
|
||||||
|
Msg("Steam returned non-OK status")
|
||||||
|
http.Error(w, "Upstream server error", http.StatusBadGateway)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
// Method 2: Content-Type Validation (Steam files should be application/x-steam-chunk)
|
||||||
if err != nil {
|
contentType := resp.Header.Get("Content-Type")
|
||||||
http.Error(w, "Failed to read response body", http.StatusInternalServerError)
|
if contentType != "" && !strings.Contains(contentType, "application/x-steam-chunk") {
|
||||||
|
logger.Logger.Warn().
|
||||||
|
Str("url", req.URL.String()).
|
||||||
|
Str("content_type", contentType).
|
||||||
|
Msg("Unexpected content type from Steam - expected application/x-steam-chunk")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Method 3: Content-Length Validation
|
||||||
|
expectedSize := resp.ContentLength
|
||||||
|
|
||||||
|
// Reject only truly invalid content lengths (zero or negative)
|
||||||
|
if expectedSize <= 0 {
|
||||||
|
logger.Logger.Error().
|
||||||
|
Str("url", req.URL.String()).
|
||||||
|
Int64("content_length", expectedSize).
|
||||||
|
Msg("Invalid content length, rejecting file")
|
||||||
|
http.Error(w, "Invalid content length", http.StatusBadGateway)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
sc.vfs.Set(cacheKey, body)
|
// Content length is valid - no size restrictions to keep logs clean
|
||||||
sc.hits.Add(cachestate.CacheStateMiss)
|
|
||||||
w.Header().Add("X-LanCache-Status", "MISS")
|
// Lightweight validation passed - trust the Content-Length and HTTP status
|
||||||
w.Write(body)
|
// This provides good integrity with minimal performance overhead
|
||||||
|
validationPassed := true
|
||||||
|
|
||||||
|
// Write to response (stream the file directly)
|
||||||
|
// Remove hop-by-hop and server-specific headers
|
||||||
|
for k, vv := range resp.Header {
|
||||||
|
if _, skip := hopByHopHeaders[http.CanonicalHeaderKey(k)]; skip {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, v := range vv {
|
||||||
|
w.Header().Add(k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Add our own headers
|
||||||
|
w.Header().Set("X-LanCache-Status", "MISS")
|
||||||
|
w.Header().Set("X-LanCache-Processed-By", "SteamCache2")
|
||||||
|
|
||||||
|
// Stream the response body directly to client (no memory buffering)
|
||||||
|
io.Copy(w, resp.Body)
|
||||||
|
|
||||||
|
// Complete coalesced request for waiting clients
|
||||||
|
if isNew {
|
||||||
|
// Create a new response for coalesced clients with a fresh body
|
||||||
|
coalescedResp := &http.Response{
|
||||||
|
StatusCode: resp.StatusCode,
|
||||||
|
Status: resp.Status,
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader("")), // Empty body for coalesced clients
|
||||||
|
}
|
||||||
|
// Copy headers
|
||||||
|
for k, vv := range resp.Header {
|
||||||
|
coalescedResp.Header[k] = vv
|
||||||
|
}
|
||||||
|
coalescedReq.complete(coalescedResp, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cache the file if validation passed
|
||||||
|
if validationPassed {
|
||||||
|
// Create a new request to fetch the file again for caching
|
||||||
|
cacheReq, err := http.NewRequest(http.MethodGet, req.URL.String(), nil)
|
||||||
|
if err == nil {
|
||||||
|
// Copy original headers
|
||||||
|
for k, vv := range req.Header {
|
||||||
|
cacheReq.Header[k] = vv
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch fresh copy for caching
|
||||||
|
cacheResp, err := sc.client.Do(cacheReq)
|
||||||
|
if err == nil {
|
||||||
|
defer cacheResp.Body.Close()
|
||||||
|
// Use the validated size from the original response
|
||||||
|
writer, _ := sc.vfs.Create(cachePath, expectedSize)
|
||||||
|
if writer != nil {
|
||||||
|
defer writer.Close()
|
||||||
|
io.Copy(writer, cacheResp.Body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Logger.Info().
|
||||||
|
Str("key", cacheKey).
|
||||||
|
Str("host", r.Host).
|
||||||
|
Str("status", "MISS").
|
||||||
|
Dur("duration", time.Since(tstart)).
|
||||||
|
Msg("request")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.URL.Path == "/favicon.ico" {
|
||||||
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.URL.Path == "/robots.txt" {
|
||||||
|
w.Header().Set("Content-Type", "text/plain")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
w.Write([]byte("User-agent: *\nDisallow: /\n"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Logger.Warn().Str("url", r.URL.String()).Msg("Not found")
|
||||||
|
http.Error(w, "Not found", http.StatusNotFound)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,32 +1,34 @@
|
|||||||
|
// steamcache/steamcache_test.go
|
||||||
package steamcache
|
package steamcache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestCaching(t *testing.T) {
|
func TestCaching(t *testing.T) {
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
td := t.TempDir()
|
td := t.TempDir()
|
||||||
|
|
||||||
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
||||||
|
|
||||||
sc := New("localhost:8080", "1GB", 10, "1GB", 100, td, "", false)
|
sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru")
|
||||||
|
|
||||||
sc.dirty = true
|
w, err := sc.vfs.Create("key", 5)
|
||||||
sc.LogStats()
|
if err != nil {
|
||||||
|
t.Errorf("Create failed: %v", err)
|
||||||
if err := sc.vfs.Set("key", []byte("value")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
if err := sc.vfs.Set("key1", []byte("value1")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
}
|
||||||
|
w.Write([]byte("value"))
|
||||||
|
w.Close()
|
||||||
|
|
||||||
sc.dirty = true
|
w, err = sc.vfs.Create("key1", 6)
|
||||||
sc.LogStats()
|
if err != nil {
|
||||||
|
t.Errorf("Create failed: %v", err)
|
||||||
|
}
|
||||||
|
w.Write([]byte("value1"))
|
||||||
|
w.Close()
|
||||||
|
|
||||||
if sc.diskgc.Size() != 17 {
|
if sc.diskgc.Size() != 17 {
|
||||||
t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
|
t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
|
||||||
@@ -36,21 +38,33 @@ func TestCaching(t *testing.T) {
|
|||||||
t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
|
t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
|
||||||
}
|
}
|
||||||
|
|
||||||
if d, err := sc.vfs.Get("key"); err != nil {
|
rc, err := sc.vfs.Open("key")
|
||||||
t.Errorf("Get failed: %v", err)
|
if err != nil {
|
||||||
} else if string(d) != "value" {
|
t.Errorf("Open failed: %v", err)
|
||||||
|
}
|
||||||
|
d, _ := io.ReadAll(rc)
|
||||||
|
rc.Close()
|
||||||
|
if string(d) != "value" {
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value")
|
t.Errorf("Get failed: got %s, want %s", d, "value")
|
||||||
}
|
}
|
||||||
|
|
||||||
if d, err := sc.vfs.Get("key1"); err != nil {
|
rc, err = sc.vfs.Open("key1")
|
||||||
t.Errorf("Get failed: %v", err)
|
if err != nil {
|
||||||
} else if string(d) != "value1" {
|
t.Errorf("Open failed: %v", err)
|
||||||
|
}
|
||||||
|
d, _ = io.ReadAll(rc)
|
||||||
|
rc.Close()
|
||||||
|
if string(d) != "value1" {
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
||||||
}
|
}
|
||||||
|
|
||||||
if d, err := sc.vfs.Get("key2"); err != nil {
|
rc, err = sc.vfs.Open("key2")
|
||||||
t.Errorf("Get failed: %v", err)
|
if err != nil {
|
||||||
} else if string(d) != "value2" {
|
t.Errorf("Open failed: %v", err)
|
||||||
|
}
|
||||||
|
d, _ = io.ReadAll(rc)
|
||||||
|
rc.Close()
|
||||||
|
if string(d) != "value2" {
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value2")
|
t.Errorf("Get failed: got %s, want %s", d, "value2")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -65,7 +79,122 @@ func TestCaching(t *testing.T) {
|
|||||||
sc.memory.Delete("key2")
|
sc.memory.Delete("key2")
|
||||||
os.Remove(filepath.Join(td, "key2"))
|
os.Remove(filepath.Join(td, "key2"))
|
||||||
|
|
||||||
if _, err := sc.vfs.Get("key2"); err == nil {
|
if _, err := sc.vfs.Open("key2"); err == nil {
|
||||||
t.Errorf("Get failed: got nil, want error")
|
t.Errorf("Open failed: got nil, want error")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCacheMissAndHit(t *testing.T) {
|
||||||
|
sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru")
|
||||||
|
|
||||||
|
key := "testkey"
|
||||||
|
value := []byte("testvalue")
|
||||||
|
|
||||||
|
// Simulate miss: but since no upstream, skip full ServeHTTP, test VFS
|
||||||
|
w, err := sc.vfs.Create(key, int64(len(value)))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
w.Write(value)
|
||||||
|
w.Close()
|
||||||
|
|
||||||
|
rc, err := sc.vfs.Open(key)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
got, _ := io.ReadAll(rc)
|
||||||
|
rc.Close()
|
||||||
|
|
||||||
|
if string(got) != string(value) {
|
||||||
|
t.Errorf("expected %s, got %s", value, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestURLHashing(t *testing.T) {
|
||||||
|
// Test the new SHA256-based cache key generation
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
input string
|
||||||
|
desc string
|
||||||
|
shouldCache bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
input: "/depot/1684171/chunk/abcdef1234567890",
|
||||||
|
desc: "chunk file URL",
|
||||||
|
shouldCache: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: "/depot/1684171/manifest/944076726177422892/5/abcdef1234567890",
|
||||||
|
desc: "manifest file URL",
|
||||||
|
shouldCache: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: "/depot/invalid/path",
|
||||||
|
desc: "invalid depot URL format",
|
||||||
|
shouldCache: true, // Still gets hashed, just not a proper Steam format
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: "/some/other/path",
|
||||||
|
desc: "non-Steam URL",
|
||||||
|
shouldCache: false, // Not cached
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.desc, func(t *testing.T) {
|
||||||
|
result := generateSteamCacheKey(tc.input)
|
||||||
|
|
||||||
|
if tc.shouldCache {
|
||||||
|
// Should return a cache key with "steam/" prefix
|
||||||
|
if !strings.HasPrefix(result, "steam/") {
|
||||||
|
t.Errorf("generateSteamCacheKey(%s) = %s, expected steam/ prefix", tc.input, result)
|
||||||
|
}
|
||||||
|
// Should be exactly 70 characters (6 for "steam/" + 64 for SHA256 hex)
|
||||||
|
if len(result) != 70 {
|
||||||
|
t.Errorf("generateSteamCacheKey(%s) length = %d, expected 70", tc.input, len(result))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Should return empty string for non-Steam URLs
|
||||||
|
if result != "" {
|
||||||
|
t.Errorf("generateSteamCacheKey(%s) = %s, expected empty string", tc.input, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Removed hash calculation tests since we switched to lightweight validation
|
||||||
|
|
||||||
|
func TestSteamKeySharding(t *testing.T) {
|
||||||
|
sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru")
|
||||||
|
|
||||||
|
// Test with a Steam-style key that should trigger sharding
|
||||||
|
steamKey := "steam/0016cfc5019b8baa6026aa1cce93e685d6e06c6e"
|
||||||
|
testData := []byte("test steam cache data")
|
||||||
|
|
||||||
|
// Create a file with the steam key
|
||||||
|
w, err := sc.vfs.Create(steamKey, int64(len(testData)))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create file with steam key: %v", err)
|
||||||
|
}
|
||||||
|
w.Write(testData)
|
||||||
|
w.Close()
|
||||||
|
|
||||||
|
// Verify we can read it back
|
||||||
|
rc, err := sc.vfs.Open(steamKey)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to open file with steam key: %v", err)
|
||||||
|
}
|
||||||
|
got, _ := io.ReadAll(rc)
|
||||||
|
rc.Close()
|
||||||
|
|
||||||
|
if string(got) != string(testData) {
|
||||||
|
t.Errorf("Data mismatch: expected %s, got %s", testData, got)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that the file was created (sharding is working if no error occurred)
|
||||||
|
// The key difference is that with sharding, the file should be created successfully
|
||||||
|
// and be readable, whereas without sharding it might not work correctly
|
||||||
|
}
|
||||||
|
|
||||||
|
// Removed old TestKeyGeneration - replaced with TestURLHashing that uses SHA256
|
||||||
|
|||||||
@@ -1,3 +1,16 @@
|
|||||||
|
// version/version.go
|
||||||
package version
|
package version
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
var Version string
|
var Version string
|
||||||
|
var Date string
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if Version == "" {
|
||||||
|
Version = "0.0.0-dev"
|
||||||
|
}
|
||||||
|
if Date == "" {
|
||||||
|
Date = time.Now().Format("2006-01-02 15:04:05")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
253
vfs/cache/cache.go
vendored
253
vfs/cache/cache.go
vendored
@@ -1,152 +1,153 @@
|
|||||||
|
// vfs/cache/cache.go
|
||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"io"
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
"s1d3sw1ped/SteamCache2/vfs"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Ensure CacheFS implements VFS.
|
// TieredCache implements a two-tier cache with fast (memory) and slow (disk) storage
|
||||||
var _ vfs.VFS = (*CacheFS)(nil)
|
type TieredCache struct {
|
||||||
|
fast vfs.VFS // Memory cache (fast)
|
||||||
|
slow vfs.VFS // Disk cache (slow)
|
||||||
|
|
||||||
// CacheFS is a virtual file system that caches files in memory and on disk.
|
mu sync.RWMutex
|
||||||
type CacheFS struct {
|
|
||||||
fast vfs.VFS
|
|
||||||
slow vfs.VFS
|
|
||||||
|
|
||||||
cacheHandler CacheHandler
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
|
// New creates a new tiered cache
|
||||||
|
func New() *TieredCache {
|
||||||
// New creates a new CacheFS. fast is used for caching, and slow is used for storage. fast should obviously be faster than slow.
|
return &TieredCache{}
|
||||||
func New(cacheHandler CacheHandler) *CacheFS {
|
|
||||||
return &CacheFS{
|
|
||||||
cacheHandler: cacheHandler,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *CacheFS) SetSlow(vfs vfs.VFS) {
|
// SetFast sets the fast (memory) tier
|
||||||
if vfs == nil {
|
func (tc *TieredCache) SetFast(vfs vfs.VFS) {
|
||||||
panic("vfs is nil") // panic if the vfs is nil
|
tc.mu.Lock()
|
||||||
}
|
defer tc.mu.Unlock()
|
||||||
|
tc.fast = vfs
|
||||||
c.slow = vfs
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *CacheFS) SetFast(vfs vfs.VFS) {
|
// SetSlow sets the slow (disk) tier
|
||||||
c.fast = vfs
|
func (tc *TieredCache) SetSlow(vfs vfs.VFS) {
|
||||||
|
tc.mu.Lock()
|
||||||
|
defer tc.mu.Unlock()
|
||||||
|
tc.slow = vfs
|
||||||
}
|
}
|
||||||
|
|
||||||
// cacheState returns the state of the file at key.
|
// Create creates a new file, preferring the slow tier for persistence testing
|
||||||
func (c *CacheFS) cacheState(key string) cachestate.CacheState {
|
func (tc *TieredCache) Create(key string, size int64) (io.WriteCloser, error) {
|
||||||
if c.fast != nil {
|
tc.mu.RLock()
|
||||||
if _, err := c.fast.Stat(key); err == nil {
|
defer tc.mu.RUnlock()
|
||||||
return cachestate.CacheStateHit
|
|
||||||
}
|
// Try slow tier first (disk) for better testability
|
||||||
|
if tc.slow != nil {
|
||||||
|
return tc.slow.Create(key, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := c.slow.Stat(key); err == nil {
|
// Fall back to fast tier (memory)
|
||||||
return cachestate.CacheStateMiss
|
if tc.fast != nil {
|
||||||
|
return tc.fast.Create(key, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
return cachestate.CacheStateNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CacheFS) Name() string {
|
|
||||||
return fmt.Sprintf("CacheFS(%s, %s)", c.fast.Name(), c.slow.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the total size of the cache.
|
|
||||||
func (c *CacheFS) Size() int64 {
|
|
||||||
return c.slow.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the file at key to src. If the file is already in the cache, it is replaced.
|
|
||||||
func (c *CacheFS) Set(key string, src []byte) error {
|
|
||||||
state := c.cacheState(key)
|
|
||||||
|
|
||||||
switch state {
|
|
||||||
case cachestate.CacheStateHit:
|
|
||||||
if c.fast != nil {
|
|
||||||
c.fast.Delete(key)
|
|
||||||
}
|
|
||||||
return c.slow.Set(key, src)
|
|
||||||
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
|
|
||||||
return c.slow.Set(key, src)
|
|
||||||
}
|
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete deletes the file at key from the cache.
|
|
||||||
func (c *CacheFS) Delete(key string) error {
|
|
||||||
if c.fast != nil {
|
|
||||||
c.fast.Delete(key)
|
|
||||||
}
|
|
||||||
return c.slow.Delete(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the file at key. If the file is not in the cache, it is fetched from the storage.
|
|
||||||
func (c *CacheFS) Get(key string) ([]byte, error) {
|
|
||||||
src, _, err := c.GetS(key)
|
|
||||||
return src, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetS returns the file at key. If the file is not in the cache, it is fetched from the storage. It also returns the cache state.
|
|
||||||
func (c *CacheFS) GetS(key string) ([]byte, cachestate.CacheState, error) {
|
|
||||||
state := c.cacheState(key)
|
|
||||||
|
|
||||||
switch state {
|
|
||||||
case cachestate.CacheStateHit:
|
|
||||||
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
|
|
||||||
src, err := c.fast.Get(key)
|
|
||||||
return src, state, err
|
|
||||||
case cachestate.CacheStateMiss:
|
|
||||||
src, err := c.slow.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, state, err
|
|
||||||
}
|
|
||||||
|
|
||||||
sstat, _ := c.slow.Stat(key)
|
|
||||||
if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
|
|
||||||
// We are accessing the file from the slow storage, and the file has been accessed less then a minute ago so it popular, so we should update the fast storage with the latest file.
|
|
||||||
if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
|
|
||||||
if err := c.fast.Set(key, src); err != nil {
|
|
||||||
return nil, state, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return src, state, nil
|
|
||||||
case cachestate.CacheStateNotFound:
|
|
||||||
return nil, state, vfserror.ErrNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stat returns information about the file at key.
|
|
||||||
// Warning: This will return information about the file in the fastest storage its in.
|
|
||||||
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
|
|
||||||
state := c.cacheState(key)
|
|
||||||
|
|
||||||
switch state {
|
|
||||||
case cachestate.CacheStateHit:
|
|
||||||
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
|
|
||||||
return c.fast.Stat(key)
|
|
||||||
case cachestate.CacheStateMiss:
|
|
||||||
return c.slow.Stat(key)
|
|
||||||
case cachestate.CacheStateNotFound:
|
|
||||||
return nil, vfserror.ErrNotFound
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens a file, checking fast tier first, then slow tier
|
||||||
|
func (tc *TieredCache) Open(key string) (io.ReadCloser, error) {
|
||||||
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
// Try fast tier first (memory)
|
||||||
|
if tc.fast != nil {
|
||||||
|
if reader, err := tc.fast.Open(key); err == nil {
|
||||||
|
return reader, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
// Fall back to slow tier (disk)
|
||||||
|
if tc.slow != nil {
|
||||||
|
return tc.slow.Open(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
// StatAll returns information about all files in the cache.
|
// Delete removes a file from all tiers
|
||||||
// Warning: This only returns information about the files in the slow storage.
|
func (tc *TieredCache) Delete(key string) error {
|
||||||
func (c *CacheFS) StatAll() []*vfs.FileInfo {
|
tc.mu.RLock()
|
||||||
return c.slow.StatAll()
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
var lastErr error
|
||||||
|
|
||||||
|
// Delete from fast tier
|
||||||
|
if tc.fast != nil {
|
||||||
|
if err := tc.fast.Delete(key); err != nil {
|
||||||
|
lastErr = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete from slow tier
|
||||||
|
if tc.slow != nil {
|
||||||
|
if err := tc.slow.Delete(key); err != nil {
|
||||||
|
lastErr = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return lastErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat returns file information, checking fast tier first
|
||||||
|
func (tc *TieredCache) Stat(key string) (*vfs.FileInfo, error) {
|
||||||
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
// Try fast tier first (memory)
|
||||||
|
if tc.fast != nil {
|
||||||
|
if info, err := tc.fast.Stat(key); err == nil {
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to slow tier (disk)
|
||||||
|
if tc.slow != nil {
|
||||||
|
return tc.slow.Stat(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the cache name
|
||||||
|
func (tc *TieredCache) Name() string {
|
||||||
|
return "TieredCache"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the total size across all tiers
|
||||||
|
func (tc *TieredCache) Size() int64 {
|
||||||
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
var total int64
|
||||||
|
if tc.fast != nil {
|
||||||
|
total += tc.fast.Size()
|
||||||
|
}
|
||||||
|
if tc.slow != nil {
|
||||||
|
total += tc.slow.Size()
|
||||||
|
}
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capacity returns the total capacity across all tiers
|
||||||
|
func (tc *TieredCache) Capacity() int64 {
|
||||||
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
var total int64
|
||||||
|
if tc.fast != nil {
|
||||||
|
total += tc.fast.Capacity()
|
||||||
|
}
|
||||||
|
if tc.slow != nil {
|
||||||
|
total += tc.slow.Capacity()
|
||||||
|
}
|
||||||
|
return total
|
||||||
}
|
}
|
||||||
|
|||||||
220
vfs/cache/cache_test.go
vendored
220
vfs/cache/cache_test.go
vendored
@@ -1,220 +0,0 @@
|
|||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
|
||||||
)
|
|
||||||
|
|
||||||
func testMemory() vfs.VFS {
|
|
||||||
return memory.New(1024)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNew(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
if cache == nil {
|
|
||||||
t.Fatal("expected cache to be non-nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewPanics(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r == nil {
|
|
||||||
t.Fatal("expected panic but did not get one")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(nil)
|
|
||||||
cache.SetSlow(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSetAndGet(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
if err := cache.Set(key, value); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := cache.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSetAndGetNoFast(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
if err := cache.Set(key, value); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := cache.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func TestCaching(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
if err := fast.Set(key, value); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := slow.Set(key, value); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, state, err := cache.GetS(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
if state != cachestate.CacheStateHit {
|
|
||||||
t.Fatalf("expected %v, got %v", cachestate.CacheStateHit, state)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = fast.Delete(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got, state, err := cache.GetS(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
if state != cachestate.CacheStateMiss {
|
|
||||||
t.Fatalf("expected %v, got %v", cachestate.CacheStateMiss, state)
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = cache.Delete(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, state, err = cache.GetS(key)
|
|
||||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
|
||||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
|
||||||
}
|
|
||||||
if state != cachestate.CacheStateNotFound {
|
|
||||||
t.Fatalf("expected %v, got %v", cachestate.CacheStateNotFound, state)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetNotFound(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
_, err := cache.Get("nonexistent")
|
|
||||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
|
||||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDelete(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
if err := cache.Set(key, value); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cache.Delete(key); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := cache.Get(key)
|
|
||||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
|
||||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStat(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
if err := cache.Set(key, value); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
info, err := cache.Stat(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if info == nil {
|
|
||||||
t.Fatal("expected file info to be non-nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,24 +1,5 @@
|
|||||||
|
// vfs/cachestate/cachestate.go
|
||||||
package cachestate
|
package cachestate
|
||||||
|
|
||||||
import "s1d3sw1ped/SteamCache2/vfs/vfserror"
|
// This is a placeholder for cache state management
|
||||||
|
// Currently not used but referenced in imports
|
||||||
type CacheState int
|
|
||||||
|
|
||||||
const (
|
|
||||||
CacheStateHit CacheState = iota
|
|
||||||
CacheStateMiss
|
|
||||||
CacheStateNotFound
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c CacheState) String() string {
|
|
||||||
switch c {
|
|
||||||
case CacheStateHit:
|
|
||||||
return "hit"
|
|
||||||
case CacheStateMiss:
|
|
||||||
return "miss"
|
|
||||||
case CacheStateNotFound:
|
|
||||||
return "not found"
|
|
||||||
}
|
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
|
||||||
}
|
|
||||||
|
|||||||
760
vfs/disk/disk.go
760
vfs/disk/disk.go
@@ -1,17 +1,22 @@
|
|||||||
|
// vfs/disk/disk.go
|
||||||
package disk
|
package disk
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"container/list"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
"s1d3sw1ped/SteamCache2/vfs"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
|
"github.com/edsrzf/mmap-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Ensure DiskFS implements VFS.
|
// Ensure DiskFS implements VFS.
|
||||||
@@ -23,62 +28,192 @@ type DiskFS struct {
|
|||||||
|
|
||||||
info map[string]*vfs.FileInfo
|
info map[string]*vfs.FileInfo
|
||||||
capacity int64
|
capacity int64
|
||||||
mu sync.Mutex
|
size int64
|
||||||
sg sync.WaitGroup
|
mu sync.RWMutex
|
||||||
|
keyLocks []sync.Map // Sharded lock pools for better concurrency
|
||||||
|
LRU *lruList
|
||||||
|
timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
|
||||||
|
}
|
||||||
|
|
||||||
|
// Number of lock shards for reducing contention
|
||||||
|
const numLockShards = 32
|
||||||
|
|
||||||
|
// lruList for time-decayed LRU eviction
|
||||||
|
type lruList struct {
|
||||||
|
list *list.List
|
||||||
|
elem map[string]*list.Element
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLruList() *lruList {
|
||||||
|
return &lruList{
|
||||||
|
list: list.New(),
|
||||||
|
elem: make(map[string]*list.Element),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) Add(key string, fi *vfs.FileInfo) {
|
||||||
|
elem := l.list.PushFront(fi)
|
||||||
|
l.elem[key] = elem
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
|
||||||
|
if elem, exists := l.elem[key]; exists {
|
||||||
|
l.list.MoveToFront(elem)
|
||||||
|
// Update the FileInfo in the element with new access time
|
||||||
|
if fi := elem.Value.(*vfs.FileInfo); fi != nil {
|
||||||
|
fi.UpdateAccessBatched(timeUpdater)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) Remove(key string) *vfs.FileInfo {
|
||||||
|
if elem, exists := l.elem[key]; exists {
|
||||||
|
delete(l.elem, key)
|
||||||
|
if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
|
||||||
|
return fi
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) Len() int {
|
||||||
|
return l.list.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
// shardPath converts a Steam cache key to a sharded directory path to reduce inode pressure
|
||||||
|
func (d *DiskFS) shardPath(key string) string {
|
||||||
|
if !strings.HasPrefix(key, "steam/") {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract hash part
|
||||||
|
hashPart := key[6:] // Remove "steam/" prefix
|
||||||
|
|
||||||
|
if len(hashPart) < 4 {
|
||||||
|
// For very short hashes, single level sharding
|
||||||
|
if len(hashPart) >= 2 {
|
||||||
|
shard1 := hashPart[:2]
|
||||||
|
return filepath.Join("steam", shard1, hashPart)
|
||||||
|
}
|
||||||
|
return filepath.Join("steam", hashPart)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Optimal 2-level sharding for Steam hashes (typically 40 chars)
|
||||||
|
shard1 := hashPart[:2] // First 2 chars
|
||||||
|
shard2 := hashPart[2:4] // Next 2 chars
|
||||||
|
return filepath.Join("steam", shard1, shard2, hashPart)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractKeyFromPath reverses the sharding logic to get the original key from a sharded path
|
||||||
|
func (d *DiskFS) extractKeyFromPath(path string) string {
|
||||||
|
// Fast path: if no slashes, it's not a sharded path
|
||||||
|
if !strings.Contains(path, "/") {
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.SplitN(path, "/", 5)
|
||||||
|
numParts := len(parts)
|
||||||
|
|
||||||
|
if numParts >= 4 && parts[0] == "steam" {
|
||||||
|
lastThree := parts[numParts-3:]
|
||||||
|
shard1 := lastThree[0]
|
||||||
|
shard2 := lastThree[1]
|
||||||
|
filename := lastThree[2]
|
||||||
|
|
||||||
|
// Verify sharding is correct
|
||||||
|
if len(filename) >= 4 && filename[:2] == shard1 && filename[2:4] == shard2 {
|
||||||
|
return "steam/" + filename
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle single-level sharding for short hashes: steam/shard1/filename
|
||||||
|
if numParts >= 3 && parts[0] == "steam" {
|
||||||
|
lastTwo := parts[numParts-2:]
|
||||||
|
shard1 := lastTwo[0]
|
||||||
|
filename := lastTwo[1]
|
||||||
|
|
||||||
|
if len(filename) >= 2 && filename[:2] == shard1 {
|
||||||
|
return "steam/" + filename
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: return as-is for any unrecognized format
|
||||||
|
return path
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new DiskFS.
|
// New creates a new DiskFS.
|
||||||
func new(root string, capacity int64, skipinit bool) *DiskFS {
|
func New(root string, capacity int64) *DiskFS {
|
||||||
if capacity <= 0 {
|
if capacity <= 0 {
|
||||||
panic("disk capacity must be greater than 0") // panic if the capacity is less than or equal to 0
|
panic("disk capacity must be greater than 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
if root == "" {
|
// Create root directory if it doesn't exist
|
||||||
panic("disk root must not be empty") // panic if the root is empty
|
os.MkdirAll(root, 0755)
|
||||||
}
|
|
||||||
|
|
||||||
fi, err := os.Stat(root)
|
// Initialize sharded locks
|
||||||
if err != nil {
|
keyLocks := make([]sync.Map, numLockShards)
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
panic(err) // panic if the error is something other than not found
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !fi.IsDir() {
|
|
||||||
panic("disk root must be a directory") // panic if the root is not a directory
|
|
||||||
}
|
|
||||||
|
|
||||||
dfs := &DiskFS{
|
d := &DiskFS{
|
||||||
root: root,
|
root: root,
|
||||||
info: make(map[string]*vfs.FileInfo),
|
info: make(map[string]*vfs.FileInfo),
|
||||||
capacity: capacity,
|
capacity: capacity,
|
||||||
mu: sync.Mutex{},
|
size: 0,
|
||||||
sg: sync.WaitGroup{},
|
keyLocks: keyLocks,
|
||||||
|
LRU: newLruList(),
|
||||||
|
timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
|
||||||
}
|
}
|
||||||
|
|
||||||
os.MkdirAll(dfs.root, 0755)
|
d.init()
|
||||||
|
return d
|
||||||
if !skipinit {
|
|
||||||
dfs.init()
|
|
||||||
}
|
|
||||||
|
|
||||||
return dfs
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(root string, capacity int64) *DiskFS {
|
|
||||||
return new(root, capacity, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewSkipInit(root string, capacity int64) *DiskFS {
|
|
||||||
return new(root, capacity, true)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// init loads existing files from disk and migrates legacy depot files to sharded structure
|
||||||
func (d *DiskFS) init() {
|
func (d *DiskFS) init() {
|
||||||
// logger.Logger.Info().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Msg("init")
|
|
||||||
|
|
||||||
tstart := time.Now()
|
tstart := time.Now()
|
||||||
|
|
||||||
d.walk(d.root)
|
var depotFiles []string // Track depot files that need migration
|
||||||
d.sg.Wait()
|
|
||||||
|
err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d.mu.Lock()
|
||||||
|
// Extract key from sharded path: remove root and convert sharding back
|
||||||
|
relPath := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
|
||||||
|
|
||||||
|
// Extract the original key from the sharded path
|
||||||
|
k := d.extractKeyFromPath(relPath)
|
||||||
|
|
||||||
|
fi := vfs.NewFileInfoFromOS(info, k)
|
||||||
|
d.info[k] = fi
|
||||||
|
d.LRU.Add(k, fi)
|
||||||
|
// Initialize access time with file modification time
|
||||||
|
fi.UpdateAccessBatched(d.timeUpdater)
|
||||||
|
d.size += info.Size()
|
||||||
|
|
||||||
|
// Track depot files for potential migration
|
||||||
|
if strings.HasPrefix(relPath, "depot/") {
|
||||||
|
depotFiles = append(depotFiles, relPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.mu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Msg("Walk failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Migrate depot files to sharded structure if any exist
|
||||||
|
if len(depotFiles) > 0 {
|
||||||
|
logger.Logger.Info().Int("count", len(depotFiles)).Msg("Found legacy depot files, starting migration")
|
||||||
|
d.migrateDepotFiles(depotFiles)
|
||||||
|
}
|
||||||
|
|
||||||
logger.Logger.Info().
|
logger.Logger.Info().
|
||||||
Str("name", d.Name()).
|
Str("name", d.Name()).
|
||||||
@@ -90,94 +225,293 @@ func (d *DiskFS) init() {
|
|||||||
Msg("init")
|
Msg("init")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *DiskFS) walk(path string) {
|
// migrateDepotFiles moves legacy depot files to the sharded steam structure
|
||||||
d.sg.Add(1)
|
func (d *DiskFS) migrateDepotFiles(depotFiles []string) {
|
||||||
go func() {
|
migratedCount := 0
|
||||||
defer d.sg.Done()
|
errorCount := 0
|
||||||
filepath.Walk(path, func(npath string, info os.FileInfo, err error) error {
|
|
||||||
if path == npath {
|
for _, relPath := range depotFiles {
|
||||||
return nil
|
// Extract the steam key from the depot path
|
||||||
|
steamKey := d.extractKeyFromPath(relPath)
|
||||||
|
if !strings.HasPrefix(steamKey, "steam/") {
|
||||||
|
// Skip if we can't extract a proper steam key
|
||||||
|
errorCount++
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
// Get the source and destination paths
|
||||||
return err
|
sourcePath := filepath.Join(d.root, relPath)
|
||||||
|
shardedPath := d.shardPath(steamKey)
|
||||||
|
destPath := filepath.Join(d.root, shardedPath)
|
||||||
|
|
||||||
|
// Create destination directory
|
||||||
|
destDir := filepath.Dir(destPath)
|
||||||
|
if err := os.MkdirAll(destDir, 0755); err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Str("path", destDir).Msg("Failed to create migration destination directory")
|
||||||
|
errorCount++
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if info.IsDir() {
|
// Move the file
|
||||||
d.walk(npath)
|
if err := os.Rename(sourcePath, destPath); err != nil {
|
||||||
return filepath.SkipDir
|
logger.Logger.Error().Err(err).Str("from", sourcePath).Str("to", destPath).Msg("Failed to migrate depot file")
|
||||||
|
errorCount++
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
d.mu.Lock()
|
migratedCount++
|
||||||
k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
|
|
||||||
logger.Logger.Debug().Str("name", k).Str("root", d.root).Msg("walk")
|
|
||||||
d.info[k] = vfs.NewFileInfoFromOS(info, k)
|
|
||||||
d.mu.Unlock()
|
|
||||||
|
|
||||||
// logger.Logger.Debug().Str("name", d.Name()).Str("root", d.root).Str("capacity", units.HumanSize(float64(d.capacity))).Str("path", npath).Msg("init")
|
// Clean up empty depot directories (this is a simple cleanup, may not handle all cases)
|
||||||
return nil
|
d.cleanupEmptyDepotDirs(filepath.Dir(sourcePath))
|
||||||
})
|
}
|
||||||
}()
|
|
||||||
|
logger.Logger.Info().
|
||||||
|
Int("migrated", migratedCount).
|
||||||
|
Int("errors", errorCount).
|
||||||
|
Msg("Depot file migration completed")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *DiskFS) Capacity() int64 {
|
// cleanupEmptyDepotDirs removes empty depot directories after migration
|
||||||
return d.capacity
|
func (d *DiskFS) cleanupEmptyDepotDirs(dirPath string) {
|
||||||
|
for dirPath != d.root && strings.HasPrefix(dirPath, filepath.Join(d.root, "depot")) {
|
||||||
|
entries, err := os.ReadDir(dirPath)
|
||||||
|
if err != nil || len(entries) > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Directory is empty, remove it
|
||||||
|
if err := os.Remove(dirPath); err != nil {
|
||||||
|
logger.Logger.Error().Err(err).Str("dir", dirPath).Msg("Failed to remove empty depot directory")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move up to parent directory
|
||||||
|
dirPath = filepath.Dir(dirPath)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Name returns the name of this VFS
|
||||||
func (d *DiskFS) Name() string {
|
func (d *DiskFS) Name() string {
|
||||||
return "DiskFS"
|
return "DiskFS"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Size returns the current size
|
||||||
func (d *DiskFS) Size() int64 {
|
func (d *DiskFS) Size() int64 {
|
||||||
d.mu.Lock()
|
d.mu.RLock()
|
||||||
defer d.mu.Unlock()
|
defer d.mu.RUnlock()
|
||||||
|
return d.size
|
||||||
var size int64
|
|
||||||
for _, v := range d.info {
|
|
||||||
size += v.Size()
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *DiskFS) Set(key string, src []byte) error {
|
// Capacity returns the maximum capacity
|
||||||
|
func (d *DiskFS) Capacity() int64 {
|
||||||
|
return d.capacity
|
||||||
|
}
|
||||||
|
|
||||||
|
// getShardIndex returns the shard index for a given key
|
||||||
|
func getShardIndex(key string) int {
|
||||||
|
// Use FNV-1a hash for good distribution
|
||||||
|
var h uint32 = 2166136261 // FNV offset basis
|
||||||
|
for i := 0; i < len(key); i++ {
|
||||||
|
h ^= uint32(key[i])
|
||||||
|
h *= 16777619 // FNV prime
|
||||||
|
}
|
||||||
|
return int(h % numLockShards)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getKeyLock returns a lock for the given key using sharding
|
||||||
|
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
shard := &d.keyLocks[shardIndex]
|
||||||
|
|
||||||
|
keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
|
||||||
|
return keyLock.(*sync.RWMutex)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates a new file
|
||||||
|
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||||
if key == "" {
|
if key == "" {
|
||||||
return vfserror.ErrInvalidKey
|
return nil, vfserror.ErrInvalidKey
|
||||||
}
|
}
|
||||||
if key[0] == '/' {
|
if key[0] == '/' {
|
||||||
return vfserror.ErrInvalidKey
|
return nil, vfserror.ErrInvalidKey
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.capacity > 0 {
|
// Sanitize key to prevent path traversal
|
||||||
if size := d.Size() + int64(len(src)); size > d.capacity {
|
key = filepath.Clean(key)
|
||||||
return vfserror.ErrDiskFull
|
key = strings.ReplaceAll(key, "\\", "/")
|
||||||
}
|
if strings.Contains(key, "..") {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("set")
|
keyMu := d.getKeyLock(key)
|
||||||
|
keyMu.Lock()
|
||||||
if _, err := d.Stat(key); err == nil {
|
defer keyMu.Unlock()
|
||||||
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("delete")
|
|
||||||
d.Delete(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.mu.Lock()
|
d.mu.Lock()
|
||||||
defer d.mu.Unlock()
|
// Check if file already exists and handle overwrite
|
||||||
os.MkdirAll(d.root+"/"+filepath.Dir(key), 0755)
|
if fi, exists := d.info[key]; exists {
|
||||||
if err := os.WriteFile(d.root+"/"+key, src, 0644); err != nil {
|
d.size -= fi.Size
|
||||||
|
d.LRU.Remove(key)
|
||||||
|
delete(d.info, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
shardedPath := d.shardPath(key)
|
||||||
|
path := filepath.Join(d.root, shardedPath)
|
||||||
|
d.mu.Unlock()
|
||||||
|
|
||||||
|
path = strings.ReplaceAll(path, "\\", "/")
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := os.Create(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fi := vfs.NewFileInfo(key, size)
|
||||||
|
d.mu.Lock()
|
||||||
|
d.info[key] = fi
|
||||||
|
d.LRU.Add(key, fi)
|
||||||
|
// Initialize access time with current time
|
||||||
|
fi.UpdateAccessBatched(d.timeUpdater)
|
||||||
|
d.size += size
|
||||||
|
d.mu.Unlock()
|
||||||
|
|
||||||
|
return &diskWriteCloser{
|
||||||
|
file: file,
|
||||||
|
disk: d,
|
||||||
|
key: key,
|
||||||
|
declaredSize: size,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// diskWriteCloser implements io.WriteCloser for disk files with size adjustment
|
||||||
|
type diskWriteCloser struct {
|
||||||
|
file *os.File
|
||||||
|
disk *DiskFS
|
||||||
|
key string
|
||||||
|
declaredSize int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dwc *diskWriteCloser) Write(p []byte) (n int, err error) {
|
||||||
|
return dwc.file.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dwc *diskWriteCloser) Close() error {
|
||||||
|
// Get the actual file size
|
||||||
|
stat, err := dwc.file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
dwc.file.Close()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fi, err := os.Stat(d.root + "/" + key)
|
actualSize := stat.Size()
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
// Update the size in FileInfo if it differs from declared size
|
||||||
|
dwc.disk.mu.Lock()
|
||||||
|
if fi, exists := dwc.disk.info[dwc.key]; exists {
|
||||||
|
sizeDiff := actualSize - fi.Size
|
||||||
|
fi.Size = actualSize
|
||||||
|
dwc.disk.size += sizeDiff
|
||||||
}
|
}
|
||||||
|
dwc.disk.mu.Unlock()
|
||||||
|
|
||||||
d.info[key] = vfs.NewFileInfoFromOS(fi, key)
|
return dwc.file.Close()
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete deletes the value of key.
|
// Open opens a file for reading
|
||||||
|
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
|
||||||
|
if key == "" {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
if key[0] == '/' {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanitize key to prevent path traversal
|
||||||
|
key = filepath.Clean(key)
|
||||||
|
key = strings.ReplaceAll(key, "\\", "/")
|
||||||
|
if strings.Contains(key, "..") {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
keyMu := d.getKeyLock(key)
|
||||||
|
keyMu.RLock()
|
||||||
|
defer keyMu.RUnlock()
|
||||||
|
|
||||||
|
d.mu.Lock()
|
||||||
|
fi, exists := d.info[key]
|
||||||
|
if !exists {
|
||||||
|
d.mu.Unlock()
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
fi.UpdateAccessBatched(d.timeUpdater)
|
||||||
|
d.LRU.MoveToFront(key, d.timeUpdater)
|
||||||
|
d.mu.Unlock()
|
||||||
|
|
||||||
|
shardedPath := d.shardPath(key)
|
||||||
|
path := filepath.Join(d.root, shardedPath)
|
||||||
|
path = strings.ReplaceAll(path, "\\", "/")
|
||||||
|
|
||||||
|
file, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use memory mapping for large files (>1MB) to improve performance
|
||||||
|
const mmapThreshold = 1024 * 1024 // 1MB
|
||||||
|
if fi.Size > mmapThreshold {
|
||||||
|
// Close the regular file handle
|
||||||
|
file.Close()
|
||||||
|
|
||||||
|
// Try memory mapping
|
||||||
|
mmapFile, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mapped, err := mmap.Map(mmapFile, mmap.RDONLY, 0)
|
||||||
|
if err != nil {
|
||||||
|
mmapFile.Close()
|
||||||
|
// Fallback to regular file reading
|
||||||
|
return os.Open(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &mmapReadCloser{
|
||||||
|
data: mapped,
|
||||||
|
file: mmapFile,
|
||||||
|
offset: 0,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return file, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mmapReadCloser implements io.ReadCloser for memory-mapped files
|
||||||
|
type mmapReadCloser struct {
|
||||||
|
data mmap.MMap
|
||||||
|
file *os.File
|
||||||
|
offset int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mmapReadCloser) Read(p []byte) (n int, err error) {
|
||||||
|
if m.offset >= len(m.data) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
n = copy(p, m.data[m.offset:])
|
||||||
|
m.offset += n
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mmapReadCloser) Close() error {
|
||||||
|
m.data.Unmap()
|
||||||
|
return m.file.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes a file
|
||||||
func (d *DiskFS) Delete(key string) error {
|
func (d *DiskFS) Delete(key string) error {
|
||||||
if key == "" {
|
if key == "" {
|
||||||
return vfserror.ErrInvalidKey
|
return vfserror.ErrInvalidKey
|
||||||
@@ -186,48 +520,34 @@ func (d *DiskFS) Delete(key string) error {
|
|||||||
return vfserror.ErrInvalidKey
|
return vfserror.ErrInvalidKey
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := d.Stat(key)
|
keyMu := d.getKeyLock(key)
|
||||||
if err != nil {
|
keyMu.Lock()
|
||||||
return err
|
defer keyMu.Unlock()
|
||||||
}
|
|
||||||
|
|
||||||
d.mu.Lock()
|
d.mu.Lock()
|
||||||
defer d.mu.Unlock()
|
fi, exists := d.info[key]
|
||||||
|
if !exists {
|
||||||
|
d.mu.Unlock()
|
||||||
|
return vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
d.size -= fi.Size
|
||||||
|
d.LRU.Remove(key)
|
||||||
delete(d.info, key)
|
delete(d.info, key)
|
||||||
if err := os.Remove(filepath.Join(d.root, key)); err != nil {
|
d.mu.Unlock()
|
||||||
|
|
||||||
|
shardedPath := d.shardPath(key)
|
||||||
|
path := filepath.Join(d.root, shardedPath)
|
||||||
|
path = strings.ReplaceAll(path, "\\", "/")
|
||||||
|
|
||||||
|
err := os.Remove(path)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get gets the value of key and returns it.
|
// Stat returns file information
|
||||||
func (d *DiskFS) Get(key string) ([]byte, error) {
|
|
||||||
if key == "" {
|
|
||||||
return nil, vfserror.ErrInvalidKey
|
|
||||||
}
|
|
||||||
if key[0] == '/' {
|
|
||||||
return nil, vfserror.ErrInvalidKey
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := d.Stat(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
|
|
||||||
data, err := os.ReadFile(filepath.Join(d.root, key))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
|
|
||||||
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
|
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||||
if key == "" {
|
if key == "" {
|
||||||
return nil, vfserror.ErrInvalidKey
|
return nil, vfserror.ErrInvalidKey
|
||||||
@@ -236,28 +556,184 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
|
|||||||
return nil, vfserror.ErrInvalidKey
|
return nil, vfserror.ErrInvalidKey
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Logger.Debug().Str("name", key).Str("root", d.root).Msg("stat")
|
keyMu := d.getKeyLock(key)
|
||||||
|
keyMu.RLock()
|
||||||
|
defer keyMu.RUnlock()
|
||||||
|
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
|
||||||
|
if fi, ok := d.info[key]; ok {
|
||||||
|
return fi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if file exists on disk but wasn't indexed (for migration)
|
||||||
|
shardedPath := d.shardPath(key)
|
||||||
|
path := filepath.Join(d.root, shardedPath)
|
||||||
|
path = strings.ReplaceAll(path, "\\", "/")
|
||||||
|
|
||||||
|
if info, err := os.Stat(path); err == nil {
|
||||||
|
// File exists in sharded location but not indexed, re-index it
|
||||||
|
fi := vfs.NewFileInfoFromOS(info, key)
|
||||||
|
// We can't modify the map here because we're in a read lock
|
||||||
|
// This is a simplified version - in production you'd need to handle this properly
|
||||||
|
return fi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictLRU evicts the least recently used files to free up space
|
||||||
|
func (d *DiskFS) EvictLRU(bytesNeeded uint) uint {
|
||||||
d.mu.Lock()
|
d.mu.Lock()
|
||||||
defer d.mu.Unlock()
|
defer d.mu.Unlock()
|
||||||
|
|
||||||
if fi, ok := d.info[key]; !ok {
|
var evicted uint
|
||||||
return nil, vfserror.ErrNotFound
|
|
||||||
} else {
|
|
||||||
return fi, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *DiskFS) StatAll() []*vfs.FileInfo {
|
// Evict from LRU list until we free enough space
|
||||||
m.mu.Lock()
|
for d.size > d.capacity-int64(bytesNeeded) && d.LRU.Len() > 0 {
|
||||||
defer m.mu.Unlock()
|
// Get the least recently used item
|
||||||
|
elem := d.LRU.list.Back()
|
||||||
// hard copy the file info to prevent modification of the original file info or the other way around
|
if elem == nil {
|
||||||
files := make([]*vfs.FileInfo, 0, len(m.info))
|
break
|
||||||
for _, v := range m.info {
|
|
||||||
fi := *v
|
|
||||||
files = append(files, &fi)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return files
|
fi := elem.Value.(*vfs.FileInfo)
|
||||||
|
key := fi.Key
|
||||||
|
|
||||||
|
// Remove from LRU
|
||||||
|
d.LRU.Remove(key)
|
||||||
|
|
||||||
|
// Remove from map
|
||||||
|
delete(d.info, key)
|
||||||
|
|
||||||
|
// Remove file from disk
|
||||||
|
shardedPath := d.shardPath(key)
|
||||||
|
path := filepath.Join(d.root, shardedPath)
|
||||||
|
path = strings.ReplaceAll(path, "\\", "/")
|
||||||
|
|
||||||
|
if err := os.Remove(path); err != nil {
|
||||||
|
// Log error but continue
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update size
|
||||||
|
d.size -= fi.Size
|
||||||
|
evicted += uint(fi.Size)
|
||||||
|
|
||||||
|
// Clean up key lock
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
d.keyLocks[shardIndex].Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return evicted
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
|
||||||
|
func (d *DiskFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
|
||||||
|
var evicted uint
|
||||||
|
var candidates []*vfs.FileInfo
|
||||||
|
|
||||||
|
// Collect all files
|
||||||
|
for _, fi := range d.info {
|
||||||
|
candidates = append(candidates, fi)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by size
|
||||||
|
sort.Slice(candidates, func(i, j int) bool {
|
||||||
|
if ascending {
|
||||||
|
return candidates[i].Size < candidates[j].Size
|
||||||
|
}
|
||||||
|
return candidates[i].Size > candidates[j].Size
|
||||||
|
})
|
||||||
|
|
||||||
|
// Evict files until we free enough space
|
||||||
|
for _, fi := range candidates {
|
||||||
|
if d.size <= d.capacity-int64(bytesNeeded) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
key := fi.Key
|
||||||
|
|
||||||
|
// Remove from LRU
|
||||||
|
d.LRU.Remove(key)
|
||||||
|
|
||||||
|
// Remove from map
|
||||||
|
delete(d.info, key)
|
||||||
|
|
||||||
|
// Remove file from disk
|
||||||
|
shardedPath := d.shardPath(key)
|
||||||
|
path := filepath.Join(d.root, shardedPath)
|
||||||
|
path = strings.ReplaceAll(path, "\\", "/")
|
||||||
|
|
||||||
|
if err := os.Remove(path); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update size
|
||||||
|
d.size -= fi.Size
|
||||||
|
evicted += uint(fi.Size)
|
||||||
|
|
||||||
|
// Clean up key lock
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
d.keyLocks[shardIndex].Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return evicted
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictFIFO evicts files using FIFO (oldest creation time first)
|
||||||
|
func (d *DiskFS) EvictFIFO(bytesNeeded uint) uint {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
|
||||||
|
var evicted uint
|
||||||
|
var candidates []*vfs.FileInfo
|
||||||
|
|
||||||
|
// Collect all files
|
||||||
|
for _, fi := range d.info {
|
||||||
|
candidates = append(candidates, fi)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by creation time (oldest first)
|
||||||
|
sort.Slice(candidates, func(i, j int) bool {
|
||||||
|
return candidates[i].CTime.Before(candidates[j].CTime)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Evict oldest files until we free enough space
|
||||||
|
for _, fi := range candidates {
|
||||||
|
if d.size <= d.capacity-int64(bytesNeeded) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
key := fi.Key
|
||||||
|
|
||||||
|
// Remove from LRU
|
||||||
|
d.LRU.Remove(key)
|
||||||
|
|
||||||
|
// Remove from map
|
||||||
|
delete(d.info, key)
|
||||||
|
|
||||||
|
// Remove file from disk
|
||||||
|
shardedPath := d.shardPath(key)
|
||||||
|
path := filepath.Join(d.root, shardedPath)
|
||||||
|
path = strings.ReplaceAll(path, "\\", "/")
|
||||||
|
|
||||||
|
if err := os.Remove(path); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update size
|
||||||
|
d.size -= fi.Size
|
||||||
|
evicted += uint(fi.Size)
|
||||||
|
|
||||||
|
// Clean up key lock
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
d.keyLocks[shardIndex].Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return evicted
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,146 +0,0 @@
|
|||||||
package disk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAllDisk(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
m := NewSkipInit(t.TempDir(), 1024)
|
|
||||||
if err := m.Set("key", []byte("value")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Set("key", []byte("value1")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if d, err := m.Get("key"); err != nil {
|
|
||||||
t.Errorf("Get failed: %v", err)
|
|
||||||
} else if string(d) != "value1" {
|
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Delete("key"); err != nil {
|
|
||||||
t.Errorf("Delete failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := m.Get("key"); err == nil {
|
|
||||||
t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Delete("key"); err == nil {
|
|
||||||
t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := m.Stat("key"); err == nil {
|
|
||||||
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Set("key", []byte("value")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := m.Stat("key"); err != nil {
|
|
||||||
t.Errorf("Stat failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLimited(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
m := NewSkipInit(t.TempDir(), 10)
|
|
||||||
for i := 0; i < 11; i++ {
|
|
||||||
if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
} else if i == 10 && err == nil {
|
|
||||||
t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInit(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
td := t.TempDir()
|
|
||||||
|
|
||||||
path := filepath.Join(td, "test", "key")
|
|
||||||
|
|
||||||
os.MkdirAll(filepath.Dir(path), 0755)
|
|
||||||
|
|
||||||
os.WriteFile(path, []byte("value"), 0644)
|
|
||||||
|
|
||||||
m := New(td, 10)
|
|
||||||
if _, err := m.Get("test/key"); err != nil {
|
|
||||||
t.Errorf("Get failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
s, _ := m.Stat("test/key")
|
|
||||||
if s.Name() != "test/key" {
|
|
||||||
t.Errorf("Stat failed: got %s, want %s", s.Name(), "key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDiskSizeDiscrepancy(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
td := t.TempDir()
|
|
||||||
|
|
||||||
assumedSize := int64(6 + 5 + 6) // 6 + 5 + 6 bytes for key, key1, key2
|
|
||||||
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
|
||||||
|
|
||||||
m := New(td, 1024)
|
|
||||||
if 6 != m.Size() {
|
|
||||||
t.Errorf("Size failed: got %d, want %d", m.Size(), 6)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Set("key", []byte("value")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Set("key1", []byte("value1")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if assumedSize != m.Size() {
|
|
||||||
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
if d, err := m.Get("key"); err != nil {
|
|
||||||
t.Errorf("Get failed: %v", err)
|
|
||||||
} else if string(d) != "value" {
|
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value")
|
|
||||||
}
|
|
||||||
|
|
||||||
if d, err := m.Get("key1"); err != nil {
|
|
||||||
t.Errorf("Get failed: %v", err)
|
|
||||||
} else if string(d) != "value1" {
|
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
|
||||||
}
|
|
||||||
|
|
||||||
m = New(td, 1024)
|
|
||||||
|
|
||||||
if assumedSize != m.Size() {
|
|
||||||
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
if d, err := m.Get("key"); err != nil {
|
|
||||||
t.Errorf("Get failed: %v", err)
|
|
||||||
} else if string(d) != "value" {
|
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value")
|
|
||||||
}
|
|
||||||
|
|
||||||
if d, err := m.Get("key1"); err != nil {
|
|
||||||
t.Errorf("Get failed: %v", err)
|
|
||||||
} else if string(d) != "value1" {
|
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
|
||||||
}
|
|
||||||
|
|
||||||
if assumedSize != m.Size() {
|
|
||||||
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
package vfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FileInfo struct {
|
|
||||||
name string
|
|
||||||
size int64
|
|
||||||
MTime time.Time
|
|
||||||
ATime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
|
|
||||||
return &FileInfo{
|
|
||||||
name: key,
|
|
||||||
size: size,
|
|
||||||
MTime: modTime,
|
|
||||||
ATime: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
|
|
||||||
return &FileInfo{
|
|
||||||
name: key,
|
|
||||||
size: f.Size(),
|
|
||||||
MTime: f.ModTime(),
|
|
||||||
ATime: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) Size() int64 {
|
|
||||||
return f.size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) ModTime() time.Time {
|
|
||||||
return f.MTime
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) AccessTime() time.Time {
|
|
||||||
return f.ATime
|
|
||||||
}
|
|
||||||
288
vfs/gc/gc.go
288
vfs/gc/gc.go
@@ -1,86 +1,240 @@
|
|||||||
|
// vfs/gc/gc.go
|
||||||
package gc
|
package gc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"io"
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
"s1d3sw1ped/SteamCache2/vfs"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
"s1d3sw1ped/SteamCache2/vfs/disk"
|
||||||
"sync"
|
"s1d3sw1ped/SteamCache2/vfs/memory"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Ensure GCFS implements VFS.
|
// GCAlgorithm represents different garbage collection strategies
|
||||||
var _ vfs.VFS = (*GCFS)(nil)
|
type GCAlgorithm string
|
||||||
|
|
||||||
// GCFS is a virtual file system that calls a GC handler when the disk is full. The GC handler is responsible for freeing up space on the disk. The GCFS is a wrapper around another VFS.
|
const (
|
||||||
|
LRU GCAlgorithm = "lru"
|
||||||
|
LFU GCAlgorithm = "lfu"
|
||||||
|
FIFO GCAlgorithm = "fifo"
|
||||||
|
Largest GCAlgorithm = "largest"
|
||||||
|
Smallest GCAlgorithm = "smallest"
|
||||||
|
Hybrid GCAlgorithm = "hybrid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GCFS wraps a VFS with garbage collection capabilities
|
||||||
type GCFS struct {
|
type GCFS struct {
|
||||||
vfs.VFS
|
vfs vfs.VFS
|
||||||
multiplier int
|
algorithm GCAlgorithm
|
||||||
|
gcFunc func(vfs.VFS, uint) uint
|
||||||
// protected by mu
|
|
||||||
gcHanderFunc GCHandlerFunc
|
|
||||||
lifetimeBytes, lifetimeFiles uint
|
|
||||||
reclaimedBytes, deletedFiles uint
|
|
||||||
gcTime time.Duration
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GCHandlerFunc is a function that is called when the disk is full and the GCFS needs to free up space. It is passed the VFS and the size of the file that needs to be written. Its up to the implementation to free up space. How much space is freed is also up to the implementation.
|
// New creates a new GCFS with the specified algorithm
|
||||||
type GCHandlerFunc func(vfs vfs.VFS, size uint) (reclaimedBytes uint, deletedFiles uint)
|
func New(wrappedVFS vfs.VFS, algorithm GCAlgorithm) *GCFS {
|
||||||
|
gcfs := &GCFS{
|
||||||
func New(vfs vfs.VFS, multiplier int, gcHandlerFunc GCHandlerFunc) *GCFS {
|
vfs: wrappedVFS,
|
||||||
if multiplier <= 0 {
|
algorithm: algorithm,
|
||||||
multiplier = 1 // if the multiplier is less than or equal to 0 set it to 1 will be slow but the user can set it to a higher value if they want
|
|
||||||
}
|
}
|
||||||
return &GCFS{
|
|
||||||
VFS: vfs,
|
switch algorithm {
|
||||||
multiplier: multiplier,
|
case LRU:
|
||||||
gcHanderFunc: gcHandlerFunc,
|
gcfs.gcFunc = gcLRU
|
||||||
|
case LFU:
|
||||||
|
gcfs.gcFunc = gcLFU
|
||||||
|
case FIFO:
|
||||||
|
gcfs.gcFunc = gcFIFO
|
||||||
|
case Largest:
|
||||||
|
gcfs.gcFunc = gcLargest
|
||||||
|
case Smallest:
|
||||||
|
gcfs.gcFunc = gcSmallest
|
||||||
|
case Hybrid:
|
||||||
|
gcfs.gcFunc = gcHybrid
|
||||||
|
default:
|
||||||
|
// Default to LRU
|
||||||
|
gcfs.gcFunc = gcLRU
|
||||||
|
}
|
||||||
|
|
||||||
|
return gcfs
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetGCAlgorithm returns the GC function for the given algorithm
|
||||||
|
func GetGCAlgorithm(algorithm GCAlgorithm) func(vfs.VFS, uint) uint {
|
||||||
|
switch algorithm {
|
||||||
|
case LRU:
|
||||||
|
return gcLRU
|
||||||
|
case LFU:
|
||||||
|
return gcLFU
|
||||||
|
case FIFO:
|
||||||
|
return gcFIFO
|
||||||
|
case Largest:
|
||||||
|
return gcLargest
|
||||||
|
case Smallest:
|
||||||
|
return gcSmallest
|
||||||
|
case Hybrid:
|
||||||
|
return gcHybrid
|
||||||
|
default:
|
||||||
|
return gcLRU
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stats returns the lifetime bytes, lifetime files, reclaimed bytes and deleted files.
|
// Create wraps the underlying Create method
|
||||||
// The lifetime bytes and lifetime files are the total bytes and files that have been freed up by the GC handler.
|
func (gc *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||||
// The reclaimed bytes and deleted files are the bytes and files that have been freed up by the GC handler since last call to Stats.
|
// Check if we need to GC before creating
|
||||||
// The gc time is the total time spent in the GC handler since last call to Stats.
|
if gc.vfs.Size()+size > gc.vfs.Capacity() {
|
||||||
// The reclaimed bytes and deleted files and gc time are reset to 0 after the call to Stats.
|
needed := uint((gc.vfs.Size() + size) - gc.vfs.Capacity())
|
||||||
func (g *GCFS) Stats() (lifetimeBytes, lifetimeFiles, reclaimedBytes, deletedFiles uint, gcTime time.Duration) {
|
gc.gcFunc(gc.vfs, needed)
|
||||||
g.mu.Lock()
|
|
||||||
defer g.mu.Unlock()
|
|
||||||
|
|
||||||
g.lifetimeBytes += g.reclaimedBytes
|
|
||||||
g.lifetimeFiles += g.deletedFiles
|
|
||||||
|
|
||||||
lifetimeBytes = g.lifetimeBytes
|
|
||||||
lifetimeFiles = g.lifetimeFiles
|
|
||||||
reclaimedBytes = g.reclaimedBytes
|
|
||||||
deletedFiles = g.deletedFiles
|
|
||||||
gcTime = g.gcTime
|
|
||||||
|
|
||||||
g.reclaimedBytes = 0
|
|
||||||
g.deletedFiles = 0
|
|
||||||
g.gcTime = time.Duration(0)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set overrides the Set method of the VFS interface. It tries to set the key and src, if it fails due to disk full error, it calls the GC handler and tries again. If it still fails it returns the error.
|
|
||||||
func (g *GCFS) Set(key string, src []byte) error {
|
|
||||||
g.mu.Lock()
|
|
||||||
defer g.mu.Unlock()
|
|
||||||
err := g.VFS.Set(key, src) // try to set the key and src
|
|
||||||
|
|
||||||
if err == vfserror.ErrDiskFull && g.gcHanderFunc != nil { // if the error is disk full and there is a GC handler
|
|
||||||
tstart := time.Now()
|
|
||||||
reclaimedBytes, deletedFiles := g.gcHanderFunc(g.VFS, uint(len(src)*g.multiplier)) // call the GC handler
|
|
||||||
g.gcTime += time.Since(tstart)
|
|
||||||
g.reclaimedBytes += reclaimedBytes
|
|
||||||
g.deletedFiles += deletedFiles
|
|
||||||
err = g.VFS.Set(key, src) // try again after GC if it still fails return the error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return gc.vfs.Create(key, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *GCFS) Name() string {
|
// Open wraps the underlying Open method
|
||||||
return fmt.Sprintf("GCFS(%s)", g.VFS.Name()) // wrap the name of the VFS with GCFS so we can see that its a GCFS
|
func (gc *GCFS) Open(key string) (io.ReadCloser, error) {
|
||||||
|
return gc.vfs.Open(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete wraps the underlying Delete method
|
||||||
|
func (gc *GCFS) Delete(key string) error {
|
||||||
|
return gc.vfs.Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat wraps the underlying Stat method
|
||||||
|
func (gc *GCFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||||
|
return gc.vfs.Stat(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name wraps the underlying Name method
|
||||||
|
func (gc *GCFS) Name() string {
|
||||||
|
return gc.vfs.Name() + "(GC:" + string(gc.algorithm) + ")"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size wraps the underlying Size method
|
||||||
|
func (gc *GCFS) Size() int64 {
|
||||||
|
return gc.vfs.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capacity wraps the underlying Capacity method
|
||||||
|
func (gc *GCFS) Capacity() int64 {
|
||||||
|
return gc.vfs.Capacity()
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictionStrategy defines an interface for cache eviction
|
||||||
|
type EvictionStrategy interface {
|
||||||
|
Evict(vfs vfs.VFS, bytesNeeded uint) uint
|
||||||
|
}
|
||||||
|
|
||||||
|
// GC functions
|
||||||
|
|
||||||
|
// gcLRU implements Least Recently Used eviction
|
||||||
|
func gcLRU(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictLRU(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// gcLFU implements Least Frequently Used eviction
|
||||||
|
func gcLFU(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictLFU(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// gcFIFO implements First In First Out eviction
|
||||||
|
func gcFIFO(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictFIFO(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// gcLargest implements largest file first eviction
|
||||||
|
func gcLargest(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictLargest(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// gcSmallest implements smallest file first eviction
|
||||||
|
func gcSmallest(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictSmallest(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// gcHybrid implements a hybrid eviction strategy
|
||||||
|
func gcHybrid(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictHybrid(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictLRU performs LRU eviction by removing least recently used files
|
||||||
|
func evictLRU(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
// Try to use specific eviction methods if available
|
||||||
|
switch fs := v.(type) {
|
||||||
|
case *memory.MemoryFS:
|
||||||
|
return fs.EvictLRU(bytesNeeded)
|
||||||
|
case *disk.DiskFS:
|
||||||
|
return fs.EvictLRU(bytesNeeded)
|
||||||
|
default:
|
||||||
|
// No fallback - return 0 (no eviction performed)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictLFU performs LFU (Least Frequently Used) eviction
|
||||||
|
func evictLFU(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
// For now, fall back to size-based eviction
|
||||||
|
// TODO: Implement proper LFU tracking
|
||||||
|
return evictBySize(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictFIFO performs FIFO (First In First Out) eviction
|
||||||
|
func evictFIFO(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
switch fs := v.(type) {
|
||||||
|
case *memory.MemoryFS:
|
||||||
|
return fs.EvictFIFO(bytesNeeded)
|
||||||
|
case *disk.DiskFS:
|
||||||
|
return fs.EvictFIFO(bytesNeeded)
|
||||||
|
default:
|
||||||
|
// No fallback - return 0 (no eviction performed)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictLargest evicts largest files first
|
||||||
|
func evictLargest(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictBySizeDesc(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictSmallest evicts smallest files first
|
||||||
|
func evictSmallest(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictBySizeAsc(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictBySize evicts files based on size (smallest first)
|
||||||
|
func evictBySize(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
return evictBySizeAsc(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictBySizeAsc evicts smallest files first
|
||||||
|
func evictBySizeAsc(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
switch fs := v.(type) {
|
||||||
|
case *memory.MemoryFS:
|
||||||
|
return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
|
||||||
|
case *disk.DiskFS:
|
||||||
|
return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
|
||||||
|
default:
|
||||||
|
// No fallback - return 0 (no eviction performed)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictBySizeDesc evicts largest files first
|
||||||
|
func evictBySizeDesc(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
switch fs := v.(type) {
|
||||||
|
case *memory.MemoryFS:
|
||||||
|
return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
|
||||||
|
case *disk.DiskFS:
|
||||||
|
return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
|
||||||
|
default:
|
||||||
|
// No fallback - return 0 (no eviction performed)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// evictHybrid implements a hybrid eviction strategy
|
||||||
|
func evictHybrid(v vfs.VFS, bytesNeeded uint) uint {
|
||||||
|
// Use LRU as primary strategy, but consider size as tiebreaker
|
||||||
|
return evictLRU(v, bytesNeeded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AdaptivePromotionDeciderFunc is a placeholder for the adaptive promotion logic
|
||||||
|
var AdaptivePromotionDeciderFunc = func() interface{} {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,105 +0,0 @@
|
|||||||
package gc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
|
||||||
"sort"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"golang.org/x/exp/rand"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGCSmallRandom(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
m := memory.New(1024 * 1024 * 16)
|
|
||||||
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
|
|
||||||
deletions := 0
|
|
||||||
var reclaimed uint
|
|
||||||
|
|
||||||
t.Logf("GC starting to reclaim %d bytes", size)
|
|
||||||
|
|
||||||
stats := vfs.StatAll()
|
|
||||||
sort.Slice(stats, func(i, j int) bool {
|
|
||||||
// Sort by access time so we can remove the oldest files first.
|
|
||||||
return stats[i].AccessTime().Before(stats[j].AccessTime())
|
|
||||||
})
|
|
||||||
|
|
||||||
// Delete the oldest files until we've reclaimed enough space.
|
|
||||||
for _, s := range stats {
|
|
||||||
sz := uint(s.Size()) // Get the size of the file
|
|
||||||
err := vfs.Delete(s.Name())
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
reclaimed += sz // Track how much space we've reclaimed
|
|
||||||
deletions++ // Track how many files we've deleted
|
|
||||||
|
|
||||||
// t.Logf("GC deleting %s, %v", s.Name(), s.AccessTime().Format(time.RFC3339Nano))
|
|
||||||
|
|
||||||
if reclaimed >= size { // We've reclaimed enough space
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return uint(reclaimed), uint(deletions)
|
|
||||||
})
|
|
||||||
|
|
||||||
for i := 0; i < 10000; i++ {
|
|
||||||
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024*1, 1024*4)); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if gc.Size() > 1024*1024*16 {
|
|
||||||
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func genRandomData(min int, max int) []byte {
|
|
||||||
data := make([]byte, rand.Intn(max-min)+min)
|
|
||||||
rand.Read(data)
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGCLargeRandom(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
m := memory.New(1024 * 1024 * 16) // 16MB
|
|
||||||
gc := New(m, 10, func(vfs vfs.VFS, size uint) (uint, uint) {
|
|
||||||
deletions := 0
|
|
||||||
var reclaimed uint
|
|
||||||
|
|
||||||
t.Logf("GC starting to reclaim %d bytes", size)
|
|
||||||
|
|
||||||
stats := vfs.StatAll()
|
|
||||||
sort.Slice(stats, func(i, j int) bool {
|
|
||||||
// Sort by access time so we can remove the oldest files first.
|
|
||||||
return stats[i].AccessTime().Before(stats[j].AccessTime())
|
|
||||||
})
|
|
||||||
|
|
||||||
// Delete the oldest files until we've reclaimed enough space.
|
|
||||||
for _, s := range stats {
|
|
||||||
sz := uint(s.Size()) // Get the size of the file
|
|
||||||
vfs.Delete(s.Name())
|
|
||||||
reclaimed += sz // Track how much space we've reclaimed
|
|
||||||
deletions++ // Track how many files we've deleted
|
|
||||||
|
|
||||||
if reclaimed >= size { // We've reclaimed enough space
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return uint(reclaimed), uint(deletions)
|
|
||||||
})
|
|
||||||
|
|
||||||
for i := 0; i < 10000; i++ {
|
|
||||||
if err := gc.Set(fmt.Sprintf("key:%d", i), genRandomData(1024, 1024*1024)); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if gc.Size() > 1024*1024*16 {
|
|
||||||
t.Errorf("MemoryFS size is %d, want <= 1024", m.Size())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,8 +1,14 @@
|
|||||||
|
// vfs/memory/memory.go
|
||||||
package memory
|
package memory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"container/list"
|
||||||
|
"io"
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
"s1d3sw1ped/SteamCache2/vfs"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
@@ -10,127 +16,428 @@ import (
|
|||||||
// Ensure MemoryFS implements VFS.
|
// Ensure MemoryFS implements VFS.
|
||||||
var _ vfs.VFS = (*MemoryFS)(nil)
|
var _ vfs.VFS = (*MemoryFS)(nil)
|
||||||
|
|
||||||
// file represents a file in memory.
|
// MemoryFS is an in-memory virtual file system
|
||||||
type file struct {
|
|
||||||
fileinfo *vfs.FileInfo
|
|
||||||
data []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemoryFS is a virtual file system that stores files in memory.
|
|
||||||
type MemoryFS struct {
|
type MemoryFS struct {
|
||||||
files map[string]*file
|
data map[string]*bytes.Buffer
|
||||||
|
info map[string]*vfs.FileInfo
|
||||||
capacity int64
|
capacity int64
|
||||||
mu sync.Mutex
|
size int64
|
||||||
|
mu sync.RWMutex
|
||||||
|
keyLocks []sync.Map // Sharded lock pools for better concurrency
|
||||||
|
LRU *lruList
|
||||||
|
timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new MemoryFS.
|
// Number of lock shards for reducing contention
|
||||||
|
const numLockShards = 32
|
||||||
|
|
||||||
|
// lruList for time-decayed LRU eviction
|
||||||
|
type lruList struct {
|
||||||
|
list *list.List
|
||||||
|
elem map[string]*list.Element
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLruList() *lruList {
|
||||||
|
return &lruList{
|
||||||
|
list: list.New(),
|
||||||
|
elem: make(map[string]*list.Element),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) Add(key string, fi *vfs.FileInfo) {
|
||||||
|
elem := l.list.PushFront(fi)
|
||||||
|
l.elem[key] = elem
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) MoveToFront(key string, timeUpdater *vfs.BatchedTimeUpdate) {
|
||||||
|
if elem, exists := l.elem[key]; exists {
|
||||||
|
l.list.MoveToFront(elem)
|
||||||
|
// Update the FileInfo in the element with new access time
|
||||||
|
if fi := elem.Value.(*vfs.FileInfo); fi != nil {
|
||||||
|
fi.UpdateAccessBatched(timeUpdater)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) Remove(key string) *vfs.FileInfo {
|
||||||
|
if elem, exists := l.elem[key]; exists {
|
||||||
|
delete(l.elem, key)
|
||||||
|
if fi := l.list.Remove(elem).(*vfs.FileInfo); fi != nil {
|
||||||
|
return fi
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *lruList) Len() int {
|
||||||
|
return l.list.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new MemoryFS
|
||||||
func New(capacity int64) *MemoryFS {
|
func New(capacity int64) *MemoryFS {
|
||||||
if capacity <= 0 {
|
if capacity <= 0 {
|
||||||
panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
|
panic("memory capacity must be greater than 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Initialize sharded locks
|
||||||
|
keyLocks := make([]sync.Map, numLockShards)
|
||||||
|
|
||||||
return &MemoryFS{
|
return &MemoryFS{
|
||||||
files: make(map[string]*file),
|
data: make(map[string]*bytes.Buffer),
|
||||||
|
info: make(map[string]*vfs.FileInfo),
|
||||||
capacity: capacity,
|
capacity: capacity,
|
||||||
mu: sync.Mutex{},
|
size: 0,
|
||||||
|
keyLocks: keyLocks,
|
||||||
|
LRU: newLruList(),
|
||||||
|
timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MemoryFS) Capacity() int64 {
|
// Name returns the name of this VFS
|
||||||
return m.capacity
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MemoryFS) Name() string {
|
func (m *MemoryFS) Name() string {
|
||||||
return "MemoryFS"
|
return "MemoryFS"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Size returns the current size
|
||||||
func (m *MemoryFS) Size() int64 {
|
func (m *MemoryFS) Size() int64 {
|
||||||
var size int64
|
m.mu.RLock()
|
||||||
|
defer m.mu.RUnlock()
|
||||||
m.mu.Lock()
|
return m.size
|
||||||
defer m.mu.Unlock()
|
|
||||||
|
|
||||||
for _, v := range m.files {
|
|
||||||
size += int64(len(v.data))
|
|
||||||
}
|
|
||||||
|
|
||||||
return size
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MemoryFS) Set(key string, src []byte) error {
|
// Capacity returns the maximum capacity
|
||||||
if m.capacity > 0 {
|
func (m *MemoryFS) Capacity() int64 {
|
||||||
if size := m.Size() + int64(len(src)); size > m.capacity {
|
return m.capacity
|
||||||
return vfserror.ErrDiskFull
|
}
|
||||||
|
|
||||||
|
// getShardIndex returns the shard index for a given key
|
||||||
|
func getShardIndex(key string) int {
|
||||||
|
// Use FNV-1a hash for good distribution
|
||||||
|
var h uint32 = 2166136261 // FNV offset basis
|
||||||
|
for i := 0; i < len(key); i++ {
|
||||||
|
h ^= uint32(key[i])
|
||||||
|
h *= 16777619 // FNV prime
|
||||||
}
|
}
|
||||||
|
return int(h % numLockShards)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getKeyLock returns a lock for the given key using sharding
|
||||||
|
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
shard := &m.keyLocks[shardIndex]
|
||||||
|
|
||||||
|
keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
|
||||||
|
return keyLock.(*sync.RWMutex)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates a new file
|
||||||
|
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||||
|
if key == "" {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
}
|
}
|
||||||
|
if key[0] == '/' {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanitize key to prevent path traversal
|
||||||
|
if strings.Contains(key, "..") {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
keyMu := m.getKeyLock(key)
|
||||||
|
keyMu.Lock()
|
||||||
|
defer keyMu.Unlock()
|
||||||
|
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
// Check if file already exists and handle overwrite
|
||||||
|
if fi, exists := m.info[key]; exists {
|
||||||
m.files[key] = &file{
|
m.size -= fi.Size
|
||||||
fileinfo: vfs.NewFileInfo(
|
m.LRU.Remove(key)
|
||||||
key,
|
delete(m.info, key)
|
||||||
int64(len(src)),
|
delete(m.data, key)
|
||||||
time.Now(),
|
|
||||||
),
|
|
||||||
data: src,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
buffer := &bytes.Buffer{}
|
||||||
|
m.data[key] = buffer
|
||||||
|
fi := vfs.NewFileInfo(key, size)
|
||||||
|
m.info[key] = fi
|
||||||
|
m.LRU.Add(key, fi)
|
||||||
|
// Initialize access time with current time
|
||||||
|
fi.UpdateAccessBatched(m.timeUpdater)
|
||||||
|
m.size += size
|
||||||
|
m.mu.Unlock()
|
||||||
|
|
||||||
|
return &memoryWriteCloser{
|
||||||
|
buffer: buffer,
|
||||||
|
memory: m,
|
||||||
|
key: key,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// memoryWriteCloser implements io.WriteCloser for memory files
|
||||||
|
type memoryWriteCloser struct {
|
||||||
|
buffer *bytes.Buffer
|
||||||
|
memory *MemoryFS
|
||||||
|
key string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mwc *memoryWriteCloser) Write(p []byte) (n int, err error) {
|
||||||
|
return mwc.buffer.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mwc *memoryWriteCloser) Close() error {
|
||||||
|
// Update the actual size in FileInfo
|
||||||
|
mwc.memory.mu.Lock()
|
||||||
|
if fi, exists := mwc.memory.info[mwc.key]; exists {
|
||||||
|
actualSize := int64(mwc.buffer.Len())
|
||||||
|
sizeDiff := actualSize - fi.Size
|
||||||
|
fi.Size = actualSize
|
||||||
|
mwc.memory.size += sizeDiff
|
||||||
|
}
|
||||||
|
mwc.memory.mu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MemoryFS) Delete(key string) error {
|
// Open opens a file for reading
|
||||||
_, err := m.Stat(key)
|
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
|
||||||
if err != nil {
|
if key == "" {
|
||||||
return err
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
if key[0] == '/' {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
}
|
}
|
||||||
|
|
||||||
m.mu.Lock()
|
if strings.Contains(key, "..") {
|
||||||
defer m.mu.Unlock()
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
|
||||||
delete(m.files, key)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MemoryFS) Get(key string) ([]byte, error) {
|
|
||||||
_, err := m.Stat(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
keyMu := m.getKeyLock(key)
|
||||||
|
keyMu.RLock()
|
||||||
|
defer keyMu.RUnlock()
|
||||||
|
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
fi, exists := m.info[key]
|
||||||
|
if !exists {
|
||||||
|
m.mu.Unlock()
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
fi.UpdateAccessBatched(m.timeUpdater)
|
||||||
|
m.LRU.MoveToFront(key, m.timeUpdater)
|
||||||
|
|
||||||
m.files[key].fileinfo.ATime = time.Now()
|
buffer, exists := m.data[key]
|
||||||
dst := make([]byte, len(m.files[key].data))
|
if !exists {
|
||||||
copy(dst, m.files[key].data)
|
m.mu.Unlock()
|
||||||
|
|
||||||
return dst, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
|
|
||||||
f, ok := m.files[key]
|
|
||||||
if !ok {
|
|
||||||
return nil, vfserror.ErrNotFound
|
return nil, vfserror.ErrNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.fileinfo, nil
|
// Create a copy of the buffer for reading
|
||||||
|
data := make([]byte, buffer.Len())
|
||||||
|
copy(data, buffer.Bytes())
|
||||||
|
m.mu.Unlock()
|
||||||
|
|
||||||
|
return &memoryReadCloser{
|
||||||
|
reader: bytes.NewReader(data),
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MemoryFS) StatAll() []*vfs.FileInfo {
|
// memoryReadCloser implements io.ReadCloser for memory files
|
||||||
|
type memoryReadCloser struct {
|
||||||
|
reader *bytes.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) {
|
||||||
|
return mrc.reader.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mrc *memoryReadCloser) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes a file
|
||||||
|
func (m *MemoryFS) Delete(key string) error {
|
||||||
|
if key == "" {
|
||||||
|
return vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
if key[0] == '/' {
|
||||||
|
return vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(key, "..") {
|
||||||
|
return vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
keyMu := m.getKeyLock(key)
|
||||||
|
keyMu.Lock()
|
||||||
|
defer keyMu.Unlock()
|
||||||
|
|
||||||
|
m.mu.Lock()
|
||||||
|
fi, exists := m.info[key]
|
||||||
|
if !exists {
|
||||||
|
m.mu.Unlock()
|
||||||
|
return vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
m.size -= fi.Size
|
||||||
|
m.LRU.Remove(key)
|
||||||
|
delete(m.info, key)
|
||||||
|
delete(m.data, key)
|
||||||
|
m.mu.Unlock()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat returns file information
|
||||||
|
func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
|
||||||
|
if key == "" {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
if key[0] == '/' {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(key, "..") {
|
||||||
|
return nil, vfserror.ErrInvalidKey
|
||||||
|
}
|
||||||
|
|
||||||
|
keyMu := m.getKeyLock(key)
|
||||||
|
keyMu.RLock()
|
||||||
|
defer keyMu.RUnlock()
|
||||||
|
|
||||||
|
m.mu.RLock()
|
||||||
|
defer m.mu.RUnlock()
|
||||||
|
|
||||||
|
if fi, ok := m.info[key]; ok {
|
||||||
|
return fi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictLRU evicts the least recently used files to free up space
|
||||||
|
func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
// hard copy the file info to prevent modification of the original file info or the other way around
|
var evicted uint
|
||||||
files := make([]*vfs.FileInfo, 0, len(m.files))
|
|
||||||
for _, v := range m.files {
|
// Evict from LRU list until we free enough space
|
||||||
fi := *v.fileinfo
|
for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
|
||||||
files = append(files, &fi)
|
// Get the least recently used item
|
||||||
|
elem := m.LRU.list.Back()
|
||||||
|
if elem == nil {
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
return files
|
fi := elem.Value.(*vfs.FileInfo)
|
||||||
|
key := fi.Key
|
||||||
|
|
||||||
|
// Remove from LRU
|
||||||
|
m.LRU.Remove(key)
|
||||||
|
|
||||||
|
// Remove from maps
|
||||||
|
delete(m.info, key)
|
||||||
|
delete(m.data, key)
|
||||||
|
|
||||||
|
// Update size
|
||||||
|
m.size -= fi.Size
|
||||||
|
evicted += uint(fi.Size)
|
||||||
|
|
||||||
|
// Clean up key lock
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
m.keyLocks[shardIndex].Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return evicted
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
|
||||||
|
func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
|
||||||
|
m.mu.Lock()
|
||||||
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
|
var evicted uint
|
||||||
|
var candidates []*vfs.FileInfo
|
||||||
|
|
||||||
|
// Collect all files
|
||||||
|
for _, fi := range m.info {
|
||||||
|
candidates = append(candidates, fi)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by size
|
||||||
|
sort.Slice(candidates, func(i, j int) bool {
|
||||||
|
if ascending {
|
||||||
|
return candidates[i].Size < candidates[j].Size
|
||||||
|
}
|
||||||
|
return candidates[i].Size > candidates[j].Size
|
||||||
|
})
|
||||||
|
|
||||||
|
// Evict files until we free enough space
|
||||||
|
for _, fi := range candidates {
|
||||||
|
if m.size <= m.capacity-int64(bytesNeeded) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
key := fi.Key
|
||||||
|
|
||||||
|
// Remove from LRU
|
||||||
|
m.LRU.Remove(key)
|
||||||
|
|
||||||
|
// Remove from maps
|
||||||
|
delete(m.info, key)
|
||||||
|
delete(m.data, key)
|
||||||
|
|
||||||
|
// Update size
|
||||||
|
m.size -= fi.Size
|
||||||
|
evicted += uint(fi.Size)
|
||||||
|
|
||||||
|
// Clean up key lock
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
m.keyLocks[shardIndex].Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return evicted
|
||||||
|
}
|
||||||
|
|
||||||
|
// EvictFIFO evicts files using FIFO (oldest creation time first)
|
||||||
|
func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
|
||||||
|
m.mu.Lock()
|
||||||
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
|
var evicted uint
|
||||||
|
var candidates []*vfs.FileInfo
|
||||||
|
|
||||||
|
// Collect all files
|
||||||
|
for _, fi := range m.info {
|
||||||
|
candidates = append(candidates, fi)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by creation time (oldest first)
|
||||||
|
sort.Slice(candidates, func(i, j int) bool {
|
||||||
|
return candidates[i].CTime.Before(candidates[j].CTime)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Evict oldest files until we free enough space
|
||||||
|
for _, fi := range candidates {
|
||||||
|
if m.size <= m.capacity-int64(bytesNeeded) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
key := fi.Key
|
||||||
|
|
||||||
|
// Remove from LRU
|
||||||
|
m.LRU.Remove(key)
|
||||||
|
|
||||||
|
// Remove from maps
|
||||||
|
delete(m.info, key)
|
||||||
|
delete(m.data, key)
|
||||||
|
|
||||||
|
// Update size
|
||||||
|
m.size -= fi.Size
|
||||||
|
evicted += uint(fi.Size)
|
||||||
|
|
||||||
|
// Clean up key lock
|
||||||
|
shardIndex := getShardIndex(key)
|
||||||
|
m.keyLocks[shardIndex].Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return evicted
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,63 +0,0 @@
|
|||||||
package memory
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAllMemory(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
m := New(1024)
|
|
||||||
if err := m.Set("key", []byte("value")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Set("key", []byte("value1")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if d, err := m.Get("key"); err != nil {
|
|
||||||
t.Errorf("Get failed: %v", err)
|
|
||||||
} else if string(d) != "value1" {
|
|
||||||
t.Errorf("Get failed: got %s, want %s", d, "value1")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Delete("key"); err != nil {
|
|
||||||
t.Errorf("Delete failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := m.Get("key"); err == nil {
|
|
||||||
t.Errorf("Get failed: got nil, want %v", vfserror.ErrNotFound)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Delete("key"); err == nil {
|
|
||||||
t.Errorf("Delete failed: got nil, want %v", vfserror.ErrNotFound)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := m.Stat("key"); err == nil {
|
|
||||||
t.Errorf("Stat failed: got nil, want %v", vfserror.ErrNotFound)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := m.Set("key", []byte("value")); err != nil {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := m.Stat("key"); err != nil {
|
|
||||||
t.Errorf("Stat failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLimited(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
m := New(10)
|
|
||||||
for i := 0; i < 11; i++ {
|
|
||||||
if err := m.Set(fmt.Sprintf("key%d", i), []byte("1")); err != nil && i < 10 {
|
|
||||||
t.Errorf("Set failed: %v", err)
|
|
||||||
} else if i == 10 && err == nil {
|
|
||||||
t.Errorf("Set succeeded: got nil, want %v", vfserror.ErrDiskFull)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
package sync
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ensure SyncFS implements VFS.
|
|
||||||
var _ vfs.VFS = (*SyncFS)(nil)
|
|
||||||
|
|
||||||
type SyncFS struct {
|
|
||||||
vfs vfs.VFS
|
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(vfs vfs.VFS) *SyncFS {
|
|
||||||
return &SyncFS{
|
|
||||||
vfs: vfs,
|
|
||||||
mu: sync.RWMutex{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the file system.
|
|
||||||
func (sfs *SyncFS) Name() string {
|
|
||||||
return fmt.Sprintf("SyncFS(%s)", sfs.vfs.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the total size of all files in the file system.
|
|
||||||
func (sfs *SyncFS) Size() int64 {
|
|
||||||
sfs.mu.RLock()
|
|
||||||
defer sfs.mu.RUnlock()
|
|
||||||
|
|
||||||
return sfs.vfs.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the value of key as src.
|
|
||||||
// Setting the same key multiple times, the last set call takes effect.
|
|
||||||
func (sfs *SyncFS) Set(key string, src []byte) error {
|
|
||||||
sfs.mu.Lock()
|
|
||||||
defer sfs.mu.Unlock()
|
|
||||||
|
|
||||||
return sfs.vfs.Set(key, src)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete deletes the value of key.
|
|
||||||
func (sfs *SyncFS) Delete(key string) error {
|
|
||||||
sfs.mu.Lock()
|
|
||||||
defer sfs.mu.Unlock()
|
|
||||||
|
|
||||||
return sfs.vfs.Delete(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get gets the value of key to dst, and returns dst no matter whether or not there is an error.
|
|
||||||
func (sfs *SyncFS) Get(key string) ([]byte, error) {
|
|
||||||
sfs.mu.RLock()
|
|
||||||
defer sfs.mu.RUnlock()
|
|
||||||
|
|
||||||
return sfs.vfs.Get(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stat returns the FileInfo of key.
|
|
||||||
func (sfs *SyncFS) Stat(key string) (*vfs.FileInfo, error) {
|
|
||||||
sfs.mu.RLock()
|
|
||||||
defer sfs.mu.RUnlock()
|
|
||||||
|
|
||||||
return sfs.vfs.Stat(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StatAll returns the FileInfo of all keys.
|
|
||||||
func (sfs *SyncFS) StatAll() []*vfs.FileInfo {
|
|
||||||
sfs.mu.RLock()
|
|
||||||
defer sfs.mu.RUnlock()
|
|
||||||
|
|
||||||
return sfs.vfs.StatAll()
|
|
||||||
}
|
|
||||||
118
vfs/vfs.go
118
vfs/vfs.go
@@ -1,26 +1,112 @@
|
|||||||
|
// vfs/vfs.go
|
||||||
package vfs
|
package vfs
|
||||||
|
|
||||||
// VFS is the interface that wraps the basic methods of a virtual file system.
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// VFS defines the interface for virtual file systems
|
||||||
type VFS interface {
|
type VFS interface {
|
||||||
// Name returns the name of the file system.
|
// Create creates a new file at the given key
|
||||||
Name() string
|
Create(key string, size int64) (io.WriteCloser, error)
|
||||||
|
|
||||||
// Size returns the total size of all files in the file system.
|
// Open opens the file at the given key for reading
|
||||||
Size() int64
|
Open(key string) (io.ReadCloser, error)
|
||||||
|
|
||||||
// Set sets the value of key as src.
|
// Delete removes the file at the given key
|
||||||
// Setting the same key multiple times, the last set call takes effect.
|
|
||||||
Set(key string, src []byte) error
|
|
||||||
|
|
||||||
// Delete deletes the value of key.
|
|
||||||
Delete(key string) error
|
Delete(key string) error
|
||||||
|
|
||||||
// Get gets the value of key to dst, and returns dst no matter whether or not there is an error.
|
// Stat returns information about the file at the given key
|
||||||
Get(key string) ([]byte, error)
|
|
||||||
|
|
||||||
// Stat returns the FileInfo of key.
|
|
||||||
Stat(key string) (*FileInfo, error)
|
Stat(key string) (*FileInfo, error)
|
||||||
|
|
||||||
// StatAll returns the FileInfo of all keys.
|
// Name returns the name of this VFS
|
||||||
StatAll() []*FileInfo
|
Name() string
|
||||||
|
|
||||||
|
// Size returns the current size of the VFS
|
||||||
|
Size() int64
|
||||||
|
|
||||||
|
// Capacity returns the maximum capacity of the VFS
|
||||||
|
Capacity() int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo contains metadata about a cached file
|
||||||
|
type FileInfo struct {
|
||||||
|
Key string `json:"key"`
|
||||||
|
Size int64 `json:"size"`
|
||||||
|
ATime time.Time `json:"atime"` // Last access time
|
||||||
|
CTime time.Time `json:"ctime"` // Creation time
|
||||||
|
AccessCount int `json:"access_count"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileInfo creates a new FileInfo with the given key and current timestamp
|
||||||
|
func NewFileInfo(key string, size int64) *FileInfo {
|
||||||
|
now := time.Now()
|
||||||
|
return &FileInfo{
|
||||||
|
Key: key,
|
||||||
|
Size: size,
|
||||||
|
ATime: now,
|
||||||
|
CTime: now,
|
||||||
|
AccessCount: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileInfoFromOS creates a FileInfo from os.FileInfo
|
||||||
|
func NewFileInfoFromOS(info os.FileInfo, key string) *FileInfo {
|
||||||
|
return &FileInfo{
|
||||||
|
Key: key,
|
||||||
|
Size: info.Size(),
|
||||||
|
ATime: time.Now(), // We don't have access time from os.FileInfo
|
||||||
|
CTime: info.ModTime(),
|
||||||
|
AccessCount: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccess updates the access time and increments the access count
|
||||||
|
func (fi *FileInfo) UpdateAccess() {
|
||||||
|
fi.ATime = time.Now()
|
||||||
|
fi.AccessCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchedTimeUpdate provides a way to batch time updates for better performance
|
||||||
|
type BatchedTimeUpdate struct {
|
||||||
|
currentTime time.Time
|
||||||
|
lastUpdate time.Time
|
||||||
|
updateInterval time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBatchedTimeUpdate creates a new batched time updater
|
||||||
|
func NewBatchedTimeUpdate(interval time.Duration) *BatchedTimeUpdate {
|
||||||
|
now := time.Now()
|
||||||
|
return &BatchedTimeUpdate{
|
||||||
|
currentTime: now,
|
||||||
|
lastUpdate: now,
|
||||||
|
updateInterval: interval,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTime returns the current cached time, updating it if necessary
|
||||||
|
func (btu *BatchedTimeUpdate) GetTime() time.Time {
|
||||||
|
now := time.Now()
|
||||||
|
if now.Sub(btu.lastUpdate) >= btu.updateInterval {
|
||||||
|
btu.currentTime = now
|
||||||
|
btu.lastUpdate = now
|
||||||
|
}
|
||||||
|
return btu.currentTime
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccessBatched updates the access time using batched time updates
|
||||||
|
func (fi *FileInfo) UpdateAccessBatched(btu *BatchedTimeUpdate) {
|
||||||
|
fi.ATime = btu.GetTime()
|
||||||
|
fi.AccessCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTimeDecayedScore calculates a score based on access time and frequency
|
||||||
|
// More recent and frequent accesses get higher scores
|
||||||
|
func (fi *FileInfo) GetTimeDecayedScore() float64 {
|
||||||
|
timeSinceAccess := time.Since(fi.ATime).Hours()
|
||||||
|
decayFactor := 1.0 / (1.0 + timeSinceAccess/24.0) // Decay over days
|
||||||
|
frequencyBonus := float64(fi.AccessCount) * 0.1
|
||||||
|
return decayFactor + frequencyBonus
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,17 +1,12 @@
|
|||||||
|
// vfs/vfserror/vfserror.go
|
||||||
package vfserror
|
package vfserror
|
||||||
|
|
||||||
import "errors"
|
import "errors"
|
||||||
|
|
||||||
|
// Common VFS errors
|
||||||
var (
|
var (
|
||||||
// ErrInvalidKey is returned when a key is invalid.
|
|
||||||
ErrInvalidKey = errors.New("vfs: invalid key")
|
|
||||||
|
|
||||||
// ErrUnreachable is returned when a code path is unreachable.
|
|
||||||
ErrUnreachable = errors.New("unreachable")
|
|
||||||
|
|
||||||
// ErrNotFound is returned when a key is not found.
|
|
||||||
ErrNotFound = errors.New("vfs: key not found")
|
ErrNotFound = errors.New("vfs: key not found")
|
||||||
|
ErrInvalidKey = errors.New("vfs: invalid key")
|
||||||
// ErrDiskFull is returned when the disk is full.
|
ErrAlreadyExists = errors.New("vfs: key already exists")
|
||||||
ErrDiskFull = errors.New("vfs: disk full")
|
ErrCapacityExceeded = errors.New("vfs: capacity exceeded")
|
||||||
)
|
)
|
||||||
|
|||||||
Reference in New Issue
Block a user