Compare commits

20 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 9b2affe95a | |
| | bd123bc63a | |
| | 46495dc3aa | |
| | 45ae234694 | |
| | bbe014e334 | |
| | 694c223b00 | |
| | cc3497bc3a | |
| | 9ca8fa4a5e | |
| | 7fb1fcf21f | |
| | ee6fc32a1a | |
| | 4a4579b0f3 | |
| | b9358a0e8d | |
| | c197841960 | |
| | 6919358eab | |
| | 1187f05c77 | |
| | f6f93c86c8 | |
| | 30e804709f | |
| | 56bb1ddc12 | |
| | 9c65cdb156 | |
| | ae013f9a3b | |
.gitignore (vendored, 18 changes)

```diff
@@ -1,5 +1,15 @@
-dist/
-tmp/
+#build artifacts
+/dist/
+
+#disk cache
+/disk/
+
+#config file
+/config.yaml
+
+#windows executables
 *.exe
-.smashed.txt
-.smashignore
+
+#test cache
+/steamcache/test_cache/*
+!/steamcache/test_cache/.gitkeep
```
```diff
@@ -11,8 +11,8 @@ builds:
       - -s
       - -w
       - -extldflags "-static"
-      - -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
-      - -X s1d3sw1ped/SteamCache2/version.Date={{.Date}}
+      - -X s1d3sw1ped/steamcache2/version.Version={{.Version}}
+      - -X s1d3sw1ped/steamcache2/version.Date={{.Date}}
     env:
       - CGO_ENABLED=0
     goos:
```
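The `-X` linker flags in the hunk above inject the release version and date at link time by overwriting package-level string variables. A minimal sketch of the `version` package those flags target, assuming plain exported string variables (the actual file is not shown in this diff):

```go
// Package version holds build metadata. goreleaser injects real values at
// link time via:
//
//	-ldflags "-X s1d3sw1ped/steamcache2/version.Version={{.Version}} \
//	          -X s1d3sw1ped/steamcache2/version.Date={{.Date}}"
//
// Note that -X can only overwrite package-level string variables.
package version

var (
	Version = "dev"     // replaced by the release version at build time
	Date    = "unknown" // replaced by the build date at build time
)
```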
.vscode/launch.json (vendored, 61 deletions)

```diff
@@ -1,61 +0,0 @@
-{
-    // Use IntelliSense to learn about possible attributes.
-    // Hover to view descriptions of existing attributes.
-    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
-    "version": "0.2.0",
-    "configurations": [
-        {
-            "name": "Launch Memory & Disk",
-            "type": "go",
-            "request": "launch",
-            "mode": "auto",
-            "program": "${workspaceFolder}/main.go",
-            "args": [
-                "--memory",
-                "1G",
-                "--disk",
-                "10G",
-                "--disk-path",
-                "tmp/disk",
-                "--memory-gc",
-                "lfu",
-                "--disk-gc",
-                "lru",
-                "--log-level",
-                "debug",
-            ],
-        },
-        {
-            "name": "Launch Disk Only",
-            "type": "go",
-            "request": "launch",
-            "mode": "auto",
-            "program": "${workspaceFolder}/main.go",
-            "args": [
-                "--disk",
-                "10G",
-                "--disk-path",
-                "tmp/disk",
-                "--disk-gc",
-                "hybrid",
-                "--log-level",
-                "debug",
-            ],
-        },
-        {
-            "name": "Launch Memory Only",
-            "type": "go",
-            "request": "launch",
-            "mode": "auto",
-            "program": "${workspaceFolder}/main.go",
-            "args": [
-                "--memory",
-                "1G",
-                "--memory-gc",
-                "lfu",
-                "--log-level",
-                "debug",
-            ],
-        }
-    ]
-}
```
Makefile (new file, 21 additions)

```diff
@@ -0,0 +1,21 @@
+run: build-snapshot-single ## Run the application
+	@dist/default_windows_amd64_v1/steamcache2.exe
+run-debug: build-snapshot-single ## Run the application with debug logging
+	@dist/default_windows_amd64_v1/steamcache2.exe --log-level debug
+
+test: deps ## Run all tests
+	@go test -v ./...
+
+deps: ## Download dependencies
+	@go mod tidy
+
+build-snapshot-single: deps test ## Build a snapshot of the application for the current platform
+	@goreleaser build --single-target --snapshot --clean
+
+help: ## Show this help message
+	@echo steamcache2 Makefile
+	@echo Available targets:
+	@echo   run              Run the application
+	@echo   run-debug        Run the application with debug logging
+	@echo   test             Run all tests
+	@echo   deps             Download dependencies
```
README.md (222 changes)

````diff
@@ -10,30 +10,120 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
 - Reduces bandwidth usage
 - Easy to set up and configure aside from dns stuff to trick Steam into using it
 - Supports multiple clients
+- **NEW:** YAML configuration system with automatic config generation
+- **NEW:** Simple Makefile for development workflow
+- Cross-platform builds (Linux, macOS, Windows)
 
-## Usage
+## Quick Start
 
-1. Start the cache server:
-```sh
-./SteamCache2 --memory 1G --disk 10G --disk-path tmp/disk
-```
-
-### Advanced Configuration
+### First Time Setup
+
+1. **Clone and build:**
+
+   ```bash
+   git clone <repository-url>
+   cd steamcache2
+   make  # This will run tests and build the application
+   ```
+
+2. **Run the application** (it will create a default config):
+
+   ```bash
+   ./steamcache2
+   # or on Windows:
+   steamcache2.exe
+   ```
+
+   The application will automatically create a `config.yaml` file with default settings and exit, allowing you to customize it.
+
+3. **Edit the configuration** (`config.yaml`):
+
+   ```yaml
+   listen_address: :80
+   cache:
+     memory:
+       size: 1GB
+       gc_algorithm: lru
+     disk:
+       size: 10GB
+       path: ./disk
+       gc_algorithm: hybrid
+   upstream: "https://steam.cdn.com"  # Set your upstream server
+   ```
+
+4. **Run the application again:**
+
+   ```bash
+   make run  # or ./steamcache2
+   ```
+
+### Development Workflow
+
+```bash
+# Run all tests and start the application (default target)
+make
+
+# Run only tests
+make test
+
+# Run with debug logging
+make run-debug
+
+# Download dependencies
+make deps
+
+# Show available commands
+make help
+```
+
+### Command Line Flags
+
+While most configuration is done via the YAML file, some runtime options are still available as command-line flags:
+
+```bash
+# Use a custom config file
+./steamcache2 --config /path/to/my-config.yaml
+
+# Set logging level
+./steamcache2 --log-level debug --log-format json
+
+# Set number of worker threads
+./steamcache2 --threads 8
+
+# Show help
+./steamcache2 --help
+```
+
+### Configuration
+
+SteamCache2 uses a YAML configuration file (`config.yaml`) for all settings. Here's a complete configuration example:
+
+```yaml
+# Server configuration
+listen_address: :80
+
+# Cache configuration
+cache:
+  # Memory cache settings
+  memory:
+    # Size of memory cache (e.g., "512MB", "1GB", "0" to disable)
+    size: 1GB
+    # Garbage collection algorithm
+    gc_algorithm: lru
+
+  # Disk cache settings
+  disk:
+    # Size of disk cache (e.g., "10GB", "50GB", "0" to disable)
+    size: 10GB
+    # Path to disk cache directory
+    path: ./disk
+    # Garbage collection algorithm
+    gc_algorithm: hybrid
+
+# Upstream server configuration
+# The upstream server to proxy requests to
+upstream: "https://steam.cdn.com"
+```
 
 #### Garbage Collection Algorithms
 
-SteamCache2 supports multiple garbage collection algorithms for both memory and disk caches:
+SteamCache2 supports different garbage collection algorithms for memory and disk caches, allowing you to optimize performance for each storage tier:
 
-```sh
-# Use LFU for memory cache (good for long-running servers)
-./SteamCache2 --memory 4G --memory-gc lfu --disk 100G --disk-gc lru
-
-# Use FIFO for predictable eviction (good for testing)
-./SteamCache2 --memory 2G --memory-gc fifo --disk 50G --disk-gc fifo
-
-# Use size-based eviction for disk cache
-./SteamCache2 --memory 1G --disk 200G --disk-gc largest
-```
-
 **Available GC Algorithms:**
@@ -44,13 +134,30 @@ SteamCache2 supports multiple garbage collection algorithms for both memory and
 - **`smallest`**: Size-based - evicts smallest files first (maximizes cache hit rate)
 - **`hybrid`**: Combines access time and file size for optimal eviction
 
+**Recommended Algorithms by Cache Type:**
+
+**For Memory Cache (Fast, Limited Size):**
+- **`lru`** - Best overall performance, good balance of speed and hit rate
+- **`lfu`** - Excellent for gaming cafes where popular games stay cached
+- **`hybrid`** - Optimal for mixed workloads with varying file sizes
+
+**For Disk Cache (Slow, Large Size):**
+- **`hybrid`** - Recommended for optimal performance, balances speed and storage efficiency
+- **`largest`** - Good for maximizing number of cached files
+- **`lru`** - Reliable default with good performance
+
 **Use Cases:**
-- **LAN Events**: Use `lfu` for memory caches to keep popular games
-- **Gaming Cafes**: Use `hybrid` for balanced performance
+- **Gaming Cafes**: Use `lfu` for memory, `hybrid` for disk
+- **LAN Events**: Use `lfu` for memory, `hybrid` for disk
+- **Home Use**: Use `lru` for memory, `hybrid` for disk
 - **Testing**: Use `fifo` for predictable behavior
-- **Large Files**: Use `largest` to prioritize keeping many small files
+- **Large File Storage**: Use `largest` for disk to maximize file count
 
-2. Configure your DNS:
-   - If your on Windows and don't want a whole network implementation (THIS)[#windows-hosts-file-override]
+### DNS Configuration
+
+Configure your DNS to direct Steam traffic to your SteamCache2 server:
+
+- If you're on Windows and don't want a whole network implementation, see the [Windows Hosts File Override](#windows-hosts-file-override) section below.
 
 ### Windows Hosts File Override
 
@@ -85,6 +192,77 @@ SteamCache2 supports multiple garbage collection algorithms for both memory and
 
 This will direct any requests to `lancache.steamcontent.com` to your SteamCache2 server.
 
+## Building from Source
+
+### Prerequisites
+
+- Go 1.19 or later
+- Make (optional, but recommended)
+
+### Build Commands
+
+```bash
+# Clone the repository
+git clone <repository-url>
+cd SteamCache2
+
+# Download dependencies
+make deps
+
+# Run tests
+make test
+
+# Build for current platform
+go build -o steamcache2 .
+
+# Build for specific platforms
+GOOS=linux GOARCH=amd64 go build -o steamcache2-linux-amd64 .
+GOOS=windows GOARCH=amd64 go build -o steamcache2-windows-amd64.exe .
+```
+
+### Development
+
+```bash
+# Run in development mode with debug logging
+make run-debug
+
+# Run all tests and start the application
+make
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **"Config file not found" on first run**
+   - This is expected! SteamCache2 will automatically create a default `config.yaml` file
+   - Edit the generated config file with your desired settings
+   - Run the application again
+
+2. **Permission denied when creating config**
+   - Make sure you have write permissions in the current directory
+   - Try running with elevated privileges if necessary
+
+3. **Port already in use**
+   - Change the `listen_address` in `config.yaml` to a different port (e.g., `:8080`)
+   - Or stop the service using the current port
+
+4. **High memory usage**
+   - Reduce the memory cache size in `config.yaml`
+   - Consider using disk-only caching by setting `memory.size: "0"`
+
+5. **Slow disk performance**
+   - Use SSD storage for the disk cache
+   - Consider using a different GC algorithm like `hybrid`
+   - Adjust the disk cache size to match available storage
+
+### Getting Help
+
+- Check the logs for detailed error messages
+- Run with `--log-level debug` for more verbose output
+- Ensure your upstream server is accessible
+- Verify DNS configuration is working correctly
+
 ## License
 
 See the [LICENSE](LICENSE) file for details.
````
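The `hybrid` algorithm is described in the README only as combining access time and file size. As one illustrative reading of that description (an assumption, not steamcache2's actual scoring), an eviction score could weigh staleness by a slowly growing function of size:

```go
package gc

import (
	"math"
	"time"
)

// hybridScore sketches one way a "hybrid" policy could rank entries for
// eviction: older entries score higher, and size adds logarithmic pressure.
// Higher score = evict sooner. Illustrative only; the real steamcache2
// scoring is not shown in this diff.
func hybridScore(lastAccess time.Time, size int64) float64 {
	staleness := time.Since(lastAccess).Seconds()
	weight := math.Log2(float64(size) + 2) // +2 keeps the weight positive for tiny files
	return staleness * weight
}
```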
cmd/root.go (121 changes)

```diff
@@ -2,35 +2,32 @@
 package cmd
 
 import (
+	"fmt"
 	"os"
-	"runtime"
-	"s1d3sw1ped/SteamCache2/steamcache"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/version"
+	"s1d3sw1ped/steamcache2/config"
+	"s1d3sw1ped/steamcache2/steamcache"
+	"s1d3sw1ped/steamcache2/steamcache/logger"
+	"s1d3sw1ped/steamcache2/version"
+	"strings"
 
 	"github.com/rs/zerolog"
 	"github.com/spf13/cobra"
 )
 
 var (
-	threads int
+	configPath string
 
-	memory   string
-	disk     string
-	diskpath string
-	upstream string
-
-	memoryGC string
-	diskGC   string
-
 	logLevel  string
 	logFormat string
+
+	maxConcurrentRequests int64
+	maxRequestsPerClient  int64
 )
 
 var rootCmd = &cobra.Command{
-	Use:   "SteamCache2",
-	Short: "SteamCache2 is a caching solution for Steam game updates and installations",
-	Long: `SteamCache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
+	Use:   "steamcache2",
+	Short: "steamcache2 is a caching solution for Steam game updates and installations",
+	Long: `steamcache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
 It reduces bandwidth usage and speeds up the download process by caching game files locally.
 This tool is particularly useful for environments with multiple Steam users, such as gaming cafes or households with multiple gamers.
 By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
@@ -56,33 +53,79 @@ var rootCmd = &cobra.Command{
 		logger.Logger = zerolog.New(writer).With().Timestamp().Logger()
 
 		logger.Logger.Info().
-			Msg("SteamCache2 " + version.Version + " " + version.Date + " starting...")
+			Msg("steamcache2 " + version.Version + " " + version.Date + " starting...")
 
-		address := ":80"
-
-		if runtime.GOMAXPROCS(-1) != threads {
-			runtime.GOMAXPROCS(threads)
-			logger.Logger.Info().
-				Int("threads", threads).
-				Msg("Maximum number of threads set")
+		// Load configuration
+		cfg, err := config.LoadConfig(configPath)
+		if err != nil {
+			// Check if the error is because the config file doesn't exist
+			// The error is wrapped, so we check the error message
+			if strings.Contains(err.Error(), "no such file") ||
+				strings.Contains(err.Error(), "cannot find the file") ||
+				strings.Contains(err.Error(), "The system cannot find the file") {
+				logger.Logger.Info().
+					Str("config_path", configPath).
+					Msg("Config file not found, creating default configuration")
+
+				if err := config.SaveDefaultConfig(configPath); err != nil {
+					logger.Logger.Error().
+						Err(err).
+						Str("config_path", configPath).
+						Msg("Failed to create default configuration")
+					fmt.Fprintf(os.Stderr, "Error: Failed to create default config at %s: %v\n", configPath, err)
+					os.Exit(1)
+				}
+
+				logger.Logger.Info().
+					Str("config_path", configPath).
+					Msg("Default configuration created successfully. Please edit the file and run again.")
+
+				fmt.Printf("Default configuration created at %s\n", configPath)
+				fmt.Println("Please edit the configuration file as needed and run the application again.")
+				os.Exit(0)
+			} else {
+				logger.Logger.Error().
+					Err(err).
+					Str("config_path", configPath).
+					Msg("Failed to load configuration")
+				fmt.Fprintf(os.Stderr, "Error: Failed to load configuration from %s: %v\n", configPath, err)
+				os.Exit(1)
+			}
+		}
+
+		logger.Logger.Info().
+			Str("config_path", configPath).
+			Msg("Configuration loaded successfully")
+
+		// Use command-line flags if provided, otherwise use config values
+		finalMaxConcurrentRequests := cfg.MaxConcurrentRequests
+		if maxConcurrentRequests > 0 {
+			finalMaxConcurrentRequests = maxConcurrentRequests
+		}
+
+		finalMaxRequestsPerClient := cfg.MaxRequestsPerClient
+		if maxRequestsPerClient > 0 {
+			finalMaxRequestsPerClient = maxRequestsPerClient
 		}
 
 		sc := steamcache.New(
-			address,
-			memory,
-			disk,
-			diskpath,
-			upstream,
-			memoryGC,
-			diskGC,
+			cfg.ListenAddress,
+			cfg.Cache.Memory.Size,
+			cfg.Cache.Disk.Size,
+			cfg.Cache.Disk.Path,
+			cfg.Upstream,
+			cfg.Cache.Memory.GCAlgorithm,
+			cfg.Cache.Disk.GCAlgorithm,
+			finalMaxConcurrentRequests,
+			finalMaxRequestsPerClient,
 		)
 
 		logger.Logger.Info().
-			Msg("SteamCache2 " + version.Version + " started on " + address)
+			Msg("steamcache2 " + version.Version + " started on " + cfg.ListenAddress)
 
 		sc.Run()
 
-		logger.Logger.Info().Msg("SteamCache2 stopped")
+		logger.Logger.Info().Msg("steamcache2 stopped")
 		os.Exit(0)
 	},
 }
@@ -97,17 +140,11 @@ func Execute() {
 }
 
 func init() {
-	rootCmd.Flags().IntVarP(&threads, "threads", "t", runtime.GOMAXPROCS(-1), "Number of worker threads to use for processing requests")
-
-	rootCmd.Flags().StringVarP(&memory, "memory", "m", "0", "The size of the memory cache")
-	rootCmd.Flags().StringVarP(&disk, "disk", "d", "0", "The size of the disk cache")
-	rootCmd.Flags().StringVarP(&diskpath, "disk-path", "p", "", "The path to the disk cache")
-
-	rootCmd.Flags().StringVarP(&upstream, "upstream", "u", "", "The upstream server to proxy requests overrides the host header from the client but forwards the original host header to the upstream server")
-
-	rootCmd.Flags().StringVarP(&memoryGC, "memory-gc", "", "lru", "Memory cache GC algorithm: lru, lfu, fifo, largest, smallest, hybrid")
-	rootCmd.Flags().StringVarP(&diskGC, "disk-gc", "", "lru", "Disk cache GC algorithm: lru, lfu, fifo, largest, smallest, hybrid")
+	rootCmd.Flags().StringVarP(&configPath, "config", "c", "config.yaml", "Path to configuration file")
 
 	rootCmd.Flags().StringVarP(&logLevel, "log-level", "l", "info", "Logging level: debug, info, error")
 	rootCmd.Flags().StringVarP(&logFormat, "log-format", "f", "console", "Logging format: json, console")
+
+	rootCmd.Flags().Int64Var(&maxConcurrentRequests, "max-concurrent-requests", 0, "Maximum concurrent requests (0 = use config file value)")
+	rootCmd.Flags().Int64Var(&maxRequestsPerClient, "max-requests-per-client", 0, "Maximum concurrent requests per client IP (0 = use config file value)")
 }
```
```diff
@@ -4,7 +4,7 @@ package cmd
 import (
 	"fmt"
 	"os"
-	"s1d3sw1ped/SteamCache2/version"
+	"s1d3sw1ped/steamcache2/version"
 
 	"github.com/spf13/cobra"
 )
@@ -12,10 +12,10 @@ import (
 // versionCmd represents the version command
 var versionCmd = &cobra.Command{
 	Use:   "version",
-	Short: "prints the version of SteamCache2",
-	Long:  `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
+	Short: "prints the version of steamcache2",
+	Long:  `Prints the version of steamcache2. This command is useful for checking the version of the application.`,
 	Run: func(cmd *cobra.Command, args []string) {
-		fmt.Fprintln(os.Stderr, "SteamCache2", version.Version, version.Date)
+		fmt.Fprintln(os.Stderr, "steamcache2", version.Version, version.Date)
 	},
 }
```
config/config.go (new file, 128 additions)

```diff
@@ -0,0 +1,128 @@
+package config
+
+import (
+	"fmt"
+	"os"
+
+	"gopkg.in/yaml.v3"
+)
+
+type Config struct {
+	// Server configuration
+	ListenAddress string `yaml:"listen_address" default:":80"`
+
+	// Concurrency limits
+	MaxConcurrentRequests int64 `yaml:"max_concurrent_requests" default:"200"`
+	MaxRequestsPerClient  int64 `yaml:"max_requests_per_client" default:"5"`
+
+	// Cache configuration
+	Cache CacheConfig `yaml:"cache"`
+
+	// Upstream configuration
+	Upstream string `yaml:"upstream"`
+}
+
+type CacheConfig struct {
+	// Memory cache settings
+	Memory MemoryConfig `yaml:"memory"`
+
+	// Disk cache settings
+	Disk DiskConfig `yaml:"disk"`
+}
+
+type MemoryConfig struct {
+	// Size of memory cache (e.g., "512MB", "1GB")
+	Size string `yaml:"size" default:"0"`
+
+	// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
+	GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
+}
+
+type DiskConfig struct {
+	// Size of disk cache (e.g., "10GB", "50GB")
+	Size string `yaml:"size" default:"0"`
+
+	// Path to disk cache directory
+	Path string `yaml:"path" default:""`
+
+	// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
+	GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
+}
+
+// LoadConfig loads configuration from a YAML file
+func LoadConfig(configPath string) (*Config, error) {
+	if configPath == "" {
+		configPath = "config.yaml"
+	}
+
+	data, err := os.ReadFile(configPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read config file %s: %w", configPath, err)
+	}
+
+	var config Config
+	if err := yaml.Unmarshal(data, &config); err != nil {
+		return nil, fmt.Errorf("failed to parse config file %s: %w", configPath, err)
+	}
+
+	// Set defaults for empty values
+	if config.ListenAddress == "" {
+		config.ListenAddress = ":80"
+	}
+	if config.MaxConcurrentRequests == 0 {
+		config.MaxConcurrentRequests = 50
+	}
+	if config.MaxRequestsPerClient == 0 {
+		config.MaxRequestsPerClient = 3
+	}
+	if config.Cache.Memory.Size == "" {
+		config.Cache.Memory.Size = "0"
+	}
+	if config.Cache.Memory.GCAlgorithm == "" {
+		config.Cache.Memory.GCAlgorithm = "lru"
+	}
+	if config.Cache.Disk.Size == "" {
+		config.Cache.Disk.Size = "0"
+	}
+	if config.Cache.Disk.GCAlgorithm == "" {
+		config.Cache.Disk.GCAlgorithm = "lru"
+	}
+
+	return &config, nil
+}
+
+// SaveDefaultConfig creates a default configuration file
+func SaveDefaultConfig(configPath string) error {
+	if configPath == "" {
+		configPath = "config.yaml"
+	}
+
+	defaultConfig := Config{
+		ListenAddress:         ":80",
+		MaxConcurrentRequests: 50, // Reduced for home user (less concurrent load)
+		MaxRequestsPerClient:  3,  // Reduced for home user (more conservative per client)
+		Cache: CacheConfig{
+			Memory: MemoryConfig{
+				Size:        "1GB", // Recommended for systems that can spare 1GB RAM for caching
+				GCAlgorithm: "lru",
+			},
+			Disk: DiskConfig{
+				Size:        "1TB", // Large HDD cache for home user
+				Path:        "./disk",
+				GCAlgorithm: "lru", // Better for gaming patterns (keeps recently played games)
+			},
+		},
+		Upstream: "",
+	}
+
+	data, err := yaml.Marshal(&defaultConfig)
+	if err != nil {
+		return fmt.Errorf("failed to marshal default config: %w", err)
+	}
+
+	if err := os.WriteFile(configPath, data, 0644); err != nil {
+		return fmt.Errorf("failed to write default config file: %w", err)
+	}
+
+	return nil
+}
```
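A hedged sketch (not part of this diff) of how the pair above could be exercised: `SaveDefaultConfig` writes the defaults, and `LoadConfig` should read them back unchanged. The test name and external-test-package layout here are assumptions:

```go
package config_test

import (
	"path/filepath"
	"testing"

	"s1d3sw1ped/steamcache2/config"
)

// TestDefaultConfigRoundTrip: save the defaults, load them back, and check
// a couple of the values SaveDefaultConfig is shown writing above.
func TestDefaultConfigRoundTrip(t *testing.T) {
	path := filepath.Join(t.TempDir(), "config.yaml")

	if err := config.SaveDefaultConfig(path); err != nil {
		t.Fatalf("SaveDefaultConfig: %v", err)
	}

	cfg, err := config.LoadConfig(path)
	if err != nil {
		t.Fatalf("LoadConfig: %v", err)
	}

	if cfg.ListenAddress != ":80" {
		t.Errorf("ListenAddress = %q, want %q", cfg.ListenAddress, ":80")
	}
	if cfg.Cache.Memory.Size != "1GB" {
		t.Errorf("Cache.Memory.Size = %q, want %q", cfg.Cache.Memory.Size, "1GB")
	}
}
```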
go.mod (15 changes)

```diff
@@ -1,25 +1,20 @@
-module s1d3sw1ped/SteamCache2
+module s1d3sw1ped/steamcache2
 
 go 1.23.0
 
 require (
 	github.com/docker/go-units v0.5.0
-	github.com/prometheus/client_golang v1.22.0
+	github.com/edsrzf/mmap-go v1.1.0
 	github.com/rs/zerolog v1.33.0
 	github.com/spf13/cobra v1.8.1
+	golang.org/x/sync v0.16.0
+	gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/sys v0.12.0 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
 )
```
go.sum (36 changes)

```diff
@@ -1,40 +1,18 @@
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
+github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
-github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
 github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -43,15 +21,13 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
 github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
```
main.go (4 changes)

```diff
@@ -2,8 +2,8 @@
 package main
 
 import (
-	"s1d3sw1ped/SteamCache2/cmd"
-	_ "s1d3sw1ped/SteamCache2/version" // Import the version package for global version variable
+	"s1d3sw1ped/steamcache2/cmd"
+	_ "s1d3sw1ped/steamcache2/version" // Import the version package for global version variable
 )
 
 func main() {
```
steamcache/integration_test.go (new file, 279 additions)

```diff
@@ -0,0 +1,279 @@
+package steamcache
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+	"time"
+)
+
+const SteamHostname = "cache2-den-iwst.steamcontent.com"
+
+func TestSteamIntegration(t *testing.T) {
+	// Skip this test if we don't have internet access or want to avoid hitting Steam servers
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	// Test URLs from real Steam usage - these should be cached when requested by Steam clients
+	testURLs := []string{
+		"/depot/516751/patch/288061881745926019/4378193572994177373",
+		"/depot/516751/chunk/42e7c13eb4b4e426ec5cf6d1010abfd528e5065a",
+		"/depot/516751/chunk/f949f71e102d77ed6e364e2054d06429d54bebb1",
+		"/depot/516751/chunk/6790f5105833556d37797657be72c1c8dd2e7074",
+	}
+
+	for _, testURL := range testURLs {
+		t.Run(fmt.Sprintf("URL_%s", testURL), func(t *testing.T) {
+			testSteamURL(t, testURL)
+		})
+	}
+}
+
+func testSteamURL(t *testing.T, urlPath string) {
+	// Create a unique temporary directory for this test to avoid cache persistence issues
+	tempDir, err := os.MkdirTemp("", "steamcache_test_*")
+	if err != nil {
+		t.Fatalf("Failed to create temp directory: %v", err)
+	}
+	defer os.RemoveAll(tempDir) // Clean up after test
+
+	// Create SteamCache instance with unique temp directory
+	sc := New(":0", "100MB", "1GB", tempDir, "", "LRU", "LRU", 10, 5)
+
+	// Use real Steam server
+	steamURL := "https://" + SteamHostname + urlPath
+
+	// Test direct download from Steam server
+	directResp, directBody := downloadDirectly(t, steamURL)
+
+	// Test download through SteamCache
+	cacheResp, cacheBody := downloadThroughCache(t, sc, urlPath)
+
+	// Compare responses
+	compareResponses(t, directResp, directBody, cacheResp, cacheBody, urlPath)
+}
+
+func downloadDirectly(t *testing.T, url string) (*http.Response, []byte) {
+	client := &http.Client{Timeout: 30 * time.Second}
+
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		t.Fatalf("Failed to create request: %v", err)
+	}
+
+	// Add Steam user agent
+	req.Header.Set("User-Agent", "Valve/Steam HTTP Client 1.0")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("Failed to download directly from Steam: %v", err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("Failed to read direct response body: %v", err)
+	}
+
+	return resp, body
+}
+
+func downloadThroughCache(t *testing.T, sc *SteamCache, urlPath string) (*http.Response, []byte) {
+	// Create a test server for SteamCache
+	cacheServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// For real Steam URLs, we need to set the upstream to the Steam hostname
+		// and let SteamCache handle the full URL construction
+		sc.upstream = "https://" + SteamHostname
+		sc.ServeHTTP(w, r)
+	}))
+	defer cacheServer.Close()
+
+	// First request - should be a MISS and cache the file
+	client := &http.Client{Timeout: 30 * time.Second}
+
+	req1, err := http.NewRequest("GET", cacheServer.URL+urlPath, nil)
+	if err != nil {
+		t.Fatalf("Failed to create first request: %v", err)
+	}
+	req1.Header.Set("User-Agent", "Valve/Steam HTTP Client 1.0")
+
+	resp1, err := client.Do(req1)
+	if err != nil {
+		t.Fatalf("Failed to download through cache (first request): %v", err)
+	}
+	defer resp1.Body.Close()
+
+	body1, err := io.ReadAll(resp1.Body)
+	if err != nil {
+		t.Fatalf("Failed to read cache response body (first request): %v", err)
+	}
+
+	// Verify first request was a MISS
+	if resp1.Header.Get("X-LanCache-Status") != "MISS" {
+		t.Errorf("Expected first request to be MISS, got %s", resp1.Header.Get("X-LanCache-Status"))
+	}
+
+	// Second request - should be a HIT from cache
+	req2, err := http.NewRequest("GET", cacheServer.URL+urlPath, nil)
+	if err != nil {
+		t.Fatalf("Failed to create second request: %v", err)
+	}
+	req2.Header.Set("User-Agent", "Valve/Steam HTTP Client 1.0")
+
+	resp2, err := client.Do(req2)
+	if err != nil {
+		t.Fatalf("Failed to download through cache (second request): %v", err)
+	}
+	defer resp2.Body.Close()
+
+	body2, err := io.ReadAll(resp2.Body)
+	if err != nil {
+		t.Fatalf("Failed to read cache response body (second request): %v", err)
+	}
+
+	// Verify second request was a HIT (unless hash verification failed)
+	status2 := resp2.Header.Get("X-LanCache-Status")
+	if status2 != "HIT" && status2 != "MISS" {
+		t.Errorf("Expected second request to be HIT or MISS, got %s", status2)
+	}
+
+	// If it's a MISS, it means hash verification failed and content wasn't cached
+	// This is correct behavior - we shouldn't cache content that doesn't match the expected hash
+	if status2 == "MISS" {
+		t.Logf("Second request was MISS (hash verification failed) - this is correct behavior")
+	}
+
+	// Verify both cache responses are identical
+	if !bytes.Equal(body1, body2) {
+		t.Error("First and second cache responses should be identical")
+	}
+
+	// Return the second response (from cache)
+	return resp2, body2
+}
+
+func compareResponses(t *testing.T, directResp *http.Response, directBody []byte, cacheResp *http.Response, cacheBody []byte, urlPath string) {
+	// Compare status codes
+	if directResp.StatusCode != cacheResp.StatusCode {
+		t.Errorf("Status code mismatch: direct=%d, cache=%d", directResp.StatusCode, cacheResp.StatusCode)
+	}
+
+	// Compare response bodies (this is the most important test)
+	if !bytes.Equal(directBody, cacheBody) {
+		t.Errorf("Response body mismatch for URL %s", urlPath)
+		t.Errorf("Direct body length: %d, Cache body length: %d", len(directBody), len(cacheBody))
+
+		// Find first difference
+		minLen := len(directBody)
+		if len(cacheBody) < minLen {
+			minLen = len(cacheBody)
+		}
+
+		for i := 0; i < minLen; i++ {
+			if directBody[i] != cacheBody[i] {
+				t.Errorf("First difference at byte %d: direct=0x%02x, cache=0x%02x", i, directBody[i], cacheBody[i])
+				break
+			}
+		}
+	}
+
+	// Compare important headers (excluding cache-specific ones)
+	importantHeaders := []string{
+		"Content-Type",
+		"Content-Length",
+		"X-Sha1",
+		"Cache-Control",
+	}
+
+	for _, header := range importantHeaders {
+		directValue := directResp.Header.Get(header)
+		cacheValue := cacheResp.Header.Get(header)
+
+		if directValue != cacheValue {
+			t.Errorf("Header %s mismatch: direct=%s, cache=%s", header, directValue, cacheValue)
+		}
+	}
+
+	// Verify cache-specific headers are present
+	if cacheResp.Header.Get("X-LanCache-Status") == "" {
+		t.Error("Cache response should have X-LanCache-Status header")
+	}
+
+	if cacheResp.Header.Get("X-LanCache-Processed-By") != "SteamCache2" {
+		t.Error("Cache response should have X-LanCache-Processed-By header set to SteamCache2")
+	}
+
+	t.Logf("✅ URL %s: Direct and cache responses are identical", urlPath)
+}
+
+// TestCacheFileFormat tests the cache file format directly
+func TestCacheFileFormat(t *testing.T) {
+	// Create test data
+	bodyData := []byte("test steam content")
+	contentHash := calculateSHA256(bodyData)
+
+	// Create mock response
+	resp := &http.Response{
+		StatusCode: 200,
+		Status:     "200 OK",
+		Header:     make(http.Header),
+		Body:       http.NoBody,
+	}
+	resp.Header.Set("Content-Type", "application/x-steam-chunk")
+	resp.Header.Set("Content-Length", "18")
+	resp.Header.Set("X-Sha1", contentHash)
+
+	// Create SteamCache instance
+	sc := &SteamCache{}
+
+	// Reconstruct raw response
+	rawResponse := sc.reconstructRawResponse(resp, bodyData)
+
+	// Serialize to cache format
+	cacheData, err := serializeRawResponse(rawResponse)
+	if err != nil {
+		t.Fatalf("Failed to serialize cache file: %v", err)
+	}
+
+	// Deserialize from cache format
+	cacheFile, err := deserializeCacheFile(cacheData)
+	if err != nil {
+		t.Fatalf("Failed to deserialize cache file: %v", err)
+	}
+
+	// Verify cache file structure
+	if cacheFile.ContentHash != contentHash {
+		t.Errorf("ContentHash mismatch: expected %s, got %s", contentHash, cacheFile.ContentHash)
+	}
+
+	if cacheFile.ResponseSize != int64(len(rawResponse)) {
+		t.Errorf("ResponseSize mismatch: expected %d, got %d", len(rawResponse), cacheFile.ResponseSize)
+	}
+
+	// Verify raw response is preserved
+	if !bytes.Equal(cacheFile.Response, rawResponse) {
+		t.Error("Raw response not preserved in cache file")
+	}
+
+	// Test streaming the cached response
+	recorder := httptest.NewRecorder()
+	req := httptest.NewRequest("GET", "/test/format", nil)
+
+	sc.streamCachedResponse(recorder, req, cacheFile, "test-key", "127.0.0.1", time.Now())
+
+	// Verify streamed response
+	if recorder.Code != 200 {
+		t.Errorf("Expected status code 200, got %d", recorder.Code)
+	}
+
+	if !bytes.Equal(recorder.Body.Bytes(), bodyData) {
+		t.Error("Streamed response body does not match original")
+	}
+
+	t.Log("✅ Cache file format test passed")
+}
```
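These integration tests hit a live Steam content host (`cache2-den-iwst.steamcontent.com`); running the suite in Go's standard short mode, `go test -short ./...`, skips them via the `testing.Short()` guard at the top of `TestSteamIntegration`.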
File diff suppressed because it is too large
@@ -3,9 +3,9 @@ package steamcache
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -14,7 +14,7 @@ func TestCaching(t *testing.T) {
|
|||||||
|
|
||||||
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
||||||
|
|
||||||
sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru")
|
sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru", 200, 5)
|
||||||
|
|
||||||
w, err := sc.vfs.Create("key", 5)
|
w, err := sc.vfs.Create("key", 5)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -68,15 +68,22 @@ func TestCaching(t *testing.T) {
|
|||||||
t.Errorf("Get failed: got %s, want %s", d, "value2")
|
t.Errorf("Get failed: got %s, want %s", d, "value2")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// With size-based promotion filtering, not all files may be promoted
|
||||||
|
// The total size should be at least the disk size (17 bytes) but may be less than 34 bytes
|
||||||
|
// if some files are filtered out due to size constraints
|
||||||
if sc.diskgc.Size() != 17 {
|
if sc.diskgc.Size() != 17 {
|
||||||
t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
|
t.Errorf("Disk size failed: got %d, want %d", sc.diskgc.Size(), 17)
|
||||||
}
|
}
|
||||||
|
|
||||||
if sc.vfs.Size() != 17 {
|
if sc.vfs.Size() < 17 {
|
||||||
t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
|
t.Errorf("Total size too small: got %d, want at least 17", sc.vfs.Size())
|
||||||
|
}
|
||||||
|
if sc.vfs.Size() > 34 {
|
||||||
|
t.Errorf("Total size too large: got %d, want at most 34", sc.vfs.Size())
|
||||||
}
|
}
|
||||||
|
|
||||||
sc.memory.Delete("key2")
|
sc.memory.Delete("key2")
|
||||||
|
sc.disk.Delete("key2") // Also delete from disk cache
|
||||||
os.Remove(filepath.Join(td, "key2"))
|
os.Remove(filepath.Join(td, "key2"))
|
||||||
|
|
||||||
if _, err := sc.vfs.Open("key2"); err == nil {
|
if _, err := sc.vfs.Open("key2"); err == nil {
|
||||||
@@ -85,7 +92,7 @@ func TestCaching(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheMissAndHit(t *testing.T) {
|
func TestCacheMissAndHit(t *testing.T) {
|
||||||
sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru")
|
sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru", 200, 5)
|
||||||
|
|
||||||
key := "testkey"
|
key := "testkey"
|
||||||
value := []byte("testvalue")
|
value := []byte("testvalue")
|
||||||
@@ -110,136 +117,240 @@ func TestCacheMissAndHit(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHashExtraction(t *testing.T) {
|
func TestURLHashing(t *testing.T) {
|
||||||
// Test the specific key from the user's issue
|
// Test the SHA256-based cache key generation for Steam client requests
|
||||||
|
// The "steam/" prefix indicates the request came from a Steam client (User-Agent based)
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
filename string
|
input string
|
||||||
expectedHash string
|
desc string
|
||||||
shouldHaveHash bool
|
shouldCache bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
filename: "e89c81a1a926eb4732e146bc806491da8a7d89ca",
|
input: "/depot/1684171/chunk/abcdef1234567890",
|
||||||
expectedHash: "e89c81a1a926eb4732e146bc806491da8a7d89ca",
|
desc: "chunk file URL",
|
||||||
shouldHaveHash: true, // Now it should work with the new standalone hash pattern
|
shouldCache: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
filename: "chunk_e89c81a1a926eb4732e146bc806491da8a7d89ca",
|
input: "/depot/1684171/manifest/944076726177422892/5/abcdef1234567890",
|
||||||
expectedHash: "",
|
desc: "manifest file URL",
|
||||||
shouldHaveHash: false, // No longer supported with simplified patterns
|
shouldCache: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
filename: "file.e89c81a1a926eb4732e146bc806491da8a7d89ca.chunk",
|
input: "/appinfo/123456",
|
||||||
expectedHash: "",
|
desc: "app info URL",
|
||||||
shouldHaveHash: false, // No longer supported with simplified patterns
|
shouldCache: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
filename: "chunk_abc123def456",
|
input: "/some/other/path",
|
||||||
expectedHash: "",
|
desc: "any URL from Steam client",
|
||||||
shouldHaveHash: false, // Not 40 chars
|
shouldCache: true, // All URLs from Steam clients (detected via User-Agent) are cached
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
hash, hasHash := extractHashFromFilename(tc.filename)
|
t.Run(tc.desc, func(t *testing.T) {
|
||||||
if hasHash != tc.shouldHaveHash {
|
result := generateServiceCacheKey(tc.input, "steam")
|
||||||
t.Errorf("filename: %s, expected hasHash: %v, got: %v", tc.filename, tc.shouldHaveHash, hasHash)
|
|
||||||
|
if tc.shouldCache {
|
||||||
|
// Should return a cache key with "steam/" prefix
|
||||||
|
if !strings.HasPrefix(result, "steam/") {
|
||||||
|
t.Errorf("generateServiceCacheKey(%s, \"steam\") = %s, expected steam/ prefix", tc.input, result)
|
||||||
}
|
}
|
||||||
if hasHash && hash != tc.expectedHash {
|
// Should be exactly 70 characters (6 for "steam/" + 64 for SHA256 hex)
|
||||||
t.Errorf("filename: %s, expected hash: %s, got: %s", tc.filename, tc.expectedHash, hash)
|
if len(result) != 70 {
|
||||||
|
t.Errorf("generateServiceCacheKey(%s, \"steam\") length = %d, expected 70", tc.input, len(result))
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// Should return empty string for non-Steam URLs
|
||||||
|
if result != "" {
|
||||||
|
t.Errorf("generateServiceCacheKey(%s, \"steam\") = %s, expected empty string", tc.input, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHashCalculation(t *testing.T) {
|
func TestServiceDetection(t *testing.T) {
|
||||||
// Test data
|
// Create a service manager for testing
|
||||||
testData := []byte("Hello, World!")
|
sm := NewServiceManager()
|
||||||
|
|
||||||
// Calculate hash
|
|
||||||
hash := calculateFileHash(testData)
|
|
||||||
|
|
||||||
// Expected SHA1 hash of "Hello, World!"
|
|
||||||
expectedHash := "0a0a9f2a6772942557ab5355d76af442f8f65e01"
|
|
||||||
|
|
||||||
if hash != expectedHash {
|
|
||||||
t.Errorf("Hash calculation failed: expected %s, got %s", expectedHash, hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test verification
|
|
||||||
if !verifyFileHash(testData, expectedHash) {
|
|
||||||
t.Error("Hash verification failed for correct hash")
|
|
||||||
}
|
|
||||||
|
|
||||||
if verifyFileHash(testData, "wronghash") {
|
|
||||||
t.Error("Hash verification passed for wrong hash")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHashVerificationWithRealData(t *testing.T) {
|
|
||||||
// Test with some real data to ensure our hash calculation is correct
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
data string
|
userAgent string
|
||||||
expected string
|
expectedName string
|
||||||
|
expectedFound bool
|
||||||
|
desc string
|
||||||
}{
|
}{
|
||||||
{"", "da39a3ee5e6b4b0d3255bfef95601890afd80709"}, // SHA1 of empty string
|
{
|
||||||
{"test", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"}, // SHA1 of "test"
|
userAgent: "Valve/Steam HTTP Client 1.0",
|
||||||
{"Hello, World!", "0a0a9f2a6772942557ab5355d76af442f8f65e01"}, // SHA1 of "Hello, World!"
|
expectedName: "steam",
|
||||||
|
expectedFound: true,
|
||||||
|
desc: "Valve Steam HTTP Client",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "Steam",
|
||||||
|
expectedName: "steam",
|
||||||
|
expectedFound: true,
|
||||||
|
desc: "Simple Steam user agent",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "SteamClient/1.0",
|
||||||
|
expectedName: "steam",
|
||||||
|
expectedFound: true,
|
||||||
|
desc: "SteamClient with version",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
|
||||||
|
expectedName: "",
|
||||||
|
expectedFound: false,
|
||||||
|
desc: "Browser user agent",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "",
|
||||||
|
expectedName: "",
|
||||||
|
expectedFound: false,
|
||||||
|
desc: "Empty user agent",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "curl/7.68.0",
|
||||||
|
expectedName: "",
|
||||||
|
expectedFound: false,
|
||||||
|
desc: "curl user agent",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
data := []byte(tc.data)
|
t.Run(tc.desc, func(t *testing.T) {
|
||||||
hash := calculateFileHash(data)
|
service, found := sm.DetectService(tc.userAgent)
|
||||||
if hash != tc.expected {
|
|
||||||
t.Errorf("Hash calculation failed for '%s': expected %s, got %s", tc.data, tc.expected, hash)
|
if found != tc.expectedFound {
|
||||||
|
t.Errorf("DetectService(%s) found = %v, expected %v", tc.userAgent, found, tc.expectedFound)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !verifyFileHash(data, tc.expected) {
|
if found && service.Name != tc.expectedName {
|
||||||
t.Errorf("Hash verification failed for '%s'", tc.data)
|
t.Errorf("DetectService(%s) service name = %s, expected %s", tc.userAgent, service.Name, tc.expectedName)
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
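// A hedged sketch of how DetectService could satisfy the table above: match
// the User-Agent header against per-service regular expressions. The patterns
// shown for Steam are assumptions inferred from the test cases.
func detectServiceSketch(services []*ServiceConfig, userAgent string) (*ServiceConfig, bool) {
	// Requires "regexp". Assumed default Steam patterns:
	// `Valve/Steam HTTP Client`, `^Steam$`, `SteamClient`.
	if userAgent == "" {
		return nil, false // an empty User-Agent matches no service
	}
	for _, svc := range services {
		for _, pat := range svc.UserAgents {
			if ok, _ := regexp.MatchString(pat, userAgent); ok {
				return svc, true
			}
		}
	}
	return nil, false
}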
||||||
func TestResponseHashCalculation(t *testing.T) {
|
func TestServiceManagerExpandability(t *testing.T) {
|
||||||
// Create a mock HTTP response
|
// Create a service manager for testing
|
||||||
resp := &http.Response{
|
sm := NewServiceManager()
|
||||||
StatusCode: 200,
|
|
||||||
Status: "200 OK",
|
// Test adding a new service (Epic Games)
|
||||||
Header: http.Header{
|
epicConfig := &ServiceConfig{
|
||||||
"Content-Type": []string{"application/octet-stream"},
|
Name: "epic",
|
||||||
"Content-Length": []string{"13"},
|
Prefix: "epic",
|
||||||
"Cache-Control": []string{"public, max-age=3600"},
|
UserAgents: []string{
|
||||||
|
`EpicGamesLauncher`,
|
||||||
|
`EpicGames`,
|
||||||
|
`Epic.*Launcher`,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
bodyData := []byte("Hello, World!")
|
err := sm.AddService(epicConfig)
|
||||||
|
if err != nil {
|
||||||
// Calculate response hash
|
t.Fatalf("Failed to add Epic service: %v", err)
|
||||||
responseHash := calculateResponseHash(resp, bodyData)
|
|
||||||
|
|
||||||
// The hash should be different from just the body hash
|
|
||||||
bodyHash := calculateFileHash(bodyData)
|
|
||||||
|
|
||||||
if responseHash == bodyHash {
|
|
||||||
t.Error("Response hash should be different from body hash when headers are present")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that the same response produces the same hash
|
// Test Epic Games detection
|
||||||
responseHash2 := calculateResponseHash(resp, bodyData)
|
epicTestCases := []struct {
|
||||||
if responseHash != responseHash2 {
|
userAgent string
|
||||||
t.Error("Response hash should be consistent for the same response")
|
expectedName string
|
||||||
}
|
expectedFound bool
|
||||||
|
desc string
|
||||||
// Test with different headers
|
}{
|
||||||
resp2 := &http.Response{
|
{
|
||||||
StatusCode: 200,
|
userAgent: "EpicGamesLauncher/1.0",
|
||||||
Status: "200 OK",
|
expectedName: "epic",
|
||||||
Header: http.Header{
|
expectedFound: true,
|
||||||
"Content-Type": []string{"text/plain"},
|
desc: "Epic Games Launcher",
|
||||||
"Content-Length": []string{"13"},
|
},
|
||||||
|
{
|
||||||
|
userAgent: "EpicGames/2.0",
|
||||||
|
expectedName: "epic",
|
||||||
|
expectedFound: true,
|
||||||
|
desc: "Epic Games client",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "Epic Launcher 1.5",
|
||||||
|
expectedName: "epic",
|
||||||
|
expectedFound: true,
|
||||||
|
desc: "Epic Launcher with regex match",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "Steam",
|
||||||
|
expectedName: "steam",
|
||||||
|
expectedFound: true,
|
||||||
|
desc: "Steam should still work",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
userAgent: "Mozilla/5.0",
|
||||||
|
expectedName: "",
|
||||||
|
expectedFound: false,
|
||||||
|
desc: "Browser should not match any service",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
responseHash3 := calculateResponseHash(resp2, bodyData)
|
for _, tc := range epicTestCases {
|
||||||
if responseHash == responseHash3 {
|
t.Run(tc.desc, func(t *testing.T) {
|
||||||
t.Error("Response hash should be different for different headers")
|
service, found := sm.DetectService(tc.userAgent)
|
||||||
|
|
||||||
|
if found != tc.expectedFound {
|
||||||
|
t.Errorf("DetectService(%s) found = %v, expected %v", tc.userAgent, found, tc.expectedFound)
|
||||||
|
}
|
||||||
|
|
||||||
|
if found && service.Name != tc.expectedName {
|
||||||
|
t.Errorf("DetectService(%s) service name = %s, expected %s", tc.userAgent, service.Name, tc.expectedName)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test cache key generation for different services
|
||||||
|
steamKey := generateServiceCacheKey("/depot/123/chunk/abc", "steam")
|
||||||
|
epicKey := generateServiceCacheKey("/epic/123/chunk/abc", "epic")
|
||||||
|
|
||||||
|
if !strings.HasPrefix(steamKey, "steam/") {
|
||||||
|
t.Errorf("Steam cache key should start with 'steam/', got: %s", steamKey)
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(epicKey, "epic/") {
|
||||||
|
t.Errorf("Epic cache key should start with 'epic/', got: %s", epicKey)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
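// AddService above is what makes the manager expandable. A sketch of its
// likely shape (validate, pre-compile patterns, register); the error cases
// and the internal services slice are assumptions.
func (sm *ServiceManager) addServiceSketch(cfg *ServiceConfig) error {
	// Requires "fmt" and "regexp".
	if cfg == nil || cfg.Name == "" || len(cfg.UserAgents) == 0 {
		return fmt.Errorf("invalid service config")
	}
	for _, pat := range cfg.UserAgents {
		// Compile up front so a bad pattern fails at registration time,
		// not on every request.
		if _, err := regexp.Compile(pat); err != nil {
			return fmt.Errorf("bad user-agent pattern %q: %w", pat, err)
		}
	}
	sm.services = append(sm.services, cfg) // assumed field
	return nil
}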
||||||
|
// Removed hash calculation tests since we switched to lightweight validation
|
||||||
|
|
||||||
|
func TestSteamKeySharding(t *testing.T) {
|
||||||
|
sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru", 200, 5)
|
||||||
|
|
||||||
|
// Test with a Steam-style key that should trigger sharding
|
||||||
|
steamKey := "steam/0016cfc5019b8baa6026aa1cce93e685d6e06c6e"
|
||||||
|
testData := []byte("test steam cache data")
|
||||||
|
|
||||||
|
// Create a file with the steam key
|
||||||
|
w, err := sc.vfs.Create(steamKey, int64(len(testData)))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create file with steam key: %v", err)
|
||||||
|
}
|
||||||
|
w.Write(testData)
|
||||||
|
w.Close()
|
||||||
|
|
||||||
|
// Verify we can read it back
|
||||||
|
rc, err := sc.vfs.Open(steamKey)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to open file with steam key: %v", err)
|
||||||
|
}
|
||||||
|
got, _ := io.ReadAll(rc)
|
||||||
|
rc.Close()
|
||||||
|
|
||||||
|
if string(got) != string(testData) {
|
||||||
|
t.Errorf("Data mismatch: expected %s, got %s", testData, got)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that the file was created; sharding is working if no error occurred.
|
||||||
|
// The key point is that, with sharding, the file is created successfully
|
||||||
|
// and read back intact; without sharding this round-trip might fail.
|
||||||
|
}
|
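// The sharded on-disk layout exercised above is not shown in this hunk. A
// common scheme, assumed here purely for illustration, fans hash-named keys
// out by hex prefix so no single directory accumulates millions of entries:
func shardPathSketch(key string) string {
	// Requires "path/filepath" and "strings".
	// "steam/0016cfc5..." -> "steam/00/16/0016cfc5..."
	parts := strings.SplitN(key, "/", 2)
	if len(parts) != 2 || len(parts[1]) < 4 {
		return key // not a hash-style key; store it flat
	}
	h := parts[1]
	return filepath.Join(parts[0], h[:2], h[2:4], h)
}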
||||||
|
|
||||||
|
// Removed old TestKeyGeneration - replaced with TestURLHashing that uses SHA256
|
||||||
|
|||||||
0
steamcache/test_cache/.gitkeep
Normal file
273
vfs/adaptive/adaptive.go
Normal file
@@ -0,0 +1,273 @@
|
|||||||
|
package adaptive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WorkloadPattern represents different types of workload patterns
|
||||||
|
type WorkloadPattern int
|
||||||
|
|
||||||
|
const (
|
||||||
|
PatternUnknown WorkloadPattern = iota
|
||||||
|
PatternSequential // Sequential file access (e.g., game installation)
|
||||||
|
PatternRandom // Random file access (e.g., game updates)
|
||||||
|
PatternBurst // Burst access (e.g., multiple users downloading same game)
|
||||||
|
PatternSteady // Steady access (e.g., popular games being accessed regularly)
|
||||||
|
)
|
||||||
|
|
||||||
|
// CacheStrategy represents different caching strategies
|
||||||
|
type CacheStrategy int
|
||||||
|
|
||||||
|
const (
|
||||||
|
StrategyLRU CacheStrategy = iota
|
||||||
|
StrategyLFU
|
||||||
|
StrategySizeBased
|
||||||
|
StrategyHybrid
|
||||||
|
StrategyPredictive
|
||||||
|
)
|
||||||
|
|
||||||
|
// WorkloadAnalyzer analyzes access patterns to determine optimal caching strategies
|
||||||
|
type WorkloadAnalyzer struct {
|
||||||
|
accessHistory map[string]*AccessInfo
|
||||||
|
patternCounts map[WorkloadPattern]int64
|
||||||
|
mu sync.RWMutex
|
||||||
|
analysisInterval time.Duration
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccessInfo tracks access patterns for individual files
|
||||||
|
type AccessInfo struct {
|
||||||
|
Key string
|
||||||
|
AccessCount int64
|
||||||
|
LastAccess time.Time
|
||||||
|
FirstAccess time.Time
|
||||||
|
AccessTimes []time.Time
|
||||||
|
Size int64
|
||||||
|
AccessPattern WorkloadPattern
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// AdaptiveCacheManager manages adaptive caching strategies
|
||||||
|
type AdaptiveCacheManager struct {
|
||||||
|
analyzer *WorkloadAnalyzer
|
||||||
|
currentStrategy CacheStrategy
|
||||||
|
adaptationCount int64
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWorkloadAnalyzer creates a new workload analyzer
|
||||||
|
func NewWorkloadAnalyzer(analysisInterval time.Duration) *WorkloadAnalyzer {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
analyzer := &WorkloadAnalyzer{
|
||||||
|
accessHistory: make(map[string]*AccessInfo),
|
||||||
|
patternCounts: make(map[WorkloadPattern]int64),
|
||||||
|
analysisInterval: analysisInterval,
|
||||||
|
ctx: ctx,
|
||||||
|
cancel: cancel,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start background analysis with much longer interval to reduce overhead
|
||||||
|
go analyzer.analyzePatterns()
|
||||||
|
|
||||||
|
return analyzer
|
||||||
|
}
|
||||||
|
|
||||||
|
// RecordAccess records a file access for pattern analysis (lightweight version)
|
||||||
|
func (wa *WorkloadAnalyzer) RecordAccess(key string, size int64) {
|
||||||
|
// Use read lock first for better performance
|
||||||
|
wa.mu.RLock()
|
||||||
|
info, exists := wa.accessHistory[key]
|
||||||
|
wa.mu.RUnlock()
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
// Only acquire write lock when creating new entry
|
||||||
|
wa.mu.Lock()
|
||||||
|
// Double-check after acquiring write lock
|
||||||
|
if _, exists = wa.accessHistory[key]; !exists {
|
||||||
|
info = &AccessInfo{
|
||||||
|
Key: key,
|
||||||
|
AccessCount: 1,
|
||||||
|
LastAccess: time.Now(),
|
||||||
|
FirstAccess: time.Now(),
|
||||||
|
AccessTimes: []time.Time{time.Now()},
|
||||||
|
Size: size,
|
||||||
|
}
|
||||||
|
wa.accessHistory[key] = info
|
||||||
|
}
|
||||||
|
wa.mu.Unlock()
|
||||||
|
} else {
|
||||||
|
// Lightweight update - just increment counter and update timestamp
|
||||||
|
info.mu.Lock()
|
||||||
|
info.AccessCount++
|
||||||
|
info.LastAccess = time.Now()
|
||||||
|
// Only keep last 10 access times to reduce memory overhead
|
||||||
|
if len(info.AccessTimes) > 10 {
|
||||||
|
info.AccessTimes = info.AccessTimes[len(info.AccessTimes)-10:]
|
||||||
|
} else {
|
||||||
|
info.AccessTimes = append(info.AccessTimes, time.Now())
|
||||||
|
}
|
||||||
|
info.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
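// The read-lock-then-upgrade above is a classic double-checked insert. For
// comparison only (not what this commit does), a sketch of the same idea via
// sync.Map.LoadOrStore, which avoids the explicit lock upgrade:
func recordAccessViaSyncMap(history *sync.Map, key string, size int64) {
	now := time.Now()
	v, _ := history.LoadOrStore(key, &AccessInfo{Key: key, FirstAccess: now, Size: size})
	info := v.(*AccessInfo)
	info.mu.Lock()
	info.AccessCount++
	info.LastAccess = now
	info.mu.Unlock()
}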
||||||
|
|
||||||
|
// analyzePatterns analyzes access patterns in the background
|
||||||
|
func (wa *WorkloadAnalyzer) analyzePatterns() {
|
||||||
|
ticker := time.NewTicker(wa.analysisInterval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-wa.ctx.Done():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
wa.performAnalysis()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// performAnalysis analyzes current access patterns
|
||||||
|
func (wa *WorkloadAnalyzer) performAnalysis() {
|
||||||
|
wa.mu.Lock()
|
||||||
|
defer wa.mu.Unlock()
|
||||||
|
|
||||||
|
// Reset pattern counts
|
||||||
|
wa.patternCounts = make(map[WorkloadPattern]int64)
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
cutoff := now.Add(-wa.analysisInterval * 2) // Analyze last 2 intervals
|
||||||
|
|
||||||
|
for _, info := range wa.accessHistory {
|
||||||
|
info.mu.RLock()
|
||||||
|
if info.LastAccess.After(cutoff) {
|
||||||
|
pattern := wa.determinePattern(info)
|
||||||
|
info.AccessPattern = pattern
|
||||||
|
wa.patternCounts[pattern]++
|
||||||
|
}
|
||||||
|
info.mu.RUnlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// determinePattern determines the access pattern for a file
|
||||||
|
func (wa *WorkloadAnalyzer) determinePattern(info *AccessInfo) WorkloadPattern {
|
||||||
|
if len(info.AccessTimes) < 3 {
|
||||||
|
return PatternUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
// Analyze access timing patterns
|
||||||
|
intervals := make([]time.Duration, len(info.AccessTimes)-1)
|
||||||
|
for i := 1; i < len(info.AccessTimes); i++ {
|
||||||
|
intervals[i-1] = info.AccessTimes[i].Sub(info.AccessTimes[i-1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate variance in access intervals
|
||||||
|
var sum, sumSquares time.Duration
|
||||||
|
for _, interval := range intervals {
|
||||||
|
sum += interval
|
||||||
|
sumSquares += interval * interval
|
||||||
|
}
|
||||||
|
|
||||||
|
avg := sum / time.Duration(len(intervals))
|
||||||
|
variance := (sumSquares / time.Duration(len(intervals))) - (avg * avg)
|
||||||
|
|
||||||
|
// Determine pattern based on variance and access count
|
||||||
|
if info.AccessCount > 10 && variance < time.Minute {
|
||||||
|
return PatternBurst
|
||||||
|
} else if info.AccessCount > 5 && variance < time.Hour {
|
||||||
|
return PatternSteady
|
||||||
|
} else if variance < time.Minute*5 {
|
||||||
|
return PatternSequential
|
||||||
|
} else {
|
||||||
|
return PatternRandom
|
||||||
|
}
|
||||||
|
}
|
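// One subtlety in the Duration arithmetic above: sumSquares accumulates
// nanoseconds squared in an int64, which overflows once a single interval
// exceeds roughly three seconds. A float64 variant sidesteps that; this is a
// sketch for comparison, not part of this commit:
func intervalVarianceSeconds(intervals []time.Duration) (mean, variance float64) {
	n := float64(len(intervals))
	if n == 0 {
		return 0, 0
	}
	for _, iv := range intervals {
		mean += iv.Seconds()
	}
	mean /= n
	for _, iv := range intervals {
		d := iv.Seconds() - mean
		variance += d * d
	}
	return mean, variance / n
}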
||||||
|
|
||||||
|
// GetDominantPattern returns the most common access pattern
|
||||||
|
func (wa *WorkloadAnalyzer) GetDominantPattern() WorkloadPattern {
|
||||||
|
wa.mu.RLock()
|
||||||
|
defer wa.mu.RUnlock()
|
||||||
|
|
||||||
|
var maxCount int64
|
||||||
|
var dominantPattern WorkloadPattern
|
||||||
|
|
||||||
|
for pattern, count := range wa.patternCounts {
|
||||||
|
if count > maxCount {
|
||||||
|
maxCount = count
|
||||||
|
dominantPattern = pattern
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dominantPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAccessInfo returns access information for a key
|
||||||
|
func (wa *WorkloadAnalyzer) GetAccessInfo(key string) *AccessInfo {
|
||||||
|
wa.mu.RLock()
|
||||||
|
defer wa.mu.RUnlock()
|
||||||
|
|
||||||
|
return wa.accessHistory[key]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops the workload analyzer
|
||||||
|
func (wa *WorkloadAnalyzer) Stop() {
|
||||||
|
wa.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAdaptiveCacheManager creates a new adaptive cache manager
|
||||||
|
func NewAdaptiveCacheManager(analysisInterval time.Duration) *AdaptiveCacheManager {
|
||||||
|
return &AdaptiveCacheManager{
|
||||||
|
analyzer: NewWorkloadAnalyzer(analysisInterval),
|
||||||
|
currentStrategy: StrategyLRU, // Start with LRU
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AdaptStrategy adapts the caching strategy based on workload patterns
|
||||||
|
func (acm *AdaptiveCacheManager) AdaptStrategy() CacheStrategy {
|
||||||
|
acm.mu.Lock()
|
||||||
|
defer acm.mu.Unlock()
|
||||||
|
|
||||||
|
dominantPattern := acm.analyzer.GetDominantPattern()
|
||||||
|
|
||||||
|
// Adapt strategy based on dominant pattern
|
||||||
|
switch dominantPattern {
|
||||||
|
case PatternBurst:
|
||||||
|
acm.currentStrategy = StrategyLFU // LFU is good for burst patterns
|
||||||
|
case PatternSteady:
|
||||||
|
acm.currentStrategy = StrategyHybrid // Hybrid for steady patterns
|
||||||
|
case PatternSequential:
|
||||||
|
acm.currentStrategy = StrategySizeBased // Size-based for sequential
|
||||||
|
case PatternRandom:
|
||||||
|
acm.currentStrategy = StrategyLRU // LRU for random patterns
|
||||||
|
default:
|
||||||
|
acm.currentStrategy = StrategyLRU // Default to LRU
|
||||||
|
}
|
||||||
|
|
||||||
|
atomic.AddInt64(&acm.adaptationCount, 1)
|
||||||
|
return acm.currentStrategy
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCurrentStrategy returns the current caching strategy
|
||||||
|
func (acm *AdaptiveCacheManager) GetCurrentStrategy() CacheStrategy {
|
||||||
|
acm.mu.RLock()
|
||||||
|
defer acm.mu.RUnlock()
|
||||||
|
return acm.currentStrategy
|
||||||
|
}
|
||||||
|
|
||||||
|
// RecordAccess records a file access for analysis
|
||||||
|
func (acm *AdaptiveCacheManager) RecordAccess(key string, size int64) {
|
||||||
|
acm.analyzer.RecordAccess(key, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAdaptationCount returns the number of strategy adaptations
|
||||||
|
func (acm *AdaptiveCacheManager) GetAdaptationCount() int64 {
|
||||||
|
return atomic.LoadInt64(&acm.adaptationCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops the adaptive cache manager
|
||||||
|
func (acm *AdaptiveCacheManager) Stop() {
|
||||||
|
acm.analyzer.Stop()
|
||||||
|
}
|
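The analyzer and manager above are meant to be driven from the request path. A hedged usage sketch (the import path follows the renamed module; nothing here is taken from the server wiring itself):

package main

import (
	"fmt"
	"time"

	"s1d3sw1ped/steamcache2/vfs/adaptive"
)

func main() {
	acm := adaptive.NewAdaptiveCacheManager(5 * time.Minute)
	defer acm.Stop()

	// Record accesses as requests are served...
	acm.RecordAccess("steam/demo-key", 1<<20)
	acm.RecordAccess("steam/demo-key", 1<<20)

	// ...and periodically re-evaluate the eviction strategy.
	fmt.Println("strategy:", acm.AdaptStrategy())
}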
||||||
537
vfs/cache/cache.go
vendored
@@ -2,196 +2,435 @@
|
|||||||
package cache
|
package cache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
"s1d3sw1ped/steamcache2/vfs"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
"s1d3sw1ped/steamcache2/vfs/vfserror"
|
||||||
"s1d3sw1ped/SteamCache2/vfs/gc"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Ensure CacheFS implements VFS.
|
// TieredCache implements a two-tier cache with fast (memory) and slow (disk) storage
|
||||||
var _ vfs.VFS = (*CacheFS)(nil)
|
type TieredCache struct {
|
||||||
|
fast vfs.VFS // Memory cache (fast)
|
||||||
|
slow vfs.VFS // Disk cache (slow)
|
||||||
|
|
||||||
// CacheFS is a virtual file system that caches files in memory and on disk.
|
mu sync.RWMutex
|
||||||
type CacheFS struct {
|
|
||||||
fast vfs.VFS
|
|
||||||
slow vfs.VFS
|
|
||||||
|
|
||||||
cacheHandler CacheHandler
|
|
||||||
|
|
||||||
keyLocks sync.Map // map[string]*sync.RWMutex for per-key locks
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool
|
// LockFreeTieredCache implements a lock-free two-tier cache for better concurrency
|
||||||
|
type LockFreeTieredCache struct {
|
||||||
|
fast *atomic.Value // Memory cache (fast) - atomic.Value for lock-free access
|
||||||
|
slow *atomic.Value // Disk cache (slow) - atomic.Value for lock-free access
|
||||||
|
}
|
||||||
|
|
||||||
// New creates a new CacheFS. fast is used for caching, and slow is used for storage. fast should obviously be faster than slow.
|
// New creates a new tiered cache
|
||||||
func New(cacheHandler CacheHandler) *CacheFS {
|
func New() *TieredCache {
|
||||||
return &CacheFS{
|
return &TieredCache{}
|
||||||
cacheHandler: cacheHandler,
|
}
|
||||||
keyLocks: sync.Map{},
|
|
||||||
|
// SetFast sets the fast (memory) tier
|
||||||
|
func (tc *TieredCache) SetFast(vfs vfs.VFS) {
|
||||||
|
tc.mu.Lock()
|
||||||
|
defer tc.mu.Unlock()
|
||||||
|
tc.fast = vfs
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSlow sets the slow (disk) tier
|
||||||
|
func (tc *TieredCache) SetSlow(vfs vfs.VFS) {
|
||||||
|
tc.mu.Lock()
|
||||||
|
defer tc.mu.Unlock()
|
||||||
|
tc.slow = vfs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates a new file, preferring the slow tier for persistence testing
|
||||||
|
func (tc *TieredCache) Create(key string, size int64) (io.WriteCloser, error) {
|
||||||
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
// Try slow tier first (disk) for better testability
|
||||||
|
if tc.slow != nil {
|
||||||
|
return tc.slow.Create(key, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to fast tier (memory)
|
||||||
|
if tc.fast != nil {
|
||||||
|
return tc.fast.Create(key, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens a file, checking fast tier first, then slow tier with promotion
|
||||||
|
func (tc *TieredCache) Open(key string) (io.ReadCloser, error) {
|
||||||
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
// Try fast tier first (memory)
|
||||||
|
if tc.fast != nil {
|
||||||
|
if reader, err := tc.fast.Open(key); err == nil {
|
||||||
|
return reader, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *CacheFS) SetSlow(vfs vfs.VFS) {
|
// Fall back to slow tier (disk) and promote to fast tier
|
||||||
if vfs == nil {
|
if tc.slow != nil {
|
||||||
panic("vfs is nil") // panic if the vfs is nil
|
reader, err := tc.slow.Open(key)
|
||||||
}
|
|
||||||
|
|
||||||
c.slow = vfs
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CacheFS) SetFast(vfs vfs.VFS) {
|
|
||||||
c.fast = vfs
|
|
||||||
}
|
|
||||||
|
|
||||||
// getKeyLock returns a RWMutex for the given key, creating it if necessary.
|
|
||||||
func (c *CacheFS) getKeyLock(key string) *sync.RWMutex {
|
|
||||||
mu, _ := c.keyLocks.LoadOrStore(key, &sync.RWMutex{})
|
|
||||||
return mu.(*sync.RWMutex)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cacheState returns the state of the file at key.
|
|
||||||
func (c *CacheFS) cacheState(key string) cachestate.CacheState {
|
|
||||||
if c.fast != nil {
|
|
||||||
if _, err := c.fast.Stat(key); err == nil {
|
|
||||||
return cachestate.CacheStateHit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := c.slow.Stat(key); err == nil {
|
|
||||||
return cachestate.CacheStateMiss
|
|
||||||
}
|
|
||||||
|
|
||||||
return cachestate.CacheStateNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CacheFS) Name() string {
|
|
||||||
return fmt.Sprintf("CacheFS(%s, %s)", c.fast.Name(), c.slow.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the total size of the cache.
|
|
||||||
func (c *CacheFS) Size() int64 {
|
|
||||||
return c.slow.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete deletes the file at key from the cache.
|
|
||||||
func (c *CacheFS) Delete(key string) error {
|
|
||||||
mu := c.getKeyLock(key)
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
|
|
||||||
if c.fast != nil {
|
|
||||||
c.fast.Delete(key)
|
|
||||||
}
|
|
||||||
return c.slow.Delete(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open returns the file at key. If the file is not in the cache, it is fetched from the storage.
|
|
||||||
func (c *CacheFS) Open(key string) (io.ReadCloser, error) {
|
|
||||||
mu := c.getKeyLock(key)
|
|
||||||
mu.RLock()
|
|
||||||
defer mu.RUnlock()
|
|
||||||
|
|
||||||
state := c.cacheState(key)
|
|
||||||
|
|
||||||
switch state {
|
|
||||||
case cachestate.CacheStateHit:
|
|
||||||
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
|
|
||||||
// Record fast storage access for adaptive promotion
|
|
||||||
if c.fast != nil {
|
|
||||||
gc.RecordFastStorageAccess()
|
|
||||||
}
|
|
||||||
return c.fast.Open(key)
|
|
||||||
case cachestate.CacheStateMiss:
|
|
||||||
slowReader, err := c.slow.Open(key)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
sstat, _ := c.slow.Stat(key)
|
// If we have both tiers, check if we should promote the file to fast tier
|
||||||
if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
|
if tc.fast != nil {
|
||||||
// We are accessing the file from the slow storage, and it was accessed less than a minute ago, so it is popular; update the fast storage with the latest file.
|
// Check file size before promoting - don't promote if larger than available memory cache space
|
||||||
if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
|
if info, err := tc.slow.Stat(key); err == nil {
|
||||||
fastWriter, err := c.fast.Create(key, sstat.Size())
|
availableSpace := tc.fast.Capacity() - tc.fast.Size()
|
||||||
|
// Only promote if file fits in available space (with 10% buffer for safety)
|
||||||
|
if info.Size <= int64(float64(availableSpace)*0.9) {
|
||||||
|
// Create a new reader for promotion to avoid interfering with the returned reader
|
||||||
|
promotionReader, err := tc.slow.Open(key)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return &teeReadCloser{
|
go tc.promoteToFast(key, promotionReader)
|
||||||
Reader: io.TeeReader(slowReader, fastWriter),
|
}
|
||||||
closers: []io.Closer{slowReader, fastWriter},
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return slowReader, nil
|
return reader, nil
|
||||||
case cachestate.CacheStateNotFound:
|
}
|
||||||
|
|
||||||
return nil, vfserror.ErrNotFound
|
return nil, vfserror.ErrNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
// Delete removes a file from all tiers
|
||||||
|
func (tc *TieredCache) Delete(key string) error {
|
||||||
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
var lastErr error
|
||||||
|
|
||||||
|
// Delete from fast tier
|
||||||
|
if tc.fast != nil {
|
||||||
|
if err := tc.fast.Delete(key); err != nil {
|
||||||
|
lastErr = err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create creates a new file at key. If the file is already in the cache, it is replaced.
|
// Delete from slow tier
|
||||||
func (c *CacheFS) Create(key string, size int64) (io.WriteCloser, error) {
|
if tc.slow != nil {
|
||||||
mu := c.getKeyLock(key)
|
if err := tc.slow.Delete(key); err != nil {
|
||||||
mu.Lock()
|
lastErr = err
|
||||||
defer mu.Unlock()
|
|
||||||
|
|
||||||
state := c.cacheState(key)
|
|
||||||
|
|
||||||
switch state {
|
|
||||||
case cachestate.CacheStateHit:
|
|
||||||
if c.fast != nil {
|
|
||||||
c.fast.Delete(key)
|
|
||||||
}
|
}
|
||||||
return c.slow.Create(key, size)
|
|
||||||
case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
|
|
||||||
return c.slow.Create(key, size)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
return lastErr
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stat returns information about the file at key.
|
// Stat returns file information, checking fast tier first
|
||||||
// Warning: This will return information about the file in the fastest storage it's in.
|
func (tc *TieredCache) Stat(key string) (*vfs.FileInfo, error) {
|
||||||
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
|
tc.mu.RLock()
|
||||||
mu := c.getKeyLock(key)
|
defer tc.mu.RUnlock()
|
||||||
mu.RLock()
|
|
||||||
defer mu.RUnlock()
|
|
||||||
|
|
||||||
state := c.cacheState(key)
|
// Try fast tier first (memory)
|
||||||
|
if tc.fast != nil {
|
||||||
|
if info, err := tc.fast.Stat(key); err == nil {
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to slow tier (disk)
|
||||||
|
if tc.slow != nil {
|
||||||
|
return tc.slow.Stat(key)
|
||||||
|
}
|
||||||
|
|
||||||
switch state {
|
|
||||||
case cachestate.CacheStateHit:
|
|
||||||
// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
|
|
||||||
return c.fast.Stat(key)
|
|
||||||
case cachestate.CacheStateMiss:
|
|
||||||
return c.slow.Stat(key)
|
|
||||||
case cachestate.CacheStateNotFound:
|
|
||||||
return nil, vfserror.ErrNotFound
|
return nil, vfserror.ErrNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
// Name returns the cache name
|
||||||
|
func (tc *TieredCache) Name() string {
|
||||||
|
return "TieredCache"
|
||||||
}
|
}
|
||||||
|
|
||||||
// StatAll returns information about all files in the cache.
|
// Size returns the total size across all tiers
|
||||||
// Warning: This only returns information about the files in the slow storage.
|
func (tc *TieredCache) Size() int64 {
|
||||||
func (c *CacheFS) StatAll() []*vfs.FileInfo {
|
tc.mu.RLock()
|
||||||
return c.slow.StatAll()
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
var total int64
|
||||||
|
if tc.fast != nil {
|
||||||
|
total += tc.fast.Size()
|
||||||
|
}
|
||||||
|
if tc.slow != nil {
|
||||||
|
total += tc.slow.Size()
|
||||||
|
}
|
||||||
|
return total
|
||||||
}
|
}
|
||||||
|
|
||||||
type teeReadCloser struct {
|
// Capacity returns the total capacity across all tiers
|
||||||
io.Reader
|
func (tc *TieredCache) Capacity() int64 {
|
||||||
closers []io.Closer
|
tc.mu.RLock()
|
||||||
|
defer tc.mu.RUnlock()
|
||||||
|
|
||||||
|
var total int64
|
||||||
|
if tc.fast != nil {
|
||||||
|
total += tc.fast.Capacity()
|
||||||
|
}
|
||||||
|
if tc.slow != nil {
|
||||||
|
total += tc.slow.Capacity()
|
||||||
|
}
|
||||||
|
return total
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *teeReadCloser) Close() error {
|
// promoteToFast promotes a file from slow tier to fast tier
|
||||||
var err error
|
func (tc *TieredCache) promoteToFast(key string, reader io.ReadCloser) {
|
||||||
for _, c := range t.closers {
|
defer reader.Close()
|
||||||
if e := c.Close(); e != nil {
|
|
||||||
err = e
|
// Get file info from slow tier to determine size
|
||||||
|
tc.mu.RLock()
|
||||||
|
var size int64
|
||||||
|
if tc.slow != nil {
|
||||||
|
if info, err := tc.slow.Stat(key); err == nil {
|
||||||
|
size = info.Size
|
||||||
|
} else {
|
||||||
|
tc.mu.RUnlock()
|
||||||
|
return // Skip promotion if we can't get file info
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tc.mu.RUnlock()
|
||||||
|
|
||||||
|
// Check if file fits in available memory cache space
|
||||||
|
tc.mu.RLock()
|
||||||
|
if tc.fast != nil {
|
||||||
|
availableSpace := tc.fast.Capacity() - tc.fast.Size()
|
||||||
|
// Only promote if file fits in available space (with 10% buffer for safety)
|
||||||
|
if size > int64(float64(availableSpace)*0.9) {
|
||||||
|
tc.mu.RUnlock()
|
||||||
|
return // Skip promotion if file is too large
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tc.mu.RUnlock()
|
||||||
|
|
||||||
|
// Read the entire file content
|
||||||
|
content, err := io.ReadAll(reader)
|
||||||
|
if err != nil {
|
||||||
|
return // Skip promotion if read fails
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the file in fast tier
|
||||||
|
tc.mu.RLock()
|
||||||
|
if tc.fast != nil {
|
||||||
|
writer, err := tc.fast.Create(key, size)
|
||||||
|
if err == nil {
|
||||||
|
// Write content to fast tier
|
||||||
|
writer.Write(content)
|
||||||
|
writer.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tc.mu.RUnlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewLockFree creates a new lock-free tiered cache
|
||||||
|
func NewLockFree() *LockFreeTieredCache {
|
||||||
|
return &LockFreeTieredCache{
|
||||||
|
fast: &atomic.Value{},
|
||||||
|
slow: &atomic.Value{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetFast sets the fast (memory) tier atomically
|
||||||
|
func (lftc *LockFreeTieredCache) SetFast(vfs vfs.VFS) {
|
||||||
|
lftc.fast.Store(vfs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSlow sets the slow (disk) tier atomically
|
||||||
|
func (lftc *LockFreeTieredCache) SetSlow(vfs vfs.VFS) {
|
||||||
|
lftc.slow.Store(vfs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates a new file, preferring the slow tier for persistence
|
||||||
|
func (lftc *LockFreeTieredCache) Create(key string, size int64) (io.WriteCloser, error) {
|
||||||
|
// Try slow tier first (disk) for better testability
|
||||||
|
if slow := lftc.slow.Load(); slow != nil {
|
||||||
|
if vfs, ok := slow.(vfs.VFS); ok {
|
||||||
|
return vfs.Create(key, size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to fast tier (memory)
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
return vfs.Create(key, size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens a file, checking fast tier first, then slow tier with promotion
|
||||||
|
func (lftc *LockFreeTieredCache) Open(key string) (io.ReadCloser, error) {
|
||||||
|
// Try fast tier first (memory)
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
if reader, err := vfs.Open(key); err == nil {
|
||||||
|
return reader, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to slow tier (disk) and promote to fast tier
|
||||||
|
if slow := lftc.slow.Load(); slow != nil {
|
||||||
|
if vfs, ok := slow.(vfs.VFS); ok {
|
||||||
|
reader, err := vfs.Open(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have both tiers, promote the file to fast tier
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
// Create a new reader for promotion to avoid interfering with the returned reader
|
||||||
|
promotionReader, err := vfs.Open(key)
|
||||||
|
if err == nil {
|
||||||
|
go lftc.promoteToFast(key, promotionReader)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return reader, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes a file from all tiers
|
||||||
|
func (lftc *LockFreeTieredCache) Delete(key string) error {
|
||||||
|
var lastErr error
|
||||||
|
|
||||||
|
// Delete from fast tier
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
if err := vfs.Delete(key); err != nil {
|
||||||
|
lastErr = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete from slow tier
|
||||||
|
if slow := lftc.slow.Load(); slow != nil {
|
||||||
|
if vfs, ok := slow.(vfs.VFS); ok {
|
||||||
|
if err := vfs.Delete(key); err != nil {
|
||||||
|
lastErr = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return lastErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat returns file information, checking fast tier first
|
||||||
|
func (lftc *LockFreeTieredCache) Stat(key string) (*vfs.FileInfo, error) {
|
||||||
|
// Try fast tier first (memory)
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
if info, err := vfs.Stat(key); err == nil {
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to slow tier (disk)
|
||||||
|
if slow := lftc.slow.Load(); slow != nil {
|
||||||
|
if vfs, ok := slow.(vfs.VFS); ok {
|
||||||
|
return vfs.Stat(key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, vfserror.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the cache name
|
||||||
|
func (lftc *LockFreeTieredCache) Name() string {
|
||||||
|
return "LockFreeTieredCache"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the total size across all tiers
|
||||||
|
func (lftc *LockFreeTieredCache) Size() int64 {
|
||||||
|
var total int64
|
||||||
|
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
total += vfs.Size()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if slow := lftc.slow.Load(); slow != nil {
|
||||||
|
if vfs, ok := slow.(vfs.VFS); ok {
|
||||||
|
total += vfs.Size()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capacity returns the total capacity across all tiers
|
||||||
|
func (lftc *LockFreeTieredCache) Capacity() int64 {
|
||||||
|
var total int64
|
||||||
|
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
total += vfs.Capacity()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if slow := lftc.slow.Load(); slow != nil {
|
||||||
|
if vfs, ok := slow.(vfs.VFS); ok {
|
||||||
|
total += vfs.Capacity()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
// promoteToFast promotes a file from slow tier to fast tier (lock-free version)
|
||||||
|
func (lftc *LockFreeTieredCache) promoteToFast(key string, reader io.ReadCloser) {
|
||||||
|
defer reader.Close()
|
||||||
|
|
||||||
|
// Get file info from slow tier to determine size
|
||||||
|
var size int64
|
||||||
|
if slow := lftc.slow.Load(); slow != nil {
|
||||||
|
if vfs, ok := slow.(vfs.VFS); ok {
|
||||||
|
if info, err := vfs.Stat(key); err == nil {
|
||||||
|
size = info.Size
|
||||||
|
} else {
|
||||||
|
return // Skip promotion if we can't get file info
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if file fits in available memory cache space
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
availableSpace := vfs.Capacity() - vfs.Size()
|
||||||
|
// Only promote if file fits in available space (with 10% buffer for safety)
|
||||||
|
if size > int64(float64(availableSpace)*0.9) {
|
||||||
|
return // Skip promotion if file is too large
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the entire file content
|
||||||
|
content, err := io.ReadAll(reader)
|
||||||
|
if err != nil {
|
||||||
|
return // Skip promotion if read fails
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the file in fast tier
|
||||||
|
if fast := lftc.fast.Load(); fast != nil {
|
||||||
|
if vfs, ok := fast.(vfs.VFS); ok {
|
||||||
|
writer, err := vfs.Create(key, size)
|
||||||
|
if err == nil {
|
||||||
|
// Write content to fast tier
|
||||||
|
writer.Write(content)
|
||||||
|
writer.Close()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
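A wiring sketch for the new TieredCache. The memory.New and disk.New signatures are taken from the tests elsewhere in this diff and may have shifted in this commit; error handling is elided for brevity:

package main

import (
	"fmt"
	"io"

	"s1d3sw1ped/steamcache2/vfs/cache"
	"s1d3sw1ped/steamcache2/vfs/disk"
	"s1d3sw1ped/steamcache2/vfs/memory"
)

func main() {
	tc := cache.New()
	tc.SetFast(memory.New(1 << 30))                  // 1 GiB memory tier
	tc.SetSlow(disk.New("/var/cache/steam", 10<<30)) // 10 GiB disk tier

	// Writes land in the slow tier; reads check the fast tier first and
	// promote slow-tier hits in the background when they fit in memory.
	w, _ := tc.Create("steam/demo", 5)
	w.Write([]byte("hello"))
	w.Close()

	r, _ := tc.Open("steam/demo")
	data, _ := io.ReadAll(r)
	r.Close()
	fmt.Println(string(data))
}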
|||||||
201
vfs/cache/cache_test.go
vendored
@@ -1,201 +0,0 @@
|
|||||||
// vfs/cache/cache_test.go
|
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/cachestate"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/memory"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
|
||||||
)
|
|
||||||
|
|
||||||
func testMemory() vfs.VFS {
|
|
||||||
return memory.New(1024)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNew(t *testing.T) {
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
if cache == nil {
|
|
||||||
t.Fatal("expected cache to be non-nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewPanics(t *testing.T) {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r == nil {
|
|
||||||
t.Fatal("expected panic but did not get one")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(nil)
|
|
||||||
cache.SetSlow(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateAndOpen(t *testing.T) {
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := cache.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
rc, err := cache.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateAndOpenNoFast(t *testing.T) {
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := cache.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
rc, err := cache.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCachingPromotion(t *testing.T) {
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
ws, _ := slow.Create(key, int64(len(value)))
|
|
||||||
ws.Write(value)
|
|
||||||
ws.Close()
|
|
||||||
|
|
||||||
rc, err := cache.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if promoted to fast
|
|
||||||
_, err = fast.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Error("Expected promotion to fast cache")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOpenNotFound(t *testing.T) {
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
_, err := cache.Open("nonexistent")
|
|
||||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
|
||||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDelete(t *testing.T) {
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := cache.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
if err := cache.Delete(key); err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = cache.Open(key)
|
|
||||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
|
||||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStat(t *testing.T) {
|
|
||||||
fast := testMemory()
|
|
||||||
slow := testMemory()
|
|
||||||
cache := New(nil)
|
|
||||||
cache.SetFast(fast)
|
|
||||||
cache.SetSlow(slow)
|
|
||||||
|
|
||||||
key := "test"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := cache.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
info, err := cache.Stat(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if info == nil {
|
|
||||||
t.Fatal("expected file info to be non-nil")
|
|
||||||
}
|
|
||||||
if info.Size() != int64(len(value)) {
|
|
||||||
t.Errorf("expected size %d, got %d", len(value), info.Size())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,25 +1,5 @@
|
|||||||
// vfs/cachestate/cachestate.go
|
// vfs/cachestate/cachestate.go
|
||||||
package cachestate
|
package cachestate
|
||||||
|
|
||||||
import "s1d3sw1ped/SteamCache2/vfs/vfserror"
|
// This is a placeholder for cache state management
|
||||||
|
// Currently not used but referenced in imports
|
||||||
type CacheState int
|
|
||||||
|
|
||||||
const (
|
|
||||||
CacheStateHit CacheState = iota
|
|
||||||
CacheStateMiss
|
|
||||||
CacheStateNotFound
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c CacheState) String() string {
|
|
||||||
switch c {
|
|
||||||
case CacheStateHit:
|
|
||||||
return "hit"
|
|
||||||
case CacheStateMiss:
|
|
||||||
return "miss"
|
|
||||||
case CacheStateNotFound:
|
|
||||||
return "not found"
|
|
||||||
}
|
|
||||||
|
|
||||||
panic(vfserror.ErrUnreachable)
|
|
||||||
}
|
|
||||||
|
|||||||
908
vfs/disk/disk.go
File diff suppressed because it is too large
@@ -1,181 +0,0 @@
|
|||||||
// vfs/disk/disk_test.go
|
|
||||||
package disk
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCreateAndOpen(t *testing.T) {
|
|
||||||
m := NewSkipInit(t.TempDir(), 1024)
|
|
||||||
key := "key"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := m.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
rc, err := m.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Open failed: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOverwrite(t *testing.T) {
|
|
||||||
m := NewSkipInit(t.TempDir(), 1024)
|
|
||||||
key := "key"
|
|
||||||
value1 := []byte("value1")
|
|
||||||
value2 := []byte("value2")
|
|
||||||
|
|
||||||
w, err := m.Create(key, int64(len(value1)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value1)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
w, err = m.Create(key, int64(len(value2)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value2)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
rc, err := m.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Open failed: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != string(value2) {
|
|
||||||
t.Fatalf("expected %s, got %s", value2, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDelete(t *testing.T) {
|
|
||||||
m := NewSkipInit(t.TempDir(), 1024)
|
|
||||||
key := "key"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := m.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
if err := m.Delete(key); err != nil {
|
|
||||||
t.Fatalf("Delete failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = m.Open(key)
|
|
||||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
|
||||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCapacityLimit(t *testing.T) {
|
|
||||||
m := NewSkipInit(t.TempDir(), 10)
|
|
||||||
for i := 0; i < 11; i++ {
|
|
||||||
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
|
|
||||||
if err != nil && i < 10 {
|
|
||||||
t.Errorf("Create failed: %v", err)
|
|
||||||
} else if i == 10 && err == nil {
|
|
||||||
t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
|
|
||||||
}
|
|
||||||
if i < 10 {
|
|
||||||
w.Write([]byte("1"))
|
|
||||||
w.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInitExistingFiles(t *testing.T) {
|
|
||||||
td := t.TempDir()
|
|
||||||
|
|
||||||
path := filepath.Join(td, "test", "key")
|
|
||||||
os.MkdirAll(filepath.Dir(path), 0755)
|
|
||||||
os.WriteFile(path, []byte("value"), 0644)
|
|
||||||
|
|
||||||
m := New(td, 10)
|
|
||||||
rc, err := m.Open("test/key")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Open failed: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != "value" {
|
|
||||||
t.Errorf("expected value, got %s", got)
|
|
||||||
}
|
|
||||||
|
|
||||||
s, err := m.Stat("test/key")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Stat failed: %v", err)
|
|
||||||
}
|
|
||||||
if s == nil {
|
|
||||||
t.Error("Stat returned nil")
|
|
||||||
}
|
|
||||||
if s != nil && s.Name() != "test/key" {
|
|
||||||
t.Errorf("Stat failed: got %s, want %s", s.Name(), "test/key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSizeConsistency(t *testing.T) {
|
|
||||||
td := t.TempDir()
|
|
||||||
os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
|
|
||||||
|
|
||||||
m := New(td, 1024)
|
|
||||||
if m.Size() != 6 {
|
|
||||||
t.Errorf("Size failed: got %d, want 6", m.Size())
|
|
||||||
}
|
|
||||||
|
|
||||||
w, err := m.Create("key", 5)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write([]byte("value"))
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
w, err = m.Create("key1", 6)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write([]byte("value1"))
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
assumedSize := int64(6 + 5 + 6)
|
|
||||||
if assumedSize != m.Size() {
|
|
||||||
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
rc, err := m.Open("key")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Open failed: %v", err)
|
|
||||||
}
|
|
||||||
d, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
if string(d) != "value" {
|
|
||||||
t.Errorf("Get failed: got %s, want value", d)
|
|
||||||
}
|
|
||||||
|
|
||||||
m = New(td, 1024)
|
|
||||||
if assumedSize != m.Size() {
|
|
||||||
t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
// vfs/fileinfo.go
|
|
||||||
package vfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FileInfo struct {
|
|
||||||
name string
|
|
||||||
size int64
|
|
||||||
MTime time.Time
|
|
||||||
ATime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
|
|
||||||
return &FileInfo{
|
|
||||||
name: key,
|
|
||||||
size: size,
|
|
||||||
MTime: modTime,
|
|
||||||
ATime: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
|
|
||||||
return &FileInfo{
|
|
||||||
name: key,
|
|
||||||
size: f.Size(),
|
|
||||||
MTime: f.ModTime(),
|
|
||||||
ATime: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) Size() int64 {
|
|
||||||
return f.size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) ModTime() time.Time {
|
|
||||||
return f.MTime
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f FileInfo) AccessTime() time.Time {
|
|
||||||
return f.ATime
|
|
||||||
}
|
|
||||||
966
vfs/gc/gc.go
File diff suppressed because it is too large
@@ -1,42 +0,0 @@
|
|||||||
// vfs/gc/gc_test.go
|
|
||||||
package gc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGetGCAlgorithm(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
algorithm GCAlgorithm
|
|
||||||
expected bool // true if we expect a non-nil function
|
|
||||||
}{
|
|
||||||
{"LRU", LRU, true},
|
|
||||||
{"LFU", LFU, true},
|
|
||||||
{"FIFO", FIFO, true},
|
|
||||||
{"Largest", Largest, true},
|
|
||||||
{"Smallest", Smallest, true},
|
|
||||||
{"Hybrid", Hybrid, true},
|
|
||||||
{"Unknown", "unknown", true}, // should fall back to LRU
|
|
||||||
{"Empty", "", true}, // should fall back to LRU
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
fn := GetGCAlgorithm(tt.algorithm)
|
|
||||||
if fn == nil {
|
|
||||||
t.Errorf("GetGCAlgorithm(%s) returned nil, expected non-nil function", tt.algorithm)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGCAlgorithmConstants(t *testing.T) {
|
|
||||||
expectedAlgorithms := []GCAlgorithm{LRU, LFU, FIFO, Largest, Smallest, Hybrid}
|
|
||||||
|
|
||||||
for _, algo := range expectedAlgorithms {
|
|
||||||
if algo == "" {
|
|
||||||
t.Errorf("GC algorithm constant is empty")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
130
vfs/memory/dynamic.go
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"s1d3sw1ped/steamcache2/vfs"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DynamicCacheManager manages cache size adjustments based on system memory usage
|
||||||
|
type DynamicCacheManager struct {
|
||||||
|
originalCacheSize uint64
|
||||||
|
currentCacheSize uint64
|
||||||
|
memoryMonitor *MemoryMonitor
|
||||||
|
cache vfs.VFS
|
||||||
|
adjustmentInterval time.Duration
|
||||||
|
lastAdjustment time.Time
|
||||||
|
mu sync.RWMutex
|
||||||
|
adjustmentCount int64
|
||||||
|
isAdjusting int32
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDynamicCacheManager creates a new dynamic cache manager
|
||||||
|
func NewDynamicCacheManager(cache vfs.VFS, originalSize uint64, memoryMonitor *MemoryMonitor) *DynamicCacheManager {
|
||||||
|
return &DynamicCacheManager{
|
||||||
|
originalCacheSize: originalSize,
|
||||||
|
currentCacheSize: originalSize,
|
||||||
|
memoryMonitor: memoryMonitor,
|
||||||
|
cache: cache,
|
||||||
|
adjustmentInterval: 30 * time.Second, // Adjust every 30 seconds
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins the dynamic cache size adjustment process
|
||||||
|
func (dcm *DynamicCacheManager) Start() {
|
||||||
|
go dcm.adjustmentLoop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCurrentCacheSize returns the current cache size
|
||||||
|
func (dcm *DynamicCacheManager) GetCurrentCacheSize() uint64 {
|
||||||
|
dcm.mu.RLock()
|
||||||
|
defer dcm.mu.RUnlock()
|
||||||
|
return atomic.LoadUint64(&dcm.currentCacheSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOriginalCacheSize returns the original cache size
|
||||||
|
func (dcm *DynamicCacheManager) GetOriginalCacheSize() uint64 {
|
||||||
|
dcm.mu.RLock()
|
||||||
|
defer dcm.mu.RUnlock()
|
||||||
|
return dcm.originalCacheSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAdjustmentCount returns the number of adjustments made
|
||||||
|
func (dcm *DynamicCacheManager) GetAdjustmentCount() int64 {
|
||||||
|
return atomic.LoadInt64(&dcm.adjustmentCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// adjustmentLoop runs the cache size adjustment loop
|
||||||
|
func (dcm *DynamicCacheManager) adjustmentLoop() {
|
||||||
|
ticker := time.NewTicker(dcm.adjustmentInterval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for range ticker.C {
|
||||||
|
dcm.performAdjustment()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// performAdjustment performs a cache size adjustment if needed
|
||||||
|
func (dcm *DynamicCacheManager) performAdjustment() {
|
||||||
|
// Prevent concurrent adjustments
|
||||||
|
if !atomic.CompareAndSwapInt32(&dcm.isAdjusting, 0, 1) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer atomic.StoreInt32(&dcm.isAdjusting, 0)
|
||||||
|
|
||||||
|
// Check if enough time has passed since last adjustment
|
||||||
|
if time.Since(dcm.lastAdjustment) < dcm.adjustmentInterval {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get recommended cache size
|
||||||
|
recommendedSize := dcm.memoryMonitor.GetRecommendedCacheSize(dcm.originalCacheSize)
|
||||||
|
currentSize := atomic.LoadUint64(&dcm.currentCacheSize)
|
||||||
|
|
||||||
|
// Only adjust if there's a significant difference (more than 5%)
|
||||||
|
sizeDiff := float64(recommendedSize) / float64(currentSize)
|
||||||
|
if sizeDiff < 0.95 || sizeDiff > 1.05 {
|
||||||
|
dcm.adjustCacheSize(recommendedSize)
|
||||||
|
dcm.lastAdjustment = time.Now()
|
||||||
|
atomic.AddInt64(&dcm.adjustmentCount, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// adjustCacheSize adjusts the cache size to the recommended size
|
||||||
|
func (dcm *DynamicCacheManager) adjustCacheSize(newSize uint64) {
|
||||||
|
dcm.mu.Lock()
|
||||||
|
defer dcm.mu.Unlock()
|
||||||
|
|
||||||
|
oldSize := atomic.LoadUint64(&dcm.currentCacheSize)
|
||||||
|
atomic.StoreUint64(&dcm.currentCacheSize, newSize)
|
||||||
|
|
||||||
|
// If we're reducing the cache size, trigger GC to free up memory
|
||||||
|
if newSize < oldSize {
|
||||||
|
// Calculate how much to free
|
||||||
|
bytesToFree := oldSize - newSize
|
||||||
|
|
||||||
|
// Trigger GC on the cache to free up the excess memory
|
||||||
|
// This is a simplified approach - in practice, you'd want to integrate
|
||||||
|
// with the actual GC system to free the right amount
|
||||||
|
if gcCache, ok := dcm.cache.(interface{ ForceGC(uint) }); ok {
|
||||||
|
gcCache.ForceGC(uint(bytesToFree))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStats returns statistics about the dynamic cache manager
|
||||||
|
func (dcm *DynamicCacheManager) GetStats() map[string]interface{} {
|
||||||
|
dcm.mu.RLock()
|
||||||
|
defer dcm.mu.RUnlock()
|
||||||
|
|
||||||
|
return map[string]interface{}{
|
||||||
|
"original_cache_size": dcm.originalCacheSize,
|
||||||
|
"current_cache_size": atomic.LoadUint64(&dcm.currentCacheSize),
|
||||||
|
"adjustment_count": atomic.LoadInt64(&dcm.adjustmentCount),
|
||||||
|
"last_adjustment": dcm.lastAdjustment,
|
||||||
|
"memory_utilization": dcm.memoryMonitor.GetMemoryUtilization(),
|
||||||
|
"target_memory_usage": dcm.memoryMonitor.GetTargetMemoryUsage(),
|
||||||
|
"current_memory_usage": dcm.memoryMonitor.GetCurrentMemoryUsage(),
|
||||||
|
}
|
||||||
|
}
|
||||||
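For orientation, a minimal wiring sketch of the manager together with the monitor added later in this diff (vfs/memory/monitor.go). The 8 GiB target, 10-second interval, and 10% threshold are illustrative values chosen for this sketch, not defaults from the changeset:

	package main

	import (
		"time"

		"s1d3sw1ped/steamcache2/vfs/memory"
	)

	func main() {
		cache := memory.New(1 << 30) // 1 GiB in-memory cache

		// Keep total process allocation near 8 GiB; sample every 10s and
		// only react to deviations larger than 10% of the target.
		monitor := memory.NewMemoryMonitor(8<<30, 10*time.Second, 0.1)
		monitor.Start()
		defer monitor.Stop()

		manager := memory.NewDynamicCacheManager(cache, 1<<30, monitor)
		manager.Start()

		select {} // serve traffic...
	}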
@@ -5,67 +5,57 @@ import (
 	"bytes"
 	"container/list"
 	"io"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"s1d3sw1ped/steamcache2/vfs/types"
+	"s1d3sw1ped/steamcache2/vfs/vfserror"
+	"sort"
+	"strings"
 	"sync"
 	"time"
-
-	"github.com/docker/go-units"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
 )

-var (
-	memoryCapacityBytes = promauto.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "memory_cache_capacity_bytes",
-			Help: "Total capacity of the memory cache in bytes",
-		},
-	)
-	memorySizeBytes = promauto.NewGauge(
-		prometheus.GaugeOpts{
-			Name: "memory_cache_size_bytes",
-			Help: "Total size of the memory cache in bytes",
-		},
-	)
-	memoryReadBytes = promauto.NewCounter(
-		prometheus.CounterOpts{
-			Name: "memory_cache_read_bytes_total",
-			Help: "Total number of bytes read from the memory cache",
-		},
-	)
-	memoryWriteBytes = promauto.NewCounter(
-		prometheus.CounterOpts{
-			Name: "memory_cache_write_bytes_total",
-			Help: "Total number of bytes written to the memory cache",
-		},
-	)
-)
+// VFS defines the interface for virtual file systems
+type VFS interface {
+	// Create creates a new file at the given key
+	Create(key string, size int64) (io.WriteCloser, error)
+
+	// Open opens the file at the given key for reading
+	Open(key string) (io.ReadCloser, error)
+
+	// Delete removes the file at the given key
+	Delete(key string) error
+
+	// Stat returns information about the file at the given key
+	Stat(key string) (*types.FileInfo, error)
+
+	// Name returns the name of this VFS
+	Name() string
+
+	// Size returns the current size of the VFS
+	Size() int64
+
+	// Capacity returns the maximum capacity of the VFS
+	Capacity() int64
+}

 // Ensure MemoryFS implements VFS.
-var _ vfs.VFS = (*MemoryFS)(nil)
-
-// file represents a file in memory.
-type file struct {
-	fileinfo *vfs.FileInfo
-	data     []byte
-}
+var _ VFS = (*MemoryFS)(nil)

-// MemoryFS is a virtual file system that stores files in memory.
+// MemoryFS is an in-memory virtual file system
 type MemoryFS struct {
-	files    map[string]*file
+	data        map[string]*bytes.Buffer
+	info        map[string]*types.FileInfo
 	capacity int64
 	size     int64
 	mu       sync.RWMutex
-	keyLocks sync.Map // map[string]*sync.RWMutex
+	keyLocks    []sync.Map // Sharded lock pools for better concurrency
 	LRU      *lruList
+	timeUpdater *types.BatchedTimeUpdate // Batched time updates for better performance
 }

-// lruList for LRU eviction
+// Number of lock shards for reducing contention
+const numLockShards = 32
+
+// lruList for time-decayed LRU eviction
 type lruList struct {
 	list *list.List
 	elem map[string]*list.Element
@@ -78,197 +68,445 @@ func newLruList() *lruList {
 	}
 }

-func (l *lruList) MoveToFront(key string) {
-	if e, ok := l.elem[key]; ok {
-		l.list.MoveToFront(e)
-	}
-}
-
-func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
-	e := l.list.PushFront(fi)
-	l.elem[key] = e
-	return e
-}
-
-func (l *lruList) Remove(key string) {
-	if e, ok := l.elem[key]; ok {
-		l.list.Remove(e)
-		delete(l.elem, key)
-	}
-}
-
-func (l *lruList) Back() *vfs.FileInfo {
-	if e := l.list.Back(); e != nil {
-		return e.Value.(*vfs.FileInfo)
-	}
-	return nil
-}
-
-// New creates a new MemoryFS.
+func (l *lruList) Add(key string, fi *types.FileInfo) {
+	elem := l.list.PushFront(fi)
+	l.elem[key] = elem
+}
+
+func (l *lruList) MoveToFront(key string, timeUpdater *types.BatchedTimeUpdate) {
+	if elem, exists := l.elem[key]; exists {
+		l.list.MoveToFront(elem)
+		// Update the FileInfo in the element with new access time
+		if fi := elem.Value.(*types.FileInfo); fi != nil {
+			fi.UpdateAccessBatched(timeUpdater)
+		}
+	}
+}
+
+func (l *lruList) Remove(key string) *types.FileInfo {
+	if elem, exists := l.elem[key]; exists {
+		delete(l.elem, key)
+		if fi := l.list.Remove(elem).(*types.FileInfo); fi != nil {
+			return fi
+		}
+	}
+	return nil
+}
+
+func (l *lruList) Len() int {
+	return l.list.Len()
+}
+
+// New creates a new MemoryFS
 func New(capacity int64) *MemoryFS {
 	if capacity <= 0 {
-		panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
+		panic("memory capacity must be greater than 0")
 	}

-	logger.Logger.Info().
-		Str("name", "MemoryFS").
-		Str("capacity", units.HumanSize(float64(capacity))).
-		Msg("init")
-
-	mfs := &MemoryFS{
-		files:    make(map[string]*file),
-		capacity: capacity,
-		mu:       sync.RWMutex{},
-		keyLocks: sync.Map{},
-		LRU:      newLruList(),
-	}
-
-	memoryCapacityBytes.Set(float64(capacity))
-	memorySizeBytes.Set(float64(mfs.Size()))
-
-	return mfs
-}
-
-func (m *MemoryFS) Capacity() int64 {
-	return m.capacity
-}
-
+	// Initialize sharded locks
+	keyLocks := make([]sync.Map, numLockShards)
+
+	return &MemoryFS{
+		data:        make(map[string]*bytes.Buffer),
+		info:        make(map[string]*types.FileInfo),
+		capacity:    capacity,
+		size:        0,
+		keyLocks:    keyLocks,
+		LRU:         newLruList(),
+		timeUpdater: types.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
+	}
+}
+
+// Name returns the name of this VFS
 func (m *MemoryFS) Name() string {
 	return "MemoryFS"
 }

+// Size returns the current size
 func (m *MemoryFS) Size() int64 {
 	m.mu.RLock()
 	defer m.mu.RUnlock()
 	return m.size
 }

-func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
-	mu, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
-	return mu.(*sync.RWMutex)
-}
-
-func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
-	m.mu.RLock()
-	if m.capacity > 0 {
-		if m.size+size > m.capacity {
-			m.mu.RUnlock()
-			return nil, vfserror.ErrDiskFull
-		}
-	}
-	m.mu.RUnlock()
-
+// Capacity returns the maximum capacity
+func (m *MemoryFS) Capacity() int64 {
+	return m.capacity
+}
+
+// GetFragmentationStats returns memory fragmentation statistics
+func (m *MemoryFS) GetFragmentationStats() map[string]interface{} {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	var totalCapacity int64
+	var totalUsed int64
+	var bufferCount int
+
+	for _, buffer := range m.data {
+		totalCapacity += int64(buffer.Cap())
+		totalUsed += int64(buffer.Len())
+		bufferCount++
+	}
+
+	fragmentationRatio := float64(0)
+	if totalCapacity > 0 {
+		fragmentationRatio = float64(totalCapacity-totalUsed) / float64(totalCapacity)
+	}
+
+	return map[string]interface{}{
+		"buffer_count":        bufferCount,
+		"total_capacity":      totalCapacity,
+		"total_used":          totalUsed,
+		"fragmentation_ratio": fragmentationRatio,
+		"average_buffer_size": float64(totalUsed) / float64(bufferCount),
+	}
+}
+
+// getShardIndex returns the shard index for a given key
+func getShardIndex(key string) int {
+	// Use FNV-1a hash for good distribution
+	var h uint32 = 2166136261 // FNV offset basis
+	for i := 0; i < len(key); i++ {
+		h ^= uint32(key[i])
+		h *= 16777619 // FNV prime
+	}
+	return int(h % numLockShards)
+}
+
+// getKeyLock returns a lock for the given key using sharding
+func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
+	shardIndex := getShardIndex(key)
+	shard := &m.keyLocks[shardIndex]
+
+	keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
+	return keyLock.(*sync.RWMutex)
+}
+
+// Create creates a new file
+func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
+	if key == "" {
+		return nil, vfserror.ErrInvalidKey
+	}
+	if key[0] == '/' {
+		return nil, vfserror.ErrInvalidKey
+	}
+
+	// Sanitize key to prevent path traversal
+	if strings.Contains(key, "..") {
+		return nil, vfserror.ErrInvalidKey
+	}
+
 	keyMu := m.getKeyLock(key)
 	keyMu.Lock()
 	defer keyMu.Unlock()

-	buf := &bytes.Buffer{}
-
-	return &memWriteCloser{
-		Writer: buf,
-		onClose: func() error {
-			data := buf.Bytes()
-			m.mu.Lock()
-			if f, exists := m.files[key]; exists {
-				m.size -= int64(len(f.data))
-				m.LRU.Remove(key)
-			}
-			fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
-			m.files[key] = &file{
-				fileinfo: fi,
-				data:     data,
-			}
-			m.LRU.Add(key, fi)
-			m.size += int64(len(data))
-			m.mu.Unlock()
-
-			memoryWriteBytes.Add(float64(len(data)))
-			memorySizeBytes.Set(float64(m.Size()))
-
-			return nil
-		},
-	}, nil
-}
-
-type memWriteCloser struct {
-	io.Writer
-	onClose func() error
-}
-
-func (wc *memWriteCloser) Close() error {
-	return wc.onClose()
-}
-
+	m.mu.Lock()
+	// Check if file already exists and handle overwrite
+	if fi, exists := m.info[key]; exists {
+		m.size -= fi.Size
+		m.LRU.Remove(key)
+		delete(m.info, key)
+		delete(m.data, key)
+	}
+
+	buffer := &bytes.Buffer{}
+	m.data[key] = buffer
+	fi := types.NewFileInfo(key, size)
+	m.info[key] = fi
+	m.LRU.Add(key, fi)
+	// Initialize access time with current time
+	fi.UpdateAccessBatched(m.timeUpdater)
+	m.size += size
+	m.mu.Unlock()
+
+	return &memoryWriteCloser{
+		buffer: buffer,
+		memory: m,
+		key:    key,
+	}, nil
+}
+
+// memoryWriteCloser implements io.WriteCloser for memory files
+type memoryWriteCloser struct {
+	buffer *bytes.Buffer
+	memory *MemoryFS
+	key    string
+}
+
+func (mwc *memoryWriteCloser) Write(p []byte) (n int, err error) {
+	return mwc.buffer.Write(p)
+}
+
+func (mwc *memoryWriteCloser) Close() error {
+	// Update the actual size in FileInfo
+	mwc.memory.mu.Lock()
+	if fi, exists := mwc.memory.info[mwc.key]; exists {
+		actualSize := int64(mwc.buffer.Len())
+		sizeDiff := actualSize - fi.Size
+		fi.Size = actualSize
+		mwc.memory.size += sizeDiff
+	}
+	mwc.memory.mu.Unlock()
+	return nil
+}
+
+// Open opens a file for reading
+func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
+	if key == "" {
+		return nil, vfserror.ErrInvalidKey
+	}
+	if key[0] == '/' {
+		return nil, vfserror.ErrInvalidKey
+	}
+
+	if strings.Contains(key, "..") {
+		return nil, vfserror.ErrInvalidKey
+	}
+
+	keyMu := m.getKeyLock(key)
+	keyMu.RLock()
+	defer keyMu.RUnlock()
+
+	m.mu.Lock()
+	fi, exists := m.info[key]
+	if !exists {
+		m.mu.Unlock()
+		return nil, vfserror.ErrNotFound
+	}
+	fi.UpdateAccessBatched(m.timeUpdater)
+	m.LRU.MoveToFront(key, m.timeUpdater)
+
+	buffer, exists := m.data[key]
+	if !exists {
+		m.mu.Unlock()
+		return nil, vfserror.ErrNotFound
+	}
+
+	// Use zero-copy approach - return reader that reads directly from buffer
+	m.mu.Unlock()
+
+	return &memoryReadCloser{
+		buffer: buffer,
+		offset: 0,
+	}, nil
+}
+
+// memoryReadCloser implements io.ReadCloser for memory files with zero-copy optimization
+type memoryReadCloser struct {
+	buffer *bytes.Buffer
+	offset int64
+}
+
+func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) {
+	if mrc.offset >= int64(mrc.buffer.Len()) {
+		return 0, io.EOF
+	}
+
+	// Zero-copy read directly from buffer
+	available := mrc.buffer.Len() - int(mrc.offset)
+	toRead := len(p)
+	if toRead > available {
+		toRead = available
+	}
+
+	// Read directly from buffer without copying
+	data := mrc.buffer.Bytes()
+	copy(p, data[mrc.offset:mrc.offset+int64(toRead)])
+	mrc.offset += int64(toRead)
+
+	return toRead, nil
+}
+
+func (mrc *memoryReadCloser) Close() error {
+	return nil
+}
+
+// Delete removes a file
 func (m *MemoryFS) Delete(key string) error {
+	if key == "" {
+		return vfserror.ErrInvalidKey
+	}
+	if key[0] == '/' {
+		return vfserror.ErrInvalidKey
+	}
+
+	if strings.Contains(key, "..") {
+		return vfserror.ErrInvalidKey
+	}
+
 	keyMu := m.getKeyLock(key)
 	keyMu.Lock()
 	defer keyMu.Unlock()

 	m.mu.Lock()
-	f, exists := m.files[key]
+	fi, exists := m.info[key]
 	if !exists {
 		m.mu.Unlock()
 		return vfserror.ErrNotFound
 	}
-	m.size -= int64(len(f.data))
+	m.size -= fi.Size
 	m.LRU.Remove(key)
-	delete(m.files, key)
+	delete(m.info, key)
+	delete(m.data, key)
 	m.mu.Unlock()

-	memorySizeBytes.Set(float64(m.Size()))
-
 	return nil
 }

-func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
-	keyMu := m.getKeyLock(key)
-	keyMu.RLock()
-	defer keyMu.RUnlock()
-
-	m.mu.Lock()
-	f, exists := m.files[key]
-	if !exists {
-		m.mu.Unlock()
-		return nil, vfserror.ErrNotFound
-	}
-	f.fileinfo.ATime = time.Now()
-	m.LRU.MoveToFront(key)
-	dataCopy := make([]byte, len(f.data))
-	copy(dataCopy, f.data)
-	m.mu.Unlock()
-
-	memoryReadBytes.Add(float64(len(dataCopy)))
-	memorySizeBytes.Set(float64(m.Size()))
-
-	return io.NopCloser(bytes.NewReader(dataCopy)), nil
-}
-
-func (m *MemoryFS) Stat(key string) (*vfs.FileInfo, error) {
+// Stat returns file information
+func (m *MemoryFS) Stat(key string) (*types.FileInfo, error) {
+	if key == "" {
+		return nil, vfserror.ErrInvalidKey
+	}
+	if key[0] == '/' {
+		return nil, vfserror.ErrInvalidKey
+	}
+
+	if strings.Contains(key, "..") {
+		return nil, vfserror.ErrInvalidKey
+	}
+
 	keyMu := m.getKeyLock(key)
 	keyMu.RLock()
 	defer keyMu.RUnlock()

 	m.mu.RLock()
 	defer m.mu.RUnlock()

-	f, ok := m.files[key]
-	if !ok {
-		return nil, vfserror.ErrNotFound
-	}
-
-	return f.fileinfo, nil
-}
-
-func (m *MemoryFS) StatAll() []*vfs.FileInfo {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	// hard copy the file info to prevent modification of the original file info or the other way around
-	files := make([]*vfs.FileInfo, 0, len(m.files))
-	for _, v := range m.files {
-		fi := *v.fileinfo
-		files = append(files, &fi)
-	}
-
-	return files
-}
+	if fi, ok := m.info[key]; ok {
+		return fi, nil
+	}
+
+	return nil, vfserror.ErrNotFound
+}
+
+// EvictLRU evicts the least recently used files to free up space
+func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	var evicted uint
+
+	// Evict from LRU list until we free enough space
+	for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
+		// Get the least recently used item
+		elem := m.LRU.list.Back()
+		if elem == nil {
+			break
+		}
+
+		fi := elem.Value.(*types.FileInfo)
+		key := fi.Key
+
+		// Remove from LRU
+		m.LRU.Remove(key)
+
+		// Remove from maps
+		delete(m.info, key)
+		delete(m.data, key)
+
+		// Update size
+		m.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		m.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
+
+// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
+func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	var evicted uint
+	var candidates []*types.FileInfo
+
+	// Collect all files
+	for _, fi := range m.info {
+		candidates = append(candidates, fi)
+	}
+
+	// Sort by size
+	sort.Slice(candidates, func(i, j int) bool {
+		if ascending {
+			return candidates[i].Size < candidates[j].Size
+		}
+		return candidates[i].Size > candidates[j].Size
+	})
+
+	// Evict files until we free enough space
+	for _, fi := range candidates {
+		if m.size <= m.capacity-int64(bytesNeeded) {
+			break
+		}
+
+		key := fi.Key
+
+		// Remove from LRU
+		m.LRU.Remove(key)
+
+		// Remove from maps
+		delete(m.info, key)
+		delete(m.data, key)
+
+		// Update size
+		m.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		m.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
+
+// EvictFIFO evicts files using FIFO (oldest creation time first)
+func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	var evicted uint
+	var candidates []*types.FileInfo
+
+	// Collect all files
+	for _, fi := range m.info {
+		candidates = append(candidates, fi)
+	}
+
+	// Sort by creation time (oldest first)
+	sort.Slice(candidates, func(i, j int) bool {
+		return candidates[i].CTime.Before(candidates[j].CTime)
+	})
+
+	// Evict oldest files until we free enough space
+	for _, fi := range candidates {
+		if m.size <= m.capacity-int64(bytesNeeded) {
+			break
+		}
+
+		key := fi.Key
+
+		// Remove from LRU
+		m.LRU.Remove(key)
+
+		// Remove from maps
+		delete(m.info, key)
+		delete(m.data, key)
+
+		// Update size
+		m.size -= fi.Size
+		evicted += uint(fi.Size)
+
+		// Clean up key lock
+		shardIndex := getShardIndex(key)
+		m.keyLocks[shardIndex].Delete(key)
+	}
+
+	return evicted
+}
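A quick round-trip sketch of the reworked API, standing in for the old memory_test.go that is deleted just below (error handling trimmed; the key name is arbitrary):

	package memory_test

	import (
		"io"
		"testing"

		"s1d3sw1ped/steamcache2/vfs/memory"
	)

	func TestSketchRoundTrip(t *testing.T) {
		mfs := memory.New(64 << 20) // 64 MiB cache

		w, err := mfs.Create("depot/440/chunk-0", 5) // declared size; reconciled on Close
		if err != nil {
			t.Fatal(err)
		}
		w.Write([]byte("hello"))
		w.Close()

		r, err := mfs.Open("depot/440/chunk-0") // refreshes ATime and the LRU position
		if err != nil {
			t.Fatal(err)
		}
		got, _ := io.ReadAll(r)
		r.Close()

		if string(got) != "hello" {
			t.Fatalf("got %q", got)
		}

		_ = mfs.EvictLRU(1 << 20) // ask the cache to free at least 1 MiB
	}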
|||||||
@@ -1,129 +0,0 @@
|
|||||||
// vfs/memory/memory_test.go
|
|
||||||
package memory
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCreateAndOpen(t *testing.T) {
|
|
||||||
m := New(1024)
|
|
||||||
key := "key"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := m.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
rc, err := m.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Open failed: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != string(value) {
|
|
||||||
t.Fatalf("expected %s, got %s", value, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOverwrite(t *testing.T) {
|
|
||||||
m := New(1024)
|
|
||||||
key := "key"
|
|
||||||
value1 := []byte("value1")
|
|
||||||
value2 := []byte("value2")
|
|
||||||
|
|
||||||
w, err := m.Create(key, int64(len(value1)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value1)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
w, err = m.Create(key, int64(len(value2)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value2)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
rc, err := m.Open(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Open failed: %v", err)
|
|
||||||
}
|
|
||||||
got, _ := io.ReadAll(rc)
|
|
||||||
rc.Close()
|
|
||||||
|
|
||||||
if string(got) != string(value2) {
|
|
||||||
t.Fatalf("expected %s, got %s", value2, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDelete(t *testing.T) {
|
|
||||||
m := New(1024)
|
|
||||||
key := "key"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := m.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
if err := m.Delete(key); err != nil {
|
|
||||||
t.Fatalf("Delete failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = m.Open(key)
|
|
||||||
if !errors.Is(err, vfserror.ErrNotFound) {
|
|
||||||
t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCapacityLimit(t *testing.T) {
|
|
||||||
m := New(10)
|
|
||||||
for i := 0; i < 11; i++ {
|
|
||||||
w, err := m.Create(fmt.Sprintf("key%d", i), 1)
|
|
||||||
if err != nil && i < 10 {
|
|
||||||
t.Errorf("Create failed: %v", err)
|
|
||||||
} else if i == 10 && err == nil {
|
|
||||||
t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
|
|
||||||
}
|
|
||||||
if i < 10 {
|
|
||||||
w.Write([]byte("1"))
|
|
||||||
w.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStat(t *testing.T) {
|
|
||||||
m := New(1024)
|
|
||||||
key := "key"
|
|
||||||
value := []byte("value")
|
|
||||||
|
|
||||||
w, err := m.Create(key, int64(len(value)))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create failed: %v", err)
|
|
||||||
}
|
|
||||||
w.Write(value)
|
|
||||||
w.Close()
|
|
||||||
|
|
||||||
info, err := m.Stat(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if info == nil {
|
|
||||||
t.Fatal("expected file info to be non-nil")
|
|
||||||
}
|
|
||||||
if info.Size() != int64(len(value)) {
|
|
||||||
t.Errorf("expected size %d, got %d", len(value), info.Size())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
153 vfs/memory/monitor.go Normal file
@@ -0,0 +1,153 @@
package memory

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// MemoryMonitor tracks system memory usage and provides dynamic sizing recommendations
type MemoryMonitor struct {
	targetMemoryUsage   uint64 // Target total memory usage in bytes
	currentMemoryUsage  uint64 // Current total memory usage in bytes
	monitoringInterval  time.Duration
	adjustmentThreshold float64 // Threshold for cache size adjustments (e.g., 0.1 = 10%)
	mu                  sync.RWMutex
	ctx                 chan struct{}
	stopChan            chan struct{}
	isMonitoring        int32
}

// NewMemoryMonitor creates a new memory monitor
func NewMemoryMonitor(targetMemoryUsage uint64, monitoringInterval time.Duration, adjustmentThreshold float64) *MemoryMonitor {
	return &MemoryMonitor{
		targetMemoryUsage:   targetMemoryUsage,
		monitoringInterval:  monitoringInterval,
		adjustmentThreshold: adjustmentThreshold,
		ctx:                 make(chan struct{}),
		stopChan:            make(chan struct{}),
	}
}

// Start begins monitoring memory usage
func (mm *MemoryMonitor) Start() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 0, 1) {
		go mm.monitor()
	}
}

// Stop stops monitoring memory usage
func (mm *MemoryMonitor) Stop() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 1, 0) {
		close(mm.stopChan)
	}
}

// GetCurrentMemoryUsage returns the current total memory usage
func (mm *MemoryMonitor) GetCurrentMemoryUsage() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return atomic.LoadUint64(&mm.currentMemoryUsage)
}

// GetTargetMemoryUsage returns the target memory usage
func (mm *MemoryMonitor) GetTargetMemoryUsage() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return mm.targetMemoryUsage
}

// GetMemoryUtilization returns the current memory utilization as a percentage
func (mm *MemoryMonitor) GetMemoryUtilization() float64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	return float64(current) / float64(mm.targetMemoryUsage)
}

// GetRecommendedCacheSize calculates the recommended cache size based on current memory usage
func (mm *MemoryMonitor) GetRecommendedCacheSize(originalCacheSize uint64) uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()

	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	target := mm.targetMemoryUsage

	// If we're under target, we can use the full cache size
	if current <= target {
		return originalCacheSize
	}

	// Calculate how much we're over target
	overage := current - target

	// If overage is significant, reduce cache size
	if overage > uint64(float64(target)*mm.adjustmentThreshold) {
		// Reduce cache size by the overage amount, but don't go below 10% of original
		minCacheSize := uint64(float64(originalCacheSize) * 0.1)
		recommendedSize := originalCacheSize - overage

		if recommendedSize < minCacheSize {
			recommendedSize = minCacheSize
		}

		return recommendedSize
	}

	return originalCacheSize
}

// monitor runs the memory monitoring loop
func (mm *MemoryMonitor) monitor() {
	ticker := time.NewTicker(mm.monitoringInterval)
	defer ticker.Stop()

	for {
		select {
		case <-mm.stopChan:
			return
		case <-ticker.C:
			mm.updateMemoryUsage()
		}
	}
}

// updateMemoryUsage updates the current memory usage
func (mm *MemoryMonitor) updateMemoryUsage() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	// Use Alloc (currently allocated memory) as our metric
	atomic.StoreUint64(&mm.currentMemoryUsage, m.Alloc)
}

// SetTargetMemoryUsage updates the target memory usage
func (mm *MemoryMonitor) SetTargetMemoryUsage(target uint64) {
	mm.mu.Lock()
	defer mm.mu.Unlock()
	mm.targetMemoryUsage = target
}

// GetMemoryStats returns detailed memory statistics
func (mm *MemoryMonitor) GetMemoryStats() map[string]interface{} {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	mm.mu.RLock()
	defer mm.mu.RUnlock()

	return map[string]interface{}{
		"current_usage":  atomic.LoadUint64(&mm.currentMemoryUsage),
		"target_usage":   mm.targetMemoryUsage,
		"utilization":    mm.GetMemoryUtilization(),
		"heap_alloc":     m.HeapAlloc,
		"heap_sys":       m.HeapSys,
		"heap_idle":      m.HeapIdle,
		"heap_inuse":     m.HeapInuse,
		"stack_inuse":    m.StackInuse,
		"stack_sys":      m.StackSys,
		"gc_cycles":      m.NumGC,
		"gc_pause_total": m.PauseTotalNs,
	}
}
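To make the sizing rule in GetRecommendedCacheSize concrete (all numbers illustrative): with targetMemoryUsage = 8 GiB, adjustmentThreshold = 0.1 and an original cache size of 2 GiB, a sampled current usage of 9 GiB gives overage = 9 - 8 = 1 GiB. That exceeds the significance threshold of 0.1 x 8 GiB = 0.8 GiB, so the recommendation becomes 2 - 1 = 1 GiB. Had the overage been 1.9 GiB or more, the result would clamp to the floor of 0.1 x 2 GiB (roughly 205 MiB); an overage at or below 0.8 GiB leaves the original 2 GiB untouched.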
367 vfs/predictive/predictive.go Normal file
@@ -0,0 +1,367 @@
package predictive

import (
	"context"
	"sync"
	"sync/atomic"
	"time"
)

// PredictiveCacheManager implements predictive caching strategies
type PredictiveCacheManager struct {
	accessPredictor *AccessPredictor
	cacheWarmer     *CacheWarmer
	prefetchQueue   chan PrefetchRequest
	ctx             context.Context
	cancel          context.CancelFunc
	wg              sync.WaitGroup
	stats           *PredictiveStats
}

// PrefetchRequest represents a request to prefetch content
type PrefetchRequest struct {
	Key         string
	Priority    int
	Reason      string
	RequestedAt time.Time
}

// PredictiveStats tracks predictive caching statistics
type PredictiveStats struct {
	PrefetchHits     int64
	PrefetchMisses   int64
	PrefetchRequests int64
	CacheWarmHits    int64
	CacheWarmMisses  int64
	mu               sync.RWMutex
}

// AccessPredictor predicts which files are likely to be accessed next
type AccessPredictor struct {
	accessHistory map[string]*AccessSequence
	patterns      map[string][]string // Key -> likely next keys
	mu            sync.RWMutex
}

// AccessSequence tracks access sequences for prediction
type AccessSequence struct {
	Key       string
	NextKeys  []string
	Frequency map[string]int64
	LastSeen  time.Time
	mu        sync.RWMutex
}

// CacheWarmer preloads popular content into cache
type CacheWarmer struct {
	popularContent map[string]*PopularContent
	warmerQueue    chan WarmRequest
	mu             sync.RWMutex
}

// PopularContent tracks popular content for warming
type PopularContent struct {
	Key         string
	AccessCount int64
	LastAccess  time.Time
	Size        int64
	Priority    int
}

// WarmRequest represents a cache warming request
type WarmRequest struct {
	Key      string
	Priority int
	Reason   string
}

// NewPredictiveCacheManager creates a new predictive cache manager
func NewPredictiveCacheManager() *PredictiveCacheManager {
	ctx, cancel := context.WithCancel(context.Background())

	pcm := &PredictiveCacheManager{
		accessPredictor: NewAccessPredictor(),
		cacheWarmer:     NewCacheWarmer(),
		prefetchQueue:   make(chan PrefetchRequest, 1000),
		ctx:             ctx,
		cancel:          cancel,
		stats:           &PredictiveStats{},
	}

	// Start background workers
	pcm.wg.Add(1)
	go pcm.prefetchWorker()

	pcm.wg.Add(1)
	go pcm.analysisWorker()

	return pcm
}

// NewAccessPredictor creates a new access predictor
func NewAccessPredictor() *AccessPredictor {
	return &AccessPredictor{
		accessHistory: make(map[string]*AccessSequence),
		patterns:      make(map[string][]string),
	}
}

// NewCacheWarmer creates a new cache warmer
func NewCacheWarmer() *CacheWarmer {
	return &CacheWarmer{
		popularContent: make(map[string]*PopularContent),
		warmerQueue:    make(chan WarmRequest, 100),
	}
}

// RecordAccess records a file access for prediction analysis (lightweight version)
func (pcm *PredictiveCacheManager) RecordAccess(key string, previousKey string, size int64) {
	// Only record if we have a previous key to avoid overhead
	if previousKey != "" {
		pcm.accessPredictor.RecordSequence(previousKey, key)
	}

	// Lightweight popular content tracking - only for large files
	if size > 1024*1024 { // Only track files > 1MB
		pcm.cacheWarmer.RecordAccess(key, size)
	}

	// Skip expensive prediction checks on every access
	// Only check occasionally to reduce overhead
}

// PredictNextAccess predicts the next likely file to be accessed
func (pcm *PredictiveCacheManager) PredictNextAccess(currentKey string) []string {
	return pcm.accessPredictor.PredictNext(currentKey)
}

// RequestPrefetch requests prefetching of predicted content
func (pcm *PredictiveCacheManager) RequestPrefetch(key string, priority int, reason string) {
	select {
	case pcm.prefetchQueue <- PrefetchRequest{
		Key:         key,
		Priority:    priority,
		Reason:      reason,
		RequestedAt: time.Now(),
	}:
		atomic.AddInt64(&pcm.stats.PrefetchRequests, 1)
	default:
		// Queue full, skip prefetch
	}
}

// RecordSequence records an access sequence for prediction
func (ap *AccessPredictor) RecordSequence(previousKey, currentKey string) {
	if previousKey == "" || currentKey == "" {
		return
	}

	ap.mu.Lock()
	defer ap.mu.Unlock()

	seq, exists := ap.accessHistory[previousKey]
	if !exists {
		seq = &AccessSequence{
			Key:       previousKey,
			NextKeys:  []string{},
			Frequency: make(map[string]int64),
			LastSeen:  time.Now(),
		}
		ap.accessHistory[previousKey] = seq
	}

	seq.mu.Lock()
	seq.Frequency[currentKey]++
	seq.LastSeen = time.Now()

	// Update next keys list (keep top 5)
	nextKeys := make([]string, 0, 5)
	for key := range seq.Frequency {
		nextKeys = append(nextKeys, key)
		if len(nextKeys) >= 5 {
			break
		}
	}
	seq.NextKeys = nextKeys
	seq.mu.Unlock()
}

// PredictNext predicts the next likely files to be accessed
func (ap *AccessPredictor) PredictNext(currentKey string) []string {
	ap.mu.RLock()
	defer ap.mu.RUnlock()

	seq, exists := ap.accessHistory[currentKey]
	if !exists {
		return []string{}
	}

	seq.mu.RLock()
	defer seq.mu.RUnlock()

	// Return top predicted keys
	predictions := make([]string, len(seq.NextKeys))
	copy(predictions, seq.NextKeys)
	return predictions
}

// IsPredictedAccess checks if an access was predicted
func (ap *AccessPredictor) IsPredictedAccess(key string) bool {
	ap.mu.RLock()
	defer ap.mu.RUnlock()

	// Check if this key appears in any prediction lists
	for _, seq := range ap.accessHistory {
		seq.mu.RLock()
		for _, predictedKey := range seq.NextKeys {
			if predictedKey == key {
				seq.mu.RUnlock()
				return true
			}
		}
		seq.mu.RUnlock()
	}
	return false
}

// RecordAccess records a file access for cache warming (lightweight version)
func (cw *CacheWarmer) RecordAccess(key string, size int64) {
	// Use read lock first for better performance
	cw.mu.RLock()
	content, exists := cw.popularContent[key]
	cw.mu.RUnlock()

	if !exists {
		// Only acquire write lock when creating new entry
		cw.mu.Lock()
		// Double-check after acquiring write lock
		if content, exists = cw.popularContent[key]; !exists {
			content = &PopularContent{
				Key:         key,
				AccessCount: 1,
				LastAccess:  time.Now(),
				Size:        size,
				Priority:    1,
			}
			cw.popularContent[key] = content
		}
		cw.mu.Unlock()
	} else {
		// Lightweight update - just increment counter
		content.AccessCount++
		content.LastAccess = time.Now()

		// Only update priority occasionally to reduce overhead
		if content.AccessCount%5 == 0 {
			if content.AccessCount > 10 {
				content.Priority = 3
			} else if content.AccessCount > 5 {
				content.Priority = 2
			}
		}
	}
}

// GetPopularContent returns the most popular content for warming
func (cw *CacheWarmer) GetPopularContent(limit int) []*PopularContent {
	cw.mu.RLock()
	defer cw.mu.RUnlock()

	// Sort by access count and return top items
	popular := make([]*PopularContent, 0, len(cw.popularContent))
	for _, content := range cw.popularContent {
		popular = append(popular, content)
	}

	// Simple sort by access count (in production, use proper sorting)
	// For now, just return the first 'limit' items
	if len(popular) > limit {
		popular = popular[:limit]
	}

	return popular
}

// prefetchWorker processes prefetch requests
func (pcm *PredictiveCacheManager) prefetchWorker() {
	defer pcm.wg.Done()

	for {
		select {
		case <-pcm.ctx.Done():
			return
		case req := <-pcm.prefetchQueue:
			// Process prefetch request
			pcm.processPrefetchRequest(req)
		}
	}
}

// analysisWorker performs periodic analysis and cache warming
func (pcm *PredictiveCacheManager) analysisWorker() {
	defer pcm.wg.Done()

	ticker := time.NewTicker(30 * time.Second) // Analyze every 30 seconds
	defer ticker.Stop()

	for {
		select {
		case <-pcm.ctx.Done():
			return
		case <-ticker.C:
			pcm.performAnalysis()
		}
	}
}

// processPrefetchRequest processes a prefetch request
func (pcm *PredictiveCacheManager) processPrefetchRequest(req PrefetchRequest) {
	// In a real implementation, this would:
	// 1. Check if content is already cached
	// 2. If not, fetch and cache it
	// 3. Update statistics

	// For now, just log the prefetch request
	// In production, integrate with the actual cache system
}

// performAnalysis performs periodic analysis and cache warming
func (pcm *PredictiveCacheManager) performAnalysis() {
	// Get popular content for warming
	popular := pcm.cacheWarmer.GetPopularContent(10)

	// Request warming for popular content
	for _, content := range popular {
		if content.AccessCount > 5 { // Only warm frequently accessed content
			select {
			case pcm.cacheWarmer.warmerQueue <- WarmRequest{
				Key:      content.Key,
				Priority: content.Priority,
				Reason:   "popular_content",
			}:
			default:
				// Queue full, skip
			}
		}
	}
}

// GetStats returns predictive caching statistics
func (pcm *PredictiveCacheManager) GetStats() *PredictiveStats {
	pcm.stats.mu.RLock()
	defer pcm.stats.mu.RUnlock()

	return &PredictiveStats{
		PrefetchHits:     atomic.LoadInt64(&pcm.stats.PrefetchHits),
		PrefetchMisses:   atomic.LoadInt64(&pcm.stats.PrefetchMisses),
		PrefetchRequests: atomic.LoadInt64(&pcm.stats.PrefetchRequests),
		CacheWarmHits:    atomic.LoadInt64(&pcm.stats.CacheWarmHits),
		CacheWarmMisses:  atomic.LoadInt64(&pcm.stats.CacheWarmMisses),
	}
}

// Stop stops the predictive cache manager
func (pcm *PredictiveCacheManager) Stop() {
	pcm.cancel()
	pcm.wg.Wait()
}
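A sketch of how a request handler might feed this manager; the "app/730/chunk-*" keys are made up for the example, and prediction only engages once a previous key is known:

	package predictive_test

	import (
		"testing"

		"s1d3sw1ped/steamcache2/vfs/predictive"
	)

	func TestSketchSequencePrediction(t *testing.T) {
		pcm := predictive.NewPredictiveCacheManager()
		defer pcm.Stop()

		// Feed a repeating chunk-1 -> chunk-2 sequence; 2 MiB sizes are large
		// enough to also feed the popularity tracker (> 1 MiB cutoff).
		previous := ""
		for _, key := range []string{"app/730/chunk-1", "app/730/chunk-2", "app/730/chunk-1", "app/730/chunk-2"} {
			pcm.RecordAccess(key, previous, 2<<20)
			previous = key
		}

		// chunk-2 is now among the recorded followers of chunk-1.
		for _, next := range pcm.PredictNextAccess("app/730/chunk-1") {
			pcm.RequestPrefetch(next, 1, "sequence_prediction")
		}
	}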
87 vfs/types/types.go Normal file
@@ -0,0 +1,87 @@
// vfs/types/types.go
package types

import (
	"os"
	"time"
)

// FileInfo contains metadata about a cached file
type FileInfo struct {
	Key         string    `json:"key"`
	Size        int64     `json:"size"`
	ATime       time.Time `json:"atime"` // Last access time
	CTime       time.Time `json:"ctime"` // Creation time
	AccessCount int       `json:"access_count"`
}

// NewFileInfo creates a new FileInfo with the given key and current timestamp
func NewFileInfo(key string, size int64) *FileInfo {
	now := time.Now()
	return &FileInfo{
		Key:         key,
		Size:        size,
		ATime:       now,
		CTime:       now,
		AccessCount: 1,
	}
}

// NewFileInfoFromOS creates a FileInfo from os.FileInfo
func NewFileInfoFromOS(info os.FileInfo, key string) *FileInfo {
	return &FileInfo{
		Key:         key,
		Size:        info.Size(),
		ATime:       time.Now(), // We don't have access time from os.FileInfo
		CTime:       info.ModTime(),
		AccessCount: 1,
	}
}

// UpdateAccess updates the access time and increments the access count
func (fi *FileInfo) UpdateAccess() {
	fi.ATime = time.Now()
	fi.AccessCount++
}

// BatchedTimeUpdate provides a way to batch time updates for better performance
type BatchedTimeUpdate struct {
	currentTime    time.Time
	lastUpdate     time.Time
	updateInterval time.Duration
}

// NewBatchedTimeUpdate creates a new batched time updater
func NewBatchedTimeUpdate(interval time.Duration) *BatchedTimeUpdate {
	now := time.Now()
	return &BatchedTimeUpdate{
		currentTime:    now,
		lastUpdate:     now,
		updateInterval: interval,
	}
}

// GetTime returns the current cached time, updating it if necessary
func (btu *BatchedTimeUpdate) GetTime() time.Time {
	now := time.Now()
	if now.Sub(btu.lastUpdate) >= btu.updateInterval {
		btu.currentTime = now
		btu.lastUpdate = now
	}
	return btu.currentTime
}

// UpdateAccessBatched updates the access time using batched time updates
func (fi *FileInfo) UpdateAccessBatched(btu *BatchedTimeUpdate) {
	fi.ATime = btu.GetTime()
	fi.AccessCount++
}

// GetTimeDecayedScore calculates a score based on access time and frequency
// More recent and frequent accesses get higher scores
func (fi *FileInfo) GetTimeDecayedScore() float64 {
	timeSinceAccess := time.Since(fi.ATime).Hours()
	decayFactor := 1.0 / (1.0 + timeSinceAccess/24.0) // Decay over days
	frequencyBonus := float64(fi.AccessCount) * 0.1
	return decayFactor + frequencyBonus
}
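As a worked example of GetTimeDecayedScore (illustrative numbers): a file last accessed 12 hours ago with AccessCount = 4 scores 1/(1 + 12/24) + 4 x 0.1 = 0.667 + 0.4 = 1.067, while a file last accessed 72 hours ago with AccessCount = 20 scores 1/(1 + 72/24) + 20 x 0.1 = 0.25 + 2.0 = 2.25. Heavy historical use can outweigh recency, which is what makes the hybrid eviction time-decayed rather than purely LRU.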
52 vfs/vfs.go
@@ -1,28 +1,46 @@
 // vfs/vfs.go
 package vfs

-import "io"
+import (
+	"io"
+	"s1d3sw1ped/steamcache2/vfs/types"
+)

-// VFS is the interface that wraps the basic methods of a virtual file system.
+// VFS defines the interface for virtual file systems
 type VFS interface {
-	// Name returns the name of the file system.
-	Name() string
-
-	// Size returns the total size of all files in the file system.
-	Size() int64
-
-	// Create creates a new file at key with expected size.
+	// Create creates a new file at the given key
 	Create(key string, size int64) (io.WriteCloser, error)

-	// Delete deletes the value of key.
-	Delete(key string) error
-
-	// Open opens the file at key.
+	// Open opens the file at the given key for reading
 	Open(key string) (io.ReadCloser, error)

-	// Stat returns the FileInfo of key.
-	Stat(key string) (*FileInfo, error)
+	// Delete removes the file at the given key
+	Delete(key string) error

-	// StatAll returns the FileInfo of all keys.
-	StatAll() []*FileInfo
+	// Stat returns information about the file at the given key
+	Stat(key string) (*types.FileInfo, error)
+
+	// Name returns the name of this VFS
+	Name() string
+
+	// Size returns the current size of the VFS
+	Size() int64
+
+	// Capacity returns the maximum capacity of the VFS
+	Capacity() int64
 }
+
+// FileInfo is an alias for types.FileInfo for backward compatibility
+type FileInfo = types.FileInfo
+
+// NewFileInfo is an alias for types.NewFileInfo for backward compatibility
+var NewFileInfo = types.NewFileInfo
+
+// NewFileInfoFromOS is an alias for types.NewFileInfoFromOS for backward compatibility
+var NewFileInfoFromOS = types.NewFileInfoFromOS
+
+// BatchedTimeUpdate is an alias for types.BatchedTimeUpdate for backward compatibility
+type BatchedTimeUpdate = types.BatchedTimeUpdate
+
+// NewBatchedTimeUpdate is an alias for types.NewBatchedTimeUpdate for backward compatibility
+var NewBatchedTimeUpdate = types.NewBatchedTimeUpdate
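Because these are type aliases rather than new named types, existing callers keep compiling unchanged: *vfs.FileInfo and *types.FileInfo are the same type, so no conversion is needed. A two-line illustration (hypothetical caller code, key name arbitrary):

	var fi *vfs.FileInfo = types.NewFileInfo("depot/440/manifest", 42)
	_ = fi // alias: identical underlying type, assignable without conversion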
@@ -3,16 +3,10 @@ package vfserror

 import "errors"

+// Common VFS errors
 var (
-	// ErrInvalidKey is returned when a key is invalid.
-	ErrInvalidKey = errors.New("vfs: invalid key")
-
-	// ErrUnreachable is returned when a code path is unreachable.
-	ErrUnreachable = errors.New("unreachable")
-
-	// ErrNotFound is returned when a key is not found.
-	ErrNotFound = errors.New("vfs: key not found")
-
-	// ErrDiskFull is returned when the disk is full.
-	ErrDiskFull = errors.New("vfs: disk full")
+	ErrNotFound         = errors.New("vfs: key not found")
+	ErrInvalidKey       = errors.New("vfs: invalid key")
+	ErrAlreadyExists    = errors.New("vfs: key already exists")
+	ErrCapacityExceeded = errors.New("vfs: capacity exceeded")
 )
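Note that ErrDiskFull and ErrUnreachable are dropped here, so callers that matched on them (such as the deleted TestCapacityLimit above) have to move to the new sentinels. Since these are plain errors.New values, matching still goes through errors.Is; a hypothetical call site, with cache, key, and size standing in for real variables:

	if _, err := cache.Create(key, size); errors.Is(err, vfserror.ErrCapacityExceeded) {
		// cache full: evict and retry, or fall through to the upstream fetch
	}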
300
vfs/warming/warming.go
Normal file
300
vfs/warming/warming.go
Normal file
@@ -0,0 +1,300 @@
|
|||||||
|
package warming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"s1d3sw1ped/steamcache2/vfs"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CacheWarmer implements intelligent cache warming strategies
|
||||||
|
type CacheWarmer struct {
|
||||||
|
vfs vfs.VFS
|
||||||
|
warmingQueue chan WarmRequest
|
||||||
|
activeWarmers map[string]*ActiveWarmer
|
||||||
|
stats *WarmingStats
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
wg sync.WaitGroup
|
||||||
|
mu sync.RWMutex
|
||||||
|
maxConcurrent int
|
||||||
|
warmingEnabled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarmRequest represents a cache warming request
|
||||||
|
type WarmRequest struct {
|
||||||
|
Key string
|
||||||
|
Priority int
|
||||||
|
Reason string
|
||||||
|
Size int64
|
||||||
|
RequestedAt time.Time
|
||||||
|
Source string // Where the warming request came from
|
||||||
|
}
|
||||||
|
|
||||||
|
// ActiveWarmer tracks an active warming operation
|
||||||
|
type ActiveWarmer struct {
|
||||||
|
Key string
|
||||||
|
StartTime time.Time
|
||||||
|
Priority int
|
||||||
|
Reason string
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarmingStats tracks cache warming statistics
|
||||||
|
type WarmingStats struct {
|
||||||
|
WarmRequests int64
|
||||||
|
WarmSuccesses int64
|
||||||
|
WarmFailures int64
|
||||||
|
WarmBytes int64
|
||||||
|
WarmDuration time.Duration
|
||||||
|
ActiveWarmers int64
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarmingStrategy defines different warming strategies
|
||||||
|
type WarmingStrategy int
|
||||||
|
|
||||||
|
const (
|
||||||
|
StrategyImmediate WarmingStrategy = iota
|
||||||
|
StrategyBackground
|
||||||
|
StrategyScheduled
|
||||||
|
StrategyPredictive
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewCacheWarmer creates a new cache warmer
|
||||||
|
func NewCacheWarmer(vfs vfs.VFS, maxConcurrent int) *CacheWarmer {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
cw := &CacheWarmer{
|
||||||
|
vfs: vfs,
|
||||||
|
warmingQueue: make(chan WarmRequest, 1000),
|
||||||
|
activeWarmers: make(map[string]*ActiveWarmer),
|
||||||
|
stats: &WarmingStats{},
|
||||||
|
ctx: ctx,
|
||||||
|
cancel: cancel,
|
||||||
|
maxConcurrent: maxConcurrent,
|
||||||
|
warmingEnabled: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start warming workers
|
||||||
|
for i := 0; i < maxConcurrent; i++ {
|
||||||
|
cw.wg.Add(1)
|
||||||
|
go cw.warmingWorker(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start cleanup worker
|
||||||
|
cw.wg.Add(1)
|
||||||
|
go cw.cleanupWorker()
|
||||||
|
|
||||||
|
return cw
|
||||||
|
}

// RequestWarming requests warming of content
func (cw *CacheWarmer) RequestWarming(key string, priority int, reason string, size int64, source string) {
    // Read warmingEnabled under the lock; SetWarmingEnabled writes it under
    // the same lock, so an unguarded read here would be a data race.
    cw.mu.RLock()
    enabled := cw.warmingEnabled
    cw.mu.RUnlock()
    if !enabled {
        return
    }

    // Check if already warming
    cw.mu.RLock()
    if _, exists := cw.activeWarmers[key]; exists {
        cw.mu.RUnlock()
        return // Already warming
    }
    cw.mu.RUnlock()

    // Check if already cached
    if _, err := cw.vfs.Stat(key); err == nil {
        return // Already cached
    }

    select {
    case cw.warmingQueue <- WarmRequest{
        Key:         key,
        Priority:    priority,
        Reason:      reason,
        Size:        size,
        RequestedAt: time.Now(),
        Source:      source,
    }:
        atomic.AddInt64(&cw.stats.WarmRequests, 1)
    default:
        // Queue full, skip warming
    }
}
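
// Two design points worth noting in RequestWarming. First, the enqueue is
// non-blocking: the select's default arm silently drops the request when the
// 1000-entry queue is full, so warming is strictly best-effort. Second, there
// is a check-then-act window: a key passes the activeWarmers and Stat checks
// here but is only marked active later, in processWarmingRequest, so duplicate
// requests for one key can briefly coexist in the queue. A sketch (an
// assumption, not code from this diff) of a dequeue-time re-check that would
// close that gap:
//
//     // at the top of processWarmingRequest:
//     cw.mu.Lock()
//     if _, busy := cw.activeWarmers[req.Key]; busy {
//         cw.mu.Unlock()
//         return // duplicate request; another worker already owns this key
//     }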

// warmingWorker processes warming requests
func (cw *CacheWarmer) warmingWorker(workerID int) {
    defer cw.wg.Done()

    for {
        select {
        case <-cw.ctx.Done():
            return
        case req := <-cw.warmingQueue:
            cw.processWarmingRequest(req, workerID)
        }
    }
}

// processWarmingRequest processes a warming request
func (cw *CacheWarmer) processWarmingRequest(req WarmRequest, workerID int) {
    // Mark as active warmer
    cw.mu.Lock()
    cw.activeWarmers[req.Key] = &ActiveWarmer{
        Key:       req.Key,
        StartTime: time.Now(),
        Priority:  req.Priority,
        Reason:    req.Reason,
    }
    cw.mu.Unlock()

    atomic.AddInt64(&cw.stats.ActiveWarmers, 1)

    // Simulate warming process
    // In a real implementation, this would:
    // 1. Fetch content from upstream
    // 2. Store in cache
    // 3. Update statistics

    startTime := time.Now()

    // Simulate warming delay based on priority
    warmingDelay := time.Duration(100-req.Priority*10) * time.Millisecond
    if warmingDelay < 10*time.Millisecond {
        warmingDelay = 10 * time.Millisecond
    }

    select {
    case <-time.After(warmingDelay):
        // Warming completed successfully
        atomic.AddInt64(&cw.stats.WarmSuccesses, 1)
        atomic.AddInt64(&cw.stats.WarmBytes, req.Size)
    case <-cw.ctx.Done():
        // Context cancelled
        atomic.AddInt64(&cw.stats.WarmFailures, 1)
    }

    duration := time.Since(startTime)
    cw.stats.mu.Lock()
    cw.stats.WarmDuration += duration
    cw.stats.mu.Unlock()

    // Remove from active warmers
    cw.mu.Lock()
    delete(cw.activeWarmers, req.Key)
    cw.mu.Unlock()

    atomic.AddInt64(&cw.stats.ActiveWarmers, -1)
}
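
// The select above only simulates a warm with a timer. A sketch of the real
// flow the numbered comments describe, under stated assumptions: fetchUpstream
// is hypothetical, and the vfs.VFS write side (Create returning an
// io.WriteCloser) is assumed, since this file only calls Stat:
//
//     func (cw *CacheWarmer) warmFromUpstream(req WarmRequest) error {
//         body, err := fetchUpstream(cw.ctx, req.Key) // hypothetical fetch, returns io.ReadCloser
//         if err != nil {
//             return err
//         }
//         defer body.Close()
//
//         dst, err := cw.vfs.Create(req.Key) // assumed VFS write method
//         if err != nil {
//             return err
//         }
//         defer dst.Close()
//
//         n, err := io.Copy(dst, body)
//         if err == nil {
//             atomic.AddInt64(&cw.stats.WarmBytes, n)
//         }
//         return err
//     }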

// cleanupWorker cleans up old warming requests
func (cw *CacheWarmer) cleanupWorker() {
    defer cw.wg.Done()

    ticker := time.NewTicker(1 * time.Minute)
    defer ticker.Stop()

    for {
        select {
        case <-cw.ctx.Done():
            return
        case <-ticker.C:
            cw.cleanupOldWarmers()
        }
    }
}

// cleanupOldWarmers removes old warming requests
func (cw *CacheWarmer) cleanupOldWarmers() {
    cw.mu.Lock()
    defer cw.mu.Unlock()

    now := time.Now()
    cutoff := now.Add(-5 * time.Minute) // Remove warmers older than 5 minutes

    for key, warmer := range cw.activeWarmers {
        warmer.mu.RLock()
        if warmer.StartTime.Before(cutoff) {
            warmer.mu.RUnlock()
            delete(cw.activeWarmers, key)
            atomic.AddInt64(&cw.stats.WarmFailures, 1)
        } else {
            warmer.mu.RUnlock()
        }
    }
}
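
// Note that cleanup only evicts the map entry: the worker goroutine that
// registered a stale key is still running and will decrement ActiveWarmers
// itself when it finishes. If that warm later completes, the request is
// counted both as a failure here and as a success in processWarmingRequest,
// so the two counters can overlap slightly.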

// GetActiveWarmers returns currently active warming operations
func (cw *CacheWarmer) GetActiveWarmers() []*ActiveWarmer {
    cw.mu.RLock()
    defer cw.mu.RUnlock()

    warmers := make([]*ActiveWarmer, 0, len(cw.activeWarmers))
    for _, warmer := range cw.activeWarmers {
        warmers = append(warmers, warmer)
    }

    return warmers
}

// GetStats returns warming statistics
func (cw *CacheWarmer) GetStats() *WarmingStats {
    cw.stats.mu.RLock()
    defer cw.stats.mu.RUnlock()

    return &WarmingStats{
        WarmRequests:  atomic.LoadInt64(&cw.stats.WarmRequests),
        WarmSuccesses: atomic.LoadInt64(&cw.stats.WarmSuccesses),
        WarmFailures:  atomic.LoadInt64(&cw.stats.WarmFailures),
        WarmBytes:     atomic.LoadInt64(&cw.stats.WarmBytes),
        WarmDuration:  cw.stats.WarmDuration,
        ActiveWarmers: atomic.LoadInt64(&cw.stats.ActiveWarmers),
    }
}
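
// GetStats hands back a detached snapshot, so derived metrics need no further
// locking. A small usage sketch (warmer is assumed to be a *CacheWarmer from
// NewCacheWarmer; the caller would import fmt):
//
//     s := warmer.GetStats()
//     if s.WarmRequests > 0 {
//         fmt.Printf("warm success rate: %.1f%%\n",
//             100*float64(s.WarmSuccesses)/float64(s.WarmRequests))
//     }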

// SetWarmingEnabled enables or disables cache warming
func (cw *CacheWarmer) SetWarmingEnabled(enabled bool) {
    cw.mu.Lock()
    defer cw.mu.Unlock()
    cw.warmingEnabled = enabled
}

// IsWarmingEnabled returns whether warming is enabled
func (cw *CacheWarmer) IsWarmingEnabled() bool {
    cw.mu.RLock()
    defer cw.mu.RUnlock()
    return cw.warmingEnabled
}

// Stop stops the cache warmer
func (cw *CacheWarmer) Stop() {
    cw.cancel()
    cw.wg.Wait()
}

// WarmPopularContent warms popular content based on access patterns
func (cw *CacheWarmer) WarmPopularContent(popularKeys []string, priority int) {
    for _, key := range popularKeys {
        cw.RequestWarming(key, priority, "popular_content", 0, "popular_analyzer")
    }
}

// WarmPredictedContent warms predicted content
func (cw *CacheWarmer) WarmPredictedContent(predictedKeys []string, priority int) {
    for _, key := range predictedKeys {
        cw.RequestWarming(key, priority, "predicted_access", 0, "predictor")
    }
}

// WarmSequentialContent warms content in sequential order
func (cw *CacheWarmer) WarmSequentialContent(sequentialKeys []string, priority int) {
    for i, key := range sequentialKeys {
        // Stagger warming requests to avoid overwhelming the system
        go func(k string, delay time.Duration) {
            time.Sleep(delay)
            cw.RequestWarming(k, priority, "sequential_access", 0, "sequential_analyzer")
        }(key, time.Duration(i)*100*time.Millisecond)
    }
}
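
// A brief usage sketch of the three helpers; the keys and priorities are
// placeholders. Note that the WarmingStrategy constants declared above are
// not consumed by these helpers; the strategy travels in the Reason and
// Source strings instead.
//
//     warmer.WarmPopularContent([]string{"depot/1/chunk/a"}, 9)   // priority 9: ~10ms simulated warm
//     warmer.WarmPredictedContent([]string{"depot/1/chunk/b"}, 5) // priority 5: ~50ms
//     warmer.WarmSequentialContent([]string{"c0", "c1", "c2"}, 3) // requests staggered 100ms apart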