Compare commits
16 Commits
| Author | SHA1 | Date |
|---|---|---|
| | f945ccef05 | |
| | 3703e40442 | |
| | bfe29dea75 | |
| | 9b2affe95a | |
| | bd123bc63a | |
| | 46495dc3aa | |
| | 45ae234694 | |
| | bbe014e334 | |
| | 694c223b00 | |
| | cc3497bc3a | |
| | 9ca8fa4a5e | |
| | 7fb1fcf21f | |
| | ee6fc32a1a | |
| | 4a4579b0f3 | |
| | b9358a0e8d | |
| | c197841960 | |
.cursor/rules/caching-patterns.mdc (new file, 64 lines)
@@ -0,0 +1,64 @@
---
description: Caching system patterns and best practices
---

# Caching System Patterns

## Cache Key Generation
- Use SHA256 hashing for cache keys to ensure uniform distribution
- Include service prefix (e.g., "steam/", "epic/") based on User-Agent detection
- Never include query parameters in cache keys - strip them before hashing
- Cache keys should be deterministic and consistent
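A minimal sketch of these rules; the helper name and exact prefix handling are illustrative assumptions, not the project's actual implementation:

```go
package cache

import (
	"crypto/sha256"
	"encoding/hex"
	"net/url"
)

// CacheKey strips query parameters, prefixes the detected service, and
// hashes with SHA-256 so keys stay deterministic and uniformly spread.
func CacheKey(service, rawURL string) (string, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	u.RawQuery = "" // never include query parameters in the key
	u.Fragment = ""
	sum := sha256.Sum256([]byte(u.String()))
	return service + "/" + hex.EncodeToString(sum[:]), nil
}
```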

## Cache File Format
The cache uses a custom format with:
- Magic number: "SC2C" (SteamCache2 Cache)
- Content hash: SHA256 of response body
- Response size: Total HTTP response size
- Raw HTTP response: Complete response as received from upstream
- Header line format: "SC2C <hash> <size>\n"
- Integrity verification on read operations
- Automatic corruption detection and cleanup
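A sketch of writing and checking that header line; the helper names are hypothetical, and whether the hash covers exactly the stored bytes is an assumption:

```go
package cache

import (
	"bufio"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// writeEntry emits the "SC2C <hash> <size>\n" header followed by the raw
// response; real code would stream rather than buffer the whole response.
func writeEntry(w io.Writer, raw []byte) error {
	sum := sha256.Sum256(raw)
	if _, err := fmt.Fprintf(w, "SC2C %s %d\n", hex.EncodeToString(sum[:]), len(raw)); err != nil {
		return err
	}
	_, err := w.Write(raw)
	return err
}

// readHeader parses the header so a reader can verify integrity
// (recompute the hash, compare sizes) before serving the entry.
func readHeader(r *bufio.Reader) (hash string, size int64, err error) {
	var magic string
	if _, err = fmt.Fscanf(r, "%s %s %d\n", &magic, &hash, &size); err != nil {
		return "", 0, err
	}
	if magic != "SC2C" {
		return "", 0, fmt.Errorf("bad magic %q: corrupt cache entry", magic)
	}
	return hash, size, nil
}
```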

## Garbage Collection Algorithms
Available algorithms and their use cases:
- **LRU**: Best for general gaming patterns, keeps recently accessed content
- **LFU**: Good for gaming cafes with popular games
- **FIFO**: Predictable behavior, good for testing
- **Largest**: Maximizes number of cached files
- **Smallest**: Maximizes cache hit rate
- **Hybrid**: Combines access time and file size for optimal performance

## Cache Validation
- Always verify Content-Length matches received data
- Use SHA256 hashing for content integrity
- Don't cache chunked transfer encoding (no Content-Length)
- Reject files with invalid or missing Content-Length

## Request Coalescing
- Multiple clients requesting the same file should share the download
- Use channels and mutexes to coordinate concurrent requests
- Buffer response data for coalesced clients
- Clean up coalesced request structures after completion
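These coalescing rules amount to the singleflight pattern. A minimal sketch with hypothetical types (golang.org/x/sync/singleflight provides a production-ready version):

```go
package cache

import "sync"

// inflight tracks one download per key; followers block on done and then
// read the buffered result.
type inflight struct {
	done chan struct{}
	data []byte
	err  error
}

type Coalescer struct {
	mu sync.Mutex
	m  map[string]*inflight
}

func NewCoalescer() *Coalescer { return &Coalescer{m: make(map[string]*inflight)} }

// Do runs fetch once per key; concurrent callers for the same key share
// the single download's result.
func (c *Coalescer) Do(key string, fetch func() ([]byte, error)) ([]byte, error) {
	c.mu.Lock()
	if f, ok := c.m[key]; ok { // another client is already downloading
		c.mu.Unlock()
		<-f.done
		return f.data, f.err
	}
	f := &inflight{done: make(chan struct{})}
	c.m[key] = f
	c.mu.Unlock()

	f.data, f.err = fetch()
	close(f.done)

	c.mu.Lock()
	delete(c.m, key) // clean up after completion
	c.mu.Unlock()
	return f.data, f.err
}
```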

## Range Request Support
- Always cache the full file, regardless of Range headers
- Support serving partial content from cached full files
- Parse Range headers correctly (bytes=start-end, bytes=start-, bytes=-suffix)
- Return appropriate HTTP status codes (206 for partial content, 416 for invalid ranges)
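A sketch of parsing the three single-range forms listed above; the function is illustrative and multi-range requests are out of scope:

```go
package cache

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRange returns inclusive byte offsets into a cached file of the
// given size; on error the caller would answer 416 Range Not Satisfiable.
func parseRange(header string, size int64) (start, end int64, err error) {
	spec, ok := strings.CutPrefix(header, "bytes=")
	if !ok {
		return 0, 0, fmt.Errorf("unsupported range %q", header)
	}
	from, to, _ := strings.Cut(spec, "-")
	switch {
	case from == "" && to != "": // bytes=-suffix: last N bytes
		n, perr := strconv.ParseInt(to, 10, 64)
		if perr != nil || n <= 0 {
			return 0, 0, fmt.Errorf("bad suffix range %q", header)
		}
		if n > size {
			n = size
		}
		return size - n, size - 1, nil
	case to == "": // bytes=start-: from offset to EOF
		s, perr := strconv.ParseInt(from, 10, 64)
		if perr != nil || s >= size {
			return 0, 0, fmt.Errorf("unsatisfiable range %q", header)
		}
		return s, size - 1, nil
	default: // bytes=start-end
		s, err1 := strconv.ParseInt(from, 10, 64)
		e, err2 := strconv.ParseInt(to, 10, 64)
		if err1 != nil || err2 != nil || s > e || s >= size {
			return 0, 0, fmt.Errorf("unsatisfiable range %q", header)
		}
		if e >= size {
			e = size - 1
		}
		return s, e, nil
	}
}
```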

## Service Detection
- Use regex patterns to match User-Agent strings
- Support multiple services (Steam, Epic Games, etc.)
- Cache keys include service prefix for isolation
- Default to Steam service configuration

## Memory vs Disk Caching
- Memory cache: Fast access, limited size, use LRU or LFU
- Disk cache: Slower access, large size, use Hybrid or Largest
- Tiered caching: Memory as L1, disk as L2
- Dynamic memory management with configurable thresholds
- Cache promotion: Move frequently accessed files from disk to memory
- Sharded storage: Use directory sharding for Steam keys to reduce inode pressure
- Memory-mapped files: Use mmap for large disk operations
- Batched operations: Group operations for better performance
.cursor/rules/configuration-patterns.mdc (new file, 65 lines)
@@ -0,0 +1,65 @@
---
description: Configuration management patterns
---

# Configuration Management Patterns

## YAML Configuration
- Use YAML format for human-readable configuration
- Provide sensible defaults for all configuration options
- Validate configuration on startup
- Generate default configuration file on first run

## Configuration Structure
- Group related settings in nested structures
- Use descriptive field names with YAML tags
- Provide default values in struct tags where possible
- Use appropriate data types (strings for sizes, ints for limits)

## Size Configuration
- Use human-readable size strings (e.g., "1GB", "512MB")
- Parse sizes using `github.com/docker/go-units`
- Support "0" to disable cache layers
- Validate size limits are reasonable
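A small sketch of that parsing; whether the project calls `units.RAMInBytes` or `units.FromHumanSize` is an assumption, but both exist in github.com/docker/go-units:

```go
package config

import (
	"fmt"

	units "github.com/docker/go-units"
)

// parseCacheSize turns "512MB"/"1GB" style strings into bytes;
// "0" (or empty) disables the cache layer as described above.
func parseCacheSize(s string) (int64, error) {
	if s == "" || s == "0" {
		return 0, nil // layer disabled
	}
	n, err := units.RAMInBytes(s) // binary multiples: 1GB = 1024^3
	if err != nil {
		return 0, fmt.Errorf("invalid cache size %q: %w", s, err)
	}
	return n, nil
}
```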

## Garbage Collection Configuration
- Support multiple GC algorithms per cache layer
- Provide algorithm-specific configuration options
- Allow different algorithms for memory vs disk caches
- Document algorithm characteristics and use cases

## Server Configuration
- Configure listen address and port
- Set concurrency limits (global and per-client)
- Configure upstream server URL
- Support both absolute and relative upstream URLs

## Runtime Configuration
- Allow command-line overrides for critical settings
- Support configuration file path specification
- Provide help and version information
- Validate configuration before starting services

## Default Configuration
- Generate appropriate defaults for different use cases
- Consider system resources when setting defaults
- Provide conservative defaults for home users
- Document configuration options in comments

## Configuration Validation
- Validate required fields are present
- Check that size limits are reasonable
- Verify file paths are accessible
- Test upstream server connectivity

## Configuration Updates
- Support configuration reloading (if needed)
- Handle configuration changes gracefully
- Log configuration changes
- Maintain backward compatibility

## Environment-Specific Configuration
- Support different configurations for development/production
- Allow environment variable overrides
- Provide configuration templates for common scenarios
- Document configuration best practices
.cursor/rules/development-workflow.mdc (new file, 77 lines)
@@ -0,0 +1,77 @@
---
description: Development workflow and best practices
---

# Development Workflow for SteamCache2

## Build System
- Use the provided [Makefile](mdc:Makefile) for all build operations
- Prefer `make` commands over direct `go` commands
- Use `make test` to run all tests before committing
- Use `make run-debug` for development with debug logging

## Code Organization
- Keep related functionality in the same package
- Use clear package boundaries and interfaces
- Minimize dependencies between packages
- Follow the existing project structure

## Git Workflow
- Use descriptive commit messages
- Keep commits focused and atomic
- Test changes thoroughly before committing
- Use meaningful branch names

## Code Review
- Review code for correctness and performance
- Check for proper error handling
- Verify test coverage for new functionality
- Ensure code follows project conventions

## Documentation
- Update README.md for user-facing changes
- Add comments for complex algorithms
- Document configuration options
- Keep API documentation current

## Testing Strategy
- Write tests for new functionality
- Maintain high test coverage
- Test edge cases and error conditions
- Run integration tests before major releases

## Performance Testing
- Test with realistic data sizes
- Measure performance impact of changes
- Profile the application under load
- Monitor memory usage and leaks

## Configuration Management
- Test configuration changes thoroughly
- Validate configuration on startup
- Provide sensible defaults
- Document configuration options

## Error Handling
- Implement proper error handling
- Use structured logging for errors
- Provide meaningful error messages
- Handle edge cases gracefully

## Security Considerations
- Validate all inputs
- Implement proper rate limiting
- Log security-relevant events
- Follow security best practices

## Release Process
- Test thoroughly before releasing
- Update version information
- Create release notes
- Tag releases appropriately

## Maintenance
- Monitor application performance
- Update dependencies regularly
- Fix bugs promptly
- Refactor code when needed
.cursor/rules/golang-conventions.mdc (new file, 62 lines)
@@ -0,0 +1,62 @@
---
globs: *.go
---

# Go Language Conventions for SteamCache2

## Code Style
- Use `gofmt` and `goimports` for formatting
- Follow standard Go naming conventions (camelCase for private, PascalCase for public)
- Use meaningful variable names that reflect their purpose
- Prefer explicit error handling over panic (except in constructors where configuration is invalid)

## Package Organization
- Keep packages focused and cohesive
- Use internal packages for implementation details that shouldn't be exported
- Group related functionality together (e.g., all VFS implementations in `vfs/`)
- Use interface implementation verification: `var _ Interface = (*Implementation)(nil)`
- Create type aliases for backward compatibility when refactoring
- Use separate packages for different concerns (e.g., `vfserror`, `types`, `locks`)

## Error Handling
- Always handle errors explicitly - never ignore them with `_`
- Use `fmt.Errorf` with `%w` verb for error wrapping
- Log errors with context using structured logging (zerolog)
- Return meaningful error messages that help with debugging
- Create custom error types for domain-specific errors (see `vfs/vfserror/`)
- Use `errors.New()` for simple error constants
- Include relevant context in error messages (file paths, operation names)
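A compact sketch of these conventions together: an error constant, `%w` wrapping with path context, and matching the cause with `errors.Is` (names are illustrative):

```go
package vfs

import (
	"errors"
	"fmt"
	"os"
)

// ErrNotFound is a simple error constant for a domain-specific condition.
var ErrNotFound = errors.New("vfs: file not found")

// readEntry wraps failures with the operation name and file path so the
// cause is preserved for errors.Is while the message stays debuggable.
func readEntry(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, fmt.Errorf("readEntry %s: %w", path, ErrNotFound)
		}
		return nil, fmt.Errorf("readEntry %s: %w", path, err)
	}
	return data, nil
}

// Callers branch on the wrapped cause:
//
//	if _, err := readEntry(p); errors.Is(err, ErrNotFound) { /* cache miss */ }
```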

## Testing
- All tests should run with a timeout (as per user rules)
- Use table-driven tests for multiple test cases
- Use `t.Helper()` in test helper functions
- Test both success and failure cases
- Use `t.TempDir()` for temporary files in tests

## Concurrency
- Use `sync.RWMutex` for read-heavy operations
- Prefer channels over shared memory when possible
- Use `context.Context` for cancellation and timeouts
- Be explicit about goroutine lifecycle management
- Use sharded locks for high-concurrency scenarios (see `vfs/locks/sharding.go`)
- Use `atomic.Value` for lock-free data structure updates
- Use `sync.Map` for concurrent map operations when appropriate

## Performance
- Use `io.ReadAll` sparingly - prefer streaming for large data
- Use connection pooling for HTTP clients
- Implement proper resource cleanup (defer statements)
- Use buffered channels when appropriate

## Logging
- Use structured logging with zerolog
- Include relevant context in log messages (keys, URLs, client IPs)
- Use appropriate log levels (Debug, Info, Warn, Error)
- Avoid logging sensitive information

## Memory Management
- Be mindful of memory usage in caching scenarios
- Use appropriate data structures for the use case
- Implement proper cleanup for long-running services
- Monitor memory usage in production
.cursor/rules/http-proxy-patterns.mdc (new file, 59 lines)
@@ -0,0 +1,59 @@
---
description: HTTP proxy and server patterns
---

# HTTP Proxy and Server Patterns

## Request Handling
- Only support GET requests (Steam doesn't use other methods)
- Reject non-GET requests with 405 Method Not Allowed
- Handle health checks at "/" endpoint
- Support LanCache heartbeat at "/lancache-heartbeat"

## Upstream Communication
- Use optimized HTTP transport with connection pooling
- Set appropriate timeouts (10s dial, 15s header, 60s total)
- Enable HTTP/2 and keep-alives for better performance
- Use large buffers (64KB) for better throughput

## Response Streaming
- Stream responses directly to clients for better performance
- Support both full file and range request streaming
- Preserve original HTTP headers (excluding hop-by-hop headers)
- Add cache-specific headers (X-LanCache-Status, X-LanCache-Processed-By)

## Error Handling
- Implement retry logic with exponential backoff
- Handle upstream server errors gracefully
- Return appropriate HTTP status codes
- Log errors with sufficient context for debugging

## Concurrency Control
- Use semaphores to limit concurrent requests globally
- Implement per-client rate limiting
- Clean up old client limiters to prevent memory leaks
- Use proper synchronization for shared data structures
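A minimal sketch of the global limit as a buffered-channel semaphore sized from `max_concurrent_requests` (the setting exists in this PR's config; the wrapper shape is an assumption, and a weighted semaphore would work equally well):

```go
package proxy

import "net/http"

// limiter gates concurrent requests: a slot is acquired before proxying
// and released when the handler returns.
type limiter chan struct{}

func newLimiter(n int64) limiter { return make(limiter, n) }

func (l limiter) wrap(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case l <- struct{}{}: // acquire a slot
			defer func() { <-l }() // release on completion
			next.ServeHTTP(w, r)
		default: // all slots busy; shed load instead of queueing
			http.Error(w, "too many requests", http.StatusTooManyRequests)
		}
	})
}
```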

## Header Management
- Copy relevant headers from upstream responses
- Exclude hop-by-hop headers (Connection, Keep-Alive, etc.)
- Add cache status headers for monitoring
- Preserve Content-Type and Content-Length headers

## Client IP Detection
- Check X-Forwarded-For header first (for proxy setups)
- Fall back to X-Real-IP header
- Use RemoteAddr as final fallback
- Handle comma-separated IP lists in X-Forwarded-For
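The fallback chain above, as a short sketch (function name assumed):

```go
package proxy

import (
	"net"
	"net/http"
	"strings"
)

// clientIP tries X-Forwarded-For (first entry of the comma-separated
// list), then X-Real-IP, then falls back to the connection's RemoteAddr.
func clientIP(r *http.Request) string {
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		first, _, _ := strings.Cut(xff, ",")
		return strings.TrimSpace(first)
	}
	if rip := r.Header.Get("X-Real-IP"); rip != "" {
		return rip
	}
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return r.RemoteAddr // no port component present
	}
	return host
}
```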

## Performance Optimizations
- Set keep-alive headers for better connection reuse
- Use appropriate server timeouts
- Implement request coalescing for duplicate requests
- Use buffered I/O for better performance

## Security Considerations
- Validate request URLs and paths
- Implement rate limiting to prevent abuse
- Log suspicious activity
- Handle malformed requests gracefully
.cursor/rules/logging-monitoring-patterns.mdc (new file, 87 lines)
@@ -0,0 +1,87 @@
---
description: Logging and monitoring patterns for SteamCache2
---

# Logging and Monitoring Patterns

## Structured Logging with Zerolog
- Use zerolog for all logging operations
- Include structured fields for better querying and analysis
- Use appropriate log levels: Debug, Info, Warn, Error
- Include timestamps and context in all log messages
- Configure log format (JSON for production, console for development)

## Log Context and Fields
- Always include relevant context in log messages
- Use consistent field names: `client_ip`, `cache_key`, `url`, `service`
- Include operation duration with `Dur()` for performance monitoring
- Log cache hit/miss status for analytics
- Include file sizes and operation counts for monitoring
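Putting those field conventions together with zerolog's fluent API; this sketch uses zerolog's global logger, whereas the project routes through its own `logger.Logger` wrapper (visible in the cmd/root.go diff below):

```go
package logging

import (
	"time"

	"github.com/rs/zerolog/log"
)

// logCacheHit shows the consistent field names recommended above,
// plus Dur() for operation timing.
func logCacheHit(clientIP, cacheKey, url, service string, size int64, took time.Duration) {
	log.Info().
		Str("client_ip", clientIP).
		Str("cache_key", cacheKey).
		Str("url", url).
		Str("service", service).
		Int64("size", size).
		Dur("duration", took).
		Msg("cache hit")
}
```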

## Performance Monitoring
- Log request processing times with `zduration` field
- Monitor cache hit/miss ratios
- Track memory and disk usage
- Log garbage collection events and statistics
- Monitor concurrent request counts and limits

## Error Logging
- Log errors with full context and stack traces
- Include relevant request information in error logs
- Use structured error logging with `Err()` field
- Log configuration errors with file paths
- Include upstream server errors with status codes

## Cache Operation Logging
- Log cache hits with key and response time
- Log cache misses with reason and upstream response time
- Log cache corruption detection and cleanup
- Log garbage collection operations and evicted items
- Log cache promotion events (disk to memory)

## Service Detection Logging
- Log service detection results (Steam, Epic, etc.)
- Log User-Agent patterns and matches
- Log service configuration changes
- Log cache key generation for different services

## HTTP Request Logging
- Log incoming requests with method, URL, and client IP
- Log response status codes and sizes
- Log upstream server communication
- Log rate limiting events and client limits
- Log health check and heartbeat requests

## Configuration Logging
- Log configuration loading and validation
- Log default configuration generation
- Log configuration changes and overrides
- Log startup parameters and settings

## Security Event Logging
- Log suspicious request patterns
- Log rate limiting violations
- Log authentication failures (if applicable)
- Log configuration security issues
- Log potential security threats

## System Health Logging
- Log memory usage and fragmentation
- Log disk usage and capacity
- Log connection pool statistics
- Log goroutine counts and lifecycle
- Log system resource utilization

## Log Rotation and Management
- Implement log rotation for long-running services
- Use appropriate log retention policies
- Monitor log file sizes and disk usage
- Configure log levels for different environments
- Use structured logging for log analysis tools

## Monitoring Integration
- Design logs for easy parsing by monitoring tools
- Include metrics that can be scraped by Prometheus
- Use consistent field naming for dashboard creation
- Log events that can trigger alerts
- Include correlation IDs for request tracing
.cursor/rules/performance-optimization.mdc (new file, 71 lines)
@@ -0,0 +1,71 @@
---
description: Performance optimization guidelines
---

# Performance Optimization Guidelines

## Memory Management
- Use appropriate data structures for the use case
- Implement proper cleanup for long-running services
- Monitor memory usage and implement limits
- Use memory pools for frequently allocated objects

## I/O Optimization
- Use buffered I/O for better performance
- Implement connection pooling for HTTP clients
- Use appropriate buffer sizes (64KB for HTTP)
- Minimize system calls and context switches

## Concurrency Patterns
- Use worker pools for CPU-intensive tasks
- Implement proper backpressure with semaphores
- Use channels for coordination between goroutines
- Avoid excessive goroutine creation

## Caching Strategies
- Use tiered caching (memory + disk) for optimal performance
- Implement intelligent cache eviction policies
- Use cache warming for predictable access patterns
- Monitor cache hit ratios and adjust strategies

## Network Optimization
- Use HTTP/2 when available
- Enable connection keep-alives
- Use appropriate timeouts for different operations
- Implement request coalescing for duplicate requests

## Data Structures
- Choose appropriate data structures for access patterns
- Use sync.RWMutex for read-heavy operations
- Consider lock-free data structures where appropriate
- Minimize memory allocations in hot paths

## Algorithm Selection
- Choose GC algorithms based on access patterns
- Use LRU for general gaming workloads
- Use LFU for gaming cafes with popular content
- Use Hybrid algorithms for mixed workloads

## Monitoring and Profiling
- Implement performance metrics collection
- Use structured logging for performance analysis
- Monitor key performance indicators
- Profile the application under realistic loads

## Resource Management
- Implement proper resource cleanup
- Use context.Context for cancellation
- Set appropriate limits on resource usage
- Monitor resource consumption over time

## Scalability Considerations
- Design for horizontal scaling where possible
- Use sharding for large datasets
- Implement proper load balancing
- Consider distributed caching for large deployments

## Bottleneck Identification
- Profile the application to identify bottlenecks
- Focus optimization efforts on the most critical paths
- Use appropriate tools for performance analysis
- Test performance under realistic conditions
.cursor/rules/project-structure.mdc (new file, 57 lines)
@@ -0,0 +1,57 @@
---
alwaysApply: true
---

# SteamCache2 Project Structure Guide

This is a high-performance Steam download cache written in Go. The main entry point is [main.go](mdc:main.go), which delegates to the command structure in [cmd/](mdc:cmd/).

## Core Architecture

- **Main Entry**: [main.go](mdc:main.go) - Simple entry point that calls `cmd.Execute()`
- **Command Layer**: [cmd/root.go](mdc:cmd/root.go) - CLI interface using Cobra, handles configuration loading and service startup
- **Core Service**: [steamcache/steamcache.go](mdc:steamcache/steamcache.go) - Main HTTP proxy and caching logic
- **Configuration**: [config/config.go](mdc:config/config.go) - YAML-based configuration management
- **Virtual File System**: [vfs/](mdc:vfs/) - Abstracted storage layer supporting memory and disk caches

## Key Components

### VFS (Virtual File System)
- [vfs/vfs.go](mdc:vfs/vfs.go) - Core VFS interface
- [vfs/memory/](mdc:vfs/memory/) - In-memory cache implementation
- [vfs/disk/](mdc:vfs/disk/) - Disk-based cache implementation
- [vfs/cache/](mdc:vfs/cache/) - Cache coordination layer
- [vfs/gc/](mdc:vfs/gc/) - Garbage collection algorithms (LRU, LFU, FIFO, etc.)

### Service Management
- Service detection via User-Agent patterns
- Support for multiple gaming services (Steam, Epic, etc.)
- SHA256-based cache key generation with service prefixes

### Advanced Features
- [vfs/adaptive/](mdc:vfs/adaptive/) - Adaptive caching strategies
- [vfs/predictive/](mdc:vfs/predictive/) - Predictive cache warming
- Request coalescing for concurrent downloads
- Range request support for partial content

## Development Workflow

Use the [Makefile](mdc:Makefile) for development:
- `make` - Run tests and build
- `make test` - Run all tests
- `make run` - Run the application
- `make run-debug` - Run with debug logging

## Testing

- Unit tests: [steamcache/steamcache_test.go](mdc:steamcache/steamcache_test.go)
- Integration tests: [steamcache/integration_test.go](mdc:steamcache/integration_test.go)
- Test cache data: [steamcache/test_cache/](mdc:steamcache/test_cache/)

## Configuration

Default configuration is generated in [config.yaml](mdc:config.yaml) on first run. The application supports:
- Memory and disk cache sizing
- Garbage collection algorithm selection
- Concurrency limits
- Upstream server configuration
.cursor/rules/security-validation-patterns.mdc (new file, 89 lines)
@@ -0,0 +1,89 @@
---
description: Security and validation patterns for SteamCache2
---

# Security and Validation Patterns

## Input Validation
- Validate all HTTP request parameters and headers
- Sanitize file paths and cache keys to prevent directory traversal
- Validate URL paths before processing
- Check Content-Length headers for reasonable values
- Reject malformed or suspicious requests
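A sketch of the traversal check for file-path sanitization; the helper name is assumed, and hashed cache keys largely sidestep the problem, but raw paths still need it:

```go
package security

import (
	"fmt"
	"path"
	"strings"
)

// safeCachePath maps an untrusted key under root. Cleaning a rooted copy
// of the key collapses any ".." segments, so the result can never climb
// above root.
func safeCachePath(root, key string) (string, error) {
	if strings.ContainsRune(key, 0) {
		return "", fmt.Errorf("invalid key %q", key)
	}
	clean := path.Clean("/" + key) // "/a/../../b" becomes "/b"
	return path.Join(root, clean), nil
}
```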

## Cache Key Security
- Use SHA256 hashing for all cache keys to prevent collisions
- Never include user input directly in cache keys
- Strip query parameters from URLs before hashing
- Use service prefixes to isolate different services
- Validate cache key format and length

## Content Integrity
- Always verify Content-Length matches received data
- Use SHA256 hashing for content integrity verification
- Don't cache chunked transfer encoding (no Content-Length)
- Reject files with invalid or missing Content-Length
- Implement cache file format validation with magic numbers

## Rate Limiting and DoS Protection
- Implement global concurrency limits with semaphores
- Use per-client rate limiting to prevent abuse
- Clean up old client limiters to prevent memory leaks
- Set appropriate timeouts for all operations
- Monitor and log suspicious activity

## HTTP Security
- Only support GET requests (Steam doesn't use other methods)
- Validate HTTP method and reject unsupported methods
- Handle malformed HTTP requests gracefully
- Implement proper error responses with appropriate status codes
- Use hop-by-hop header filtering

## Client IP Detection
- Check X-Forwarded-For header for proxy setups
- Fall back to X-Real-IP header
- Use RemoteAddr as final fallback
- Handle comma-separated IP lists in X-Forwarded-For
- Log client IPs for monitoring and debugging

## Service Detection Security
- Use regex patterns for User-Agent matching
- Validate service configurations before use
- Support multiple services with proper isolation
- Default to Steam service configuration
- Log service detection for monitoring

## Error Handling Security
- Don't expose internal system information in error messages
- Log detailed errors for debugging but return generic messages to clients
- Handle errors gracefully without crashing
- Implement proper cleanup on errors
- Use structured logging for security events

## Configuration Security
- Validate configuration values on startup
- Use sensible defaults for security-sensitive settings
- Validate file paths and permissions
- Check upstream server connectivity
- Log configuration changes

## Memory and Resource Security
- Implement memory limits to prevent OOM attacks
- Use proper resource cleanup and garbage collection
- Monitor memory usage and implement alerts
- Use bounded data structures where possible
- Implement proper connection limits

## Logging Security
- Don't log sensitive information (passwords, tokens)
- Use structured logging for security events
- Include relevant context (IPs, URLs, timestamps)
- Implement log rotation and retention policies
- Monitor logs for security issues

## Network Security
- Use HTTPS for upstream connections when possible
- Implement proper TLS configuration
- Use connection pooling with appropriate limits
- Set reasonable timeouts for network operations
- Monitor network traffic for anomalies
.cursor/rules/steamcache2-overview.mdc (new file, 48 lines)
@@ -0,0 +1,48 @@
---
alwaysApply: true
---

# SteamCache2 Overview

SteamCache2 is a high-performance HTTP proxy cache specifically designed for Steam game downloads. It reduces bandwidth usage and speeds up downloads by caching game files locally.

## Key Features
- **Tiered Caching**: Memory + disk cache with intelligent promotion
- **Service Detection**: Automatically detects Steam clients via User-Agent
- **Request Coalescing**: Multiple clients share downloads of the same file
- **Range Support**: Serves partial content from cached full files
- **Garbage Collection**: Multiple algorithms (LRU, LFU, FIFO, Hybrid, etc.)
- **Adaptive Caching**: Learns from access patterns for better performance

## Architecture
- **HTTP Proxy**: Intercepts Steam requests and serves from cache when possible
- **VFS Layer**: Abstracted storage supporting memory and disk caches
- **Service Manager**: Handles multiple gaming services (Steam, Epic, etc.)
- **GC System**: Intelligent cache eviction with configurable algorithms

## Development
- **Language**: Go 1.23+
- **Build**: Use `make` commands (see [Makefile](mdc:Makefile))
- **Testing**: Comprehensive unit and integration tests
- **Configuration**: YAML-based with automatic generation

## Performance
- **Concurrency**: Configurable request limits and rate limiting
- **Memory**: Dynamic memory management with configurable thresholds
- **Network**: Optimized HTTP transport with connection pooling
- **Storage**: Efficient cache file format with integrity verification

## Use Cases
- **Gaming Cafes**: Reduce bandwidth costs and improve download speeds
- **LAN Events**: Share game downloads across multiple clients
- **Home Networks**: Speed up game updates for multiple gamers
- **Development**: Test game downloads without hitting Steam servers

## Configuration
Default configuration is generated on first run. Key settings:
- Cache sizes (memory/disk)
- Garbage collection algorithms
- Concurrency limits
- Upstream server configuration

See [config.yaml](mdc:config.yaml) for configuration options and [README.md](mdc:README.md) for detailed setup instructions.
.cursor/rules/testing-guidelines.mdc (new file, 78 lines)
@@ -0,0 +1,78 @@
---
globs: *_test.go
---

# Testing Guidelines for SteamCache2

## Test Structure
- Use table-driven tests for multiple test cases
- Group related tests in the same test function when appropriate
- Use descriptive test names that explain what is being tested
- Include both positive and negative test cases
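A table-driven sketch in that style, exercising a hypothetical range parser (the cases mirror the Range forms the cache must handle; the function under test is a placeholder):

```go
package steamcache

import "testing"

func TestParseRange(t *testing.T) {
	tests := []struct {
		name      string
		header    string
		size      int64
		wantStart int64
		wantEnd   int64
		wantErr   bool
	}{
		{name: "closed range", header: "bytes=0-99", size: 1000, wantStart: 0, wantEnd: 99},
		{name: "open end", header: "bytes=500-", size: 1000, wantStart: 500, wantEnd: 999},
		{name: "suffix", header: "bytes=-100", size: 1000, wantStart: 900, wantEnd: 999},
		{name: "unsatisfiable", header: "bytes=2000-", size: 1000, wantErr: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			start, end, err := parseRange(tt.header, tt.size)
			if (err != nil) != tt.wantErr {
				t.Fatalf("err = %v, wantErr %v", err, tt.wantErr)
			}
			if err == nil && (start != tt.wantStart || end != tt.wantEnd) {
				t.Errorf("got %d-%d, want %d-%d", start, end, tt.wantStart, tt.wantEnd)
			}
		})
	}
}
```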

## Test Data Management
- Use `t.TempDir()` for temporary files and directories
- Clean up resources in defer statements
- Use unique temporary directories for each test to avoid conflicts
- Don't rely on external services in unit tests

## Integration Testing
- Mark integration tests with `testing.Short()` checks
- Use real Steam URLs for integration tests when appropriate
- Test both cache hits and cache misses
- Verify response integrity between direct and cached responses
- Test against actual Steam servers for real-world validation
- Use `httptest.NewServer` for local testing scenarios
- Compare direct vs cached responses byte-for-byte

## Mocking and Stubbing
- Use `httptest.NewServer` for HTTP server mocking
- Create mock responses that match real Steam responses
- Test error conditions and edge cases
- Use `httptest.NewRecorder` for response testing

## Performance Testing
- Test with realistic data sizes
- Measure cache hit/miss ratios
- Test concurrent request handling
- Verify memory usage doesn't grow unbounded

## Cache Testing
- Test cache key generation and uniqueness
- Verify cache file format serialization/deserialization
- Test garbage collection algorithms
- Test cache eviction policies
- Test cache corruption scenarios and recovery
- Verify cache file format integrity (magic numbers, hashes)
- Test range request handling from cached files
- Test request coalescing behavior

## Service Detection Testing
- Test User-Agent pattern matching
- Test service configuration management
- Test cache key generation for different services
- Test service expandability (adding new services)

## Error Handling Testing
- Test network failures and timeouts
- Test malformed requests and responses
- Test cache corruption scenarios
- Test resource exhaustion conditions

## Test Timeouts
- All tests should run with appropriate timeouts
- Use `context.WithTimeout` for long-running operations
- Set reasonable timeouts for network operations
- Fail fast on obvious errors

## Test Coverage
- Aim for high test coverage on critical paths
- Test edge cases and error conditions
- Test concurrent access patterns
- Test resource cleanup and memory management

## Test Documentation
- Document complex test scenarios
- Explain the purpose of integration tests
- Include comments for non-obvious test logic
- Document expected behavior and assumptions
.cursor/rules/vfs-patterns.mdc (new file, 72 lines)
@@ -0,0 +1,72 @@
---
description: VFS (Virtual File System) patterns and architecture
---

# VFS (Virtual File System) Patterns

## Core VFS Interface
- Implement the `vfs.VFS` interface for all storage backends
- Use interface implementation verification: `var _ vfs.VFS = (*Implementation)(nil)`
- Support both memory and disk-based storage with the same interface
- Provide size and capacity information for monitoring

## Tiered Cache Architecture
- Use `vfs/cache/cache.go` for two-tier caching (memory + disk)
- Implement lock-free tier switching with `atomic.Value`
- Prefer disk tier for persistence, memory tier for speed
- Support cache promotion from disk to memory

## Sharded File Systems
- Use sharded directory structures for Steam cache keys
- Implement 2-level sharding: `steam/XX/YY/hash` for optimal performance
- Use `vfs/locks/sharding.go` for sharded locking
- Reduce inode pressure with directory sharding
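A sketch of the steam/XX/YY/hash layout; taking the shard directories from the first four hex characters of the key is an assumption about the exact scheme:

```go
package disk

import "path"

// shardedPath spreads hashed keys across a two-level directory tree,
// e.g. "3fa09c..." under root "steam" becomes "steam/3f/a0/3fa09c...".
func shardedPath(root, hexKey string) string {
	if len(hexKey) < 4 {
		return path.Join(root, hexKey) // too short to shard
	}
	return path.Join(root, hexKey[:2], hexKey[2:4], hexKey)
}
```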

## Memory Management
- Use `bytes.Buffer` for in-memory file storage
- Implement batched time updates for performance
- Use LRU lists for eviction tracking
- Monitor memory fragmentation and usage

## Disk Storage
- Use memory-mapped files (`mmap`) for large file operations
- Implement efficient file path sharding
- Use batched operations for better I/O performance
- Support concurrent access with proper locking

## Garbage Collection Integration
- Wrap VFS implementations with `vfs/gc/gc.go`
- Support multiple GC algorithms (LRU, LFU, FIFO, etc.)
- Implement async GC with configurable thresholds
- Use eviction functions from `vfs/eviction/eviction.go`

## Performance Optimizations
- Use sharded locks to reduce contention
- Implement batched time updates (100ms intervals)
- Use atomic operations for lock-free updates
- Monitor and log performance metrics

## Error Handling
- Use custom VFS errors from `vfs/vfserror/vfserror.go`
- Handle capacity exceeded scenarios gracefully
- Implement proper cleanup on errors
- Log VFS operations with context

## File Information Management
- Use `vfs/types/types.go` for file metadata
- Track access times, sizes, and other statistics
- Implement efficient file info storage and retrieval
- Support batched metadata updates

## Adaptive and Predictive Features
- Integrate with `vfs/adaptive/adaptive.go` for learning patterns
- Use `vfs/predictive/predictive.go` for cache warming
- Implement intelligent cache promotion strategies
- Monitor access patterns for optimization

## Testing VFS Implementations
- Test with realistic file sizes and access patterns
- Verify concurrent access scenarios
- Test garbage collection behavior
- Validate sharding and path generation
- Test error conditions and edge cases
.gitignore (vendored, 18 lines changed)
@@ -1,5 +1,15 @@
dist/
tmp/
#build artifacts
/dist/

#disk cache
/disk/

#config file
/config.yaml

#windows executables
*.exe
.smashed.txt
.smashignore

#test cache
/steamcache/test_cache/*
!/steamcache/test_cache/.gitkeep

@@ -11,8 +11,8 @@ builds:
        - -s
        - -w
        - -extldflags "-static"
        - -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
        - -X s1d3sw1ped/SteamCache2/version.Date={{.Date}}
        - -X s1d3sw1ped/steamcache2/version.Version={{.Version}}
        - -X s1d3sw1ped/steamcache2/version.Date={{.Date}}
      env:
        - CGO_ENABLED=0
      goos:
.vscode/launch.json (vendored, deleted, 67 lines)
@@ -1,67 +0,0 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Launch Memory & Disk",
            "type": "go",
            "request": "launch",
            "mode": "auto",
            "program": "${workspaceFolder}/main.go",
            "args": [
                "--memory",
                "1G",
                "--disk",
                "10G",
                "--disk-path",
                "tmp/disk",
                "--memory-gc",
                "lfu",
                "--disk-gc",
                "lru",
                "--log-level",
                "debug",
                // "--upstream",
                // "http://192.168.2.5:80",
            ],
        },
        {
            "name": "Launch Disk Only",
            "type": "go",
            "request": "launch",
            "mode": "auto",
            "program": "${workspaceFolder}/main.go",
            "args": [
                "--disk",
                "10G",
                "--disk-path",
                "tmp/disk",
                "--disk-gc",
                "hybrid",
                "--log-level",
                "debug",
                // "--upstream",
                // "http://192.168.2.5:80",
            ],
        },
        {
            "name": "Launch Memory Only",
            "type": "go",
            "request": "launch",
            "mode": "auto",
            "program": "${workspaceFolder}/main.go",
            "args": [
                "--memory",
                "1G",
                "--memory-gc",
                "lru",
                "--log-level",
                "debug",
                // "--upstream",
                // "http://192.168.2.5:80",
            ],
        }
    ]
}
Makefile (new file, 21 lines)
@@ -0,0 +1,21 @@
run: build-snapshot-single ## Run the application
	@dist/default_windows_amd64_v1/steamcache2.exe
run-debug: build-snapshot-single ## Run the application with debug logging
	@dist/default_windows_amd64_v1/steamcache2.exe --log-level debug

test: deps ## Run all tests
	@go test -v ./...

deps: ## Download dependencies
	@go mod tidy

build-snapshot-single: deps test ## Build a snapshot of the application for the current platform
	@goreleaser build --single-target --snapshot --clean

help: ## Show this help message
	@echo steamcache2 Makefile
	@echo Available targets:
	@echo run        Run the application
	@echo run-debug  Run the application with debug logging
	@echo test       Run all tests
	@echo deps       Download dependencies
README.md (224 lines changed)
@@ -10,30 +10,120 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandwidth
- Reduces bandwidth usage
- Easy to set up and configure, aside from the DNS setup needed to trick Steam into using it
- Supports multiple clients
- **NEW:** YAML configuration system with automatic config generation
- **NEW:** Simple Makefile for development workflow
- Cross-platform builds (Linux, macOS, Windows)

## Usage
## Quick Start

1. Start the cache server:
```sh
./SteamCache2 --memory 1G --disk 10G --disk-path tmp/disk
```
### First Time Setup

### Advanced Configuration
1. **Clone and build:**
```bash
git clone <repository-url>
cd steamcache2
make  # This will run tests and build the application
```

2. **Run the application** (it will create a default config):
```bash
./steamcache2
# or on Windows:
steamcache2.exe
```

The application will automatically create a `config.yaml` file with default settings and exit, allowing you to customize it.

3. **Edit the configuration** (`config.yaml`):
```yaml
listen_address: :80
cache:
  memory:
    size: 1GB
    gc_algorithm: lru
  disk:
    size: 10GB
    path: ./disk
    gc_algorithm: hybrid
upstream: "https://steam.cdn.com" # Set your upstream server
```

4. **Run the application again:**
```bash
make run  # or ./steamcache2
```

### Development Workflow

```bash
# Run all tests and start the application (default target)
make

# Run only tests
make test

# Run with debug logging
make run-debug

# Download dependencies
make deps

# Show available commands
make help
```

### Command Line Flags

While most configuration is done via the YAML file, some runtime options are still available as command-line flags:

```bash
# Use a custom config file
./steamcache2 --config /path/to/my-config.yaml

# Set logging level
./steamcache2 --log-level debug --log-format json

# Set number of worker threads
./steamcache2 --threads 8

# Show help
./steamcache2 --help
```

### Configuration

SteamCache2 uses a YAML configuration file (`config.yaml`) for all settings. Here's a complete configuration example:

```yaml
# Server configuration
listen_address: :80

# Cache configuration
cache:
  # Memory cache settings
  memory:
    # Size of memory cache (e.g., "512MB", "1GB", "0" to disable)
    size: 1GB
    # Garbage collection algorithm
    gc_algorithm: lru

  # Disk cache settings
  disk:
    # Size of disk cache (e.g., "10GB", "50GB", "0" to disable)
    size: 10GB
    # Path to disk cache directory
    path: ./disk
    # Garbage collection algorithm
    gc_algorithm: hybrid

# Upstream server configuration
# The upstream server to proxy requests to
upstream: "https://steam.cdn.com"
```

#### Garbage Collection Algorithms

SteamCache2 supports multiple garbage collection algorithms for both memory and disk caches:

```sh
# Use LFU for memory cache (good for long-running servers)
./SteamCache2 --memory 4G --memory-gc lfu --disk 100G --disk-gc lru

# Use FIFO for predictable eviction (good for testing)
./SteamCache2 --memory 2G --memory-gc fifo --disk 50G --disk-gc fifo

# Use size-based eviction for disk cache
./SteamCache2 --memory 1G --disk 200G --disk-gc largest
```
SteamCache2 supports different garbage collection algorithms for memory and disk caches, allowing you to optimize performance for each storage tier:

**Available GC Algorithms:**
@@ -44,13 +134,30 @@ SteamCache2 supports multiple garbage collection algorithms for both memory and
- **`smallest`**: Size-based - evicts smallest files first (maximizes cache hit rate)
- **`hybrid`**: Combines access time and file size for optimal eviction

**Recommended Algorithms by Cache Type:**

**For Memory Cache (Fast, Limited Size):**
- **`lru`** - Best overall performance, good balance of speed and hit rate
- **`lfu`** - Excellent for gaming cafes where popular games stay cached
- **`hybrid`** - Optimal for mixed workloads with varying file sizes

**For Disk Cache (Slow, Large Size):**
- **`hybrid`** - Recommended for optimal performance, balances speed and storage efficiency
- **`largest`** - Good for maximizing number of cached files
- **`lru`** - Reliable default with good performance

**Use Cases:**
- **LAN Events**: Use `lfu` for memory caches to keep popular games
- **Gaming Cafes**: Use `hybrid` for balanced performance
- **Gaming Cafes**: Use `lfu` for memory, `hybrid` for disk
- **LAN Events**: Use `lfu` for memory, `hybrid` for disk
- **Home Use**: Use `lru` for memory, `hybrid` for disk
- **Testing**: Use `fifo` for predictable behavior
- **Large Files**: Use `largest` to prioritize keeping many small files
2. Configure your DNS:
- If your on Windows and don't want a whole network implementation (THIS)[#windows-hosts-file-override]
- **Large File Storage**: Use `largest` for disk to maximize file count

### DNS Configuration

Configure your DNS to direct Steam traffic to your SteamCache2 server:

- If you're on Windows and don't want a whole network implementation, see the [Windows Hosts File Override](#windows-hosts-file-override) section below.

### Windows Hosts File Override

@@ -85,6 +192,77 @@ SteamCache2 supports multiple garbage collection algorithms for both memory and

This will direct any requests to `lancache.steamcontent.com` to your SteamCache2 server.

## Building from Source

### Prerequisites

- Go 1.19 or later
- Make (optional, but recommended)

### Build Commands

```bash
# Clone the repository
git clone <repository-url>
cd SteamCache2

# Download dependencies
make deps

# Run tests
make test

# Build for current platform
go build -o steamcache2 .

# Build for specific platforms
GOOS=linux GOARCH=amd64 go build -o steamcache2-linux-amd64 .
GOOS=windows GOARCH=amd64 go build -o steamcache2-windows-amd64.exe .
```

### Development

```bash
# Run in development mode with debug logging
make run-debug

# Run all tests and start the application
make
```

## Troubleshooting

### Common Issues

1. **"Config file not found" on first run**
   - This is expected! SteamCache2 will automatically create a default `config.yaml` file
   - Edit the generated config file with your desired settings
   - Run the application again

2. **Permission denied when creating config**
   - Make sure you have write permissions in the current directory
   - Try running with elevated privileges if necessary

3. **Port already in use**
   - Change the `listen_address` in `config.yaml` to a different port (e.g., `:8080`)
   - Or stop the service using the current port

4. **High memory usage**
   - Reduce the memory cache size in `config.yaml`
   - Consider using disk-only caching by setting `memory.size: "0"`

5. **Slow disk performance**
   - Use SSD storage for the disk cache
   - Consider using a different GC algorithm like `hybrid`
   - Adjust the disk cache size to match available storage

### Getting Help

- Check the logs for detailed error messages
- Run with `--log-level debug` for more verbose output
- Ensure your upstream server is accessible
- Verify DNS configuration is working correctly

## License

See the [LICENSE](LICENSE) file for details.
cmd/root.go (121 lines changed)
@@ -2,35 +2,32 @@
package cmd

import (
	"fmt"
	"os"
	"runtime"
	"s1d3sw1ped/SteamCache2/steamcache"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/version"
	"s1d3sw1ped/steamcache2/config"
	"s1d3sw1ped/steamcache2/steamcache"
	"s1d3sw1ped/steamcache2/steamcache/logger"
	"s1d3sw1ped/steamcache2/version"
	"strings"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
)

var (
	threads int

	memory   string
	disk     string
	diskpath string
	upstream string

	memoryGC   string
	diskGC     string
	configPath string

	logLevel  string
	logFormat string

	maxConcurrentRequests int64
	maxRequestsPerClient  int64
)

var rootCmd = &cobra.Command{
	Use:   "SteamCache2",
	Short: "SteamCache2 is a caching solution for Steam game updates and installations",
	Long: `SteamCache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
	Use:   "steamcache2",
	Short: "steamcache2 is a caching solution for Steam game updates and installations",
	Long: `steamcache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
It reduces bandwidth usage and speeds up the download process by caching game files locally.
This tool is particularly useful for environments with multiple Steam users, such as gaming cafes or households with multiple gamers.
By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
@@ -56,33 +53,79 @@ var rootCmd = &cobra.Command{
		logger.Logger = zerolog.New(writer).With().Timestamp().Logger()

		logger.Logger.Info().
			Msg("SteamCache2 " + version.Version + " " + version.Date + " starting...")
			Msg("steamcache2 " + version.Version + " " + version.Date + " starting...")

		address := ":80"
		// Load configuration
		cfg, err := config.LoadConfig(configPath)
		if err != nil {
			// Check if the error is because the config file doesn't exist
			// The error is wrapped, so we check the error message
			if strings.Contains(err.Error(), "no such file") ||
				strings.Contains(err.Error(), "cannot find the file") ||
				strings.Contains(err.Error(), "The system cannot find the file") {
				logger.Logger.Info().
					Str("config_path", configPath).
					Msg("Config file not found, creating default configuration")

		if runtime.GOMAXPROCS(-1) != threads {
			runtime.GOMAXPROCS(threads)
			logger.Logger.Info().
				Int("threads", threads).
				Msg("Maximum number of threads set")
				if err := config.SaveDefaultConfig(configPath); err != nil {
					logger.Logger.Error().
						Err(err).
						Str("config_path", configPath).
						Msg("Failed to create default configuration")
					fmt.Fprintf(os.Stderr, "Error: Failed to create default config at %s: %v\n", configPath, err)
					os.Exit(1)
				}

				logger.Logger.Info().
					Str("config_path", configPath).
					Msg("Default configuration created successfully. Please edit the file and run again.")

				fmt.Printf("Default configuration created at %s\n", configPath)
				fmt.Println("Please edit the configuration file as needed and run the application again.")
				os.Exit(0)
			} else {
				logger.Logger.Error().
					Err(err).
					Str("config_path", configPath).
					Msg("Failed to load configuration")
				fmt.Fprintf(os.Stderr, "Error: Failed to load configuration from %s: %v\n", configPath, err)
				os.Exit(1)
			}
		}

		logger.Logger.Info().
			Str("config_path", configPath).
			Msg("Configuration loaded successfully")

		// Use command-line flags if provided, otherwise use config values
		finalMaxConcurrentRequests := cfg.MaxConcurrentRequests
		if maxConcurrentRequests > 0 {
			finalMaxConcurrentRequests = maxConcurrentRequests
		}

		finalMaxRequestsPerClient := cfg.MaxRequestsPerClient
		if maxRequestsPerClient > 0 {
			finalMaxRequestsPerClient = maxRequestsPerClient
		}

		sc := steamcache.New(
			address,
			memory,
			disk,
			diskpath,
			upstream,
			memoryGC,
			diskGC,
			cfg.ListenAddress,
			cfg.Cache.Memory.Size,
			cfg.Cache.Disk.Size,
			cfg.Cache.Disk.Path,
			cfg.Upstream,
			cfg.Cache.Memory.GCAlgorithm,
			cfg.Cache.Disk.GCAlgorithm,
			finalMaxConcurrentRequests,
			finalMaxRequestsPerClient,
		)

		logger.Logger.Info().
			Msg("SteamCache2 " + version.Version + " started on " + address)
			Msg("steamcache2 " + version.Version + " started on " + cfg.ListenAddress)

		sc.Run()

		logger.Logger.Info().Msg("SteamCache2 stopped")
		logger.Logger.Info().Msg("steamcache2 stopped")
		os.Exit(0)
	},
}
@@ -97,17 +140,11 @@ func Execute() {
}

func init() {
	rootCmd.Flags().IntVarP(&threads, "threads", "t", runtime.GOMAXPROCS(-1), "Number of worker threads to use for processing requests")

	rootCmd.Flags().StringVarP(&memory, "memory", "m", "0", "The size of the memory cache")
	rootCmd.Flags().StringVarP(&disk, "disk", "d", "0", "The size of the disk cache")
	rootCmd.Flags().StringVarP(&diskpath, "disk-path", "p", "", "The path to the disk cache")

	rootCmd.Flags().StringVarP(&upstream, "upstream", "u", "", "The upstream server to proxy requests overrides the host header from the client but forwards the original host header to the upstream server")

	rootCmd.Flags().StringVarP(&memoryGC, "memory-gc", "", "lru", "Memory cache GC algorithm: lru, lfu, fifo, largest, smallest, hybrid")
	rootCmd.Flags().StringVarP(&diskGC, "disk-gc", "", "lru", "Disk cache GC algorithm: lru, lfu, fifo, largest, smallest, hybrid")
	rootCmd.Flags().StringVarP(&configPath, "config", "c", "config.yaml", "Path to configuration file")

	rootCmd.Flags().StringVarP(&logLevel, "log-level", "l", "info", "Logging level: debug, info, error")
	rootCmd.Flags().StringVarP(&logFormat, "log-format", "f", "console", "Logging format: json, console")

	rootCmd.Flags().Int64Var(&maxConcurrentRequests, "max-concurrent-requests", 0, "Maximum concurrent requests (0 = use config file value)")
	rootCmd.Flags().Int64Var(&maxRequestsPerClient, "max-requests-per-client", 0, "Maximum concurrent requests per client IP (0 = use config file value)")
}
@@ -4,7 +4,7 @@ package cmd
import (
	"fmt"
	"os"
	"s1d3sw1ped/SteamCache2/version"
	"s1d3sw1ped/steamcache2/version"

	"github.com/spf13/cobra"
)
@@ -12,10 +12,10 @@ import (
// versionCmd represents the version command
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "prints the version of SteamCache2",
	Long:  `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
	Short: "prints the version of steamcache2",
	Long:  `Prints the version of steamcache2. This command is useful for checking the version of the application.`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Fprintln(os.Stderr, "SteamCache2", version.Version, version.Date)
		fmt.Fprintln(os.Stderr, "steamcache2", version.Version, version.Date)
	},
}
128
config/config.go
Normal file
@@ -0,0 +1,128 @@
package config

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

type Config struct {
	// Server configuration
	ListenAddress string `yaml:"listen_address" default:":80"`

	// Concurrency limits
	MaxConcurrentRequests int64 `yaml:"max_concurrent_requests" default:"200"`
	MaxRequestsPerClient  int64 `yaml:"max_requests_per_client" default:"5"`

	// Cache configuration
	Cache CacheConfig `yaml:"cache"`

	// Upstream configuration
	Upstream string `yaml:"upstream"`
}

type CacheConfig struct {
	// Memory cache settings
	Memory MemoryConfig `yaml:"memory"`

	// Disk cache settings
	Disk DiskConfig `yaml:"disk"`
}

type MemoryConfig struct {
	// Size of memory cache (e.g., "512MB", "1GB")
	Size string `yaml:"size" default:"0"`

	// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
	GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
}

type DiskConfig struct {
	// Size of disk cache (e.g., "10GB", "50GB")
	Size string `yaml:"size" default:"0"`

	// Path to disk cache directory
	Path string `yaml:"path" default:""`

	// Garbage collection algorithm: lru, lfu, fifo, largest, smallest, hybrid
	GCAlgorithm string `yaml:"gc_algorithm" default:"lru"`
}

// LoadConfig loads configuration from a YAML file
func LoadConfig(configPath string) (*Config, error) {
	if configPath == "" {
		configPath = "config.yaml"
	}

	data, err := os.ReadFile(configPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read config file %s: %w", configPath, err)
	}

	var config Config
	if err := yaml.Unmarshal(data, &config); err != nil {
		return nil, fmt.Errorf("failed to parse config file %s: %w", configPath, err)
	}

	// Set defaults for empty values
	if config.ListenAddress == "" {
		config.ListenAddress = ":80"
	}
	if config.MaxConcurrentRequests == 0 {
		config.MaxConcurrentRequests = 50
	}
	if config.MaxRequestsPerClient == 0 {
		config.MaxRequestsPerClient = 3
	}
	if config.Cache.Memory.Size == "" {
		config.Cache.Memory.Size = "0"
	}
	if config.Cache.Memory.GCAlgorithm == "" {
		config.Cache.Memory.GCAlgorithm = "lru"
	}
	if config.Cache.Disk.Size == "" {
		config.Cache.Disk.Size = "0"
	}
	if config.Cache.Disk.GCAlgorithm == "" {
		config.Cache.Disk.GCAlgorithm = "lru"
	}

	return &config, nil
}

// SaveDefaultConfig creates a default configuration file
func SaveDefaultConfig(configPath string) error {
	if configPath == "" {
		configPath = "config.yaml"
	}

	defaultConfig := Config{
		ListenAddress:         ":80",
		MaxConcurrentRequests: 50, // Reduced for home user (less concurrent load)
		MaxRequestsPerClient:  3,  // Reduced for home user (more conservative per client)
		Cache: CacheConfig{
			Memory: MemoryConfig{
				Size:        "1GB", // Recommended for systems that can spare 1GB RAM for caching
				GCAlgorithm: "lru",
			},
			Disk: DiskConfig{
				Size:        "1TB", // Large HDD cache for home user
				Path:        "./disk",
				GCAlgorithm: "lru", // Better for gaming patterns (keeps recently played games)
			},
		},
		Upstream: "",
	}

	data, err := yaml.Marshal(&defaultConfig)
	if err != nil {
		return fmt.Errorf("failed to marshal default config: %w", err)
	}

	if err := os.WriteFile(configPath, data, 0644); err != nil {
		return fmt.Errorf("failed to write default config file: %w", err)
	}

	return nil
}
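
For reference, the defaults above marshal to a config.yaml whose keys follow the yaml tags in the structs. A minimal self-contained sketch of parsing such a file (the inline YAML and the reduced Config type here are illustrative only; the real package is the one above):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Reduced copy of the Config struct above, just for this sketch.
type Config struct {
	ListenAddress         string `yaml:"listen_address"`
	MaxConcurrentRequests int64  `yaml:"max_concurrent_requests"`
	MaxRequestsPerClient  int64  `yaml:"max_requests_per_client"`
}

func main() {
	// Illustrative config.yaml contents matching the defaults above.
	raw := []byte(`
listen_address: ":80"
max_concurrent_requests: 50
max_requests_per_client: 3
cache:
  memory:
    size: "1GB"
    gc_algorithm: "lru"
  disk:
    size: "1TB"
    path: "./disk"
    gc_algorithm: "lru"
upstream: ""
`)

	var cfg Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ListenAddress, cfg.MaxConcurrentRequests, cfg.MaxRequestsPerClient)
}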
15
go.mod
@@ -1,25 +1,20 @@
module s1d3sw1ped/SteamCache2
module s1d3sw1ped/steamcache2

go 1.23.0

require (
	github.com/docker/go-units v0.5.0
	github.com/prometheus/client_golang v1.22.0
	github.com/edsrzf/mmap-go v1.1.0
	github.com/rs/zerolog v1.33.0
	github.com/spf13/cobra v1.8.1
	golang.org/x/sync v0.16.0
	gopkg.in/yaml.v3 v3.0.1
)

require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.62.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	golang.org/x/sys v0.30.0 // indirect
	google.golang.org/protobuf v1.36.5 // indirect
	golang.org/x/sys v0.12.0 // indirect
)
36
go.sum
@@ -1,40 +1,18 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -43,15 +21,13 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
4
main.go
@@ -2,8 +2,8 @@
package main

import (
	"s1d3sw1ped/SteamCache2/cmd"
	_ "s1d3sw1ped/SteamCache2/version" // Import the version package for global version variable
	"s1d3sw1ped/steamcache2/cmd"
	_ "s1d3sw1ped/steamcache2/version" // Import the version package for global version variable
)

func main() {
120
steamcache/errors/errors.go
Normal file
@@ -0,0 +1,120 @@
// steamcache/errors/errors.go
package errors

import (
	"errors"
	"fmt"
	"net/http"
)

// Common SteamCache errors
var (
	ErrInvalidURL           = errors.New("steamcache: invalid URL")
	ErrUnsupportedService   = errors.New("steamcache: unsupported service")
	ErrUpstreamUnavailable  = errors.New("steamcache: upstream server unavailable")
	ErrCacheCorrupted       = errors.New("steamcache: cache file corrupted")
	ErrInvalidContentLength = errors.New("steamcache: invalid content length")
	ErrRequestTimeout       = errors.New("steamcache: request timeout")
	ErrRateLimitExceeded    = errors.New("steamcache: rate limit exceeded")
	ErrInvalidUserAgent     = errors.New("steamcache: invalid user agent")
)

// SteamCacheError represents a SteamCache-specific error with context
type SteamCacheError struct {
	Op         string      // Operation that failed
	URL        string      // URL that caused the error
	ClientIP   string      // Client IP address
	StatusCode int         // HTTP status code if applicable
	Err        error       // Underlying error
	Context    interface{} // Additional context
}

// Error implements the error interface
func (e *SteamCacheError) Error() string {
	if e.URL != "" && e.ClientIP != "" {
		return fmt.Sprintf("steamcache: %s failed for URL %q from client %s: %v", e.Op, e.URL, e.ClientIP, e.Err)
	}
	if e.URL != "" {
		return fmt.Sprintf("steamcache: %s failed for URL %q: %v", e.Op, e.URL, e.Err)
	}
	return fmt.Sprintf("steamcache: %s failed: %v", e.Op, e.Err)
}

// Unwrap returns the underlying error
func (e *SteamCacheError) Unwrap() error {
	return e.Err
}

// NewSteamCacheError creates a new SteamCache error with context
func NewSteamCacheError(op, url, clientIP string, err error) *SteamCacheError {
	return &SteamCacheError{
		Op:       op,
		URL:      url,
		ClientIP: clientIP,
		Err:      err,
	}
}

// NewSteamCacheErrorWithStatus creates a new SteamCache error with HTTP status
func NewSteamCacheErrorWithStatus(op, url, clientIP string, statusCode int, err error) *SteamCacheError {
	return &SteamCacheError{
		Op:         op,
		URL:        url,
		ClientIP:   clientIP,
		StatusCode: statusCode,
		Err:        err,
	}
}

// NewSteamCacheErrorWithContext creates a new SteamCache error with additional context
func NewSteamCacheErrorWithContext(op, url, clientIP string, context interface{}, err error) *SteamCacheError {
	return &SteamCacheError{
		Op:       op,
		URL:      url,
		ClientIP: clientIP,
		Context:  context,
		Err:      err,
	}
}

// IsRetryableError determines if an error is retryable
func IsRetryableError(err error) bool {
	if err == nil {
		return false
	}

	// Check for specific retryable errors
	if errors.Is(err, ErrUpstreamUnavailable) ||
		errors.Is(err, ErrRequestTimeout) {
		return true
	}

	// Check for HTTP status codes that are retryable
	if steamErr, ok := err.(*SteamCacheError); ok {
		switch steamErr.StatusCode {
		case http.StatusServiceUnavailable,
			http.StatusGatewayTimeout,
			http.StatusTooManyRequests,
			http.StatusInternalServerError:
			return true
		}
	}

	return false
}

// IsClientError determines if an error is a client error (4xx)
func IsClientError(err error) bool {
	if steamErr, ok := err.(*SteamCacheError); ok {
		return steamErr.StatusCode >= 400 && steamErr.StatusCode < 500
	}
	return false
}

// IsServerError determines if an error is a server error (5xx)
func IsServerError(err error) bool {
	if steamErr, ok := err.(*SteamCacheError); ok {
		return steamErr.StatusCode >= 500
	}
	return false
}
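
A short sketch of how these helpers compose; the op name, URL, and client IP below are illustrative:

package main

import (
	"errors"
	"fmt"
	"net/http"

	scerrors "s1d3sw1ped/steamcache2/steamcache/errors"
)

func main() {
	// Wrap a sentinel with request context, as a handler might on a failed upstream fetch.
	err := scerrors.NewSteamCacheErrorWithStatus(
		"fetch", "/depot/123/chunk/abc", "192.168.1.10",
		http.StatusServiceUnavailable, scerrors.ErrUpstreamUnavailable,
	)

	fmt.Println(err)                                             // full message with URL and client IP
	fmt.Println(errors.Is(err, scerrors.ErrUpstreamUnavailable)) // true, via Unwrap
	fmt.Println(scerrors.IsRetryableError(err))                  // true: 503 and the sentinel are both retryable
	fmt.Println(scerrors.IsServerError(err))                     // true: status >= 500
}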
279
steamcache/integration_test.go
Normal file
@@ -0,0 +1,279 @@
package steamcache

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
	"time"
)

const SteamHostname = "cache2-den-iwst.steamcontent.com"

func TestSteamIntegration(t *testing.T) {
	// Skip this test if we don't have internet access or want to avoid hitting Steam servers
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Test URLs from real Steam usage - these should be cached when requested by Steam clients
	testURLs := []string{
		"/depot/516751/patch/288061881745926019/4378193572994177373",
		"/depot/516751/chunk/42e7c13eb4b4e426ec5cf6d1010abfd528e5065a",
		"/depot/516751/chunk/f949f71e102d77ed6e364e2054d06429d54bebb1",
		"/depot/516751/chunk/6790f5105833556d37797657be72c1c8dd2e7074",
	}

	for _, testURL := range testURLs {
		t.Run(fmt.Sprintf("URL_%s", testURL), func(t *testing.T) {
			testSteamURL(t, testURL)
		})
	}
}

func testSteamURL(t *testing.T, urlPath string) {
	// Create a unique temporary directory for this test to avoid cache persistence issues
	tempDir, err := os.MkdirTemp("", "steamcache_test_*")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir) // Clean up after test

	// Create SteamCache instance with unique temp directory
	sc := New(":0", "100MB", "1GB", tempDir, "", "LRU", "LRU", 10, 5)

	// Use real Steam server
	steamURL := "https://" + SteamHostname + urlPath

	// Test direct download from Steam server
	directResp, directBody := downloadDirectly(t, steamURL)

	// Test download through SteamCache
	cacheResp, cacheBody := downloadThroughCache(t, sc, urlPath)

	// Compare responses
	compareResponses(t, directResp, directBody, cacheResp, cacheBody, urlPath)
}

func downloadDirectly(t *testing.T, url string) (*http.Response, []byte) {
	client := &http.Client{Timeout: 30 * time.Second}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		t.Fatalf("Failed to create request: %v", err)
	}

	// Add Steam user agent
	req.Header.Set("User-Agent", "Valve/Steam HTTP Client 1.0")

	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Failed to download directly from Steam: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("Failed to read direct response body: %v", err)
	}

	return resp, body
}

func downloadThroughCache(t *testing.T, sc *SteamCache, urlPath string) (*http.Response, []byte) {
	// Create a test server for SteamCache
	cacheServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// For real Steam URLs, we need to set the upstream to the Steam hostname
		// and let SteamCache handle the full URL construction
		sc.upstream = "https://" + SteamHostname
		sc.ServeHTTP(w, r)
	}))
	defer cacheServer.Close()

	// First request - should be a MISS and cache the file
	client := &http.Client{Timeout: 30 * time.Second}

	req1, err := http.NewRequest("GET", cacheServer.URL+urlPath, nil)
	if err != nil {
		t.Fatalf("Failed to create first request: %v", err)
	}
	req1.Header.Set("User-Agent", "Valve/Steam HTTP Client 1.0")

	resp1, err := client.Do(req1)
	if err != nil {
		t.Fatalf("Failed to download through cache (first request): %v", err)
	}
	defer resp1.Body.Close()

	body1, err := io.ReadAll(resp1.Body)
	if err != nil {
		t.Fatalf("Failed to read cache response body (first request): %v", err)
	}

	// Verify first request was a MISS
	if resp1.Header.Get("X-LanCache-Status") != "MISS" {
		t.Errorf("Expected first request to be MISS, got %s", resp1.Header.Get("X-LanCache-Status"))
	}

	// Second request - should be a HIT from cache
	req2, err := http.NewRequest("GET", cacheServer.URL+urlPath, nil)
	if err != nil {
		t.Fatalf("Failed to create second request: %v", err)
	}
	req2.Header.Set("User-Agent", "Valve/Steam HTTP Client 1.0")

	resp2, err := client.Do(req2)
	if err != nil {
		t.Fatalf("Failed to download through cache (second request): %v", err)
	}
	defer resp2.Body.Close()

	body2, err := io.ReadAll(resp2.Body)
	if err != nil {
		t.Fatalf("Failed to read cache response body (second request): %v", err)
	}

	// Verify second request was a HIT (unless hash verification failed)
	status2 := resp2.Header.Get("X-LanCache-Status")
	if status2 != "HIT" && status2 != "MISS" {
		t.Errorf("Expected second request to be HIT or MISS, got %s", status2)
	}

	// If it's a MISS, it means hash verification failed and content wasn't cached
	// This is correct behavior - we shouldn't cache content that doesn't match the expected hash
	if status2 == "MISS" {
		t.Logf("Second request was MISS (hash verification failed) - this is correct behavior")
	}

	// Verify both cache responses are identical
	if !bytes.Equal(body1, body2) {
		t.Error("First and second cache responses should be identical")
	}

	// Return the second response (from cache)
	return resp2, body2
}

func compareResponses(t *testing.T, directResp *http.Response, directBody []byte, cacheResp *http.Response, cacheBody []byte, urlPath string) {
	// Compare status codes
	if directResp.StatusCode != cacheResp.StatusCode {
		t.Errorf("Status code mismatch: direct=%d, cache=%d", directResp.StatusCode, cacheResp.StatusCode)
	}

	// Compare response bodies (this is the most important test)
	if !bytes.Equal(directBody, cacheBody) {
		t.Errorf("Response body mismatch for URL %s", urlPath)
		t.Errorf("Direct body length: %d, Cache body length: %d", len(directBody), len(cacheBody))

		// Find first difference
		minLen := len(directBody)
		if len(cacheBody) < minLen {
			minLen = len(cacheBody)
		}

		for i := 0; i < minLen; i++ {
			if directBody[i] != cacheBody[i] {
				t.Errorf("First difference at byte %d: direct=0x%02x, cache=0x%02x", i, directBody[i], cacheBody[i])
				break
			}
		}
	}

	// Compare important headers (excluding cache-specific ones)
	importantHeaders := []string{
		"Content-Type",
		"Content-Length",
		"X-Sha1",
		"Cache-Control",
	}

	for _, header := range importantHeaders {
		directValue := directResp.Header.Get(header)
		cacheValue := cacheResp.Header.Get(header)

		if directValue != cacheValue {
			t.Errorf("Header %s mismatch: direct=%s, cache=%s", header, directValue, cacheValue)
		}
	}

	// Verify cache-specific headers are present
	if cacheResp.Header.Get("X-LanCache-Status") == "" {
		t.Error("Cache response should have X-LanCache-Status header")
	}

	if cacheResp.Header.Get("X-LanCache-Processed-By") != "SteamCache2" {
		t.Error("Cache response should have X-LanCache-Processed-By header set to SteamCache2")
	}

	t.Logf("✅ URL %s: Direct and cache responses are identical", urlPath)
}

// TestCacheFileFormat tests the cache file format directly
func TestCacheFileFormat(t *testing.T) {
	// Create test data
	bodyData := []byte("test steam content")
	contentHash := calculateSHA256(bodyData)

	// Create mock response
	resp := &http.Response{
		StatusCode: 200,
		Status:     "200 OK",
		Header:     make(http.Header),
		Body:       http.NoBody,
	}
	resp.Header.Set("Content-Type", "application/x-steam-chunk")
	resp.Header.Set("Content-Length", "18")
	resp.Header.Set("X-Sha1", contentHash)

	// Create SteamCache instance
	sc := &SteamCache{}

	// Reconstruct raw response
	rawResponse := sc.reconstructRawResponse(resp, bodyData)

	// Serialize to cache format
	cacheData, err := serializeRawResponse(rawResponse)
	if err != nil {
		t.Fatalf("Failed to serialize cache file: %v", err)
	}

	// Deserialize from cache format
	cacheFile, err := deserializeCacheFile(cacheData)
	if err != nil {
		t.Fatalf("Failed to deserialize cache file: %v", err)
	}

	// Verify cache file structure
	if cacheFile.ContentHash != contentHash {
		t.Errorf("ContentHash mismatch: expected %s, got %s", contentHash, cacheFile.ContentHash)
	}

	if cacheFile.ResponseSize != int64(len(rawResponse)) {
		t.Errorf("ResponseSize mismatch: expected %d, got %d", len(rawResponse), cacheFile.ResponseSize)
	}

	// Verify raw response is preserved
	if !bytes.Equal(cacheFile.Response, rawResponse) {
		t.Error("Raw response not preserved in cache file")
	}

	// Test streaming the cached response
	recorder := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "/test/format", nil)

	sc.streamCachedResponse(recorder, req, cacheFile, "test-key", "127.0.0.1", time.Now())

	// Verify streamed response
	if recorder.Code != 200 {
		t.Errorf("Expected status code 200, got %d", recorder.Code)
	}

	if !bytes.Equal(recorder.Body.Bytes(), bodyData) {
		t.Error("Streamed response body does not match original")
	}

	t.Log("✅ Cache file format test passed")
}
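
The test above pins down the observable contract of the cache-file helpers: serialization embeds a content hash and the total size, and deserialization recovers the raw response bytes intact or fails. A minimal sketch of helpers satisfying that contract; the header layout below is hypothetical, not the project's actual on-disk format:

package cachefile

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// CacheFile mirrors the fields the test inspects.
type CacheFile struct {
	ContentHash  string
	ResponseSize int64
	Response     []byte
}

// Serialize prepends a single "hash size\n" header line to the raw
// response bytes (illustrative layout only).
func Serialize(raw []byte) ([]byte, error) {
	sum := sha256.Sum256(raw)
	header := fmt.Sprintf("%s %d\n", hex.EncodeToString(sum[:]), len(raw))
	return append([]byte(header), raw...), nil
}

// Deserialize parses the header, checks size and hash, and returns the payload.
func Deserialize(data []byte) (*CacheFile, error) {
	nl := bytes.IndexByte(data, '\n')
	if nl < 0 {
		return nil, fmt.Errorf("cachefile: missing header")
	}
	var hash string
	var size int64
	if _, err := fmt.Sscanf(string(data[:nl]), "%s %d", &hash, &size); err != nil {
		return nil, fmt.Errorf("cachefile: bad header: %w", err)
	}
	raw := data[nl+1:]
	if int64(len(raw)) != size {
		return nil, fmt.Errorf("cachefile: truncated payload")
	}
	sum := sha256.Sum256(raw)
	if hex.EncodeToString(sum[:]) != hash {
		return nil, fmt.Errorf("cachefile: hash mismatch") // corruption detected on read
	}
	return &CacheFile{ContentHash: hash, ResponseSize: size, Response: raw}, nil
}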
213
steamcache/metrics/metrics.go
Normal file
@@ -0,0 +1,213 @@
// steamcache/metrics/metrics.go
package metrics

import (
	"sync"
	"sync/atomic"
	"time"
)

// Metrics tracks various performance and operational metrics
type Metrics struct {
	// Request metrics
	TotalRequests  int64
	CacheHits      int64
	CacheMisses    int64
	CacheCoalesced int64
	Errors         int64
	RateLimited    int64

	// Performance metrics
	TotalResponseTime int64 // in nanoseconds
	TotalBytesServed  int64
	TotalBytesCached  int64

	// Cache metrics
	MemoryCacheSize int64
	DiskCacheSize   int64
	MemoryCacheHits int64
	DiskCacheHits   int64

	// Service metrics
	ServiceRequests map[string]int64
	serviceMutex    sync.RWMutex

	// Time tracking
	StartTime     time.Time
	LastResetTime time.Time
}

// NewMetrics creates a new metrics instance
func NewMetrics() *Metrics {
	now := time.Now()
	return &Metrics{
		ServiceRequests: make(map[string]int64),
		StartTime:       now,
		LastResetTime:   now,
	}
}

// IncrementTotalRequests increments the total request counter
func (m *Metrics) IncrementTotalRequests() {
	atomic.AddInt64(&m.TotalRequests, 1)
}

// IncrementCacheHits increments the cache hit counter
func (m *Metrics) IncrementCacheHits() {
	atomic.AddInt64(&m.CacheHits, 1)
}

// IncrementCacheMisses increments the cache miss counter
func (m *Metrics) IncrementCacheMisses() {
	atomic.AddInt64(&m.CacheMisses, 1)
}

// IncrementCacheCoalesced increments the coalesced request counter
func (m *Metrics) IncrementCacheCoalesced() {
	atomic.AddInt64(&m.CacheCoalesced, 1)
}

// IncrementErrors increments the error counter
func (m *Metrics) IncrementErrors() {
	atomic.AddInt64(&m.Errors, 1)
}

// IncrementRateLimited increments the rate limited counter
func (m *Metrics) IncrementRateLimited() {
	atomic.AddInt64(&m.RateLimited, 1)
}

// AddResponseTime adds response time to the total
func (m *Metrics) AddResponseTime(duration time.Duration) {
	atomic.AddInt64(&m.TotalResponseTime, int64(duration))
}

// AddBytesServed adds bytes served to the total
func (m *Metrics) AddBytesServed(bytes int64) {
	atomic.AddInt64(&m.TotalBytesServed, bytes)
}

// AddBytesCached adds bytes cached to the total
func (m *Metrics) AddBytesCached(bytes int64) {
	atomic.AddInt64(&m.TotalBytesCached, bytes)
}

// SetMemoryCacheSize sets the current memory cache size
func (m *Metrics) SetMemoryCacheSize(size int64) {
	atomic.StoreInt64(&m.MemoryCacheSize, size)
}

// SetDiskCacheSize sets the current disk cache size
func (m *Metrics) SetDiskCacheSize(size int64) {
	atomic.StoreInt64(&m.DiskCacheSize, size)
}

// IncrementMemoryCacheHits increments memory cache hits
func (m *Metrics) IncrementMemoryCacheHits() {
	atomic.AddInt64(&m.MemoryCacheHits, 1)
}

// IncrementDiskCacheHits increments disk cache hits
func (m *Metrics) IncrementDiskCacheHits() {
	atomic.AddInt64(&m.DiskCacheHits, 1)
}

// IncrementServiceRequests increments requests for a specific service
func (m *Metrics) IncrementServiceRequests(service string) {
	m.serviceMutex.Lock()
	defer m.serviceMutex.Unlock()
	m.ServiceRequests[service]++
}

// GetServiceRequests returns the number of requests for a service
func (m *Metrics) GetServiceRequests(service string) int64 {
	m.serviceMutex.RLock()
	defer m.serviceMutex.RUnlock()
	return m.ServiceRequests[service]
}

// GetStats returns a snapshot of current metrics
func (m *Metrics) GetStats() *Stats {
	totalRequests := atomic.LoadInt64(&m.TotalRequests)
	cacheHits := atomic.LoadInt64(&m.CacheHits)
	cacheMisses := atomic.LoadInt64(&m.CacheMisses)

	var hitRate float64
	if totalRequests > 0 {
		hitRate = float64(cacheHits) / float64(totalRequests)
	}

	var avgResponseTime time.Duration
	if totalRequests > 0 {
		avgResponseTime = time.Duration(atomic.LoadInt64(&m.TotalResponseTime) / totalRequests)
	}

	m.serviceMutex.RLock()
	serviceRequests := make(map[string]int64)
	for k, v := range m.ServiceRequests {
		serviceRequests[k] = v
	}
	m.serviceMutex.RUnlock()

	return &Stats{
		TotalRequests:    totalRequests,
		CacheHits:        cacheHits,
		CacheMisses:      cacheMisses,
		CacheCoalesced:   atomic.LoadInt64(&m.CacheCoalesced),
		Errors:           atomic.LoadInt64(&m.Errors),
		RateLimited:      atomic.LoadInt64(&m.RateLimited),
		HitRate:          hitRate,
		AvgResponseTime:  avgResponseTime,
		TotalBytesServed: atomic.LoadInt64(&m.TotalBytesServed),
		TotalBytesCached: atomic.LoadInt64(&m.TotalBytesCached),
		MemoryCacheSize:  atomic.LoadInt64(&m.MemoryCacheSize),
		DiskCacheSize:    atomic.LoadInt64(&m.DiskCacheSize),
		MemoryCacheHits:  atomic.LoadInt64(&m.MemoryCacheHits),
		DiskCacheHits:    atomic.LoadInt64(&m.DiskCacheHits),
		ServiceRequests:  serviceRequests,
		Uptime:           time.Since(m.StartTime),
		LastResetTime:    m.LastResetTime,
	}
}

// Reset resets all metrics to zero
func (m *Metrics) Reset() {
	atomic.StoreInt64(&m.TotalRequests, 0)
	atomic.StoreInt64(&m.CacheHits, 0)
	atomic.StoreInt64(&m.CacheMisses, 0)
	atomic.StoreInt64(&m.CacheCoalesced, 0)
	atomic.StoreInt64(&m.Errors, 0)
	atomic.StoreInt64(&m.RateLimited, 0)
	atomic.StoreInt64(&m.TotalResponseTime, 0)
	atomic.StoreInt64(&m.TotalBytesServed, 0)
	atomic.StoreInt64(&m.TotalBytesCached, 0)
	atomic.StoreInt64(&m.MemoryCacheHits, 0)
	atomic.StoreInt64(&m.DiskCacheHits, 0)

	m.serviceMutex.Lock()
	m.ServiceRequests = make(map[string]int64)
	m.serviceMutex.Unlock()

	m.LastResetTime = time.Now()
}

// Stats represents a snapshot of metrics
type Stats struct {
	TotalRequests    int64
	CacheHits        int64
	CacheMisses      int64
	CacheCoalesced   int64
	Errors           int64
	RateLimited      int64
	HitRate          float64
	AvgResponseTime  time.Duration
	TotalBytesServed int64
	TotalBytesCached int64
	MemoryCacheSize  int64
	DiskCacheSize    int64
	MemoryCacheHits  int64
	DiskCacheHits    int64
	ServiceRequests  map[string]int64
	Uptime           time.Duration
	LastResetTime    time.Time
}
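
A quick sketch of recording and reading these counters; the import path is inferred from the module name and file location, and the values are illustrative:

package main

import (
	"fmt"
	"time"

	"s1d3sw1ped/steamcache2/steamcache/metrics"
)

func main() {
	m := metrics.NewMetrics()

	// Simulate one hit and one miss.
	m.IncrementTotalRequests()
	m.IncrementCacheHits()
	m.IncrementTotalRequests()
	m.IncrementCacheMisses()
	m.AddResponseTime(20 * time.Millisecond)
	m.AddBytesServed(1 << 20)
	m.IncrementServiceRequests("steam")

	// HitRate is hits over total requests: 1/2 = 0.50 here,
	// and AvgResponseTime is total time over total requests: 10ms.
	stats := m.GetStats()
	fmt.Printf("hit rate %.2f, avg %v, served %d bytes\n",
		stats.HitRate, stats.AvgResponseTime, stats.TotalBytesServed)
}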
File diff suppressed because it is too large
@@ -3,20 +3,27 @@ package steamcache

import (
	"io"
	"net/http"
	"os"
	"path/filepath"
	"s1d3sw1ped/steamcache2/steamcache/errors"
	"s1d3sw1ped/steamcache2/vfs/vfserror"
	"strings"
	"testing"
	"time"
)

func TestCaching(t *testing.T) {
	td := t.TempDir()

	os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)
	sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru", 200, 5)

	sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru")
	// Create key2 through the VFS system instead of directly
	w, err := sc.vfs.Create("key2", 6)
	if err != nil {
		t.Errorf("Create key2 failed: %v", err)
	}
	w.Write([]byte("value2"))
	w.Close()

	w, err := sc.vfs.Create("key", 5)
	w, err = sc.vfs.Create("key", 5)
	if err != nil {
		t.Errorf("Create failed: %v", err)
	}
@@ -68,16 +75,32 @@ func TestCaching(t *testing.T) {
		t.Errorf("Get failed: got %s, want %s", d, "value2")
	}

	// With size-based promotion filtering, not all files may be promoted
	// The total size should be at least the disk size (17 bytes) but may be less than 34 bytes
	// if some files are filtered out due to size constraints
	if sc.diskgc.Size() != 17 {
		t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
		t.Errorf("Disk size failed: got %d, want %d", sc.diskgc.Size(), 17)
	}

	if sc.vfs.Size() != 17 {
		t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
	if sc.vfs.Size() < 17 {
		t.Errorf("Total size too small: got %d, want at least 17", sc.vfs.Size())
	}
	if sc.vfs.Size() > 34 {
		t.Errorf("Total size too large: got %d, want at most 34", sc.vfs.Size())
	}

	// First ensure the file is indexed by opening it
	rc, err = sc.vfs.Open("key2")
	if err != nil {
		t.Errorf("Open key2 failed: %v", err)
	}
	rc.Close()

	// Give promotion goroutine time to complete before deleting
	time.Sleep(100 * time.Millisecond)

	sc.memory.Delete("key2")
	os.Remove(filepath.Join(td, "key2"))
	sc.disk.Delete("key2") // Also delete from disk cache

	if _, err := sc.vfs.Open("key2"); err == nil {
		t.Errorf("Open failed: got nil, want error")
@@ -85,7 +108,7 @@ func TestCaching(t *testing.T) {
}

func TestCacheMissAndHit(t *testing.T) {
	sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru")
	sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru", 200, 5)

	key := "testkey"
	value := []byte("testvalue")
@@ -110,136 +133,384 @@ func TestCacheMissAndHit(t *testing.T) {
	}
}

func TestHashExtraction(t *testing.T) {
	// Test the specific key from the user's issue
func TestURLHashing(t *testing.T) {
	// Test the SHA256-based cache key generation for Steam client requests
	// The "steam/" prefix indicates the request came from a Steam client (User-Agent based)

	testCases := []struct {
		filename       string
		expectedHash   string
		shouldHaveHash bool
		input       string
		desc        string
		shouldCache bool
	}{
		{
			filename:       "e89c81a1a926eb4732e146bc806491da8a7d89ca",
			expectedHash:   "e89c81a1a926eb4732e146bc806491da8a7d89ca",
			shouldHaveHash: true, // Now it should work with the new standalone hash pattern
			input:       "/depot/1684171/chunk/abcdef1234567890",
			desc:        "chunk file URL",
			shouldCache: true,
		},
		{
			filename:       "chunk_e89c81a1a926eb4732e146bc806491da8a7d89ca",
			expectedHash:   "",
			shouldHaveHash: false, // No longer supported with simplified patterns
			input:       "/depot/1684171/manifest/944076726177422892/5/abcdef1234567890",
			desc:        "manifest file URL",
			shouldCache: true,
		},
		{
			filename:       "file.e89c81a1a926eb4732e146bc806491da8a7d89ca.chunk",
			expectedHash:   "",
			shouldHaveHash: false, // No longer supported with simplified patterns
			input:       "/appinfo/123456",
			desc:        "app info URL",
			shouldCache: true,
		},
		{
			filename:       "chunk_abc123def456",
			expectedHash:   "",
			shouldHaveHash: false, // Not 40 chars
			input:       "/some/other/path",
			desc:        "any URL from Steam client",
			shouldCache: true, // All URLs from Steam clients (detected via User-Agent) are cached
		},
	}

	for _, tc := range testCases {
		hash, hasHash := extractHashFromFilename(tc.filename)
		if hasHash != tc.shouldHaveHash {
			t.Errorf("filename: %s, expected hasHash: %v, got: %v", tc.filename, tc.shouldHaveHash, hasHash)
		}
		if hasHash && hash != tc.expectedHash {
			t.Errorf("filename: %s, expected hash: %s, got: %s", tc.filename, tc.expectedHash, hash)
		}
		t.Run(tc.desc, func(t *testing.T) {
			result, err := generateServiceCacheKey(tc.input, "steam")

			if tc.shouldCache {
				// Should return a cache key with "steam/" prefix
				if err != nil {
					t.Errorf("generateServiceCacheKey(%s, \"steam\") returned error: %v", tc.input, err)
				}
				if !strings.HasPrefix(result, "steam/") {
					t.Errorf("generateServiceCacheKey(%s, \"steam\") = %s, expected steam/ prefix", tc.input, result)
				}
				// Should be exactly 70 characters (6 for "steam/" + 64 for SHA256 hex)
				if len(result) != 70 {
					t.Errorf("generateServiceCacheKey(%s, \"steam\") length = %d, expected 70", tc.input, len(result))
				}
			} else {
				// Should return error for invalid URLs
				if err == nil {
					t.Errorf("generateServiceCacheKey(%s, \"steam\") should have returned error", tc.input)
				}
			}
		})
	}
}
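
These tests fix the key contract: any path from a recognized Steam client hashes to "steam/" plus 64 hex characters. A minimal sketch consistent with that contract (hypothetical; the real generateServiceCacheKey may validate more, and the imports crypto/sha256, encoding/hex, and fmt are assumed):

// Sketch only: derive a service-scoped cache key as the tests above expect.
func generateServiceCacheKeySketch(urlPath, service string) (string, error) {
	if urlPath == "" {
		return "", fmt.Errorf("empty URL path")
	}
	sum := sha256.Sum256([]byte(urlPath))
	// e.g. "steam/" (6 chars) + 64 hex chars = 70 characters total.
	return service + "/" + hex.EncodeToString(sum[:]), nil
}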

func TestHashCalculation(t *testing.T) {
	// Test data
	testData := []byte("Hello, World!")
func TestServiceDetection(t *testing.T) {
	// Create a service manager for testing
	sm := NewServiceManager()

	// Calculate hash
	hash := calculateFileHash(testData)

	// Expected SHA1 hash of "Hello, World!"
	expectedHash := "0a0a9f2a6772942557ab5355d76af442f8f65e01"

	if hash != expectedHash {
		t.Errorf("Hash calculation failed: expected %s, got %s", expectedHash, hash)
	}

	// Test verification
	if !verifyFileHash(testData, expectedHash) {
		t.Error("Hash verification failed for correct hash")
	}

	if verifyFileHash(testData, "wronghash") {
		t.Error("Hash verification passed for wrong hash")
	}
}

func TestHashVerificationWithRealData(t *testing.T) {
	// Test with some real data to ensure our hash calculation is correct
	testCases := []struct {
		data     string
		expected string
		userAgent     string
		expectedName  string
		expectedFound bool
		desc          string
	}{
		{"", "da39a3ee5e6b4b0d3255bfef95601890afd80709"},              // SHA1 of empty string
		{"test", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"},          // SHA1 of "test"
		{"Hello, World!", "0a0a9f2a6772942557ab5355d76af442f8f65e01"}, // SHA1 of "Hello, World!"
		{
			userAgent:     "Valve/Steam HTTP Client 1.0",
			expectedName:  "steam",
			expectedFound: true,
			desc:          "Valve Steam HTTP Client",
		},
		{
			userAgent:     "Steam",
			expectedName:  "steam",
			expectedFound: true,
			desc:          "Simple Steam user agent",
		},
		{
			userAgent:     "SteamClient/1.0",
			expectedName:  "steam",
			expectedFound: true,
			desc:          "SteamClient with version",
		},
		{
			userAgent:     "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
			expectedName:  "",
			expectedFound: false,
			desc:          "Browser user agent",
		},
		{
			userAgent:     "",
			expectedName:  "",
			expectedFound: false,
			desc:          "Empty user agent",
		},
		{
			userAgent:     "curl/7.68.0",
			expectedName:  "",
			expectedFound: false,
			desc:          "curl user agent",
		},
	}

	for _, tc := range testCases {
		data := []byte(tc.data)
		hash := calculateFileHash(data)
		if hash != tc.expected {
			t.Errorf("Hash calculation failed for '%s': expected %s, got %s", tc.data, tc.expected, hash)
		}
		t.Run(tc.desc, func(t *testing.T) {
			service, found := sm.DetectService(tc.userAgent)

			if !verifyFileHash(data, tc.expected) {
				t.Errorf("Hash verification failed for '%s'", tc.data)
			}
			if found != tc.expectedFound {
				t.Errorf("DetectService(%s) found = %v, expected %v", tc.userAgent, found, tc.expectedFound)
			}

			if found && service.Name != tc.expectedName {
				t.Errorf("DetectService(%s) service name = %s, expected %s", tc.userAgent, service.Name, tc.expectedName)
			}
		})
	}
}

func TestResponseHashCalculation(t *testing.T) {
	// Create a mock HTTP response
	resp := &http.Response{
		StatusCode: 200,
		Status:     "200 OK",
		Header: http.Header{
			"Content-Type":   []string{"application/octet-stream"},
			"Content-Length": []string{"13"},
			"Cache-Control":  []string{"public, max-age=3600"},
func TestServiceManagerExpandability(t *testing.T) {
	// Create a service manager for testing
	sm := NewServiceManager()

	// Test adding a new service (Epic Games)
	epicConfig := &ServiceConfig{
		Name:   "epic",
		Prefix: "epic",
		UserAgents: []string{
			`EpicGamesLauncher`,
			`EpicGames`,
			`Epic.*Launcher`,
		},
	}

	bodyData := []byte("Hello, World!")

	// Calculate response hash
	responseHash := calculateResponseHash(resp, bodyData)

	// The hash should be different from just the body hash
	bodyHash := calculateFileHash(bodyData)

	if responseHash == bodyHash {
		t.Error("Response hash should be different from body hash when headers are present")
	err := sm.AddService(epicConfig)
	if err != nil {
		t.Fatalf("Failed to add Epic service: %v", err)
	}

	// Test that the same response produces the same hash
	responseHash2 := calculateResponseHash(resp, bodyData)
	if responseHash != responseHash2 {
		t.Error("Response hash should be consistent for the same response")
	}

	// Test with different headers
	resp2 := &http.Response{
		StatusCode: 200,
		Status:     "200 OK",
		Header: http.Header{
			"Content-Type":   []string{"text/plain"},
			"Content-Length": []string{"13"},

	// Test Epic Games detection
	epicTestCases := []struct {
		userAgent     string
		expectedName  string
		expectedFound bool
		desc          string
	}{
		{
			userAgent:     "EpicGamesLauncher/1.0",
			expectedName:  "epic",
			expectedFound: true,
			desc:          "Epic Games Launcher",
		},
		{
			userAgent:     "EpicGames/2.0",
			expectedName:  "epic",
			expectedFound: true,
			desc:          "Epic Games client",
		},
		{
			userAgent:     "Epic Launcher 1.5",
			expectedName:  "epic",
			expectedFound: true,
			desc:          "Epic Launcher with regex match",
		},
		{
			userAgent:     "Steam",
			expectedName:  "steam",
			expectedFound: true,
			desc:          "Steam should still work",
		},
		{
			userAgent:     "Mozilla/5.0",
			expectedName:  "",
			expectedFound: false,
			desc:          "Browser should not match any service",
		},
	}

	responseHash3 := calculateResponseHash(resp2, bodyData)
	if responseHash == responseHash3 {
		t.Error("Response hash should be different for different headers")

	for _, tc := range epicTestCases {
		t.Run(tc.desc, func(t *testing.T) {
			service, found := sm.DetectService(tc.userAgent)

			if found != tc.expectedFound {
				t.Errorf("DetectService(%s) found = %v, expected %v", tc.userAgent, found, tc.expectedFound)
			}

			if found && service.Name != tc.expectedName {
				t.Errorf("DetectService(%s) service name = %s, expected %s", tc.userAgent, service.Name, tc.expectedName)
			}
		})
	}

	// Test cache key generation for different services
	steamKey, err := generateServiceCacheKey("/depot/123/chunk/abc", "steam")
	if err != nil {
		t.Errorf("Failed to generate Steam cache key: %v", err)
	}
	epicKey, err := generateServiceCacheKey("/epic/123/chunk/abc", "epic")
	if err != nil {
		t.Errorf("Failed to generate Epic cache key: %v", err)
	}

	if !strings.HasPrefix(steamKey, "steam/") {
		t.Errorf("Steam cache key should start with 'steam/', got: %s", steamKey)
	}
	if !strings.HasPrefix(epicKey, "epic/") {
		t.Errorf("Epic cache key should start with 'epic/', got: %s", epicKey)
	}
}
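
The expandability test implies a small regex-backed registry behind NewServiceManager. A sketch of that shape, with names mirroring the test (the real ServiceManager likely differs in detail; the regexp import is assumed):

// Sketch only: a regex-backed service registry matching the test's usage.
type serviceManagerSketch struct {
	services []*ServiceConfig
	patterns map[string][]*regexp.Regexp
}

func (sm *serviceManagerSketch) AddService(cfg *ServiceConfig) error {
	compiled := make([]*regexp.Regexp, 0, len(cfg.UserAgents))
	for _, p := range cfg.UserAgents {
		re, err := regexp.Compile(p)
		if err != nil {
			return err // reject services with invalid patterns
		}
		compiled = append(compiled, re)
	}
	sm.services = append(sm.services, cfg)
	sm.patterns[cfg.Name] = compiled
	return nil
}

func (sm *serviceManagerSketch) DetectService(userAgent string) (*ServiceConfig, bool) {
	if userAgent == "" {
		return nil, false
	}
	for _, svc := range sm.services {
		for _, re := range sm.patterns[svc.Name] {
			if re.MatchString(userAgent) {
				return svc, true
			}
		}
	}
	return nil, false
}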

// Removed hash calculation tests since we switched to lightweight validation

func TestSteamKeySharding(t *testing.T) {
	sc := New("localhost:8080", "0", "1G", t.TempDir(), "", "lru", "lru", 200, 5)

	// Test with a Steam-style key that should trigger sharding
	steamKey := "steam/0016cfc5019b8baa6026aa1cce93e685d6e06c6e"
	testData := []byte("test steam cache data")

	// Create a file with the steam key
	w, err := sc.vfs.Create(steamKey, int64(len(testData)))
	if err != nil {
		t.Fatalf("Failed to create file with steam key: %v", err)
	}
	w.Write(testData)
	w.Close()

	// Verify we can read it back
	rc, err := sc.vfs.Open(steamKey)
	if err != nil {
		t.Fatalf("Failed to open file with steam key: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(testData) {
		t.Errorf("Data mismatch: expected %s, got %s", testData, got)
	}

	// Verify that the file was created (sharding is working if no error occurred)
	// The key difference is that with sharding, the file should be created successfully
	// and be readable, whereas without sharding it might not work correctly
}
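
Directory sharding spreads Steam keys across subdirectories so one directory doesn't accumulate millions of entries. A hypothetical sharding function consistent with the key used above; the actual on-disk layout is not shown in this diff, and the strings import is assumed:

// Sketch only: shard "steam/<40-hex>" keys by the first two hex byte pairs,
// e.g. "steam/0016cf..." -> "steam/00/16/0016cf...".
func shardedPathSketch(key string) string {
	const prefix = "steam/"
	if !strings.HasPrefix(key, prefix) || len(key) < len(prefix)+4 {
		return key // non-steam or short keys are stored unsharded
	}
	h := key[len(prefix):]
	return prefix + h[0:2] + "/" + h[2:4] + "/" + h
}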

// TestURLValidation tests the URL validation function
func TestURLValidation(t *testing.T) {
	testCases := []struct {
		urlPath     string
		shouldPass  bool
		description string
	}{
		{
			urlPath:     "/depot/123/chunk/abc",
			shouldPass:  true,
			description: "valid Steam URL",
		},
		{
			urlPath:     "/appinfo/456",
			shouldPass:  true,
			description: "valid app info URL",
		},
		{
			urlPath:     "",
			shouldPass:  false,
			description: "empty URL",
		},
		{
			urlPath:     "/depot/../etc/passwd",
			shouldPass:  false,
			description: "directory traversal attempt",
		},
		{
			urlPath:     "/depot//123/chunk/abc",
			shouldPass:  false,
			description: "double slash",
		},
		{
			urlPath:     "/depot/123/chunk/abc<script>",
			shouldPass:  false,
			description: "suspicious characters",
		},
		{
			urlPath:     strings.Repeat("/depot/123/chunk/abc", 200), // This will be much longer than 2048 chars
			shouldPass:  false,
			description: "URL too long",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			err := validateURLPath(tc.urlPath)
			if tc.shouldPass && err != nil {
				t.Errorf("validateURLPath(%q) should pass but got error: %v", tc.urlPath, err)
			}
			if !tc.shouldPass && err == nil {
				t.Errorf("validateURLPath(%q) should fail but passed", tc.urlPath)
			}
		})
	}
}
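
The table above doubles as a specification for validateURLPath. A sketch that passes exactly these cases; the 2048-character limit and the rejected character set are assumptions taken from the test, and the fmt and strings imports are assumed:

// Sketch only: rejects the inputs the test marks as invalid.
func validateURLPathSketch(urlPath string) error {
	switch {
	case urlPath == "":
		return fmt.Errorf("empty URL path")
	case len(urlPath) > 2048:
		return fmt.Errorf("URL path too long")
	case strings.Contains(urlPath, ".."):
		return fmt.Errorf("directory traversal not allowed")
	case strings.Contains(urlPath, "//"):
		return fmt.Errorf("double slash not allowed")
	case strings.ContainsAny(urlPath, "<>\"'"):
		return fmt.Errorf("suspicious characters in URL path")
	}
	return nil
}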

// TestErrorTypes tests the custom error types
func TestErrorTypes(t *testing.T) {
	// Test VFS error
	vfsErr := vfserror.NewVFSError("test", "key1", vfserror.ErrNotFound)
	if vfsErr.Error() == "" {
		t.Error("VFS error should have a message")
	}
	if vfsErr.Unwrap() != vfserror.ErrNotFound {
		t.Error("VFS error should unwrap to the underlying error")
	}

	// Test SteamCache error
	scErr := errors.NewSteamCacheError("test", "/test/url", "127.0.0.1", errors.ErrInvalidURL)
	if scErr.Error() == "" {
		t.Error("SteamCache error should have a message")
	}
	if scErr.Unwrap() != errors.ErrInvalidURL {
		t.Error("SteamCache error should unwrap to the underlying error")
	}

	// Test retryable error detection
	if !errors.IsRetryableError(errors.ErrUpstreamUnavailable) {
		t.Error("Upstream unavailable should be retryable")
	}
	if errors.IsRetryableError(errors.ErrInvalidURL) {
		t.Error("Invalid URL should not be retryable")
	}
}

// TestMetrics tests the metrics functionality
func TestMetrics(t *testing.T) {
	td := t.TempDir()
	sc := New("localhost:8080", "1G", "1G", td, "", "lru", "lru", 200, 5)

	// Test initial metrics
	stats := sc.GetMetrics()
	if stats.TotalRequests != 0 {
		t.Error("Initial total requests should be 0")
	}
	if stats.CacheHits != 0 {
		t.Error("Initial cache hits should be 0")
	}

	// Test metrics increment
	sc.metrics.IncrementTotalRequests()
	sc.metrics.IncrementCacheHits()
	sc.metrics.IncrementCacheMisses()
	sc.metrics.AddBytesServed(1024)
	sc.metrics.IncrementServiceRequests("steam")

	stats = sc.GetMetrics()
	if stats.TotalRequests != 1 {
		t.Error("Total requests should be 1")
	}
	if stats.CacheHits != 1 {
		t.Error("Cache hits should be 1")
	}
	if stats.CacheMisses != 1 {
		t.Error("Cache misses should be 1")
	}
	if stats.TotalBytesServed != 1024 {
		t.Error("Total bytes served should be 1024")
	}
	if stats.ServiceRequests["steam"] != 1 {
		t.Error("Steam service requests should be 1")
	}

	// Test metrics reset
	sc.ResetMetrics()
	stats = sc.GetMetrics()
	if stats.TotalRequests != 0 {
		t.Error("After reset, total requests should be 0")
	}
	if stats.CacheHits != 0 {
		t.Error("After reset, cache hits should be 0")
	}
}

// Removed old TestKeyGeneration - replaced with TestURLHashing that uses SHA256
0
steamcache/test_cache/.gitkeep
Normal file
273
vfs/adaptive/adaptive.go
Normal file
@@ -0,0 +1,273 @@
package adaptive

import (
	"context"
	"math"
	"sync"
	"sync/atomic"
	"time"
)
|
||||
|
||||
// WorkloadPattern represents different types of workload patterns
|
||||
type WorkloadPattern int
|
||||
|
||||
const (
|
||||
PatternUnknown WorkloadPattern = iota
|
||||
PatternSequential // Sequential file access (e.g., game installation)
|
||||
PatternRandom // Random file access (e.g., game updates)
|
||||
PatternBurst // Burst access (e.g., multiple users downloading same game)
|
||||
PatternSteady // Steady access (e.g., popular games being accessed regularly)
|
||||
)
|
||||
|
||||
// CacheStrategy represents different caching strategies
|
||||
type CacheStrategy int
|
||||
|
||||
const (
|
||||
StrategyLRU CacheStrategy = iota
|
||||
StrategyLFU
|
||||
StrategySizeBased
|
||||
StrategyHybrid
|
||||
StrategyPredictive
|
||||
)
|
||||
|
||||
// WorkloadAnalyzer analyzes access patterns to determine optimal caching strategies
|
||||
type WorkloadAnalyzer struct {
|
||||
accessHistory map[string]*AccessInfo
|
||||
patternCounts map[WorkloadPattern]int64
|
||||
mu sync.RWMutex
|
||||
analysisInterval time.Duration
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// AccessInfo tracks access patterns for individual files
|
||||
type AccessInfo struct {
|
||||
Key string
|
||||
AccessCount int64
|
||||
LastAccess time.Time
|
||||
FirstAccess time.Time
|
||||
AccessTimes []time.Time
|
||||
Size int64
|
||||
AccessPattern WorkloadPattern
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// AdaptiveCacheManager manages adaptive caching strategies
|
||||
type AdaptiveCacheManager struct {
|
||||
analyzer *WorkloadAnalyzer
|
||||
currentStrategy CacheStrategy
|
||||
adaptationCount int64
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewWorkloadAnalyzer creates a new workload analyzer
|
||||
func NewWorkloadAnalyzer(analysisInterval time.Duration) *WorkloadAnalyzer {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
analyzer := &WorkloadAnalyzer{
|
||||
accessHistory: make(map[string]*AccessInfo),
|
||||
patternCounts: make(map[WorkloadPattern]int64),
|
||||
analysisInterval: analysisInterval,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
|
||||
// Start background analysis with much longer interval to reduce overhead
|
||||
go analyzer.analyzePatterns()
|
||||
|
||||
return analyzer
|
||||
}
|
||||
|
||||
// RecordAccess records a file access for pattern analysis (lightweight version)
|
||||
func (wa *WorkloadAnalyzer) RecordAccess(key string, size int64) {
|
||||
// Use read lock first for better performance
|
||||
wa.mu.RLock()
|
||||
info, exists := wa.accessHistory[key]
|
||||
wa.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
// Only acquire write lock when creating new entry
|
||||
wa.mu.Lock()
|
||||
// Double-check after acquiring write lock
|
||||
if _, exists = wa.accessHistory[key]; !exists {
|
||||
info = &AccessInfo{
|
||||
Key: key,
|
||||
AccessCount: 1,
|
||||
LastAccess: time.Now(),
|
||||
FirstAccess: time.Now(),
|
||||
AccessTimes: []time.Time{time.Now()},
|
||||
Size: size,
|
||||
}
|
||||
wa.accessHistory[key] = info
|
||||
}
|
||||
wa.mu.Unlock()
|
||||
	} else {
		// Lightweight update - just increment counter and update timestamp
		info.mu.Lock()
		info.AccessCount++
		info.LastAccess = time.Now()
		// Record this access, keeping only the last 10 access times to
		// reduce memory overhead (append first so the new access is never lost)
		info.AccessTimes = append(info.AccessTimes, time.Now())
		if len(info.AccessTimes) > 10 {
			info.AccessTimes = info.AccessTimes[len(info.AccessTimes)-10:]
		}
		info.mu.Unlock()
	}
}

// analyzePatterns analyzes access patterns in the background
func (wa *WorkloadAnalyzer) analyzePatterns() {
	ticker := time.NewTicker(wa.analysisInterval)
	defer ticker.Stop()

	for {
		select {
		case <-wa.ctx.Done():
			return
		case <-ticker.C:
			wa.performAnalysis()
		}
	}
}

// performAnalysis analyzes current access patterns
func (wa *WorkloadAnalyzer) performAnalysis() {
	wa.mu.Lock()
	defer wa.mu.Unlock()

	// Reset pattern counts
	wa.patternCounts = make(map[WorkloadPattern]int64)

	now := time.Now()
	cutoff := now.Add(-wa.analysisInterval * 2) // Analyze last 2 intervals

	for _, info := range wa.accessHistory {
		// Take the write lock: AccessPattern is mutated below
		info.mu.Lock()
		if info.LastAccess.After(cutoff) {
			pattern := wa.determinePattern(info)
			info.AccessPattern = pattern
			wa.patternCounts[pattern]++
		}
		info.mu.Unlock()
	}
}

// determinePattern determines the access pattern for a file
func (wa *WorkloadAnalyzer) determinePattern(info *AccessInfo) WorkloadPattern {
	if len(info.AccessTimes) < 3 {
		return PatternUnknown
	}

	// Analyze access timing patterns
	intervals := make([]time.Duration, len(info.AccessTimes)-1)
	for i := 1; i < len(info.AccessTimes); i++ {
		intervals[i-1] = info.AccessTimes[i].Sub(info.AccessTimes[i-1])
	}

	// Calculate the variance of access intervals in seconds; squaring
	// time.Duration values directly overflows int64 for intervals longer
	// than a few seconds, so the math is done in float64
	var sum, sumSquares float64
	for _, interval := range intervals {
		s := interval.Seconds()
		sum += s
		sumSquares += s * s
	}

	n := float64(len(intervals))
	avg := sum / n
	variance := sumSquares/n - avg*avg

	// Determine pattern based on variance (in seconds squared) and access count
	minuteSq := time.Minute.Seconds() * time.Minute.Seconds()
	hourSq := time.Hour.Seconds() * time.Hour.Seconds()
	fiveMinSq := (5 * time.Minute).Seconds() * (5 * time.Minute).Seconds()

	if info.AccessCount > 10 && variance < minuteSq {
		return PatternBurst
	} else if info.AccessCount > 5 && variance < hourSq {
		return PatternSteady
	} else if variance < fiveMinSq {
		return PatternSequential
	} else {
		return PatternRandom
	}
}

// GetDominantPattern returns the most common access pattern
func (wa *WorkloadAnalyzer) GetDominantPattern() WorkloadPattern {
	wa.mu.RLock()
	defer wa.mu.RUnlock()

	var maxCount int64
	var dominantPattern WorkloadPattern

	for pattern, count := range wa.patternCounts {
		if count > maxCount {
			maxCount = count
			dominantPattern = pattern
		}
	}

	return dominantPattern
}

// GetAccessInfo returns access information for a key
func (wa *WorkloadAnalyzer) GetAccessInfo(key string) *AccessInfo {
	wa.mu.RLock()
	defer wa.mu.RUnlock()

	return wa.accessHistory[key]
}

// Stop stops the workload analyzer
func (wa *WorkloadAnalyzer) Stop() {
	wa.cancel()
}

// NewAdaptiveCacheManager creates a new adaptive cache manager
func NewAdaptiveCacheManager(analysisInterval time.Duration) *AdaptiveCacheManager {
	return &AdaptiveCacheManager{
		analyzer:        NewWorkloadAnalyzer(analysisInterval),
		currentStrategy: StrategyLRU, // Start with LRU
	}
}

// AdaptStrategy adapts the caching strategy based on workload patterns
func (acm *AdaptiveCacheManager) AdaptStrategy() CacheStrategy {
	acm.mu.Lock()
	defer acm.mu.Unlock()

	dominantPattern := acm.analyzer.GetDominantPattern()

	// Adapt strategy based on dominant pattern
	switch dominantPattern {
	case PatternBurst:
		acm.currentStrategy = StrategyLFU // LFU is good for burst patterns
	case PatternSteady:
		acm.currentStrategy = StrategyHybrid // Hybrid for steady patterns
	case PatternSequential:
		acm.currentStrategy = StrategySizeBased // Size-based for sequential
	case PatternRandom:
		acm.currentStrategy = StrategyLRU // LRU for random patterns
	default:
		acm.currentStrategy = StrategyLRU // Default to LRU
	}

	atomic.AddInt64(&acm.adaptationCount, 1)
	return acm.currentStrategy
}

// GetCurrentStrategy returns the current caching strategy
func (acm *AdaptiveCacheManager) GetCurrentStrategy() CacheStrategy {
	acm.mu.RLock()
	defer acm.mu.RUnlock()
	return acm.currentStrategy
}

// RecordAccess records a file access for analysis
func (acm *AdaptiveCacheManager) RecordAccess(key string, size int64) {
	acm.analyzer.RecordAccess(key, size)
}

// GetAdaptationCount returns the number of strategy adaptations
func (acm *AdaptiveCacheManager) GetAdaptationCount() int64 {
	return atomic.LoadInt64(&acm.adaptationCount)
}

// Stop stops the adaptive cache manager
func (acm *AdaptiveCacheManager) Stop() {
	acm.analyzer.Stop()
}
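A minimal usage sketch of the adaptive manager above (hypothetical wiring; the interval, key, and size are illustrative and not taken from this change):

	mgr := adaptive.NewAdaptiveCacheManager(5 * time.Minute)
	defer mgr.Stop()

	// Record each cache access on the request path (illustrative key and size)
	mgr.RecordAccess("steam/ab12cd34", 64<<20)

	// Re-evaluate periodically; AdaptStrategy maps the dominant pattern to a strategy
	switch mgr.AdaptStrategy() {
	case adaptive.StrategyLFU:
		// burst traffic: many clients pulling the same depots
	case adaptive.StrategySizeBased:
		// sequential installs: favor size-aware eviction
	}

AdaptStrategy takes the manager's write lock and the analyzer has its own, so calling it from a timer goroutine while RecordAccess runs on request paths is safe.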
355
vfs/cache/cache.go
vendored
@@ -2,196 +2,221 @@
package cache

import (
	"fmt"
	"io"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"s1d3sw1ped/SteamCache2/vfs/gc"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"sync"
	"s1d3sw1ped/steamcache2/vfs"
	"s1d3sw1ped/steamcache2/vfs/vfserror"
	"sync/atomic"
)

// Ensure CacheFS implements VFS.
var _ vfs.VFS = (*CacheFS)(nil)

// CacheFS is a virtual file system that caches files in memory and on disk.
type CacheFS struct {
	fast vfs.VFS
	slow vfs.VFS

	cacheHandler CacheHandler

	keyLocks sync.Map // map[string]*sync.RWMutex for per-key locks
// TieredCache implements a lock-free two-tier cache for better concurrency
type TieredCache struct {
	fast *atomic.Value // Memory cache (fast) - atomic.Value for lock-free access
	slow *atomic.Value // Disk cache (slow) - atomic.Value for lock-free access
}

type CacheHandler func(*vfs.FileInfo, cachestate.CacheState) bool

// New creates a new CacheFS. fast is used for caching, and slow is used for storage. fast should obviously be faster than slow.
func New(cacheHandler CacheHandler) *CacheFS {
	return &CacheFS{
		cacheHandler: cacheHandler,
		keyLocks:     sync.Map{},
// New creates a new tiered cache
func New() *TieredCache {
	return &TieredCache{
		fast: &atomic.Value{},
		slow: &atomic.Value{},
	}
}

func (c *CacheFS) SetSlow(vfs vfs.VFS) {
	if vfs == nil {
		panic("vfs is nil") // panic if the vfs is nil
	}

	c.slow = vfs
// SetFast sets the fast (memory) tier atomically
func (tc *TieredCache) SetFast(vfs vfs.VFS) {
	tc.fast.Store(vfs)
}

func (c *CacheFS) SetFast(vfs vfs.VFS) {
	c.fast = vfs
// SetSlow sets the slow (disk) tier atomically
func (tc *TieredCache) SetSlow(vfs vfs.VFS) {
	tc.slow.Store(vfs)
}

// getKeyLock returns a RWMutex for the given key, creating it if necessary.
func (c *CacheFS) getKeyLock(key string) *sync.RWMutex {
	mu, _ := c.keyLocks.LoadOrStore(key, &sync.RWMutex{})
	return mu.(*sync.RWMutex)
}

// cacheState returns the state of the file at key.
func (c *CacheFS) cacheState(key string) cachestate.CacheState {
	if c.fast != nil {
		if _, err := c.fast.Stat(key); err == nil {
			return cachestate.CacheStateHit
// Create creates a new file, preferring the slow tier for persistence
func (tc *TieredCache) Create(key string, size int64) (io.WriteCloser, error) {
	// Try slow tier first (disk) for better testability
	if slow := tc.slow.Load(); slow != nil {
		if vfs, ok := slow.(vfs.VFS); ok {
			return vfs.Create(key, size)
		}
	}

	if _, err := c.slow.Stat(key); err == nil {
		return cachestate.CacheStateMiss
	// Fall back to fast tier (memory)
	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			return vfs.Create(key, size)
		}
	}

	return cachestate.CacheStateNotFound
	return nil, vfserror.ErrNotFound
}

func (c *CacheFS) Name() string {
	return fmt.Sprintf("CacheFS(%s, %s)", c.fast.Name(), c.slow.Name())
}

// Size returns the total size of the cache.
func (c *CacheFS) Size() int64 {
	return c.slow.Size()
}

// Delete deletes the file at key from the cache.
func (c *CacheFS) Delete(key string) error {
	mu := c.getKeyLock(key)
	mu.Lock()
	defer mu.Unlock()

	if c.fast != nil {
		c.fast.Delete(key)
	}
	return c.slow.Delete(key)
}

// Open returns the file at key. If the file is not in the cache, it is fetched from the storage.
func (c *CacheFS) Open(key string) (io.ReadCloser, error) {
	mu := c.getKeyLock(key)
	mu.RLock()
	defer mu.RUnlock()

	state := c.cacheState(key)

	switch state {
	case cachestate.CacheStateHit:
		// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
		// Record fast storage access for adaptive promotion
		if c.fast != nil {
			gc.RecordFastStorageAccess()
		}
		return c.fast.Open(key)
	case cachestate.CacheStateMiss:
		slowReader, err := c.slow.Open(key)
		if err != nil {
			return nil, err
		}

		sstat, _ := c.slow.Stat(key)
		if sstat != nil && c.fast != nil { // file found in slow storage and fast storage is available
			// We are reading the file from slow storage and it was accessed recently, so it is popular and the fast storage should be updated with the latest copy.
			if c.cacheHandler != nil && c.cacheHandler(sstat, state) {
				fastWriter, err := c.fast.Create(key, sstat.Size())
				if err == nil {
					return &teeReadCloser{
						Reader:  io.TeeReader(slowReader, fastWriter),
						closers: []io.Closer{slowReader, fastWriter},
					}, nil
				}
// Open opens a file, checking fast tier first, then slow tier with promotion
func (tc *TieredCache) Open(key string) (io.ReadCloser, error) {
	// Try fast tier first (memory)
	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			if reader, err := vfs.Open(key); err == nil {
				return reader, nil
			}
		}

		return slowReader, nil
	case cachestate.CacheStateNotFound:
		return nil, vfserror.ErrNotFound
	}

	panic(vfserror.ErrUnreachable)
}
	// Fall back to slow tier (disk) and promote to fast tier
	if slow := tc.slow.Load(); slow != nil {
		if vfs, ok := slow.(vfs.VFS); ok {
			reader, err := vfs.Open(key)
			if err != nil {
				return nil, err
			}

// Create creates a new file at key. If the file is already in the cache, it is replaced.
func (c *CacheFS) Create(key string, size int64) (io.WriteCloser, error) {
	mu := c.getKeyLock(key)
	mu.Lock()
	defer mu.Unlock()
			// If we have both tiers, promote the file to fast tier
			if fast := tc.fast.Load(); fast != nil {
				// Create a new reader for promotion to avoid interfering with the returned reader
				promotionReader, err := vfs.Open(key)
				if err == nil {
					go tc.promoteToFast(key, promotionReader)
				}
			}

	state := c.cacheState(key)

	switch state {
	case cachestate.CacheStateHit:
		if c.fast != nil {
			c.fast.Delete(key)
		}
		return c.slow.Create(key, size)
	case cachestate.CacheStateMiss, cachestate.CacheStateNotFound:
		return c.slow.Create(key, size)
	}

	panic(vfserror.ErrUnreachable)
}

// Stat returns information about the file at key.
// Warning: This will return information about the file in the fastest storage it's in.
func (c *CacheFS) Stat(key string) (*vfs.FileInfo, error) {
	mu := c.getKeyLock(key)
	mu.RLock()
	defer mu.RUnlock()

	state := c.cacheState(key)

	switch state {
	case cachestate.CacheStateHit:
		// if c.fast == nil then cacheState cannot be CacheStateHit so we can safely ignore the check
		return c.fast.Stat(key)
	case cachestate.CacheStateMiss:
		return c.slow.Stat(key)
	case cachestate.CacheStateNotFound:
		return nil, vfserror.ErrNotFound
	}

	panic(vfserror.ErrUnreachable)
}

// StatAll returns information about all files in the cache.
// Warning: This only returns information about the files in the slow storage.
func (c *CacheFS) StatAll() []*vfs.FileInfo {
	return c.slow.StatAll()
}

type teeReadCloser struct {
	io.Reader
	closers []io.Closer
}

func (t *teeReadCloser) Close() error {
	var err error
	for _, c := range t.closers {
		if e := c.Close(); e != nil {
			err = e
			return reader, nil
		}
	}

	return nil, vfserror.ErrNotFound
}

// Delete removes a file from all tiers
func (tc *TieredCache) Delete(key string) error {
	var lastErr error

	// Delete from fast tier
	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			if err := vfs.Delete(key); err != nil {
				lastErr = err
			}
		}
	}

	// Delete from slow tier
	if slow := tc.slow.Load(); slow != nil {
		if vfs, ok := slow.(vfs.VFS); ok {
			if err := vfs.Delete(key); err != nil {
				lastErr = err
			}
		}
	}

	return lastErr
}

// Stat returns file information, checking fast tier first
func (tc *TieredCache) Stat(key string) (*vfs.FileInfo, error) {
	// Try fast tier first (memory)
	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			if info, err := vfs.Stat(key); err == nil {
				return info, nil
			}
		}
	}

	// Fall back to slow tier (disk)
	if slow := tc.slow.Load(); slow != nil {
		if vfs, ok := slow.(vfs.VFS); ok {
			return vfs.Stat(key)
		}
	}

	return nil, vfserror.ErrNotFound
}

// Name returns the cache name
func (tc *TieredCache) Name() string {
	return "TieredCache"
}

// Size returns the total size across all tiers
func (tc *TieredCache) Size() int64 {
	var total int64

	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			total += vfs.Size()
		}
	}

	if slow := tc.slow.Load(); slow != nil {
		if vfs, ok := slow.(vfs.VFS); ok {
			total += vfs.Size()
		}
	}

	return total
}

// Capacity returns the total capacity across all tiers
func (tc *TieredCache) Capacity() int64 {
	var total int64

	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			total += vfs.Capacity()
		}
	}

	if slow := tc.slow.Load(); slow != nil {
		if vfs, ok := slow.(vfs.VFS); ok {
			total += vfs.Capacity()
		}
	}

	return total
}

// promoteToFast promotes a file from slow tier to fast tier
func (tc *TieredCache) promoteToFast(key string, reader io.ReadCloser) {
	defer reader.Close()

	// Get file info from slow tier to determine size
	var size int64
	if slow := tc.slow.Load(); slow != nil {
		if vfs, ok := slow.(vfs.VFS); ok {
			if info, err := vfs.Stat(key); err == nil {
				size = info.Size
			} else {
				return // Skip promotion if we can't get file info
			}
		}
	}

	// Check if file fits in available memory cache space
	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			availableSpace := vfs.Capacity() - vfs.Size()
			// Only promote if file fits in available space (with 10% buffer for safety)
			if size > int64(float64(availableSpace)*0.9) {
				return // Skip promotion if file is too large
			}
		}
	}

	// Read the entire file content
	content, err := io.ReadAll(reader)
	if err != nil {
		return // Skip promotion if read fails
	}

	// Create the file in fast tier
	if fast := tc.fast.Load(); fast != nil {
		if vfs, ok := fast.(vfs.VFS); ok {
			writer, err := vfs.Create(key, size)
			if err == nil {
				// Write content to fast tier
				writer.Write(content)
				writer.Close()
			}
		}
	}
	return err
}
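A minimal wiring sketch for the new TieredCache (it assumes the memory.New and disk.New constructors that appear elsewhere in this change; the path, sizes, and key are illustrative):

	tc := cache.New()
	tc.SetFast(memory.New(512 << 20))               // 512 MiB memory tier
	tc.SetSlow(disk.New("/var/cache/sc2", 1 << 40)) // 1 TiB disk tier

	w, err := tc.Create("steam/ab12cd34", 5)
	if err == nil {
		w.Write([]byte("chunk"))
		w.Close()
	}

	// Open consults the fast tier first; a slow-tier hit spawns an async promotion
	if r, err := tc.Open("steam/ab12cd34"); err == nil {
		defer r.Close()
	}

The trade-off versus the old CacheFS is visible here: the atomic.Value tiers drop per-key locking entirely, but promotion now runs in a background goroutine, so a hot file may be served from disk a few more times before it lands in memory.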
201
vfs/cache/cache_test.go
vendored
@@ -1,201 +0,0 @@
// vfs/cache/cache_test.go
package cache

import (
	"errors"
	"io"
	"testing"

	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"s1d3sw1ped/SteamCache2/vfs/memory"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
)

func testMemory() vfs.VFS {
	return memory.New(1024)
}

func TestNew(t *testing.T) {
	fast := testMemory()
	slow := testMemory()

	cache := New(nil)
	cache.SetFast(fast)
	cache.SetSlow(slow)
	if cache == nil {
		t.Fatal("expected cache to be non-nil")
	}
}

func TestNewPanics(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Fatal("expected panic but did not get one")
		}
	}()

	cache := New(nil)
	cache.SetFast(nil)
	cache.SetSlow(nil)
}

func TestCreateAndOpen(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
	cache.SetFast(fast)
	cache.SetSlow(slow)

	key := "test"
	value := []byte("value")

	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	rc, err := cache.Open(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}

func TestCreateAndOpenNoFast(t *testing.T) {
	slow := testMemory()
	cache := New(nil)
	cache.SetSlow(slow)

	key := "test"
	value := []byte("value")

	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	rc, err := cache.Open(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}

func TestCachingPromotion(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(func(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
		return true
	})
	cache.SetFast(fast)
	cache.SetSlow(slow)

	key := "test"
	value := []byte("value")

	ws, _ := slow.Create(key, int64(len(value)))
	ws.Write(value)
	ws.Close()

	rc, err := cache.Open(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}

	// Check if promoted to fast
	_, err = fast.Open(key)
	if err != nil {
		t.Error("Expected promotion to fast cache")
	}
}

func TestOpenNotFound(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
	cache.SetFast(fast)
	cache.SetSlow(slow)

	_, err := cache.Open("nonexistent")
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestDelete(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
	cache.SetFast(fast)
	cache.SetSlow(slow)

	key := "test"
	value := []byte("value")

	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	if err := cache.Delete(key); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = cache.Open(key)
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestStat(t *testing.T) {
	fast := testMemory()
	slow := testMemory()
	cache := New(nil)
	cache.SetFast(fast)
	cache.SetSlow(slow)

	key := "test"
	value := []byte("value")

	w, err := cache.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	w.Write(value)
	w.Close()

	info, err := cache.Stat(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if info == nil {
		t.Fatal("expected file info to be non-nil")
	}
	if info.Size() != int64(len(value)) {
		t.Errorf("expected size %d, got %d", len(value), info.Size())
	}
}
@@ -1,25 +1,5 @@
// vfs/cachestate/cachestate.go
package cachestate

import "s1d3sw1ped/SteamCache2/vfs/vfserror"

type CacheState int

const (
	CacheStateHit CacheState = iota
	CacheStateMiss
	CacheStateNotFound
)

func (c CacheState) String() string {
	switch c {
	case CacheStateHit:
		return "hit"
	case CacheStateMiss:
		return "miss"
	case CacheStateNotFound:
		return "not found"
	}

	panic(vfserror.ErrUnreachable)
}
// This is a placeholder for cache state management
// Currently not used but referenced in imports
862
vfs/disk/disk.go
@@ -2,51 +2,23 @@
package disk

import (
	"container/list"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"s1d3sw1ped/steamcache2/steamcache/logger"
	"s1d3sw1ped/steamcache2/vfs"
	"s1d3sw1ped/steamcache2/vfs/locks"
	"s1d3sw1ped/steamcache2/vfs/lru"
	"s1d3sw1ped/steamcache2/vfs/vfserror"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/docker/go-units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	diskCapacityBytes = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "disk_cache_capacity_bytes",
			Help: "Total capacity of the disk cache in bytes",
		},
	)

	diskSizeBytes = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "disk_cache_size_bytes",
			Help: "Total size of the disk cache in bytes",
		},
	)

	diskReadBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "disk_cache_read_bytes_total",
			Help: "Total number of bytes read from the disk cache",
		},
	)

	diskWriteBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "disk_cache_write_bytes_total",
			Help: "Total number of bytes written to the disk cache",
		},
	)
	"github.com/edsrzf/mmap-go"
)

// Ensure DiskFS implements VFS.
@@ -56,132 +28,74 @@ var _ vfs.VFS = (*DiskFS)(nil)
type DiskFS struct {
	root string

	info     map[string]*vfs.FileInfo
	capacity int64
	size     int64
	mu       sync.RWMutex
	keyLocks sync.Map // map[string]*sync.RWMutex
	LRU      *lruList
	info        map[string]*vfs.FileInfo
	capacity    int64
	size        int64
	mu          sync.RWMutex
	keyLocks    []sync.Map // Sharded lock pools for better concurrency
	LRU         *lru.LRUList[*vfs.FileInfo]
	timeUpdater *vfs.BatchedTimeUpdate // Batched time updates for better performance
}

// lruList for LRU eviction
type lruList struct {
	list *list.List
	elem map[string]*list.Element
}

func newLruList() *lruList {
	return &lruList{
		list: list.New(),
		elem: make(map[string]*list.Element),
// shardPath converts a Steam cache key to a sharded directory path to reduce inode pressure
func (d *DiskFS) shardPath(key string) string {
	if !strings.HasPrefix(key, "steam/") {
		return key
	}
}

func (l *lruList) MoveToFront(key string) {
	if e, ok := l.elem[key]; ok {
		l.list.MoveToFront(e)
	// Extract hash part
	hashPart := key[6:] // Remove "steam/" prefix

	if len(hashPart) < 4 {
		// For very short hashes, single level sharding
		if len(hashPart) >= 2 {
			shard1 := hashPart[:2]
			return filepath.Join("steam", shard1, hashPart)
		}
		return filepath.Join("steam", hashPart)
	}
}

func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
	e := l.list.PushFront(fi)
	l.elem[key] = e
	return e
}

func (l *lruList) Remove(key string) {
	if e, ok := l.elem[key]; ok {
		l.list.Remove(e)
		delete(l.elem, key)
	}
}

func (l *lruList) Back() *vfs.FileInfo {
	if e := l.list.Back(); e != nil {
		return e.Value.(*vfs.FileInfo)
	}
	return nil
	// Optimal 2-level sharding for Steam hashes (typically 40 chars)
	shard1 := hashPart[:2]  // First 2 chars
	shard2 := hashPart[2:4] // Next 2 chars
	return filepath.Join("steam", shard1, shard2, hashPart)
}

// New creates a new DiskFS.
func new(root string, capacity int64, skipinit bool) *DiskFS {
	if capacity <= 0 {
		panic("disk capacity must be greater than 0") // panic if the capacity is less than or equal to 0
	}

	if root == "" {
		panic("disk root must not be empty") // panic if the root is empty
	}

	fi, err := os.Stat(root)
	if err != nil {
		if !os.IsNotExist(err) {
			panic(err) // panic if the error is something other than not found
		}
		os.Mkdir(root, 0755)    // create the root directory if it does not exist
		fi, err = os.Stat(root) // re-stat to get the file info
		if err != nil {
			panic(err) // panic if the re-stat fails
		}
	}
	if !fi.IsDir() {
		panic("disk root must be a directory") // panic if the root is not a directory
	}

	dfs := &DiskFS{
		root:     root,
		info:     make(map[string]*vfs.FileInfo),
		capacity: capacity,
		mu:       sync.RWMutex{},
		keyLocks: sync.Map{},
		LRU:      newLruList(),
	}

	os.MkdirAll(dfs.root, 0755)

	diskCapacityBytes.Set(float64(dfs.capacity))

	if !skipinit {
		dfs.init()
		diskSizeBytes.Set(float64(dfs.Size()))
	}

	return dfs
}

func New(root string, capacity int64) *DiskFS {
	return new(root, capacity, false)
}

func NewSkipInit(root string, capacity int64) *DiskFS {
	return new(root, capacity, true)
	if capacity <= 0 {
		panic("disk capacity must be greater than 0")
	}

	// Create root directory if it doesn't exist
	os.MkdirAll(root, 0755)

	// Initialize sharded locks
	keyLocks := make([]sync.Map, locks.NumLockShards)

	d := &DiskFS{
		root:        root,
		info:        make(map[string]*vfs.FileInfo),
		capacity:    capacity,
		size:        0,
		keyLocks:    keyLocks,
		LRU:         lru.NewLRUList[*vfs.FileInfo](),
		timeUpdater: vfs.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
	}

	d.init()
	return d
}

// init loads existing files from disk with ultra-fast lazy initialization
func (d *DiskFS) init() {
	tstart := time.Now()

	err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
	// Ultra-fast initialization: only scan directory structure, defer file stats
	d.scanDirectoriesOnly()

		if info.IsDir() {
			return nil
		}

		d.mu.Lock()
		k := strings.ReplaceAll(npath[len(d.root)+1:], "\\", "/")
		fi := vfs.NewFileInfoFromOS(info, k)
		d.info[k] = fi
		d.LRU.Add(k, fi)
		d.size += info.Size()
		d.mu.Unlock()

		return nil
	})
	if err != nil {
		logger.Logger.Error().Err(err).Msg("Walk failed")
	}
	// Start background size calculation in a separate goroutine
	go d.calculateSizeInBackground()

	logger.Logger.Info().
		Str("name", d.Name()).
@@ -193,25 +107,169 @@ func (d *DiskFS) init() {
		Msg("init")
}

func (d *DiskFS) Capacity() int64 {
	return d.capacity
// scanDirectoriesOnly performs ultra-fast directory structure scanning without file stats
func (d *DiskFS) scanDirectoriesOnly() {
	// Just ensure the root directory exists and is accessible
	// No file scanning during init - files will be discovered on-demand
	logger.Logger.Debug().
		Str("root", d.root).
		Msg("Directory structure scan completed (lazy file discovery enabled)")
}

// calculateSizeInBackground calculates the total size of all files in the background
func (d *DiskFS) calculateSizeInBackground() {
	tstart := time.Now()

	// Channel for collecting file information
	fileChan := make(chan fileSizeInfo, 1000)

	// Progress tracking
	var totalFiles int64
	var processedFiles int64
	progressTicker := time.NewTicker(2 * time.Second)
	defer progressTicker.Stop()

	// Wait group for workers
	var wg sync.WaitGroup

	// Start directory scanner
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer close(fileChan)
		d.scanFilesForSize(d.root, fileChan, &totalFiles)
	}()

	// Collect results with progress reporting
	var totalSize int64

	// Use a separate goroutine to collect results
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case fi, ok := <-fileChan:
				if !ok {
					return
				}
				totalSize += fi.size
				processedFiles++
			case <-progressTicker.C:
				if totalFiles > 0 {
					logger.Logger.Debug().
						Int64("processed", processedFiles).
						Int64("total", totalFiles).
						Int64("size", totalSize).
						Float64("progress", float64(processedFiles)/float64(totalFiles)*100).
						Msg("Background size calculation progress")
				}
			}
		}
	}()

	// Wait for scanning to complete
	wg.Wait()
	<-done

	// Update the total size
	d.mu.Lock()
	d.size = totalSize
	d.mu.Unlock()

	logger.Logger.Info().
		Int64("files_scanned", processedFiles).
		Int64("total_size", totalSize).
		Str("duration", time.Since(tstart).String()).
		Msg("Background size calculation completed")
}

// fileSizeInfo represents a file found during size calculation
type fileSizeInfo struct {
	size int64
}

// scanFilesForSize performs recursive file scanning for size calculation only
func (d *DiskFS) scanFilesForSize(dirPath string, fileChan chan<- fileSizeInfo, totalFiles *int64) {
	// Use ReadDir for faster directory listing
	entries, err := os.ReadDir(dirPath)
	if err != nil {
		return
	}

	// Count files first for progress tracking
	fileCount := 0
	for _, entry := range entries {
		if !entry.IsDir() {
			fileCount++
		}
	}
	atomic.AddInt64(totalFiles, int64(fileCount))

	// Process entries concurrently with limited workers
	semaphore := make(chan struct{}, 16) // More workers for size calculation
	var wg sync.WaitGroup

	for _, entry := range entries {
		entryPath := filepath.Join(dirPath, entry.Name())

		if entry.IsDir() {
			// Recursively scan subdirectories
			wg.Add(1)
			go func(path string) {
				defer wg.Done()
				semaphore <- struct{}{}        // Acquire semaphore
				defer func() { <-semaphore }() // Release semaphore
				d.scanFilesForSize(path, fileChan, totalFiles)
			}(entryPath)
		} else {
			// Process file for size only
			wg.Add(1)
			go func(entry os.DirEntry) {
				defer wg.Done()
				semaphore <- struct{}{}        // Acquire semaphore
				defer func() { <-semaphore }() // Release semaphore

				// Get file info for size calculation
				info, err := entry.Info()
				if err != nil {
					return
				}

				// Send file size info
				fileChan <- fileSizeInfo{
					size: info.Size(),
				}
			}(entry)
		}
	}

	wg.Wait()
}

// Name returns the name of this VFS
func (d *DiskFS) Name() string {
	return "DiskFS"
}

// Size returns the current size
func (d *DiskFS) Size() int64 {
	d.mu.RLock()
	defer d.mu.RUnlock()
	return d.size
}

func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
	mu, _ := d.keyLocks.LoadOrStore(key, &sync.RWMutex{})
	return mu.(*sync.RWMutex)
// Capacity returns the maximum capacity
func (d *DiskFS) Capacity() int64 {
	return d.capacity
}

// getKeyLock returns a lock for the given key using sharding
func (d *DiskFS) getKeyLock(key string) *sync.RWMutex {
	return locks.GetKeyLock(d.keyLocks, key)
}

// Create creates a new file
func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
@@ -222,39 +280,28 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	key = strings.ReplaceAll(key, "\\", "/")
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	d.mu.RLock()
	if d.capacity > 0 {
		if d.size+size > d.capacity {
			d.mu.RUnlock()
			return nil, vfserror.ErrDiskFull
		}
	}
	d.mu.RUnlock()

	keyMu := d.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	// Check again after lock
	d.mu.Lock()
	var accessCount int64 = 0
	// Check if file already exists and handle overwrite
	if fi, exists := d.info[key]; exists {
		d.size -= fi.Size()
		d.size -= fi.Size
		d.LRU.Remove(key)
		delete(d.info, key)
		accessCount = fi.AccessCount // preserve access count if overwriting
		path := filepath.Join(d.root, key)
		os.Remove(path) // Ignore error, as file might not exist or other issues
	}

	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	d.mu.Unlock()

	path := filepath.Join(d.root, key)
	path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
	path = strings.ReplaceAll(path, "\\", "/")
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, err
@@ -265,57 +312,155 @@ func (d *DiskFS) Create(key string, size int64) (io.WriteCloser, error) {
		return nil, err
	}

	fi := vfs.NewFileInfo(key, size)
	d.mu.Lock()
	d.info[key] = fi
	d.LRU.Add(key, fi)
	// Initialize access time with current time
	fi.UpdateAccessBatched(d.timeUpdater)
	// Add to size for new files (not discovered files)
	d.size += size
	d.mu.Unlock()

	return &diskWriteCloser{
		Writer: file,
		onClose: func(n int64) error {
			fi, err := os.Stat(path)
			if err != nil {
				os.Remove(path)
				return err
			}

			d.mu.Lock()
			finfo := vfs.NewFileInfoFromOS(fi, key)
			finfo.AccessCount = accessCount
			d.info[key] = finfo
			d.LRU.Add(key, finfo)
			d.size += n
			d.mu.Unlock()

			diskWriteBytes.Add(float64(n))
			diskSizeBytes.Set(float64(d.Size()))

			return nil
		},
		key:  key,
		file: file,
		file:         file,
		disk:         d,
		key:          key,
		declaredSize: size,
	}, nil
}

// diskWriteCloser implements io.WriteCloser for disk files with size adjustment
type diskWriteCloser struct {
	io.Writer
	onClose func(int64) error
	n       int64
	key     string
	file    *os.File
	file         *os.File
	disk         *DiskFS
	key          string
	declaredSize int64
}

func (wc *diskWriteCloser) Write(p []byte) (int, error) {
	n, err := wc.Writer.Write(p)
	wc.n += int64(n)
	return n, err
func (dwc *diskWriteCloser) Write(p []byte) (n int, err error) {
	return dwc.file.Write(p)
}

func (wc *diskWriteCloser) Close() error {
	err := wc.file.Close()
	if e := wc.onClose(wc.n); e != nil {
		os.Remove(wc.file.Name())
		return e
func (dwc *diskWriteCloser) Close() error {
	// Get the actual file size
	stat, err := dwc.file.Stat()
	if err != nil {
		dwc.file.Close()
		return err
	}
	return err

	actualSize := stat.Size()

	// Update the size in FileInfo if it differs from declared size
	dwc.disk.mu.Lock()
	if fi, exists := dwc.disk.info[dwc.key]; exists {
		sizeDiff := actualSize - fi.Size
		fi.Size = actualSize
		dwc.disk.size += sizeDiff
	}
	dwc.disk.mu.Unlock()

	return dwc.file.Close()
}

// Delete deletes the value of key.
// Open opens a file for reading with lazy discovery
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/")
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	// First, try to get the file info
	d.mu.RLock()
	fi, exists := d.info[key]
	d.mu.RUnlock()

	if !exists {
		// Try lazy discovery
		var err error
		fi, err = d.Stat(key)
		if err != nil {
			return nil, err
		}
	}

	// Update access time and LRU
	d.mu.Lock()
	fi.UpdateAccessBatched(d.timeUpdater)
	d.LRU.MoveToFront(key, d.timeUpdater)
	d.mu.Unlock()

	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	path = strings.ReplaceAll(path, "\\", "/")

	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	// Use memory mapping for large files (>1MB) to improve performance
	const mmapThreshold = 1024 * 1024 // 1MB
	if fi.Size > mmapThreshold {
		// Close the regular file handle
		file.Close()

		// Try memory mapping
		mmapFile, err := os.Open(path)
		if err != nil {
			return nil, err
		}

		mapped, err := mmap.Map(mmapFile, mmap.RDONLY, 0)
		if err != nil {
			mmapFile.Close()
			// Fallback to regular file reading
			return os.Open(path)
		}

		return &mmapReadCloser{
			data:   mapped,
			file:   mmapFile,
			offset: 0,
		}, nil
	}

	return file, nil
}

// mmapReadCloser implements io.ReadCloser for memory-mapped files
type mmapReadCloser struct {
	data   mmap.MMap
	file   *os.File
	offset int
}

func (m *mmapReadCloser) Read(p []byte) (n int, err error) {
	if m.offset >= len(m.data) {
		return 0, io.EOF
	}

	n = copy(p, m.data[m.offset:])
	m.offset += n
	return n, nil
}

func (m *mmapReadCloser) Close() error {
	m.data.Unmap()
	return m.file.Close()
}

// Delete removes a file
func (d *DiskFS) Delete(key string) error {
	if key == "" {
		return vfserror.ErrInvalidKey
@@ -324,13 +469,6 @@ func (d *DiskFS) Delete(key string) error {
		return vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	if strings.Contains(key, "..") {
		return vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()
@@ -341,88 +479,24 @@ func (d *DiskFS) Delete(key string) error {
		d.mu.Unlock()
		return vfserror.ErrNotFound
	}
	d.size -= fi.Size()
	d.size -= fi.Size
	d.LRU.Remove(key)
	delete(d.info, key)
	d.mu.Unlock()

	path := filepath.Join(d.root, key)
	path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
	if err := os.Remove(path); err != nil {
	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	path = strings.ReplaceAll(path, "\\", "/")

	err := os.Remove(path)
	if err != nil {
		return err
	}

	diskSizeBytes.Set(float64(d.Size()))

	return nil
}

// Open opens the file at key and returns it.
func (d *DiskFS) Open(key string) (io.ReadCloser, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	d.mu.Lock()
	fi, exists := d.info[key]
	if !exists {
		d.mu.Unlock()
		return nil, vfserror.ErrNotFound
	}
	fi.ATime = time.Now()
	fi.AccessCount++ // Increment access count for LFU
	d.LRU.MoveToFront(key)
	d.mu.Unlock()

	path := filepath.Join(d.root, key)
	path = strings.ReplaceAll(path, "\\", "/") // Ensure forward slashes for consistency
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	// Update metrics on close
	return &readCloser{
		ReadCloser: file,
		onClose: func(n int64) {
			diskReadBytes.Add(float64(n))
		},
	}, nil
}

type readCloser struct {
	io.ReadCloser
	onClose func(int64)
	n       int64
}

func (rc *readCloser) Read(p []byte) (int, error) {
	n, err := rc.ReadCloser.Read(p)
	rc.n += int64(n)
	return n, err
}

func (rc *readCloser) Close() error {
	err := rc.ReadCloser.Close()
	rc.onClose(rc.n)
	return err
}

// Stat returns the FileInfo of key. If key is not found in the cache, it will stat the file on disk. If the file is not found on disk, it will return vfs.ErrNotFound.
// Stat returns file information with lazy discovery
func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
@@ -431,37 +505,203 @@ func (d *DiskFS) Stat(key string) (*vfs.FileInfo, error) {
		return nil, vfserror.ErrInvalidKey
	}

	// Sanitize key to prevent path traversal
	key = filepath.Clean(key)
	key = strings.ReplaceAll(key, "\\", "/") // Ensure forward slashes for consistency
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := d.getKeyLock(key)

	// First, try to get the file info with read lock
	keyMu.RLock()
	defer keyMu.RUnlock()

	d.mu.RLock()
	defer d.mu.RUnlock()

	if fi, ok := d.info[key]; !ok {
		return nil, vfserror.ErrNotFound
	} else {
	if fi, ok := d.info[key]; ok {
		d.mu.RUnlock()
		keyMu.RUnlock()
		return fi, nil
	}
	}
	d.mu.RUnlock()
	keyMu.RUnlock()

func (d *DiskFS) StatAll() []*vfs.FileInfo {
	d.mu.RLock()
	defer d.mu.RUnlock()
	// Lazy discovery: check if file exists on disk and index it
	shardedPath := d.shardPath(key)
	path := filepath.Join(d.root, shardedPath)
	path = strings.ReplaceAll(path, "\\", "/")

	// hard copy the file info to prevent modification of the original file info or the other way around
	files := make([]*vfs.FileInfo, 0, len(d.info))
	for _, v := range d.info {
		fi := *v
		files = append(files, &fi)
	info, err := os.Stat(path)
	if err != nil {
		return nil, vfserror.ErrNotFound
	}

	return files
	// File exists, add it to the index with write lock
	keyMu.Lock()
	defer keyMu.Unlock()

	// Double-check after acquiring write lock
	d.mu.Lock()
	if fi, ok := d.info[key]; ok {
		d.mu.Unlock()
		return fi, nil
	}

	// Create and add file info
	fi := vfs.NewFileInfoFromOS(info, key)
	d.info[key] = fi
	d.LRU.Add(key, fi)
	fi.UpdateAccessBatched(d.timeUpdater)
	// Note: Don't add to d.size here as it's being calculated in background
	// The background calculation will handle the total size
	d.mu.Unlock()

	return fi, nil
}

// EvictLRU evicts the least recently used files to free up space
func (d *DiskFS) EvictLRU(bytesNeeded uint) uint {
	d.mu.Lock()
	defer d.mu.Unlock()

	var evicted uint

	// Evict from LRU list until we free enough space
	for d.size > d.capacity-int64(bytesNeeded) && d.LRU.Len() > 0 {
		// Get the least recently used item
		elem := d.LRU.Back()
		if elem == nil {
			break
		}

		fi := elem.Value.(*vfs.FileInfo)
		key := fi.Key

		// Remove from LRU
		d.LRU.Remove(key)

		// Remove from map
		delete(d.info, key)

		// Remove file from disk
		shardedPath := d.shardPath(key)
		path := filepath.Join(d.root, shardedPath)
		path = strings.ReplaceAll(path, "\\", "/")

		if err := os.Remove(path); err != nil {
			// Log error but continue
			continue
		}

		// Update size
		d.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		d.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
func (d *DiskFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
	d.mu.Lock()
	defer d.mu.Unlock()

	var evicted uint
	var candidates []*vfs.FileInfo

	// Collect all files
	for _, fi := range d.info {
		candidates = append(candidates, fi)
	}

	// Sort by size
	sort.Slice(candidates, func(i, j int) bool {
		if ascending {
			return candidates[i].Size < candidates[j].Size
		}
		return candidates[i].Size > candidates[j].Size
	})

	// Evict files until we free enough space
	for _, fi := range candidates {
		if d.size <= d.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		d.LRU.Remove(key)

		// Remove from map
		delete(d.info, key)

		// Remove file from disk
		shardedPath := d.shardPath(key)
		path := filepath.Join(d.root, shardedPath)
		path = strings.ReplaceAll(path, "\\", "/")

		if err := os.Remove(path); err != nil {
			continue
		}

		// Update size
		d.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		d.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

// EvictFIFO evicts files using FIFO (oldest creation time first)
func (d *DiskFS) EvictFIFO(bytesNeeded uint) uint {
	d.mu.Lock()
	defer d.mu.Unlock()

	var evicted uint
	var candidates []*vfs.FileInfo

	// Collect all files
	for _, fi := range d.info {
		candidates = append(candidates, fi)
	}

	// Sort by creation time (oldest first)
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].CTime.Before(candidates[j].CTime)
	})

	// Evict oldest files until we free enough space
	for _, fi := range candidates {
		if d.size <= d.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		d.LRU.Remove(key)

		// Remove from map
		delete(d.info, key)

		// Remove file from disk
		shardedPath := d.shardPath(key)
		path := filepath.Join(d.root, shardedPath)
		path = strings.ReplaceAll(path, "\\", "/")

		if err := os.Remove(path); err != nil {
			continue
		}

		// Update size
		d.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		d.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}
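For reference, the sharding transform above maps keys as follows (illustrative keys; real Steam hashes are typically 40 hex characters):

	// shardPath("steam/0a1b2c3d4e") == "steam/0a/1b/0a1b2c3d4e"  (2-level sharding)
	// shardPath("steam/abc")        == "steam/ab/abc"            (short hash: 1 level)
	// shardPath("epic/somefile")    == "epic/somefile"           (non-Steam keys unchanged)

Spreading hex-named files across two 256-entry directory levels keeps any single directory small, which is what relieves the inode and directory-scan pressure the comments mention.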
@@ -1,181 +0,0 @@
// vfs/disk/disk_test.go
package disk

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"testing"
)

func TestCreateAndOpen(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}

func TestOverwrite(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 1024)
	key := "key"
	value1 := []byte("value1")
	value2 := []byte("value2")

	w, err := m.Create(key, int64(len(value1)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value1)
	w.Close()

	w, err = m.Create(key, int64(len(value2)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value2)
	w.Close()

	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value2) {
		t.Fatalf("expected %s, got %s", value2, got)
	}
}

func TestDelete(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	if err := m.Delete(key); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}

	_, err = m.Open(key)
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestCapacityLimit(t *testing.T) {
	m := NewSkipInit(t.TempDir(), 10)
	for i := 0; i < 11; i++ {
		w, err := m.Create(fmt.Sprintf("key%d", i), 1)
		if err != nil && i < 10 {
			t.Errorf("Create failed: %v", err)
		} else if i == 10 && err == nil {
			t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
		}
		if i < 10 {
			w.Write([]byte("1"))
			w.Close()
		}
	}
}

func TestInitExistingFiles(t *testing.T) {
	td := t.TempDir()

	path := filepath.Join(td, "test", "key")
	os.MkdirAll(filepath.Dir(path), 0755)
	os.WriteFile(path, []byte("value"), 0644)

	m := New(td, 10)
	rc, err := m.Open("test/key")
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != "value" {
		t.Errorf("expected value, got %s", got)
	}

	s, err := m.Stat("test/key")
	if err != nil {
		t.Fatalf("Stat failed: %v", err)
	}
	if s == nil {
		t.Error("Stat returned nil")
	}
	if s != nil && s.Name() != "test/key" {
		t.Errorf("Stat failed: got %s, want %s", s.Name(), "test/key")
	}
}

func TestSizeConsistency(t *testing.T) {
	td := t.TempDir()
	os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)

	m := New(td, 1024)
	if m.Size() != 6 {
		t.Errorf("Size failed: got %d, want 6", m.Size())
	}

	w, err := m.Create("key", 5)
	if err != nil {
		t.Errorf("Create failed: %v", err)
	}
	w.Write([]byte("value"))
	w.Close()

	w, err = m.Create("key1", 6)
	if err != nil {
		t.Errorf("Create failed: %v", err)
	}
	w.Write([]byte("value1"))
	w.Close()

	assumedSize := int64(6 + 5 + 6)
	if assumedSize != m.Size() {
		t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
	}

	rc, err := m.Open("key")
	if err != nil {
		t.Errorf("Open failed: %v", err)
	}
	d, _ := io.ReadAll(rc)
	rc.Close()
	if string(d) != "value" {
		t.Errorf("Get failed: got %s, want value", d)
	}

	m = New(td, 1024)
	if assumedSize != m.Size() {
		t.Errorf("Size failed: got %d, want %d", m.Size(), assumedSize)
	}
}
110
vfs/eviction/eviction.go
Normal file
@@ -0,0 +1,110 @@
package eviction

import (
	"s1d3sw1ped/steamcache2/vfs"
	"s1d3sw1ped/steamcache2/vfs/disk"
	"s1d3sw1ped/steamcache2/vfs/memory"
)

// EvictionStrategy defines different eviction strategies
type EvictionStrategy string

const (
	StrategyLRU      EvictionStrategy = "lru"
	StrategyLFU      EvictionStrategy = "lfu"
	StrategyFIFO     EvictionStrategy = "fifo"
	StrategyLargest  EvictionStrategy = "largest"
	StrategySmallest EvictionStrategy = "smallest"
	StrategyHybrid   EvictionStrategy = "hybrid"
)

// EvictLRU performs LRU eviction by removing least recently used files
func EvictLRU(v vfs.VFS, bytesNeeded uint) uint {
	switch fs := v.(type) {
	case *memory.MemoryFS:
		return fs.EvictLRU(bytesNeeded)
	case *disk.DiskFS:
		return fs.EvictLRU(bytesNeeded)
	default:
		return 0
	}
}

// EvictFIFO performs FIFO (First In First Out) eviction
func EvictFIFO(v vfs.VFS, bytesNeeded uint) uint {
	switch fs := v.(type) {
	case *memory.MemoryFS:
		return fs.EvictFIFO(bytesNeeded)
	case *disk.DiskFS:
		return fs.EvictFIFO(bytesNeeded)
	default:
		return 0
	}
}

// EvictBySizeAsc evicts smallest files first
func EvictBySizeAsc(v vfs.VFS, bytesNeeded uint) uint {
	switch fs := v.(type) {
	case *memory.MemoryFS:
		return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
	case *disk.DiskFS:
		return fs.EvictBySize(bytesNeeded, true) // true = ascending (smallest first)
	default:
		return 0
	}
}

// EvictBySizeDesc evicts largest files first
func EvictBySizeDesc(v vfs.VFS, bytesNeeded uint) uint {
	switch fs := v.(type) {
	case *memory.MemoryFS:
		return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
	case *disk.DiskFS:
		return fs.EvictBySize(bytesNeeded, false) // false = descending (largest first)
	default:
		return 0
	}
}

// EvictLargest evicts largest files first
func EvictLargest(v vfs.VFS, bytesNeeded uint) uint {
	return EvictBySizeDesc(v, bytesNeeded)
}

// EvictSmallest evicts smallest files first
func EvictSmallest(v vfs.VFS, bytesNeeded uint) uint {
	return EvictBySizeAsc(v, bytesNeeded)
}

// EvictLFU performs LFU (Least Frequently Used) eviction
func EvictLFU(v vfs.VFS, bytesNeeded uint) uint {
	// For now, fall back to size-based eviction
	// TODO: Implement proper LFU tracking
	return EvictBySizeAsc(v, bytesNeeded)
}

// EvictHybrid implements a hybrid eviction strategy
func EvictHybrid(v vfs.VFS, bytesNeeded uint) uint {
	// For now this delegates to LRU; size-aware tiebreaking is still a TODO
	return EvictLRU(v, bytesNeeded)
}

// GetEvictionFunction returns the eviction function for the given strategy
func GetEvictionFunction(strategy EvictionStrategy) func(vfs.VFS, uint) uint {
	switch strategy {
	case StrategyLRU:
		return EvictLRU
	case StrategyLFU:
		return EvictLFU
	case StrategyFIFO:
		return EvictFIFO
	case StrategyLargest:
		return EvictLargest
	case StrategySmallest:
		return EvictSmallest
	case StrategyHybrid:
		return EvictHybrid
	default:
		return EvictLRU
	}
}
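For reference, a minimal sketch of how a caller might resolve a strategy by name and invoke it. This assumes the memory.New constructor that appears later in this diff; the 4 KiB target is arbitrary:

	package main

	import (
		"fmt"

		"s1d3sw1ped/steamcache2/vfs/eviction"
		"s1d3sw1ped/steamcache2/vfs/memory"
	)

	func main() {
		memFS := memory.New(1 << 20) // 1 MiB in-memory cache
		// Resolve the strategy name to its eviction function.
		evict := eviction.GetEvictionFunction(eviction.StrategyLRU)
		// Ask for 4 KiB back; the return value is the number of bytes actually evicted.
		freed := evict(memFS, 4096)
		fmt.Println("freed bytes:", freed)
	}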
@@ -1,51 +0,0 @@
// vfs/fileinfo.go
package vfs

import (
	"os"
	"time"
)

type FileInfo struct {
	name        string
	size        int64
	MTime       time.Time
	ATime       time.Time
	AccessCount int64 // Number of times the file has been accessed
}

func NewFileInfo(key string, size int64, modTime time.Time) *FileInfo {
	return &FileInfo{
		name:        key,
		size:        size,
		MTime:       modTime,
		ATime:       time.Now(),
		AccessCount: 0,
	}
}

func NewFileInfoFromOS(f os.FileInfo, key string) *FileInfo {
	return &FileInfo{
		name:        key,
		size:        f.Size(),
		MTime:       f.ModTime(),
		ATime:       time.Now(),
		AccessCount: 0,
	}
}

func (f FileInfo) Name() string {
	return f.name
}

func (f FileInfo) Size() int64 {
	return f.size
}

func (f FileInfo) ModTime() time.Time {
	return f.MTime
}

func (f FileInfo) AccessTime() time.Time {
	return f.ATime
}
933
vfs/gc/gc.go
@@ -2,60 +2,13 @@
package gc

import (
	"fmt"
	"context"
	"io"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"s1d3sw1ped/SteamCache2/vfs/disk"
	"s1d3sw1ped/SteamCache2/vfs/memory"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"sort"
	"s1d3sw1ped/steamcache2/vfs"
	"s1d3sw1ped/steamcache2/vfs/eviction"
	"sync"
	"sync/atomic"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	// ErrInsufficientSpace is returned when there are no files to delete in the VFS.
	ErrInsufficientSpace = fmt.Errorf("no files to delete")
)

// Prometheus metrics for adaptive promotion
var (
	promotionThresholds = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "promotion_thresholds_bytes",
			Help: "Current promotion thresholds in bytes",
		},
		[]string{"threshold_type"},
	)

	promotionWindows = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "promotion_windows_seconds",
			Help: "Current promotion time windows in seconds",
		},
		[]string{"window_type"},
	)

	promotionStats = promauto.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "promotion_stats",
			Help: "Promotion statistics",
		},
		[]string{"metric_type"},
	)

	promotionAdaptations = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "promotion_adaptations_total",
			Help: "Total number of promotion threshold adaptations",
		},
		[]string{"direction"},
	)
)

// GCAlgorithm represents different garbage collection strategies
@@ -70,677 +23,235 @@ const (
	Hybrid GCAlgorithm = "hybrid"
)

// LRUGC deletes files in LRU order until enough space is reclaimed.
func LRUGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LRU GC")
// GCFS wraps a VFS with garbage collection capabilities
type GCFS struct {
	vfs       vfs.VFS
	algorithm GCAlgorithm
	gcFunc    func(vfs.VFS, uint) uint
}

	var reclaimed uint // reclaimed space in bytes
	deleted := false
// New creates a new GCFS with the specified algorithm
func New(wrappedVFS vfs.VFS, algorithm GCAlgorithm) *GCFS {
	gcfs := &GCFS{
		vfs:       wrappedVFS,
		algorithm: algorithm,
	}

	gcfs.gcFunc = eviction.GetEvictionFunction(eviction.EvictionStrategy(algorithm))

	return gcfs
}

// GetGCAlgorithm returns the GC function for the given algorithm
func GetGCAlgorithm(algorithm GCAlgorithm) func(vfs.VFS, uint) uint {
	return eviction.GetEvictionFunction(eviction.EvictionStrategy(algorithm))
}

// Create wraps the underlying Create method
func (gc *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
	// Check if we need to GC before creating
	if gc.vfs.Size()+size > gc.vfs.Capacity() {
		needed := uint((gc.vfs.Size() + size) - gc.vfs.Capacity())
		gc.gcFunc(gc.vfs, needed)
	}

	return gc.vfs.Create(key, size)
}

// Open wraps the underlying Open method
func (gc *GCFS) Open(key string) (io.ReadCloser, error) {
	return gc.vfs.Open(key)
}

// Delete wraps the underlying Delete method
func (gc *GCFS) Delete(key string) error {
	return gc.vfs.Delete(key)
}

// Stat wraps the underlying Stat method
func (gc *GCFS) Stat(key string) (*vfs.FileInfo, error) {
	return gc.vfs.Stat(key)
}

// Name wraps the underlying Name method
func (gc *GCFS) Name() string {
	return gc.vfs.Name() + "(GC:" + string(gc.algorithm) + ")"
}

// Size wraps the underlying Size method
func (gc *GCFS) Size() int64 {
	return gc.vfs.Size()
}

// Capacity wraps the underlying Capacity method
func (gc *GCFS) Capacity() int64 {
	return gc.vfs.Capacity()
}

// EvictionStrategy defines an interface for cache eviction
type EvictionStrategy interface {
	Evict(vfs vfs.VFS, bytesNeeded uint) uint
}

// AdaptivePromotionDeciderFunc is a placeholder for the adaptive promotion logic
var AdaptivePromotionDeciderFunc = func() interface{} {
	return nil
}

// AsyncGCFS wraps a GCFS with asynchronous garbage collection capabilities
type AsyncGCFS struct {
	*GCFS
	gcQueue        chan gcRequest
	ctx            context.Context
	cancel         context.CancelFunc
	wg             sync.WaitGroup
	gcRunning      int32
	preemptive     bool
	asyncThreshold float64 // Async GC threshold as percentage of capacity (e.g., 0.8 = 80%)
	syncThreshold  float64 // Sync GC threshold as percentage of capacity (e.g., 0.95 = 95%)
	hardLimit      float64 // Hard limit threshold (e.g., 1.0 = 100%)
}

type gcRequest struct {
	bytesNeeded uint
	priority    int // Higher number = higher priority
}

// NewAsync creates a new AsyncGCFS with asynchronous garbage collection
func NewAsync(wrappedVFS vfs.VFS, algorithm GCAlgorithm, preemptive bool, asyncThreshold, syncThreshold, hardLimit float64) *AsyncGCFS {
	ctx, cancel := context.WithCancel(context.Background())

	asyncGC := &AsyncGCFS{
		GCFS:           New(wrappedVFS, algorithm),
		gcQueue:        make(chan gcRequest, 100), // Buffer for GC requests
		ctx:            ctx,
		cancel:         cancel,
		preemptive:     preemptive,
		asyncThreshold: asyncThreshold,
		syncThreshold:  syncThreshold,
		hardLimit:      hardLimit,
	}

	// Start the background GC worker
	asyncGC.wg.Add(1)
	go asyncGC.gcWorker()

	// Start preemptive GC if enabled
	if preemptive {
		asyncGC.wg.Add(1)
		go asyncGC.preemptiveGC()
	}

	return asyncGC
}

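A minimal sketch of wiring up the async wrapper, assuming the NewAsync signature above, the memory.New constructor from this diff, and the gc.LRU constant that the tests in this diff also use. The three fractions map to asyncThreshold, syncThreshold, and hardLimit:

	agc := gc.NewAsync(memory.New(1<<30), gc.LRU,
		true, // run the preemptive background sweeper
		0.80, // queue async GC once utilization passes 80%
		0.95, // fall back to synchronous GC past 95%
		1.00, // hard limit: never exceed capacity
	)
	defer agc.Stop() // cancels the context and waits for the worker goroutines

Note that if the request queue is full, Create falls back to immediate synchronous GC rather than blocking, as the method below shows.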
// Create wraps the underlying Create method with hybrid GC (async + sync hard limits)
func (agc *AsyncGCFS) Create(key string, size int64) (io.WriteCloser, error) {
	currentSize := agc.vfs.Size()
	capacity := agc.vfs.Capacity()
	projectedSize := currentSize + size

	// Calculate utilization percentages
	currentUtilization := float64(currentSize) / float64(capacity)
	projectedUtilization := float64(projectedSize) / float64(capacity)

	// Hard limit check - never exceed the hard limit
	if projectedUtilization > agc.hardLimit {
		needed := uint(projectedSize - capacity)
		// Immediate sync GC to prevent exceeding hard limit
		agc.gcFunc(agc.vfs, needed)
	} else if projectedUtilization > agc.syncThreshold {
		// Near hard limit - do immediate sync GC
		needed := uint(projectedSize - int64(float64(capacity)*agc.syncThreshold))
		agc.gcFunc(agc.vfs, needed)
	} else if currentUtilization > agc.asyncThreshold {
		// Above async threshold - queue for async GC
		needed := uint(projectedSize - int64(float64(capacity)*agc.asyncThreshold))
		select {
		case agc.gcQueue <- gcRequest{bytesNeeded: needed, priority: 2}:
		default:
			// Queue full, do immediate GC
			agc.gcFunc(agc.vfs, needed)
		}
	}

	return agc.vfs.Create(key, size)
}

// gcWorker processes GC requests asynchronously
func (agc *AsyncGCFS) gcWorker() {
	defer agc.wg.Done()

	ticker := time.NewTicker(100 * time.Millisecond) // Check every 100ms
	defer ticker.Stop()

	for {
		switch fs := vfss.(type) {
		case *disk.DiskFS:
			fi := fs.LRU.Back()
			if fi == nil {
				if deleted {
					logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
					return nil
		select {
		case <-agc.ctx.Done():
			return
		case req := <-agc.gcQueue:
			atomic.StoreInt32(&agc.gcRunning, 1)
			agc.gcFunc(agc.vfs, req.bytesNeeded)
			atomic.StoreInt32(&agc.gcRunning, 0)
		case <-ticker.C:
			// Process any pending GC requests
			select {
			case req := <-agc.gcQueue:
				atomic.StoreInt32(&agc.gcRunning, 1)
				agc.gcFunc(agc.vfs, req.bytesNeeded)
				atomic.StoreInt32(&agc.gcRunning, 0)
			default:
				// No pending requests
			}
		}
	}
}

// preemptiveGC runs background GC to keep cache utilization below threshold
func (agc *AsyncGCFS) preemptiveGC() {
	defer agc.wg.Done()

	ticker := time.NewTicker(5 * time.Second) // Check every 5 seconds
	defer ticker.Stop()

	for {
		select {
		case <-agc.ctx.Done():
			return
		case <-ticker.C:
			currentSize := agc.vfs.Size()
			capacity := agc.vfs.Capacity()
			currentUtilization := float64(currentSize) / float64(capacity)

			// Check if we're above the async threshold
			if currentUtilization > agc.asyncThreshold {
				// Calculate how much to free to get back to async threshold
				targetSize := int64(float64(capacity) * agc.asyncThreshold)
				if currentSize > targetSize {
					overage := currentSize - targetSize
					select {
					case agc.gcQueue <- gcRequest{bytesNeeded: uint(overage), priority: 0}:
					default:
						// Queue full, skip this round
					}
				}
				return ErrInsufficientSpace // No files to delete
			}
			sz := uint(fi.Size())
			err := fs.Delete(fi.Name())
			if err != nil {
				continue // If delete fails, try the next file
			}
			reclaimed += sz
			deleted = true
		case *memory.MemoryFS:
			fi := fs.LRU.Back()
			if fi == nil {
				if deleted {
					logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
					return nil
				}
				return ErrInsufficientSpace // No files to delete
			}
			sz := uint(fi.Size())
			err := fs.Delete(fi.Name())
			if err != nil {
				continue // If delete fails, try the next file
			}
			reclaimed += sz
			deleted = true
		default:
			panic("unreachable: unsupported VFS type for LRU GC") // panic if the VFS is not disk or memory
		}

		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LRU GC (at least one file deleted)")
			return nil // stop if enough space is reclaimed or at least one file deleted for size==0
		}
	}
}

// LFUGC deletes files in LFU (Least Frequently Used) order until enough space is reclaimed.
func LFUGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using LFU GC")

	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	sort.Slice(files, func(i, j int) bool {
		return files[i].AccessCount < files[j].AccessCount
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
			return nil
		}
	}

	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using LFU GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
// Stop stops the async GC workers
func (agc *AsyncGCFS) Stop() {
	agc.cancel()
	agc.wg.Wait()
}

// FIFOGC deletes files in FIFO (First In, First Out) order until enough space is reclaimed.
func FIFOGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using FIFO GC")

	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	sort.Slice(files, func(i, j int) bool {
		return files[i].MTime.Before(files[j].MTime)
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
			return nil
		}
	}

	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using FIFO GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
// IsGCRunning returns true if GC is currently running
func (agc *AsyncGCFS) IsGCRunning() bool {
	return atomic.LoadInt32(&agc.gcRunning) == 1
}

// LargestGC deletes the largest files first until enough space is reclaimed.
func LargestGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Largest GC")

	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	sort.Slice(files, func(i, j int) bool {
		return files[i].Size > files[j].Size
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
			return nil
		}
	}

	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Largest GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
}

// SmallestGC deletes the smallest files first until enough space is reclaimed.
func SmallestGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Smallest GC")

	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	sort.Slice(files, func(i, j int) bool {
		return files[i].Size < files[j].Size
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
			return nil
		}
	}

	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Smallest GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
}

// HybridGC combines LRU and size-based eviction with a scoring system.
func HybridGC(vfss vfs.VFS, size uint) error {
	logger.Logger.Debug().Uint("target", size).Msg("Attempting to reclaim space using Hybrid GC")

	files := getAllFiles(vfss)
	if len(files) == 0 {
		return ErrInsufficientSpace
	}

	now := time.Now()
	for i := range files {
		timeSinceAccess := now.Sub(files[i].ATime).Seconds()
		sizeMB := float64(files[i].Size) / (1024 * 1024)
		files[i].HybridScore = timeSinceAccess * sizeMB
	}

	sort.Slice(files, func(i, j int) bool {
		return files[i].HybridScore < files[j].HybridScore
	})

	var reclaimed uint
	deleted := false
	for _, fi := range files {
		err := vfss.Delete(fi.Name)
		if err != nil {
			continue
		}
		reclaimed += uint(fi.Size)
		deleted = true
		if deleted && (size == 0 || reclaimed >= size) {
			logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
			return nil
		}
	}

	if deleted {
		logger.Logger.Debug().Uint("target", size).Uint("achieved", reclaimed).Msg("Reclaimed enough space using Hybrid GC (at least one file deleted)")
		return nil
	}
	return ErrInsufficientSpace
}

// fileInfoWithMetadata extends FileInfo with additional metadata for GC algorithms
type fileInfoWithMetadata struct {
	Name        string
	Size        int64
	MTime       time.Time
	ATime       time.Time
	AccessCount int64
	HybridScore float64
}

// getAllFiles retrieves all files from the VFS with additional metadata
func getAllFiles(vfss vfs.VFS) []fileInfoWithMetadata {
	var files []fileInfoWithMetadata

	switch fs := vfss.(type) {
	case *disk.DiskFS:
		allFiles := fs.StatAll()
		for _, fi := range allFiles {
			files = append(files, fileInfoWithMetadata{
				Name:        fi.Name(),
				Size:        fi.Size(),
				MTime:       fi.ModTime(),
				ATime:       fi.AccessTime(),
				AccessCount: fi.AccessCount,
			})
		}
	case *memory.MemoryFS:
		allFiles := fs.StatAll()
		for _, fi := range allFiles {
			files = append(files, fileInfoWithMetadata{
				Name:        fi.Name(),
				Size:        fi.Size(),
				MTime:       fi.ModTime(),
				ATime:       fi.AccessTime(),
				AccessCount: fi.AccessCount,
			})
		}
	}

	return files
}

// GetGCAlgorithm returns the appropriate GC function based on the algorithm name
func GetGCAlgorithm(algorithm GCAlgorithm) GCHandlerFunc {
	switch algorithm {
	case LRU:
		return LRUGC
	case LFU:
		return LFUGC
	case FIFO:
		return FIFOGC
	case Largest:
		return LargestGC
	case Smallest:
		return SmallestGC
	case Hybrid:
		return HybridGC
	default:
		logger.Logger.Warn().Str("algorithm", string(algorithm)).Msg("Unknown GC algorithm, falling back to LRU")
		return LRUGC
	}
}

func PromotionDecider(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
	return time.Since(fi.AccessTime()) < time.Second*60 // Put hot files in the fast vfs if equipped
}

// AdaptivePromotionDecider automatically adjusts promotion thresholds based on usage patterns
type AdaptivePromotionDecider struct {
	mu sync.RWMutex

	// Current thresholds
	smallFileThreshold  int64         // Size threshold for small files
	mediumFileThreshold int64         // Size threshold for medium files
	largeFileThreshold  int64         // Size threshold for large files
	smallFileWindow     time.Duration // Time window for small files
	mediumFileWindow    time.Duration // Time window for medium files
	largeFileWindow     time.Duration // Time window for large files

	// Statistics for adaptation
	promotionAttempts   int64
	promotionSuccesses  int64
	fastStorageHits     int64
	fastStorageAccesses int64
	lastAdaptation      time.Time

	// Target metrics
	targetHitRate       float64 // Target hit rate for fast storage
	targetPromotionRate float64 // Target promotion success rate
	adaptationInterval  time.Duration
}

// NewAdaptivePromotionDecider creates a new adaptive promotion decider
func NewAdaptivePromotionDecider() *AdaptivePromotionDecider {
	apd := &AdaptivePromotionDecider{
		// Initial thresholds
		smallFileThreshold:  10 * 1024 * 1024,  // 10MB
		mediumFileThreshold: 100 * 1024 * 1024, // 100MB
		largeFileThreshold:  500 * 1024 * 1024, // 500MB
		smallFileWindow:     10 * time.Minute,
		mediumFileWindow:    2 * time.Minute,
		largeFileWindow:     30 * time.Second,

		// Target metrics
		targetHitRate:       0.8, // 80% hit rate
		targetPromotionRate: 0.7, // 70% promotion success rate
		adaptationInterval:  5 * time.Minute,
	}

	// Initialize Prometheus metrics
	apd.updatePrometheusMetrics()

	return apd
}

// ShouldPromote determines if a file should be promoted based on adaptive thresholds
func (apd *AdaptivePromotionDecider) ShouldPromote(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
	apd.mu.Lock()
	defer apd.mu.Unlock()

	// Check if it's time to adapt thresholds
	if time.Since(apd.lastAdaptation) > apd.adaptationInterval {
		apd.adaptThresholds()
	}

	size := fi.Size()
	timeSinceAccess := time.Since(fi.AccessTime())

	// Record promotion attempt
	apd.promotionAttempts++

	var shouldPromote bool

	// Small files: Promote if accessed recently
	if size < apd.smallFileThreshold {
		shouldPromote = timeSinceAccess < apd.smallFileWindow
	} else if size < apd.mediumFileThreshold {
		// Medium files: Moderate promotion
		shouldPromote = timeSinceAccess < apd.mediumFileWindow
	} else if size < apd.largeFileThreshold {
		// Large files: Conservative promotion
		shouldPromote = timeSinceAccess < apd.largeFileWindow
	} else {
		// Huge files: Don't promote
		shouldPromote = false
	}

	// Record promotion decision
	if shouldPromote {
		apd.promotionSuccesses++
	}

	// Update Prometheus metrics periodically (every 10 attempts to avoid overhead)
	if apd.promotionAttempts%10 == 0 {
		apd.updatePrometheusMetrics()
	}

	return shouldPromote
}

// RecordFastStorageAccess records when fast storage is accessed
func (apd *AdaptivePromotionDecider) RecordFastStorageAccess() {
	apd.mu.Lock()
	defer apd.mu.Unlock()
	apd.fastStorageAccesses++

	// Update Prometheus metrics periodically
	if apd.fastStorageAccesses%10 == 0 {
		apd.updatePrometheusMetrics()
	}
}

// RecordFastStorageHit records when fast storage has a hit
func (apd *AdaptivePromotionDecider) RecordFastStorageHit() {
	apd.mu.Lock()
	defer apd.mu.Unlock()
	apd.fastStorageHits++

	// Update Prometheus metrics periodically
	if apd.fastStorageHits%10 == 0 {
		apd.updatePrometheusMetrics()
	}
}

// adaptThresholds adjusts thresholds based on current performance
func (apd *AdaptivePromotionDecider) adaptThresholds() {
	if apd.promotionAttempts < 10 || apd.fastStorageAccesses < 10 {
		// Not enough data to adapt
		return
	}

	currentHitRate := float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
	currentPromotionRate := float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)

	logger.Logger.Debug().
		Float64("hit_rate", currentHitRate).
		Float64("promotion_rate", currentPromotionRate).
		Float64("target_hit_rate", apd.targetHitRate).
		Float64("target_promotion_rate", apd.targetPromotionRate).
		Msg("Adapting promotion thresholds")

	// Adjust based on hit rate
	if currentHitRate < apd.targetHitRate {
		// Hit rate too low - be more aggressive with promotion
		apd.adjustThresholdsMoreAggressive()
	} else if currentHitRate > apd.targetHitRate+0.1 {
		// Hit rate too high - be more conservative
		apd.adjustThresholdsMoreConservative()
	}

	// Adjust based on promotion success rate
	if currentPromotionRate < apd.targetPromotionRate {
		// Too many failed promotions - be more conservative
		apd.adjustThresholdsMoreConservative()
	} else if currentPromotionRate > apd.targetPromotionRate+0.1 {
		// High promotion success - can be more aggressive
		apd.adjustThresholdsMoreAggressive()
	}

	// Reset counters for next adaptation period
	apd.promotionAttempts = 0
	apd.promotionSuccesses = 0
	apd.fastStorageHits = 0
	apd.fastStorageAccesses = 0
	apd.lastAdaptation = time.Now()

	logger.Logger.Info().
		Int64("small_threshold_mb", apd.smallFileThreshold/(1024*1024)).
		Int64("medium_threshold_mb", apd.mediumFileThreshold/(1024*1024)).
		Int64("large_threshold_mb", apd.largeFileThreshold/(1024*1024)).
		Dur("small_window", apd.smallFileWindow).
		Dur("medium_window", apd.mediumFileWindow).
		Dur("large_window", apd.largeFileWindow).
		Msg("Updated promotion thresholds")
}

// updatePrometheusMetrics updates all Prometheus metrics with current values
func (apd *AdaptivePromotionDecider) updatePrometheusMetrics() {
	// Update threshold metrics
	promotionThresholds.WithLabelValues("small").Set(float64(apd.smallFileThreshold))
	promotionThresholds.WithLabelValues("medium").Set(float64(apd.mediumFileThreshold))
	promotionThresholds.WithLabelValues("large").Set(float64(apd.largeFileThreshold))

	// Update window metrics
	promotionWindows.WithLabelValues("small").Set(apd.smallFileWindow.Seconds())
	promotionWindows.WithLabelValues("medium").Set(apd.mediumFileWindow.Seconds())
	promotionWindows.WithLabelValues("large").Set(apd.largeFileWindow.Seconds())

	// Update statistics metrics
	hitRate := 0.0
	if apd.fastStorageAccesses > 0 {
		hitRate = float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
	}
	promotionRate := 0.0
	if apd.promotionAttempts > 0 {
		promotionRate = float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
	}

	promotionStats.WithLabelValues("hit_rate").Set(hitRate)
	promotionStats.WithLabelValues("promotion_rate").Set(promotionRate)
	promotionStats.WithLabelValues("promotion_attempts").Set(float64(apd.promotionAttempts))
	promotionStats.WithLabelValues("promotion_successes").Set(float64(apd.promotionSuccesses))
	promotionStats.WithLabelValues("fast_storage_accesses").Set(float64(apd.fastStorageAccesses))
	promotionStats.WithLabelValues("fast_storage_hits").Set(float64(apd.fastStorageHits))
}

// adjustThresholdsMoreAggressive makes promotion more aggressive
func (apd *AdaptivePromotionDecider) adjustThresholdsMoreAggressive() {
	// Increase size thresholds (promote larger files)
	apd.smallFileThreshold = minInt64(apd.smallFileThreshold*11/10, 50*1024*1024)    // Max 50MB
	apd.mediumFileThreshold = minInt64(apd.mediumFileThreshold*11/10, 200*1024*1024) // Max 200MB
	apd.largeFileThreshold = minInt64(apd.largeFileThreshold*11/10, 1000*1024*1024)  // Max 1GB

	// Increase time windows (promote older files)
	apd.smallFileWindow = minDuration(apd.smallFileWindow*11/10, 20*time.Minute)
	apd.mediumFileWindow = minDuration(apd.mediumFileWindow*11/10, 5*time.Minute)
	apd.largeFileWindow = minDuration(apd.largeFileWindow*11/10, 2*time.Minute)

	// Record adaptation in Prometheus
	promotionAdaptations.WithLabelValues("aggressive").Inc()

	// Update Prometheus metrics
	apd.updatePrometheusMetrics()
}

// adjustThresholdsMoreConservative makes promotion more conservative
func (apd *AdaptivePromotionDecider) adjustThresholdsMoreConservative() {
	// Decrease size thresholds (promote smaller files)
	apd.smallFileThreshold = maxInt64(apd.smallFileThreshold*9/10, 5*1024*1024)     // Min 5MB
	apd.mediumFileThreshold = maxInt64(apd.mediumFileThreshold*9/10, 50*1024*1024)  // Min 50MB
	apd.largeFileThreshold = maxInt64(apd.largeFileThreshold*9/10, 200*1024*1024)   // Min 200MB

	// Decrease time windows (promote only recent files)
	apd.smallFileWindow = maxDuration(apd.smallFileWindow*9/10, 5*time.Minute)
	apd.mediumFileWindow = maxDuration(apd.mediumFileWindow*9/10, 1*time.Minute)
	apd.largeFileWindow = maxDuration(apd.largeFileWindow*9/10, 15*time.Second)

	// Record adaptation in Prometheus
	promotionAdaptations.WithLabelValues("conservative").Inc()

	// Update Prometheus metrics
	apd.updatePrometheusMetrics()
}

// GetStats returns current statistics for monitoring
func (apd *AdaptivePromotionDecider) GetStats() map[string]interface{} {
	apd.mu.RLock()
	defer apd.mu.RUnlock()

	hitRate := 0.0
	if apd.fastStorageAccesses > 0 {
		hitRate = float64(apd.fastStorageHits) / float64(apd.fastStorageAccesses)
	}

	promotionRate := 0.0
	if apd.promotionAttempts > 0 {
		promotionRate = float64(apd.promotionSuccesses) / float64(apd.promotionAttempts)
	}

	return map[string]interface{}{
		"small_file_threshold_mb":    apd.smallFileThreshold / (1024 * 1024),
		"medium_file_threshold_mb":   apd.mediumFileThreshold / (1024 * 1024),
		"large_file_threshold_mb":    apd.largeFileThreshold / (1024 * 1024),
		"small_file_window_minutes":  apd.smallFileWindow.Minutes(),
		"medium_file_window_minutes": apd.mediumFileWindow.Minutes(),
		"large_file_window_seconds":  apd.largeFileWindow.Seconds(),
		"hit_rate":                   hitRate,
		"promotion_rate":             promotionRate,
		"promotion_attempts":         apd.promotionAttempts,
		"promotion_successes":        apd.promotionSuccesses,
		"fast_storage_accesses":      apd.fastStorageAccesses,
		"fast_storage_hits":          apd.fastStorageHits,
	}
}

// Global adaptive promotion decider instance
var adaptivePromotionDecider *AdaptivePromotionDecider

func init() {
	adaptivePromotionDecider = NewAdaptivePromotionDecider()
}

// AdaptivePromotionDeciderFunc returns the adaptive promotion decision function
func AdaptivePromotionDeciderFunc(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
	return adaptivePromotionDecider.ShouldPromote(fi, cs)
}

// RecordFastStorageAccess records fast storage access for adaptation
func RecordFastStorageAccess() {
	adaptivePromotionDecider.RecordFastStorageAccess()
}

// RecordFastStorageHit records fast storage hit for adaptation
func RecordFastStorageHit() {
	adaptivePromotionDecider.RecordFastStorageHit()
}

// GetPromotionStats returns promotion statistics for monitoring
func GetPromotionStats() map[string]interface{} {
	return adaptivePromotionDecider.GetStats()
}

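A sketch of how these package-level wrappers are meant to be driven by a tiered cache. The helper below is hypothetical; fi and cs stand for whatever FileInfo and CacheState the caller already tracks:

	// promoteIfHot is a hypothetical caller-side helper, not part of this diff.
	func promoteIfHot(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
		gc.RecordFastStorageAccess() // every fast-tier lookup feeds the adaptation loop
		if gc.AdaptivePromotionDeciderFunc(fi, cs) {
			// the caller would copy the file from the disk tier to memory here,
			// and call gc.RecordFastStorageHit() when a later lookup lands in memory
			return true
		}
		return false
	}

Feeding both counters is what lets adaptThresholds steer the measured hit rate toward targetHitRate.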
// Helper functions for min/max operations
func minInt64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func maxInt64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func minDuration(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func maxDuration(a, b time.Duration) time.Duration {
	if a > b {
		return a
	}
	return b
}

// Ensure GCFS implements VFS.
var _ vfs.VFS = (*GCFS)(nil)

// GCFS is a virtual file system that calls a GC handler when the disk is full. The GC handler is responsible for freeing up space on the disk. The GCFS is a wrapper around another VFS.
type GCFS struct {
	vfs.VFS

	gcHanderFunc GCHandlerFunc
}

// GCHandlerFunc is a function that is called when the disk is full and the GCFS needs to free up space. It is passed the VFS and the size of the file that needs to be written. It's up to the implementation to free up space. How much space is freed is also up to the implementation.
type GCHandlerFunc func(vfs vfs.VFS, size uint) error

func New(vfs vfs.VFS, gcHandlerFunc GCHandlerFunc) *GCFS {
	return &GCFS{
		VFS:          vfs,
		gcHanderFunc: gcHandlerFunc,
	}
}

// Create overrides the Create method of the VFS interface. It tries to create the key; if it fails due to a disk full error, it calls the GC handler and tries again. If it still fails, it returns the error.
func (g *GCFS) Create(key string, size int64) (io.WriteCloser, error) {
	w, err := g.VFS.Create(key, size) // try to create the key
	for err == vfserror.ErrDiskFull && g.gcHanderFunc != nil {
		errGC := g.gcHanderFunc(g.VFS, uint(size)) // call the GC handler
		if errGC == ErrInsufficientSpace {
			return nil, errGC // if the GC handler returns no files to delete, return the error
		}
		w, err = g.VFS.Create(key, size)
		if err == vfserror.ErrDiskFull {
			// GC handler did not free enough space, avoid infinite loop
			return nil, ErrInsufficientSpace
		}
	}

	if err != nil {
		if err == vfserror.ErrDiskFull {
			logger.Logger.Error().Str("key", key).Int64("size", size).Msg("Failed to create file due to disk full, even after GC")
		} else {
			logger.Logger.Error().Str("key", key).Int64("size", size).Err(err).Msg("Failed to create file")
		}
	}

	return w, err
}

func (g *GCFS) Name() string {
	return fmt.Sprintf("GCFS(%s)", g.VFS.Name()) // wrap the name of the VFS with GCFS so we can see that it's a GCFS
}
// ForceGC forces immediate garbage collection to free the specified number of bytes
func (agc *AsyncGCFS) ForceGC(bytesNeeded uint) {
	agc.gcFunc(agc.vfs, bytesNeeded)
}

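Taken together, a sketch of the synchronous wrapper in use. This assumes the new New(wrappedVFS, algorithm) constructor and the gc.LRU constant kept by this diff; whether the second write succeeds depends on the wrapped VFS having something evictable:

	cache := gc.New(memory.New(64), gc.LRU) // 64-byte cache for illustration

	w, _ := cache.Create("steam/chunk-a", 48)
	w.Write(make([]byte, 48))
	w.Close()

	// 48 + 48 > 64, so Create asks the eviction function to reclaim the
	// shortfall before delegating to the wrapped VFS.
	w, err := cache.Create("steam/chunk-b", 48)
	if err != nil {
		panic(err) // nothing evictable would surface a disk-full error
	}
	w.Write(make([]byte, 48))
	w.Close()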
@@ -1,42 +0,0 @@
// vfs/gc/gc_test.go
package gc

import (
	"testing"
)

func TestGetGCAlgorithm(t *testing.T) {
	tests := []struct {
		name      string
		algorithm GCAlgorithm
		expected  bool // true if we expect a non-nil function
	}{
		{"LRU", LRU, true},
		{"LFU", LFU, true},
		{"FIFO", FIFO, true},
		{"Largest", Largest, true},
		{"Smallest", Smallest, true},
		{"Hybrid", Hybrid, true},
		{"Unknown", "unknown", true}, // should fall back to LRU
		{"Empty", "", true},          // should fall back to LRU
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fn := GetGCAlgorithm(tt.algorithm)
			if fn == nil {
				t.Errorf("GetGCAlgorithm(%s) returned nil, expected non-nil function", tt.algorithm)
			}
		})
	}
}

func TestGCAlgorithmConstants(t *testing.T) {
	expectedAlgorithms := []GCAlgorithm{LRU, LFU, FIFO, Largest, Smallest, Hybrid}

	for _, algo := range expectedAlgorithms {
		if algo == "" {
			t.Errorf("GC algorithm constant is empty")
		}
	}
}
28
vfs/locks/sharding.go
Normal file
@@ -0,0 +1,28 @@
package locks

import (
	"sync"
)

// Number of lock shards for reducing contention
const NumLockShards = 32

// GetShardIndex returns the shard index for a given key using FNV-1a hash
func GetShardIndex(key string) int {
	// Use FNV-1a hash for good distribution
	var h uint32 = 2166136261 // FNV offset basis
	for i := 0; i < len(key); i++ {
		h ^= uint32(key[i])
		h *= 16777619 // FNV prime
	}
	return int(h % NumLockShards)
}

// GetKeyLock returns a lock for the given key using sharding
func GetKeyLock(keyLocks []sync.Map, key string) *sync.RWMutex {
	shardIndex := GetShardIndex(key)
	shard := &keyLocks[shardIndex]

	keyLock, _ := shard.LoadOrStore(key, &sync.RWMutex{})
	return keyLock.(*sync.RWMutex)
}
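A small usage sketch: the caller owns the shard slice and hands it to GetKeyLock, so keys that hash to different shards never touch the same sync.Map:

	package main

	import (
		"sync"

		"s1d3sw1ped/steamcache2/vfs/locks"
	)

	func main() {
		keyLocks := make([]sync.Map, locks.NumLockShards)

		mu := locks.GetKeyLock(keyLocks, "steam/depot/1234")
		mu.Lock()
		// ... per-key critical section ...
		mu.Unlock()
	}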
66
vfs/lru/lru.go
Normal file
@@ -0,0 +1,66 @@
package lru

import (
	"container/list"
	"s1d3sw1ped/steamcache2/vfs/types"
)

// LRUList represents a least recently used list for cache eviction
type LRUList[T any] struct {
	list *list.List
	elem map[string]*list.Element
}

// NewLRUList creates a new LRU list
func NewLRUList[T any]() *LRUList[T] {
	return &LRUList[T]{
		list: list.New(),
		elem: make(map[string]*list.Element),
	}
}

// Add adds an item to the front of the LRU list
func (l *LRUList[T]) Add(key string, item T) {
	elem := l.list.PushFront(item)
	l.elem[key] = elem
}

// MoveToFront moves an item to the front of the LRU list
func (l *LRUList[T]) MoveToFront(key string, timeUpdater *types.BatchedTimeUpdate) {
	if elem, exists := l.elem[key]; exists {
		l.list.MoveToFront(elem)
		// Update the FileInfo in the element with new access time
		if fi, ok := any(elem.Value).(interface {
			UpdateAccessBatched(*types.BatchedTimeUpdate)
		}); ok {
			fi.UpdateAccessBatched(timeUpdater)
		}
	}
}

// Remove removes an item from the LRU list
func (l *LRUList[T]) Remove(key string) (T, bool) {
	if elem, exists := l.elem[key]; exists {
		delete(l.elem, key)
		if item, ok := l.list.Remove(elem).(T); ok {
			return item, true
		}
	}
	var zero T
	return zero, false
}

// Len returns the number of items in the LRU list
func (l *LRUList[T]) Len() int {
	return l.list.Len()
}

// Back returns the least recently used item (at the back of the list)
func (l *LRUList[T]) Back() *list.Element {
	return l.list.Back()
}

// Front returns the most recently used item (at the front of the list)
func (l *LRUList[T]) Front() *list.Element {
	return l.list.Front()
}
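A sketch of the list in use, assuming the types.NewFileInfo constructor, the Key field, and types.NewBatchedTimeUpdate that appear elsewhere in this diff:

	l := lru.NewLRUList[*types.FileInfo]()
	updater := types.NewBatchedTimeUpdate(100 * time.Millisecond)

	l.Add("steam/chunk", types.NewFileInfo("steam/chunk", 1024))
	l.MoveToFront("steam/chunk", updater) // touch on access

	if elem := l.Back(); elem != nil {
		coldest := elem.Value.(*types.FileInfo) // eviction candidate
		l.Remove(coldest.Key)
	}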
@@ -3,276 +3,428 @@ package memory
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/list"
|
||||
"io"
|
||||
"s1d3sw1ped/SteamCache2/steamcache/logger"
|
||||
"s1d3sw1ped/SteamCache2/vfs"
|
||||
"s1d3sw1ped/SteamCache2/vfs/vfserror"
|
||||
"s1d3sw1ped/steamcache2/vfs"
|
||||
"s1d3sw1ped/steamcache2/vfs/locks"
|
||||
"s1d3sw1ped/steamcache2/vfs/lru"
|
||||
"s1d3sw1ped/steamcache2/vfs/types"
|
||||
"s1d3sw1ped/steamcache2/vfs/vfserror"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
memoryCapacityBytes = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "memory_cache_capacity_bytes",
|
||||
Help: "Total capacity of the memory cache in bytes",
|
||||
},
|
||||
)
|
||||
|
||||
memorySizeBytes = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "memory_cache_size_bytes",
|
||||
Help: "Total size of the memory cache in bytes",
|
||||
},
|
||||
)
|
||||
|
||||
memoryReadBytes = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "memory_cache_read_bytes_total",
|
||||
Help: "Total number of bytes read from the memory cache",
|
||||
},
|
||||
)
|
||||
|
||||
memoryWriteBytes = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "memory_cache_write_bytes_total",
|
||||
Help: "Total number of bytes written to the memory cache",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
// Ensure MemoryFS implements VFS.
|
||||
var _ vfs.VFS = (*MemoryFS)(nil)
|
||||
|
||||
// file represents a file in memory.
|
||||
type file struct {
|
||||
fileinfo *vfs.FileInfo
|
||||
data []byte
|
||||
}
|
||||
|
||||
// MemoryFS is a virtual file system that stores files in memory.
|
||||
// MemoryFS is an in-memory virtual file system
|
||||
type MemoryFS struct {
|
||||
files map[string]*file
|
||||
capacity int64
|
||||
size int64
|
||||
mu sync.RWMutex
|
||||
keyLocks sync.Map // map[string]*sync.RWMutex
|
||||
LRU *lruList
|
||||
data map[string]*bytes.Buffer
|
||||
info map[string]*types.FileInfo
|
||||
capacity int64
|
||||
size int64
|
||||
mu sync.RWMutex
|
||||
keyLocks []sync.Map // Sharded lock pools for better concurrency
|
||||
LRU *lru.LRUList[*types.FileInfo]
|
||||
timeUpdater *types.BatchedTimeUpdate // Batched time updates for better performance
|
||||
}
|
||||
|
||||
// lruList for LRU eviction
|
||||
type lruList struct {
|
||||
list *list.List
|
||||
elem map[string]*list.Element
|
||||
}
|
||||
|
||||
func newLruList() *lruList {
|
||||
return &lruList{
|
||||
list: list.New(),
|
||||
elem: make(map[string]*list.Element),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) MoveToFront(key string) {
|
||||
if e, ok := l.elem[key]; ok {
|
||||
l.list.MoveToFront(e)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) Add(key string, fi *vfs.FileInfo) *list.Element {
|
||||
e := l.list.PushFront(fi)
|
||||
l.elem[key] = e
|
||||
return e
|
||||
}
|
||||
|
||||
func (l *lruList) Remove(key string) {
|
||||
if e, ok := l.elem[key]; ok {
|
||||
l.list.Remove(e)
|
||||
delete(l.elem, key)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lruList) Back() *vfs.FileInfo {
|
||||
if e := l.list.Back(); e != nil {
|
||||
return e.Value.(*vfs.FileInfo)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// New creates a new MemoryFS.
|
||||
// New creates a new MemoryFS
|
||||
func New(capacity int64) *MemoryFS {
|
||||
if capacity <= 0 {
|
||||
panic("memory capacity must be greater than 0") // panic if the capacity is less than or equal to 0
|
||||
panic("memory capacity must be greater than 0")
|
||||
}
|
||||
|
||||
logger.Logger.Info().
|
||||
Str("name", "MemoryFS").
|
||||
Str("capacity", units.HumanSize(float64(capacity))).
|
||||
Msg("init")
|
||||
// Initialize sharded locks
|
||||
keyLocks := make([]sync.Map, locks.NumLockShards)
|
||||
|
||||
mfs := &MemoryFS{
|
||||
files: make(map[string]*file),
|
||||
capacity: capacity,
|
||||
mu: sync.RWMutex{},
|
||||
keyLocks: sync.Map{},
|
||||
LRU: newLruList(),
|
||||
return &MemoryFS{
|
||||
data: make(map[string]*bytes.Buffer),
|
||||
info: make(map[string]*types.FileInfo),
|
||||
capacity: capacity,
|
||||
size: 0,
|
||||
keyLocks: keyLocks,
|
||||
LRU: lru.NewLRUList[*types.FileInfo](),
|
||||
timeUpdater: types.NewBatchedTimeUpdate(100 * time.Millisecond), // Update time every 100ms
|
||||
}
|
||||
|
||||
memoryCapacityBytes.Set(float64(capacity))
|
||||
memorySizeBytes.Set(float64(mfs.Size()))
|
||||
|
||||
return mfs
|
||||
}
|
||||
|
||||
func (m *MemoryFS) Capacity() int64 {
|
||||
return m.capacity
|
||||
}
|
||||
|
||||
// Name returns the name of this VFS
|
||||
func (m *MemoryFS) Name() string {
|
||||
return "MemoryFS"
|
||||
}
|
||||
|
||||
// Size returns the current size
|
||||
func (m *MemoryFS) Size() int64 {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.size
|
||||
}
|
||||
|
||||
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
|
||||
mu, _ := m.keyLocks.LoadOrStore(key, &sync.RWMutex{})
|
||||
return mu.(*sync.RWMutex)
|
||||
// Capacity returns the maximum capacity
|
||||
func (m *MemoryFS) Capacity() int64 {
|
||||
return m.capacity
|
||||
}
|
||||
|
||||
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
// GetFragmentationStats returns memory fragmentation statistics
|
||||
func (m *MemoryFS) GetFragmentationStats() map[string]interface{} {
|
||||
m.mu.RLock()
|
||||
if m.capacity > 0 {
|
||||
if m.size+size > m.capacity {
|
||||
m.mu.RUnlock()
|
||||
return nil, vfserror.ErrDiskFull
|
||||
}
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
var totalCapacity int64
|
||||
var totalUsed int64
|
||||
var bufferCount int
|
||||
|
||||
for _, buffer := range m.data {
|
||||
totalCapacity += int64(buffer.Cap())
|
||||
totalUsed += int64(buffer.Len())
|
||||
bufferCount++
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
keyMu := m.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
fragmentationRatio := float64(0)
|
||||
if totalCapacity > 0 {
|
||||
fragmentationRatio = float64(totalCapacity-totalUsed) / float64(totalCapacity)
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
return &memWriteCloser{
|
||||
Writer: buf,
|
||||
onClose: func() error {
|
||||
data := buf.Bytes()
|
||||
m.mu.Lock()
|
||||
var accessCount int64 = 0
|
||||
if f, exists := m.files[key]; exists {
|
||||
m.size -= int64(len(f.data))
|
||||
m.LRU.Remove(key)
|
||||
accessCount = f.fileinfo.AccessCount // preserve access count if overwriting
|
||||
}
|
||||
fi := vfs.NewFileInfo(key, int64(len(data)), time.Now())
|
||||
fi.AccessCount = accessCount
|
||||
m.files[key] = &file{
|
||||
fileinfo: fi,
|
||||
data: data,
|
||||
}
|
||||
m.LRU.Add(key, fi)
|
||||
m.size += int64(len(data))
|
||||
m.mu.Unlock()
|
||||
|
||||
memoryWriteBytes.Add(float64(len(data)))
|
||||
memorySizeBytes.Set(float64(m.Size()))
|
||||
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
return map[string]interface{}{
|
||||
"buffer_count": bufferCount,
|
||||
"total_capacity": totalCapacity,
|
||||
"total_used": totalUsed,
|
||||
"fragmentation_ratio": fragmentationRatio,
|
||||
"average_buffer_size": float64(totalUsed) / float64(bufferCount),
|
||||
}
|
||||
}
|
||||
|
||||
type memWriteCloser struct {
|
||||
io.Writer
|
||||
onClose func() error
|
||||
// getKeyLock returns a lock for the given key using sharding
|
||||
func (m *MemoryFS) getKeyLock(key string) *sync.RWMutex {
|
||||
return locks.GetKeyLock(m.keyLocks, key)
|
||||
}
|
||||
|
||||
func (wc *memWriteCloser) Close() error {
|
||||
return wc.onClose()
|
||||
}
|
||||
// Create creates a new file
|
||||
func (m *MemoryFS) Create(key string, size int64) (io.WriteCloser, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
// Sanitize key to prevent path traversal
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
func (m *MemoryFS) Delete(key string) error {
|
||||
keyMu := m.getKeyLock(key)
|
||||
keyMu.Lock()
|
||||
defer keyMu.Unlock()
|
||||
|
||||
m.mu.Lock()
|
||||
f, exists := m.files[key]
|
||||
// Check if file already exists and handle overwrite
|
||||
if fi, exists := m.info[key]; exists {
|
||||
m.size -= fi.Size
|
||||
m.LRU.Remove(key)
|
||||
delete(m.info, key)
|
||||
delete(m.data, key)
|
||||
}
|
||||
|
||||
buffer := &bytes.Buffer{}
|
||||
m.data[key] = buffer
|
||||
fi := types.NewFileInfo(key, size)
|
||||
m.info[key] = fi
|
||||
m.LRU.Add(key, fi)
|
||||
// Initialize access time with current time
|
||||
fi.UpdateAccessBatched(m.timeUpdater)
|
||||
m.size += size
|
||||
m.mu.Unlock()
|
||||
|
||||
return &memoryWriteCloser{
|
||||
buffer: buffer,
|
||||
memory: m,
|
||||
key: key,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// memoryWriteCloser implements io.WriteCloser for memory files
|
||||
type memoryWriteCloser struct {
|
||||
buffer *bytes.Buffer
|
||||
memory *MemoryFS
|
||||
key string
|
||||
}
|
||||
|
||||
func (mwc *memoryWriteCloser) Write(p []byte) (n int, err error) {
|
||||
return mwc.buffer.Write(p)
|
||||
}
|
||||
|
||||
func (mwc *memoryWriteCloser) Close() error {
|
||||
// Update the actual size in FileInfo
|
||||
mwc.memory.mu.Lock()
|
||||
if fi, exists := mwc.memory.info[mwc.key]; exists {
|
||||
actualSize := int64(mwc.buffer.Len())
|
||||
sizeDiff := actualSize - fi.Size
|
||||
fi.Size = actualSize
|
||||
mwc.memory.size += sizeDiff
|
||||
}
|
||||
mwc.memory.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open opens a file for reading
|
||||
func (m *MemoryFS) Open(key string) (io.ReadCloser, error) {
|
||||
if key == "" {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
if key[0] == '/' {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
if strings.Contains(key, "..") {
|
||||
return nil, vfserror.ErrInvalidKey
|
||||
}
|
||||
|
||||
keyMu := m.getKeyLock(key)
|
||||
keyMu.RLock()
|
||||
defer keyMu.RUnlock()
|
||||
|
||||
m.mu.Lock()
|
||||
fi, exists := m.info[key]
|
||||
if !exists {
|
||||
m.mu.Unlock()
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
fi.UpdateAccessBatched(m.timeUpdater)
|
||||
m.LRU.MoveToFront(key, m.timeUpdater)
|
||||
|
||||
buffer, exists := m.data[key]
|
||||
if !exists {
|
||||
m.mu.Unlock()
|
||||
return nil, vfserror.ErrNotFound
|
||||
}
|
||||
|
||||
// Use zero-copy approach - return reader that reads directly from buffer
|
||||
m.mu.Unlock()
|
||||
|
||||
return &memoryReadCloser{
|
||||
buffer: buffer,
|
||||
offset: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// memoryReadCloser implements io.ReadCloser for memory files with zero-copy optimization
|
||||
type memoryReadCloser struct {
|
||||
buffer *bytes.Buffer
|
||||
offset int64
|
||||
}
|
||||
|
||||
func (mrc *memoryReadCloser) Read(p []byte) (n int, err error) {
|
||||
if mrc.offset >= int64(mrc.buffer.Len()) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
// Zero-copy read directly from buffer
|
||||
available := mrc.buffer.Len() - int(mrc.offset)
|
||||
toRead := len(p)
|
||||
if toRead > available {
|
||||
toRead = available
|
||||
}
|
||||
|
||||
// Read directly from buffer without copying
|
||||
data := mrc.buffer.Bytes()
|
||||
copy(p, data[mrc.offset:mrc.offset+int64(toRead)])
|
||||
mrc.offset += int64(toRead)
|
||||
|
||||
return toRead, nil
|
||||
}
|
||||
|
||||
func (mrc *memoryReadCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||

// Delete removes a file
func (m *MemoryFS) Delete(key string) error {
	if key == "" {
		return vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return vfserror.ErrInvalidKey
	}
	if strings.Contains(key, "..") {
		return vfserror.ErrInvalidKey
	}

	keyMu := m.getKeyLock(key)
	keyMu.Lock()
	defer keyMu.Unlock()

	m.mu.Lock()
	fi, exists := m.info[key]
	if !exists {
		m.mu.Unlock()
		return vfserror.ErrNotFound
	}
	m.size -= fi.Size
	m.LRU.Remove(key)
	delete(m.info, key)
	delete(m.data, key)
	m.mu.Unlock()

	memorySizeBytes.Set(float64(m.Size()))

	return nil
}

// Stat returns file information
func (m *MemoryFS) Stat(key string) (*types.FileInfo, error) {
	if key == "" {
		return nil, vfserror.ErrInvalidKey
	}
	if key[0] == '/' {
		return nil, vfserror.ErrInvalidKey
	}
	if strings.Contains(key, "..") {
		return nil, vfserror.ErrInvalidKey
	}

	keyMu := m.getKeyLock(key)
	keyMu.RLock()
	defer keyMu.RUnlock()

	m.mu.RLock()
	defer m.mu.RUnlock()

	if fi, ok := m.info[key]; ok {
		return fi, nil
	}

	return nil, vfserror.ErrNotFound
}

// EvictLRU evicts the least recently used files to free up space
func (m *MemoryFS) EvictLRU(bytesNeeded uint) uint {
	m.mu.Lock()
	defer m.mu.Unlock()

	var evicted uint

	// Evict from LRU list until we free enough space
	for m.size > m.capacity-int64(bytesNeeded) && m.LRU.Len() > 0 {
		// Get the least recently used item
		elem := m.LRU.Back()
		if elem == nil {
			break
		}

		fi := elem.Value.(*types.FileInfo)
		key := fi.Key

		// Remove from LRU
		m.LRU.Remove(key)

		// Remove from maps
		delete(m.info, key)
		delete(m.data, key)

		// Update size
		m.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		m.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

// EvictBySize evicts files by size (ascending = smallest first, descending = largest first)
func (m *MemoryFS) EvictBySize(bytesNeeded uint, ascending bool) uint {
	m.mu.Lock()
	defer m.mu.Unlock()

	var evicted uint
	var candidates []*types.FileInfo

	// Collect all files
	for _, fi := range m.info {
		candidates = append(candidates, fi)
	}

	// Sort by size
	sort.Slice(candidates, func(i, j int) bool {
		if ascending {
			return candidates[i].Size < candidates[j].Size
		}
		return candidates[i].Size > candidates[j].Size
	})

	// Evict files until we free enough space
	for _, fi := range candidates {
		if m.size <= m.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		m.LRU.Remove(key)

		// Remove from maps
		delete(m.info, key)
		delete(m.data, key)

		// Update size
		m.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		m.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}

// EvictFIFO evicts files using FIFO (oldest creation time first)
func (m *MemoryFS) EvictFIFO(bytesNeeded uint) uint {
	m.mu.Lock()
	defer m.mu.Unlock()

	var evicted uint
	var candidates []*types.FileInfo

	// Collect all files
	for _, fi := range m.info {
		candidates = append(candidates, fi)
	}

	// Sort by creation time (oldest first)
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].CTime.Before(candidates[j].CTime)
	})

	// Evict oldest files until we free enough space
	for _, fi := range candidates {
		if m.size <= m.capacity-int64(bytesNeeded) {
			break
		}

		key := fi.Key

		// Remove from LRU
		m.LRU.Remove(key)

		// Remove from maps
		delete(m.info, key)
		delete(m.data, key)

		// Update size
		m.size -= fi.Size
		evicted += uint(fi.Size)

		// Clean up key lock
		shardIndex := locks.GetShardIndex(key)
		m.keyLocks[shardIndex].Delete(key)
	}

	return evicted
}
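
// Editor's note: a minimal sketch (not part of the original file) showing how
// a GC layer might dispatch to the eviction strategies above. The "fifo",
// "largest", and "smallest" algorithm names are assumptions for illustration.
func evictFor(m *MemoryFS, algorithm string, bytesNeeded uint) uint {
	switch algorithm {
	case "fifo":
		return m.EvictFIFO(bytesNeeded)
	case "largest":
		return m.EvictBySize(bytesNeeded, false) // largest files first
	case "smallest":
		return m.EvictBySize(bytesNeeded, true) // smallest files first
	default:
		return m.EvictLRU(bytesNeeded)
	}
}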

@@ -1,129 +0,0 @@
// vfs/memory/memory_test.go
package memory

import (
	"errors"
	"fmt"
	"io"
	"s1d3sw1ped/SteamCache2/vfs/vfserror"
	"testing"
)

func TestCreateAndOpen(t *testing.T) {
	m := New(1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value) {
		t.Fatalf("expected %s, got %s", value, got)
	}
}

func TestOverwrite(t *testing.T) {
	m := New(1024)
	key := "key"
	value1 := []byte("value1")
	value2 := []byte("value2")

	w, err := m.Create(key, int64(len(value1)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value1)
	w.Close()

	w, err = m.Create(key, int64(len(value2)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value2)
	w.Close()

	rc, err := m.Open(key)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}
	got, _ := io.ReadAll(rc)
	rc.Close()

	if string(got) != string(value2) {
		t.Fatalf("expected %s, got %s", value2, got)
	}
}

func TestDelete(t *testing.T) {
	m := New(1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	if err := m.Delete(key); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}

	_, err = m.Open(key)
	if !errors.Is(err, vfserror.ErrNotFound) {
		t.Fatalf("expected %v, got %v", vfserror.ErrNotFound, err)
	}
}

func TestCapacityLimit(t *testing.T) {
	m := New(10)
	for i := 0; i < 11; i++ {
		w, err := m.Create(fmt.Sprintf("key%d", i), 1)
		if err != nil && i < 10 {
			t.Errorf("Create failed: %v", err)
		} else if i == 10 && err == nil {
			t.Errorf("Create succeeded: got nil, want %v", vfserror.ErrDiskFull)
		}
		if i < 10 {
			w.Write([]byte("1"))
			w.Close()
		}
	}
}

func TestStat(t *testing.T) {
	m := New(1024)
	key := "key"
	value := []byte("value")

	w, err := m.Create(key, int64(len(value)))
	if err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	w.Write(value)
	w.Close()

	info, err := m.Stat(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if info == nil {
		t.Fatal("expected file info to be non-nil")
	}
	if info.Size() != int64(len(value)) {
		t.Errorf("expected size %d, got %d", len(value), info.Size())
	}
}

274 vfs/memory/monitor.go Normal file
@@ -0,0 +1,274 @@
package memory

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// MemoryMonitor tracks system memory usage and provides dynamic sizing recommendations
type MemoryMonitor struct {
	targetMemoryUsage   uint64 // Target total memory usage in bytes
	currentMemoryUsage  uint64 // Current total memory usage in bytes
	monitoringInterval  time.Duration
	adjustmentThreshold float64 // Threshold for cache size adjustments (e.g., 0.1 = 10%)
	mu                  sync.RWMutex
	ctx                 chan struct{}
	stopChan            chan struct{}
	isMonitoring        int32

	// Dynamic cache management fields
	originalCacheSize  uint64
	currentCacheSize   uint64
	cache              interface{} // Generic cache interface
	adjustmentInterval time.Duration
	lastAdjustment     time.Time
	adjustmentCount    int64
	isAdjusting        int32
}

// NewMemoryMonitor creates a new memory monitor
func NewMemoryMonitor(targetMemoryUsage uint64, monitoringInterval time.Duration, adjustmentThreshold float64) *MemoryMonitor {
	return &MemoryMonitor{
		targetMemoryUsage:   targetMemoryUsage,
		monitoringInterval:  monitoringInterval,
		adjustmentThreshold: adjustmentThreshold,
		ctx:                 make(chan struct{}),
		stopChan:            make(chan struct{}),
		adjustmentInterval:  30 * time.Second, // Default adjustment interval
	}
}

// NewMemoryMonitorWithCache creates a new memory monitor with cache management
func NewMemoryMonitorWithCache(targetMemoryUsage uint64, monitoringInterval time.Duration, adjustmentThreshold float64, cache interface{}, originalCacheSize uint64) *MemoryMonitor {
	mm := NewMemoryMonitor(targetMemoryUsage, monitoringInterval, adjustmentThreshold)
	mm.cache = cache
	mm.originalCacheSize = originalCacheSize
	mm.currentCacheSize = originalCacheSize
	return mm
}

// Start begins monitoring memory usage
func (mm *MemoryMonitor) Start() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 0, 1) {
		go mm.monitor()
	}
}

// Stop stops monitoring memory usage
func (mm *MemoryMonitor) Stop() {
	if atomic.CompareAndSwapInt32(&mm.isMonitoring, 1, 0) {
		close(mm.stopChan)
	}
}

// GetCurrentMemoryUsage returns the current total memory usage
func (mm *MemoryMonitor) GetCurrentMemoryUsage() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return atomic.LoadUint64(&mm.currentMemoryUsage)
}

// GetTargetMemoryUsage returns the target memory usage
func (mm *MemoryMonitor) GetTargetMemoryUsage() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return mm.targetMemoryUsage
}

// GetMemoryUtilization returns the current memory utilization as a fraction of the target
func (mm *MemoryMonitor) GetMemoryUtilization() float64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	return float64(current) / float64(mm.targetMemoryUsage)
}

// GetRecommendedCacheSize calculates the recommended cache size based on current memory usage
func (mm *MemoryMonitor) GetRecommendedCacheSize(originalCacheSize uint64) uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()

	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	target := mm.targetMemoryUsage

	// If we're under target, we can use the full cache size
	if current <= target {
		return originalCacheSize
	}

	// Calculate how much we're over target
	overage := current - target

	// If overage is significant, reduce cache size
	if overage > uint64(float64(target)*mm.adjustmentThreshold) {
		// Reduce cache size by the overage amount, but don't go below 10% of original
		minCacheSize := uint64(float64(originalCacheSize) * 0.1)
		if overage >= originalCacheSize {
			// Guard against unsigned underflow when the overage exceeds the cache size
			return minCacheSize
		}
		recommendedSize := originalCacheSize - overage

		if recommendedSize < minCacheSize {
			recommendedSize = minCacheSize
		}

		return recommendedSize
	}

	return originalCacheSize
}
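
// Worked example (hypothetical numbers): with target = 8 GiB,
// current = 9 GiB, and adjustmentThreshold = 0.1, the overage is 1 GiB,
// which exceeds 0.1 * 8 GiB = 0.8 GiB, so the recommendation is
// originalCacheSize - 1 GiB, floored at 10% of the original size. With
// current = 8.5 GiB the 0.5 GiB overage stays under the threshold and the
// original cache size is returned unchanged.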

// monitor runs the memory monitoring loop
func (mm *MemoryMonitor) monitor() {
	ticker := time.NewTicker(mm.monitoringInterval)
	defer ticker.Stop()

	for {
		select {
		case <-mm.stopChan:
			return
		case <-ticker.C:
			mm.updateMemoryUsage()
		}
	}
}

// updateMemoryUsage updates the current memory usage
func (mm *MemoryMonitor) updateMemoryUsage() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	// Use Alloc (currently allocated memory) as our metric
	atomic.StoreUint64(&mm.currentMemoryUsage, m.Alloc)
}

// SetTargetMemoryUsage updates the target memory usage
func (mm *MemoryMonitor) SetTargetMemoryUsage(target uint64) {
	mm.mu.Lock()
	defer mm.mu.Unlock()
	mm.targetMemoryUsage = target
}

// GetMemoryStats returns detailed memory statistics
func (mm *MemoryMonitor) GetMemoryStats() map[string]interface{} {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	mm.mu.RLock()
	defer mm.mu.RUnlock()

	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	return map[string]interface{}{
		"current_usage": current,
		"target_usage":  mm.targetMemoryUsage,
		// Computed inline rather than via GetMemoryUtilization, which would
		// re-acquire mm.mu and risk deadlocking against a waiting writer
		"utilization":    float64(current) / float64(mm.targetMemoryUsage),
		"heap_alloc":     m.HeapAlloc,
		"heap_sys":       m.HeapSys,
		"heap_idle":      m.HeapIdle,
		"heap_inuse":     m.HeapInuse,
		"stack_inuse":    m.StackInuse,
		"stack_sys":      m.StackSys,
		"gc_cycles":      m.NumGC,
		"gc_pause_total": m.PauseTotalNs,
	}
}

// Dynamic Cache Management Methods

// StartDynamicAdjustment begins the dynamic cache size adjustment process
func (mm *MemoryMonitor) StartDynamicAdjustment() {
	if mm.cache != nil {
		go mm.adjustmentLoop()
	}
}

// GetCurrentCacheSize returns the current cache size
func (mm *MemoryMonitor) GetCurrentCacheSize() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return atomic.LoadUint64(&mm.currentCacheSize)
}

// GetOriginalCacheSize returns the original cache size
func (mm *MemoryMonitor) GetOriginalCacheSize() uint64 {
	mm.mu.RLock()
	defer mm.mu.RUnlock()
	return mm.originalCacheSize
}

// GetAdjustmentCount returns the number of adjustments made
func (mm *MemoryMonitor) GetAdjustmentCount() int64 {
	return atomic.LoadInt64(&mm.adjustmentCount)
}

// adjustmentLoop runs the cache size adjustment loop
func (mm *MemoryMonitor) adjustmentLoop() {
	ticker := time.NewTicker(mm.adjustmentInterval)
	defer ticker.Stop()

	for range ticker.C {
		mm.performAdjustment()
	}
}

// performAdjustment performs a cache size adjustment if needed
func (mm *MemoryMonitor) performAdjustment() {
	// Prevent concurrent adjustments
	if !atomic.CompareAndSwapInt32(&mm.isAdjusting, 0, 1) {
		return
	}
	defer atomic.StoreInt32(&mm.isAdjusting, 0)

	// Check if enough time has passed since last adjustment
	if time.Since(mm.lastAdjustment) < mm.adjustmentInterval {
		return
	}

	// Get recommended cache size
	recommendedSize := mm.GetRecommendedCacheSize(mm.originalCacheSize)
	currentSize := atomic.LoadUint64(&mm.currentCacheSize)

	// Only adjust if there's a significant difference (more than 5%)
	sizeDiff := float64(recommendedSize) / float64(currentSize)
	if sizeDiff < 0.95 || sizeDiff > 1.05 {
		mm.adjustCacheSize(recommendedSize)
		mm.lastAdjustment = time.Now()
		atomic.AddInt64(&mm.adjustmentCount, 1)
	}
}

// adjustCacheSize adjusts the cache size to the recommended size
func (mm *MemoryMonitor) adjustCacheSize(newSize uint64) {
	mm.mu.Lock()
	defer mm.mu.Unlock()

	oldSize := atomic.LoadUint64(&mm.currentCacheSize)
	atomic.StoreUint64(&mm.currentCacheSize, newSize)

	// If we're reducing the cache size, trigger GC to free up memory
	if newSize < oldSize {
		// Calculate how much to free
		bytesToFree := oldSize - newSize

		// Trigger GC on the cache to free up the excess memory.
		// This is a simplified approach - in practice, you'd want to integrate
		// with the actual GC system to free the right amount.
		if gcCache, ok := mm.cache.(interface{ ForceGC(uint) }); ok {
			gcCache.ForceGC(uint(bytesToFree))
		}
	}
}

// GetDynamicStats returns statistics about the dynamic cache manager
func (mm *MemoryMonitor) GetDynamicStats() map[string]interface{} {
	mm.mu.RLock()
	defer mm.mu.RUnlock()

	current := atomic.LoadUint64(&mm.currentMemoryUsage)
	return map[string]interface{}{
		"original_cache_size": mm.originalCacheSize,
		"current_cache_size":  atomic.LoadUint64(&mm.currentCacheSize),
		"adjustment_count":    atomic.LoadInt64(&mm.adjustmentCount),
		"last_adjustment":     mm.lastAdjustment,
		// Read fields directly rather than via the getters, which would
		// re-acquire mm.mu and risk deadlocking against a waiting writer
		"memory_utilization":   float64(current) / float64(mm.targetMemoryUsage),
		"target_memory_usage":  mm.targetMemoryUsage,
		"current_memory_usage": current,
	}
}
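
// Editor's note: a minimal wiring sketch (assumed usage, not part of the
// original file). The cache value only needs to expose ForceGC(uint) for
// downward adjustments to take effect.
func exampleMonitorSetup(cache interface{ ForceGC(uint) }) *MemoryMonitor {
	const gib = 1 << 30
	mm := NewMemoryMonitorWithCache(
		8*gib,          // target total memory usage
		10*time.Second, // sampling interval for runtime.ReadMemStats
		0.1,            // 10% overage before cache shrinking kicks in
		cache,
		4*gib, // original cache size
	)
	mm.Start()                  // begin sampling memory usage
	mm.StartDynamicAdjustment() // begin resizing the cache toward the target
	return mm
}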

425 vfs/predictive/predictive.go Normal file
@@ -0,0 +1,425 @@
package predictive

import (
	"context"
	"sort"
	"sync"
	"sync/atomic"
	"time"
)

// PredictiveCacheManager implements predictive caching strategies
type PredictiveCacheManager struct {
	accessPredictor *AccessPredictor
	cacheWarmer     *CacheWarmer
	prefetchQueue   chan PrefetchRequest
	ctx             context.Context
	cancel          context.CancelFunc
	wg              sync.WaitGroup
	stats           *PredictiveStats
}

// PrefetchRequest represents a request to prefetch content
type PrefetchRequest struct {
	Key         string
	Priority    int
	Reason      string
	RequestedAt time.Time
}

// PredictiveStats tracks predictive caching statistics
type PredictiveStats struct {
	PrefetchHits     int64
	PrefetchMisses   int64
	PrefetchRequests int64
	CacheWarmHits    int64
	CacheWarmMisses  int64
	mu               sync.RWMutex
}

// AccessPredictor predicts which files are likely to be accessed next
type AccessPredictor struct {
	accessHistory map[string]*AccessSequence
	patterns      map[string][]string // Key -> likely next keys
	mu            sync.RWMutex
}

// AccessSequence tracks access sequences for prediction
type AccessSequence struct {
	Key       string
	NextKeys  []string
	Frequency map[string]int64
	LastSeen  time.Time
	mu        sync.RWMutex
}

// CacheWarmer preloads popular content into cache
type CacheWarmer struct {
	popularContent map[string]*PopularContent
	warmerQueue    chan WarmRequest
	mu             sync.RWMutex
}

// PopularContent tracks popular content for warming
type PopularContent struct {
	Key         string
	AccessCount int64
	LastAccess  time.Time
	Size        int64
	Priority    int
}

// WarmRequest represents a cache warming request
type WarmRequest struct {
	Key         string
	Priority    int
	Reason      string
	Size        int64
	RequestedAt time.Time
	Source      string // Where the warming request came from
}

// ActiveWarmer tracks an active warming operation
type ActiveWarmer struct {
	Key       string
	StartTime time.Time
	Priority  int
	Reason    string
	mu        sync.RWMutex
}

// WarmingStats tracks cache warming statistics
type WarmingStats struct {
	WarmRequests      int64
	WarmSuccesses     int64
	WarmFailures      int64
	WarmBytes         int64
	WarmDuration      time.Duration
	PrefetchRequests  int64
	PrefetchSuccesses int64
	PrefetchFailures  int64
	PrefetchBytes     int64
	PrefetchDuration  time.Duration
}

// NewPredictiveCacheManager creates a new predictive cache manager
func NewPredictiveCacheManager() *PredictiveCacheManager {
	ctx, cancel := context.WithCancel(context.Background())

	pcm := &PredictiveCacheManager{
		accessPredictor: NewAccessPredictor(),
		cacheWarmer:     NewCacheWarmer(),
		prefetchQueue:   make(chan PrefetchRequest, 1000),
		ctx:             ctx,
		cancel:          cancel,
		stats:           &PredictiveStats{},
	}

	// Start background workers
	pcm.wg.Add(1)
	go pcm.prefetchWorker()

	pcm.wg.Add(1)
	go pcm.analysisWorker()

	return pcm
}

// NewAccessPredictor creates a new access predictor
func NewAccessPredictor() *AccessPredictor {
	return &AccessPredictor{
		accessHistory: make(map[string]*AccessSequence),
		patterns:      make(map[string][]string),
	}
}

// NewCacheWarmer creates a new cache warmer
func NewCacheWarmer() *CacheWarmer {
	return &CacheWarmer{
		popularContent: make(map[string]*PopularContent),
		warmerQueue:    make(chan WarmRequest, 100),
	}
}

// NewWarmingStats creates a new warming stats tracker
func NewWarmingStats() *WarmingStats {
	return &WarmingStats{}
}

// NewActiveWarmer creates a new active warmer tracker
func NewActiveWarmer(key string, priority int, reason string) *ActiveWarmer {
	return &ActiveWarmer{
		Key:       key,
		StartTime: time.Now(),
		Priority:  priority,
		Reason:    reason,
	}
}

// RecordAccess records a file access for prediction analysis (lightweight version)
func (pcm *PredictiveCacheManager) RecordAccess(key string, previousKey string, size int64) {
	// Only record if we have a previous key to avoid overhead
	if previousKey != "" {
		pcm.accessPredictor.RecordSequence(previousKey, key)
	}

	// Lightweight popular content tracking - only for large files
	if size > 1024*1024 { // Only track files > 1MB
		pcm.cacheWarmer.RecordAccess(key, size)
	}

	// Skip expensive prediction checks on every access;
	// only check occasionally to reduce overhead
}

// PredictNextAccess predicts the next likely file to be accessed
func (pcm *PredictiveCacheManager) PredictNextAccess(currentKey string) []string {
	return pcm.accessPredictor.PredictNext(currentKey)
}

// RequestPrefetch requests prefetching of predicted content
func (pcm *PredictiveCacheManager) RequestPrefetch(key string, priority int, reason string) {
	select {
	case pcm.prefetchQueue <- PrefetchRequest{
		Key:         key,
		Priority:    priority,
		Reason:      reason,
		RequestedAt: time.Now(),
	}:
		atomic.AddInt64(&pcm.stats.PrefetchRequests, 1)
	default:
		// Queue full, skip prefetch
	}
}

// RecordSequence records an access sequence for prediction
func (ap *AccessPredictor) RecordSequence(previousKey, currentKey string) {
	if previousKey == "" || currentKey == "" {
		return
	}

	ap.mu.Lock()
	defer ap.mu.Unlock()

	seq, exists := ap.accessHistory[previousKey]
	if !exists {
		seq = &AccessSequence{
			Key:       previousKey,
			NextKeys:  []string{},
			Frequency: make(map[string]int64),
			LastSeen:  time.Now(),
		}
		ap.accessHistory[previousKey] = seq
	}

	seq.mu.Lock()
	seq.Frequency[currentKey]++
	seq.LastSeen = time.Now()

	// Update next keys list: keep the top 5 keys by observed frequency
	nextKeys := make([]string, 0, len(seq.Frequency))
	for key := range seq.Frequency {
		nextKeys = append(nextKeys, key)
	}
	sort.Slice(nextKeys, func(i, j int) bool {
		return seq.Frequency[nextKeys[i]] > seq.Frequency[nextKeys[j]]
	})
	if len(nextKeys) > 5 {
		nextKeys = nextKeys[:5]
	}
	seq.NextKeys = nextKeys
	seq.mu.Unlock()
}

// PredictNext predicts the next likely files to be accessed
func (ap *AccessPredictor) PredictNext(currentKey string) []string {
	ap.mu.RLock()
	defer ap.mu.RUnlock()

	seq, exists := ap.accessHistory[currentKey]
	if !exists {
		return []string{}
	}

	seq.mu.RLock()
	defer seq.mu.RUnlock()

	// Return top predicted keys
	predictions := make([]string, len(seq.NextKeys))
	copy(predictions, seq.NextKeys)
	return predictions
}
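
// Editor's note: an assumed usage sketch (not part of the original file).
// After observing "manifest" -> "chunk_0", the predictor will suggest
// "chunk_0" the next time "manifest" is accessed.
func examplePrediction(ap *AccessPredictor) []string {
	ap.RecordSequence("manifest", "chunk_0")
	ap.RecordSequence("manifest", "chunk_1")
	return ap.PredictNext("manifest") // up to 5 keys, most frequent first
}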

// IsPredictedAccess checks if an access was predicted
func (ap *AccessPredictor) IsPredictedAccess(key string) bool {
	ap.mu.RLock()
	defer ap.mu.RUnlock()

	// Check if this key appears in any prediction lists
	for _, seq := range ap.accessHistory {
		seq.mu.RLock()
		for _, predictedKey := range seq.NextKeys {
			if predictedKey == key {
				seq.mu.RUnlock()
				return true
			}
		}
		seq.mu.RUnlock()
	}
	return false
}

// RecordAccess records a file access for cache warming (lightweight version)
func (cw *CacheWarmer) RecordAccess(key string, size int64) {
	// Use read lock first for better performance
	cw.mu.RLock()
	content, exists := cw.popularContent[key]
	cw.mu.RUnlock()

	if !exists {
		// Only acquire write lock when creating new entry
		cw.mu.Lock()
		// Double-check after acquiring write lock
		if content, exists = cw.popularContent[key]; !exists {
			content = &PopularContent{
				Key:         key,
				AccessCount: 1,
				LastAccess:  time.Now(),
				Size:        size,
				Priority:    1,
			}
			cw.popularContent[key] = content
		}
		cw.mu.Unlock()
	} else {
		// Lightweight update - just increment counter
		content.AccessCount++
		content.LastAccess = time.Now()

		// Only update priority occasionally to reduce overhead
		if content.AccessCount%5 == 0 {
			if content.AccessCount > 10 {
				content.Priority = 3
			} else if content.AccessCount > 5 {
				content.Priority = 2
			}
		}
	}
}

// GetPopularContent returns the most popular content for warming
func (cw *CacheWarmer) GetPopularContent(limit int) []*PopularContent {
	cw.mu.RLock()
	defer cw.mu.RUnlock()

	// Sort by access count and return top items
	popular := make([]*PopularContent, 0, len(cw.popularContent))
	for _, content := range cw.popularContent {
		popular = append(popular, content)
	}

	sort.Slice(popular, func(i, j int) bool {
		return popular[i].AccessCount > popular[j].AccessCount
	})
	if len(popular) > limit {
		popular = popular[:limit]
	}

	return popular
}

// RequestWarming requests warming of a specific key
func (cw *CacheWarmer) RequestWarming(key string, priority int, reason string, size int64) {
	select {
	case cw.warmerQueue <- WarmRequest{
		Key:         key,
		Priority:    priority,
		Reason:      reason,
		Size:        size,
		RequestedAt: time.Now(),
		Source:      "predictive",
	}:
		// Successfully queued
	default:
		// Queue full, skip warming
	}
}

// prefetchWorker processes prefetch requests
func (pcm *PredictiveCacheManager) prefetchWorker() {
	defer pcm.wg.Done()

	for {
		select {
		case <-pcm.ctx.Done():
			return
		case req := <-pcm.prefetchQueue:
			// Process prefetch request
			pcm.processPrefetchRequest(req)
		}
	}
}

// analysisWorker performs periodic analysis and cache warming
func (pcm *PredictiveCacheManager) analysisWorker() {
	defer pcm.wg.Done()

	ticker := time.NewTicker(30 * time.Second) // Analyze every 30 seconds
	defer ticker.Stop()

	for {
		select {
		case <-pcm.ctx.Done():
			return
		case <-ticker.C:
			pcm.performAnalysis()
		}
	}
}

// processPrefetchRequest processes a prefetch request
func (pcm *PredictiveCacheManager) processPrefetchRequest(req PrefetchRequest) {
	// In a real implementation, this would:
	// 1. Check if content is already cached
	// 2. If not, fetch and cache it
	// 3. Update statistics

	// For now, this is a no-op placeholder;
	// in production, integrate with the actual cache system
}

// performAnalysis performs periodic analysis and cache warming
func (pcm *PredictiveCacheManager) performAnalysis() {
	// Get popular content for warming
	popular := pcm.cacheWarmer.GetPopularContent(10)

	// Request warming for popular content
	for _, content := range popular {
		if content.AccessCount > 5 { // Only warm frequently accessed content
			select {
			case pcm.cacheWarmer.warmerQueue <- WarmRequest{
				Key:      content.Key,
				Priority: content.Priority,
				Reason:   "popular_content",
			}:
			default:
				// Queue full, skip
			}
		}
	}
}

// GetStats returns predictive caching statistics
func (pcm *PredictiveCacheManager) GetStats() *PredictiveStats {
	pcm.stats.mu.RLock()
	defer pcm.stats.mu.RUnlock()

	return &PredictiveStats{
		PrefetchHits:     atomic.LoadInt64(&pcm.stats.PrefetchHits),
		PrefetchMisses:   atomic.LoadInt64(&pcm.stats.PrefetchMisses),
		PrefetchRequests: atomic.LoadInt64(&pcm.stats.PrefetchRequests),
		CacheWarmHits:    atomic.LoadInt64(&pcm.stats.CacheWarmHits),
		CacheWarmMisses:  atomic.LoadInt64(&pcm.stats.CacheWarmMisses),
	}
}

// Stop stops the predictive cache manager
func (pcm *PredictiveCacheManager) Stop() {
	pcm.cancel()
	pcm.wg.Wait()
}

87 vfs/types/types.go Normal file
@@ -0,0 +1,87 @@
// vfs/types/types.go
package types

import (
	"os"
	"time"
)

// FileInfo contains metadata about a cached file
type FileInfo struct {
	Key         string    `json:"key"`
	Size        int64     `json:"size"`
	ATime       time.Time `json:"atime"` // Last access time
	CTime       time.Time `json:"ctime"` // Creation time
	AccessCount int       `json:"access_count"`
}

// NewFileInfo creates a new FileInfo with the given key and current timestamp
func NewFileInfo(key string, size int64) *FileInfo {
	now := time.Now()
	return &FileInfo{
		Key:         key,
		Size:        size,
		ATime:       now,
		CTime:       now,
		AccessCount: 1,
	}
}

// NewFileInfoFromOS creates a FileInfo from os.FileInfo
func NewFileInfoFromOS(info os.FileInfo, key string) *FileInfo {
	return &FileInfo{
		Key:         key,
		Size:        info.Size(),
		ATime:       time.Now(), // We don't have access time from os.FileInfo
		CTime:       info.ModTime(),
		AccessCount: 1,
	}
}

// UpdateAccess updates the access time and increments the access count
func (fi *FileInfo) UpdateAccess() {
	fi.ATime = time.Now()
	fi.AccessCount++
}

// BatchedTimeUpdate provides a way to batch time updates for better performance
type BatchedTimeUpdate struct {
	currentTime    time.Time
	lastUpdate     time.Time
	updateInterval time.Duration
}

// NewBatchedTimeUpdate creates a new batched time updater
func NewBatchedTimeUpdate(interval time.Duration) *BatchedTimeUpdate {
	now := time.Now()
	return &BatchedTimeUpdate{
		currentTime:    now,
		lastUpdate:     now,
		updateInterval: interval,
	}
}

// GetTime returns the current cached time, updating it if necessary
func (btu *BatchedTimeUpdate) GetTime() time.Time {
	now := time.Now()
	if now.Sub(btu.lastUpdate) >= btu.updateInterval {
		btu.currentTime = now
		btu.lastUpdate = now
	}
	return btu.currentTime
}

// UpdateAccessBatched updates the access time using batched time updates
func (fi *FileInfo) UpdateAccessBatched(btu *BatchedTimeUpdate) {
	fi.ATime = btu.GetTime()
	fi.AccessCount++
}

// GetTimeDecayedScore calculates a score based on access time and frequency;
// more recent and frequent accesses get higher scores
func (fi *FileInfo) GetTimeDecayedScore() float64 {
	timeSinceAccess := time.Since(fi.ATime).Hours()
	decayFactor := 1.0 / (1.0 + timeSinceAccess/24.0) // Decay over days
	frequencyBonus := float64(fi.AccessCount) * 0.1
	return decayFactor + frequencyBonus
}
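
// Worked example (hypothetical values): for a file last accessed 12 hours ago
// with AccessCount = 5, the decay factor is 1 / (1 + 12/24) ≈ 0.667 and the
// frequency bonus is 5 * 0.1 = 0.5, giving a score of ≈ 1.167. A file last
// accessed 48 hours ago with the same count scores 1/(1+2) + 0.5 ≈ 0.833.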

52 vfs/vfs.go
@@ -1,28 +1,46 @@
// vfs/vfs.go
package vfs

import (
	"io"
	"s1d3sw1ped/steamcache2/vfs/types"
)

// VFS defines the interface for virtual file systems
type VFS interface {
	// Create creates a new file at the given key
	Create(key string, size int64) (io.WriteCloser, error)

	// Open opens the file at the given key for reading
	Open(key string) (io.ReadCloser, error)

	// Delete removes the file at the given key
	Delete(key string) error

	// Stat returns information about the file at the given key
	Stat(key string) (*types.FileInfo, error)

	// Name returns the name of this VFS
	Name() string

	// Size returns the current size of the VFS
	Size() int64

	// Capacity returns the maximum capacity of the VFS
	Capacity() int64
}

// FileInfo is an alias for types.FileInfo for backward compatibility
type FileInfo = types.FileInfo

// NewFileInfo is an alias for types.NewFileInfo for backward compatibility
var NewFileInfo = types.NewFileInfo

// NewFileInfoFromOS is an alias for types.NewFileInfoFromOS for backward compatibility
var NewFileInfoFromOS = types.NewFileInfoFromOS

// BatchedTimeUpdate is an alias for types.BatchedTimeUpdate for backward compatibility
type BatchedTimeUpdate = types.BatchedTimeUpdate

// NewBatchedTimeUpdate is an alias for types.NewBatchedTimeUpdate for backward compatibility
var NewBatchedTimeUpdate = types.NewBatchedTimeUpdate

@@ -1,18 +1,58 @@
// vfs/vfserror/vfserror.go
package vfserror

import (
	"errors"
	"fmt"
)

// Common VFS errors
var (
	ErrNotFound         = errors.New("vfs: key not found")
	ErrInvalidKey       = errors.New("vfs: invalid key")
	ErrAlreadyExists    = errors.New("vfs: key already exists")
	ErrCapacityExceeded = errors.New("vfs: capacity exceeded")
	ErrCorruptedFile    = errors.New("vfs: corrupted file")
	ErrInvalidSize      = errors.New("vfs: invalid size")
	ErrOperationTimeout = errors.New("vfs: operation timeout")
)

// VFSError represents a VFS-specific error with context
type VFSError struct {
	Op   string // Operation that failed
	Key  string // Key that caused the error
	Err  error  // Underlying error
	Size int64  // Size information if relevant
}

// Error implements the error interface
func (e *VFSError) Error() string {
	if e.Key != "" {
		return fmt.Sprintf("vfs: %s failed for key %q: %v", e.Op, e.Key, e.Err)
	}
	return fmt.Sprintf("vfs: %s failed: %v", e.Op, e.Err)
}

// Unwrap returns the underlying error
func (e *VFSError) Unwrap() error {
	return e.Err
}

// NewVFSError creates a new VFS error with context
func NewVFSError(op, key string, err error) *VFSError {
	return &VFSError{
		Op:  op,
		Key: key,
		Err: err,
	}
}

// NewVFSErrorWithSize creates a new VFS error with size context
func NewVFSErrorWithSize(op, key string, size int64, err error) *VFSError {
	return &VFSError{
		Op:   op,
		Key:  key,
		Size: size,
		Err:  err,
	}
}
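
// Editor's note: an assumed usage sketch (not part of the original file).
// Because VFSError implements Unwrap, errors.Is still matches the sentinel
// errors through the added context.
func exampleErrorHandling() {
	err := NewVFSErrorWithSize("create", "steam/depot/42", 1024, ErrCapacityExceeded)
	if errors.Is(err, ErrCapacityExceeded) {
		fmt.Println(err) // vfs: create failed for key "steam/depot/42": vfs: capacity exceeded
	}
}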