2 Commits

9b2affe95a Refactor disk initialization and file processing in DiskFS
All checks successful: Release Tag / release (push) in 9s
- Replaced legacy depot file migration logic with concurrent directory scanning for improved performance.
- Introduced batch processing of files to minimize lock contention during initialization.
- Simplified the init function by removing unnecessary complexity and focusing on efficient file handling.
- Enhanced logging to provide better insights into directory scan progress and completion.
2025-09-22 00:51:51 -05:00
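
The two techniques this commit message describes — bounded-concurrency scanning and batched index updates — distill to a small pattern. The sketch below is illustrative only; the real DiskFS code appears in the diff further down. `entry`, `scan`, and `index` are made-up names, the worker count and batch size are arbitrary, and the built-in `min` needs Go 1.21+ (the repo targets 1.23).

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"sync"
)

// entry is a minimal stand-in for the per-file record DiskFS collects.
type entry struct {
	path string
	size int64
}

// scan walks root sequentially but stats files with at most 8 goroutines,
// streaming results over a channel instead of touching a shared map per file.
func scan(root string) []entry {
	out := make(chan entry, 1000)
	sem := make(chan struct{}, 8) // bound concurrent stat calls
	var wg sync.WaitGroup
	go func() {
		filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
			if err != nil || d.IsDir() {
				return nil
			}
			wg.Add(1)
			go func() {
				defer wg.Done()
				sem <- struct{}{}
				defer func() { <-sem }()
				if info, err := d.Info(); err == nil { // lazy stat
					out <- entry{path: p, size: info.Size()}
				}
			}()
			return nil
		})
		wg.Wait()
		close(out)
	}()
	var entries []entry
	for e := range out {
		entries = append(entries, e)
	}
	return entries
}

// index takes its mutex once per batch rather than once per file,
// which is the lock-contention point the commit is addressing.
type index struct {
	mu    sync.Mutex
	files map[string]int64
}

func (ix *index) addBatch(batch []entry) {
	ix.mu.Lock()
	defer ix.mu.Unlock()
	for _, e := range batch {
		ix.files[e.path] = e.size
	}
}

func main() {
	ix := &index{files: make(map[string]int64)}
	entries := scan(os.TempDir())
	const batchSize = 1000
	for i := 0; i < len(entries); i += batchSize {
		end := min(i+batchSize, len(entries))
		ix.addBatch(entries[i:end])
	}
	fmt.Println("indexed", len(ix.files), "files")
}
```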
bd123bc63a Refactor module naming and update references to steamcache2
All checks successful: Release Tag / release (push) in 9s
- Changed module name from `s1d3sw1ped/SteamCache2` to `s1d3sw1ped/steamcache2` for consistency.
- Updated all import paths and references throughout the codebase to reflect the new module name.
- Adjusted README and Makefile to use the updated module name, ensuring clarity in usage instructions.
2025-09-21 23:10:21 -05:00
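
A rename like this is mostly mechanical. As a sketch only — assuming the Go toolchain and GNU sed, not necessarily what was actually used here — the whole change can be scripted:

```bash
# Point go.mod at the new module path, then rewrite every import.
go mod edit -module s1d3sw1ped/steamcache2
find . -name '*.go' -exec sed -i 's|s1d3sw1ped/SteamCache2|s1d3sw1ped/steamcache2|g' {} +
go build ./... && go test ./...   # confirm no stale references remain
```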
15 changed files with 238 additions and 147 deletions

View File

@@ -11,8 +11,8 @@ builds:
       - -s
       - -w
       - -extldflags "-static"
-      - -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
-      - -X s1d3sw1ped/SteamCache2/version.Date={{.Date}}
+      - -X s1d3sw1ped/steamcache2/version.Version={{.Version}}
+      - -X s1d3sw1ped/steamcache2/version.Date={{.Date}}
     env:
       - CGO_ENABLED=0
     goos:

View File

@@ -13,7 +13,7 @@ build-snapshot-single: deps test ## Build a snapshot of the application for the
 	@goreleaser build --single-target --snapshot --clean

 help: ## Show this help message
-	@echo SteamCache2 Makefile
+	@echo steamcache2 Makefile
 	@echo Available targets:
 	@echo run Run the application
 	@echo run-debug Run the application with debug logging

View File

@@ -21,7 +21,7 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
 1. **Clone and build:**
    ```bash
    git clone <repository-url>
-   cd SteamCache2
+   cd steamcache2
    make # This will run tests and build the application
    ```

View File

@@ -4,10 +4,10 @@ package cmd
 import (
 	"fmt"
 	"os"
-	"s1d3sw1ped/SteamCache2/config"
-	"s1d3sw1ped/SteamCache2/steamcache"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/version"
+	"s1d3sw1ped/steamcache2/config"
+	"s1d3sw1ped/steamcache2/steamcache"
+	"s1d3sw1ped/steamcache2/steamcache/logger"
+	"s1d3sw1ped/steamcache2/version"
 	"strings"

 	"github.com/rs/zerolog"
@@ -25,9 +25,9 @@ var (
 )

 var rootCmd = &cobra.Command{
-	Use:   "SteamCache2",
-	Short: "SteamCache2 is a caching solution for Steam game updates and installations",
-	Long: `SteamCache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
+	Use:   "steamcache2",
+	Short: "steamcache2 is a caching solution for Steam game updates and installations",
+	Long: `steamcache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
 It reduces bandwidth usage and speeds up the download process by caching game files locally.
 This tool is particularly useful for environments with multiple Steam users, such as gaming cafes or households with multiple gamers.
 By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
@@ -53,7 +53,7 @@ var rootCmd = &cobra.Command{
 	logger.Logger = zerolog.New(writer).With().Timestamp().Logger()

 	logger.Logger.Info().
-		Msg("SteamCache2 " + version.Version + " " + version.Date + " starting...")
+		Msg("steamcache2 " + version.Version + " " + version.Date + " starting...")

 	// Load configuration
 	cfg, err := config.LoadConfig(configPath)
@@ -121,11 +121,11 @@ var rootCmd = &cobra.Command{
 	)

 	logger.Logger.Info().
-		Msg("SteamCache2 " + version.Version + " started on " + cfg.ListenAddress)
+		Msg("steamcache2 " + version.Version + " started on " + cfg.ListenAddress)

 	sc.Run()

-	logger.Logger.Info().Msg("SteamCache2 stopped")
+	logger.Logger.Info().Msg("steamcache2 stopped")

 	os.Exit(0)
 },
 }

View File

@@ -4,7 +4,7 @@ package cmd
 import (
 	"fmt"
 	"os"
-	"s1d3sw1ped/SteamCache2/version"
+	"s1d3sw1ped/steamcache2/version"

 	"github.com/spf13/cobra"
 )
@@ -12,10 +12,10 @@ import (
 // versionCmd represents the version command
 var versionCmd = &cobra.Command{
 	Use:   "version",
-	Short: "prints the version of SteamCache2",
-	Long:  `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
+	Short: "prints the version of steamcache2",
+	Long:  `Prints the version of steamcache2. This command is useful for checking the version of the application.`,
 	Run: func(cmd *cobra.Command, args []string) {
-		fmt.Fprintln(os.Stderr, "SteamCache2", version.Version, version.Date)
+		fmt.Fprintln(os.Stderr, "steamcache2", version.Version, version.Date)
 	},
 }

go.mod (2 lines changed)
View File

@@ -1,4 +1,4 @@
-module s1d3sw1ped/SteamCache2
+module s1d3sw1ped/steamcache2

 go 1.23.0

View File

@@ -2,8 +2,8 @@
 package main

 import (
-	"s1d3sw1ped/SteamCache2/cmd"
-	_ "s1d3sw1ped/SteamCache2/version" // Import the version package for global version variable
+	"s1d3sw1ped/steamcache2/cmd"
+	_ "s1d3sw1ped/steamcache2/version" // Import the version package for global version variable
 )

 func main() {

View File

@@ -13,15 +13,15 @@ import (
 	"net/url"
 	"os"
 	"regexp"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/adaptive"
-	"s1d3sw1ped/SteamCache2/vfs/cache"
-	"s1d3sw1ped/SteamCache2/vfs/disk"
-	"s1d3sw1ped/SteamCache2/vfs/gc"
-	"s1d3sw1ped/SteamCache2/vfs/memory"
-	"s1d3sw1ped/SteamCache2/vfs/predictive"
-	"s1d3sw1ped/SteamCache2/vfs/warming"
+	"s1d3sw1ped/steamcache2/steamcache/logger"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/adaptive"
+	"s1d3sw1ped/steamcache2/vfs/cache"
+	"s1d3sw1ped/steamcache2/vfs/disk"
+	"s1d3sw1ped/steamcache2/vfs/gc"
+	"s1d3sw1ped/steamcache2/vfs/memory"
+	"s1d3sw1ped/steamcache2/vfs/predictive"
+	"s1d3sw1ped/steamcache2/vfs/warming"
 	"strconv"
 	"strings"
 	"sync"

vfs/cache/cache.go (vendored, 4 lines changed)
View File

@@ -3,8 +3,8 @@ package cache
 import (
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/vfserror"
 	"sync"
 	"sync/atomic"
 )

View File

@@ -7,12 +7,13 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"s1d3sw1ped/steamcache2/steamcache/logger"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/vfserror"
 	"sort"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/docker/go-units"
@@ -167,58 +168,15 @@ func New(root string, capacity int64) *DiskFS {
 	return d
 }

-// init loads existing files from disk and migrates legacy depot files to sharded structure
+// init loads existing files from disk
 func (d *DiskFS) init() {
 	tstart := time.Now()

-	var depotFiles []string // Track depot files that need migration
+	// Use concurrent directory scanning for blazing fast initialization
+	fileInfos := d.scanDirectoryConcurrently()

-	err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if info.IsDir() {
-			return nil
-		}
-		d.mu.Lock()
-		// Extract key from sharded path: remove root and convert sharding back
-		// Handle both "./disk" and "disk" root paths
-		rootPath := d.root
-		if strings.HasPrefix(rootPath, "./") {
-			rootPath = rootPath[2:] // Remove "./" prefix
-		}
-		relPath := strings.ReplaceAll(npath[len(rootPath)+1:], "\\", "/")
-		// Extract the original key from the sharded path
-		k := d.extractKeyFromPath(relPath)
-		fi := vfs.NewFileInfoFromOS(info, k)
-		d.info[k] = fi
-		d.LRU.Add(k, fi)
-		// Initialize access time with file modification time
-		fi.UpdateAccessBatched(d.timeUpdater)
-		d.size += info.Size()
-		// Track depot files for potential migration
-		if strings.HasPrefix(relPath, "depot/") {
-			depotFiles = append(depotFiles, relPath)
-		}
-		d.mu.Unlock()
-		return nil
-	})
-	if err != nil {
-		logger.Logger.Error().Err(err).Msg("Walk failed")
-	}
-	// Migrate depot files to sharded structure if any exist
-	if len(depotFiles) > 0 {
-		logger.Logger.Info().Int("count", len(depotFiles)).Msg("Found legacy depot files, starting migration")
-		d.migrateDepotFiles(depotFiles)
-	}
+	// Batch process all files to minimize lock contention
+	d.batchProcessFiles(fileInfos)

 	logger.Logger.Info().
 		Str("name", d.Name()).
@@ -230,68 +188,201 @@ func (d *DiskFS) init() {
 		Msg("init")
 }

-// migrateDepotFiles moves legacy depot files to the sharded steam structure
-func (d *DiskFS) migrateDepotFiles(depotFiles []string) {
-	migratedCount := 0
-	errorCount := 0
-	for _, relPath := range depotFiles {
-		// Extract the steam key from the depot path
-		steamKey := d.extractKeyFromPath(relPath)
-		if !strings.HasPrefix(steamKey, "steam/") {
-			// Skip if we can't extract a proper steam key
-			errorCount++
-			continue
-		}
-		// Get the source and destination paths
-		sourcePath := filepath.Join(d.root, relPath)
-		shardedPath := d.shardPath(steamKey)
-		destPath := filepath.Join(d.root, shardedPath)
-		// Create destination directory
-		destDir := filepath.Dir(destPath)
-		if err := os.MkdirAll(destDir, 0755); err != nil {
-			logger.Logger.Error().Err(err).Str("path", destDir).Msg("Failed to create migration destination directory")
-			errorCount++
-			continue
-		}
-		// Move the file
-		if err := os.Rename(sourcePath, destPath); err != nil {
-			logger.Logger.Error().Err(err).Str("from", sourcePath).Str("to", destPath).Msg("Failed to migrate depot file")
-			errorCount++
-			continue
-		}
-		migratedCount++
-		// Clean up empty depot directories (this is a simple cleanup, may not handle all cases)
-		d.cleanupEmptyDepotDirs(filepath.Dir(sourcePath))
-	}
-	logger.Logger.Info().
-		Int("migrated", migratedCount).
-		Int("errors", errorCount).
-		Msg("Depot file migration completed")
-}
-
-// cleanupEmptyDepotDirs removes empty depot directories after migration
-func (d *DiskFS) cleanupEmptyDepotDirs(dirPath string) {
-	for dirPath != d.root && strings.HasPrefix(dirPath, filepath.Join(d.root, "depot")) {
-		entries, err := os.ReadDir(dirPath)
-		if err != nil || len(entries) > 0 {
-			break
-		}
-		// Directory is empty, remove it
-		if err := os.Remove(dirPath); err != nil {
-			logger.Logger.Error().Err(err).Str("dir", dirPath).Msg("Failed to remove empty depot directory")
-			break
-		}
-		// Move up to parent directory
-		dirPath = filepath.Dir(dirPath)
-	}
-}
+// fileInfo represents a file found during directory scanning
+type fileInfo struct {
+	path    string
+	relPath string
+	key     string
+	size    int64
+	modTime time.Time
+	isDepot bool
+}
+
+// scanDirectoryConcurrently performs fast concurrent directory scanning
+func (d *DiskFS) scanDirectoryConcurrently() []fileInfo {
+	// Channel for collecting file information
+	fileChan := make(chan fileInfo, 1000)
+
+	// Progress tracking
+	var totalFiles int64
+	var processedFiles int64
+	progressTicker := time.NewTicker(500 * time.Millisecond)
+	defer progressTicker.Stop()
+
+	// Wait group for workers
+	var wg sync.WaitGroup
+
+	// Start directory scanner
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		defer close(fileChan)
+		d.scanDirectoryRecursive(d.root, fileChan, &totalFiles)
+	}()
+
+	// Collect results with progress reporting
+	var fileInfos []fileInfo
+
+	// Use a separate goroutine to collect results
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		for {
+			select {
+			case fi, ok := <-fileChan:
+				if !ok {
+					return
+				}
+				fileInfos = append(fileInfos, fi)
+				processedFiles++
+			case <-progressTicker.C:
+				if totalFiles > 0 {
+					logger.Logger.Debug().
+						Int64("processed", processedFiles).
+						Int64("total", totalFiles).
+						Float64("progress", float64(processedFiles)/float64(totalFiles)*100).
+						Msg("Directory scan progress")
+				}
+			}
+		}
+	}()
+
+	// Wait for scanning to complete
+	wg.Wait()
+	<-done
+
+	logger.Logger.Info().
+		Int64("files_scanned", processedFiles).
+		Msg("Directory scan completed")
+
+	return fileInfos
+}
+
+// scanDirectoryRecursive performs recursive directory scanning with early termination
+func (d *DiskFS) scanDirectoryRecursive(dirPath string, fileChan chan<- fileInfo, totalFiles *int64) {
+	// Use ReadDir for faster directory listing (no stat calls)
+	entries, err := os.ReadDir(dirPath)
+	if err != nil {
+		return
+	}
+
+	// Count files first for progress tracking
+	fileCount := 0
+	for _, entry := range entries {
+		if !entry.IsDir() {
+			fileCount++
+		}
+	}
+	atomic.AddInt64(totalFiles, int64(fileCount))
+
+	// Process entries concurrently with limited workers
+	semaphore := make(chan struct{}, 8) // Limit concurrent processing
+	var wg sync.WaitGroup
+
+	for _, entry := range entries {
+		entryPath := filepath.Join(dirPath, entry.Name())
+		if entry.IsDir() {
+			// Recursively scan subdirectories
+			wg.Add(1)
+			go func(path string) {
+				defer wg.Done()
+				semaphore <- struct{}{}        // Acquire semaphore
+				defer func() { <-semaphore }() // Release semaphore
+				d.scanDirectoryRecursive(path, fileChan, totalFiles)
+			}(entryPath)
+		} else {
+			// Process file with lazy loading
+			wg.Add(1)
+			go func(path string, name string, entry os.DirEntry) {
+				defer wg.Done()
+				semaphore <- struct{}{}        // Acquire semaphore
+				defer func() { <-semaphore }() // Release semaphore
+
+				// Extract relative path and key first (no stat call)
+				rootPath := d.root
+				rootPath = strings.TrimPrefix(rootPath, "./")
+				relPath := strings.ReplaceAll(path[len(rootPath)+1:], "\\", "/")
+				key := d.extractKeyFromPath(relPath)
+
+				// Get file info only when needed (lazy loading)
+				info, err := entry.Info()
+				if err != nil {
+					return
+				}
+
+				// Send file info
+				fileChan <- fileInfo{
+					path:    path,
+					relPath: relPath,
+					key:     key,
+					size:    info.Size(),
+					modTime: info.ModTime(),
+					isDepot: false, // No longer tracking depot files
+				}
+			}(entryPath, entry.Name(), entry)
+		}
+	}
+	wg.Wait()
+}
+
+// batchProcessFiles processes all files in batches to minimize lock contention
+func (d *DiskFS) batchProcessFiles(fileInfos []fileInfo) {
+	const batchSize = 1000 // Process files in batches
+
+	// Sort files by key for consistent ordering
+	sort.Slice(fileInfos, func(i, j int) bool {
+		return fileInfos[i].key < fileInfos[j].key
+	})
+
+	// Process in batches with progress reporting
+	totalBatches := (len(fileInfos) + batchSize - 1) / batchSize
+	for i := 0; i < len(fileInfos); i += batchSize {
+		end := i + batchSize
+		if end > len(fileInfos) {
+			end = len(fileInfos)
+		}
+		batch := fileInfos[i:end]
+		d.processBatch(batch)
+
+		// Log progress every 10 batches
+		if (i/batchSize+1)%10 == 0 || i+batchSize >= len(fileInfos) {
+			logger.Logger.Debug().
+				Int("batch", i/batchSize+1).
+				Int("total_batches", totalBatches).
+				Int("files_processed", end).
+				Int("total_files", len(fileInfos)).
+				Msg("Batch processing progress")
+		}
+	}
+}
+
+// processBatch processes a batch of files with a single lock acquisition
+func (d *DiskFS) processBatch(batch []fileInfo) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	for _, fi := range batch {
+		// Create FileInfo from batch data
+		fileInfo := &vfs.FileInfo{
+			Key:         fi.key,
+			Size:        fi.size,
+			CTime:       fi.modTime,
+			ATime:       fi.modTime,
+			AccessCount: 1,
+		}
+
+		// Add to maps
+		d.info[fi.key] = fileInfo
+		d.LRU.Add(fi.key, fileInfo)
+
+		// Initialize access time
+		fileInfo.UpdateAccessBatched(d.timeUpdater)
+
+		// Update total size
+		d.size += fi.size
+	}
+}

View File

@@ -4,9 +4,9 @@ package gc
 import (
 	"context"
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/disk"
-	"s1d3sw1ped/SteamCache2/vfs/memory"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/disk"
+	"s1d3sw1ped/steamcache2/vfs/memory"
 	"sync"
 	"sync/atomic"
 	"time"

View File

@@ -1,7 +1,7 @@
 package memory

 import (
-	"s1d3sw1ped/SteamCache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs"
 	"sync"
 	"sync/atomic"
 	"time"

View File

@@ -5,8 +5,8 @@ import (
 	"bytes"
 	"container/list"
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs/types"
-	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"s1d3sw1ped/steamcache2/vfs/types"
+	"s1d3sw1ped/steamcache2/vfs/vfserror"
 	"sort"
 	"strings"
 	"sync"

View File

@@ -3,7 +3,7 @@ package vfs
 import (
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs/types"
+	"s1d3sw1ped/steamcache2/vfs/types"
 )

 // VFS defines the interface for virtual file systems

View File

@@ -2,7 +2,7 @@ package warming
 import (
 	"context"
-	"s1d3sw1ped/SteamCache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs"
 	"sync"
 	"sync/atomic"
 	"time"