2 Commits

9b2affe95a Refactor disk initialization and file processing in DiskFS
- Replaced legacy depot file migration logic with concurrent directory scanning for improved performance.
- Introduced batch processing of files to minimize lock contention during initialization (a minimal sketch of this scan-then-batch pattern follows this entry).
- Simplified the init function by removing unnecessary complexity and focusing on efficient file handling.
- Enhanced logging to provide better insights into directory scan progress and completion.
2025-09-22 00:51:51 -05:00
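The first two bullets describe a scan-then-batch pattern: stream file metadata out of a concurrent tree walk, then fold it into the shared index in fixed-size batches so the lock is taken once per batch instead of once per file. Below is a minimal, self-contained Go sketch of that idea; the names (entry, flush, batchSize) are illustrative only and are not the DiskFS API, whose actual implementation appears in the vfs/disk diff further down.

// Hypothetical sketch, not the actual DiskFS code: scan a tree, then
// index the results in batches so the shared map's lock is taken once
// per batch of files instead of once per file.
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"sync"
)

type entry struct {
	path string
	size int64
}

func main() {
	files := make(chan entry, 256)

	// Producer: walk the tree and stream file metadata into the channel.
	go func() {
		defer close(files)
		_ = filepath.WalkDir(".", func(p string, d fs.DirEntry, err error) error {
			if err != nil || d.IsDir() {
				return err
			}
			if info, ierr := d.Info(); ierr == nil {
				files <- entry{path: p, size: info.Size()}
			}
			return nil
		})
	}()

	// Shared index, guarded by a mutex the way DiskFS guards d.info with d.mu.
	var mu sync.Mutex
	index := make(map[string]int64)
	var total int64

	// Consumer: buffer entries and apply each full batch under one lock.
	const batchSize = 1000
	batch := make([]entry, 0, batchSize)
	flush := func() {
		if len(batch) == 0 {
			return
		}
		mu.Lock()
		for _, e := range batch {
			index[e.path] = e.size
			total += e.size
		}
		mu.Unlock()
		batch = batch[:0]
	}

	for e := range files {
		batch = append(batch, e)
		if len(batch) == batchSize {
			flush()
		}
	}
	flush() // final partial batch

	fmt.Printf("indexed %d files, %d bytes\n", len(index), total)
}

The batch size trades memory for contention: larger batches mean fewer lock acquisitions but a bigger buffer held between flushes. The actual commit settles on batches of 1000, as the diff below shows.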
bd123bc63a Refactor module naming and update references to steamcache2
- Changed module name from `s1d3sw1ped/SteamCache2` to `s1d3sw1ped/steamcache2` for consistency.
- Updated all import paths and references throughout the codebase to reflect the new module name (one way to script such a rename is sketched after this entry).
- Adjusted README and Makefile to use the updated module name, ensuring clarity in usage instructions.
2025-09-21 23:10:21 -05:00
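A rename like this is mostly mechanical: rewrite the module line in go.mod, then rewrite every import path that starts with the old module name. As a hypothetical illustration (this helper is not part of the repo), it could be scripted in Go using golang.org/x/mod/modfile for the go.mod edit and a plain byte replace for the sources:

// Hypothetical one-off rename helper, not part of this repository.
package main

import (
	"bytes"
	"os"
	"path/filepath"
	"strings"

	"golang.org/x/mod/modfile"
)

const (
	oldPath = "s1d3sw1ped/SteamCache2"
	newPath = "s1d3sw1ped/steamcache2"
)

func main() {
	// Rewrite the module line in go.mod.
	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}
	if err := f.AddModuleStmt(newPath); err != nil {
		panic(err)
	}
	out, err := f.Format()
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("go.mod", out, 0o644); err != nil {
		panic(err)
	}

	// Rewrite import paths (and other references) in every .go file.
	_ = filepath.WalkDir(".", func(p string, d os.DirEntry, err error) error {
		if err != nil || d.IsDir() || !strings.HasSuffix(p, ".go") {
			return err
		}
		src, rerr := os.ReadFile(p)
		if rerr != nil {
			return rerr
		}
		if bytes.Contains(src, []byte(oldPath)) {
			src = bytes.ReplaceAll(src, []byte(oldPath), []byte(newPath))
			return os.WriteFile(p, src, 0o644)
		}
		return nil
	})
}

After running such a script, go build ./... surfaces anything the replace missed, such as references in the Makefile and README, which this commit updates by hand.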
15 changed files with 238 additions and 147 deletions

@@ -11,8 +11,8 @@ builds:
       - -s
       - -w
       - -extldflags "-static"
-      - -X s1d3sw1ped/SteamCache2/version.Version={{.Version}}
-      - -X s1d3sw1ped/SteamCache2/version.Date={{.Date}}
+      - -X s1d3sw1ped/steamcache2/version.Version={{.Version}}
+      - -X s1d3sw1ped/steamcache2/version.Date={{.Date}}
     env:
       - CGO_ENABLED=0
     goos:

@@ -13,7 +13,7 @@ build-snapshot-single: deps test ## Build a snapshot of the application for the
 	@goreleaser build --single-target --snapshot --clean

 help: ## Show this help message
-	@echo SteamCache2 Makefile
+	@echo steamcache2 Makefile
 	@echo Available targets:
 	@echo   run          Run the application
 	@echo   run-debug    Run the application with debug logging

@@ -21,7 +21,7 @@ SteamCache2 is a blazing fast download cache for Steam, designed to reduce bandw
 1. **Clone and build:**
    ```bash
    git clone <repository-url>
-   cd SteamCache2
+   cd steamcache2
    make # This will run tests and build the application
    ```

@@ -4,10 +4,10 @@ package cmd
 import (
 	"fmt"
 	"os"
-	"s1d3sw1ped/SteamCache2/config"
-	"s1d3sw1ped/SteamCache2/steamcache"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/version"
+	"s1d3sw1ped/steamcache2/config"
+	"s1d3sw1ped/steamcache2/steamcache"
+	"s1d3sw1ped/steamcache2/steamcache/logger"
+	"s1d3sw1ped/steamcache2/version"
 	"strings"

 	"github.com/rs/zerolog"
@@ -25,9 +25,9 @@ var (
 )

 var rootCmd = &cobra.Command{
-	Use:   "SteamCache2",
-	Short: "SteamCache2 is a caching solution for Steam game updates and installations",
-	Long: `SteamCache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
+	Use:   "steamcache2",
+	Short: "steamcache2 is a caching solution for Steam game updates and installations",
+	Long: `steamcache2 is a caching solution designed to optimize the delivery of Steam game updates and installations.
 It reduces bandwidth usage and speeds up the download process by caching game files locally.
 This tool is particularly useful for environments with multiple Steam users, such as gaming cafes or households with multiple gamers.
 By caching game files, SteamCache2 ensures that subsequent downloads of the same files are served from the local cache,
@@ -53,7 +53,7 @@ var rootCmd = &cobra.Command{
 		logger.Logger = zerolog.New(writer).With().Timestamp().Logger()

 		logger.Logger.Info().
-			Msg("SteamCache2 " + version.Version + " " + version.Date + " starting...")
+			Msg("steamcache2 " + version.Version + " " + version.Date + " starting...")

 		// Load configuration
 		cfg, err := config.LoadConfig(configPath)
@@ -121,11 +121,11 @@ var rootCmd = &cobra.Command{
 		)

 		logger.Logger.Info().
-			Msg("SteamCache2 " + version.Version + " started on " + cfg.ListenAddress)
+			Msg("steamcache2 " + version.Version + " started on " + cfg.ListenAddress)

 		sc.Run()

-		logger.Logger.Info().Msg("SteamCache2 stopped")
+		logger.Logger.Info().Msg("steamcache2 stopped")
 		os.Exit(0)
 	},
 }

@@ -4,7 +4,7 @@ package cmd
 import (
 	"fmt"
 	"os"
-	"s1d3sw1ped/SteamCache2/version"
+	"s1d3sw1ped/steamcache2/version"

 	"github.com/spf13/cobra"
 )
@@ -12,10 +12,10 @@ import (

 // versionCmd represents the version command
 var versionCmd = &cobra.Command{
 	Use:   "version",
-	Short: "prints the version of SteamCache2",
-	Long:  `Prints the version of SteamCache2. This command is useful for checking the version of the application.`,
+	Short: "prints the version of steamcache2",
+	Long:  `Prints the version of steamcache2. This command is useful for checking the version of the application.`,
 	Run: func(cmd *cobra.Command, args []string) {
-		fmt.Fprintln(os.Stderr, "SteamCache2", version.Version, version.Date)
+		fmt.Fprintln(os.Stderr, "steamcache2", version.Version, version.Date)
 	},
 }

go.mod

@@ -1,4 +1,4 @@
-module s1d3sw1ped/SteamCache2
+module s1d3sw1ped/steamcache2

 go 1.23.0

@@ -2,8 +2,8 @@
 package main

 import (
-	"s1d3sw1ped/SteamCache2/cmd"
-	_ "s1d3sw1ped/SteamCache2/version" // Import the version package for global version variable
+	"s1d3sw1ped/steamcache2/cmd"
+	_ "s1d3sw1ped/steamcache2/version" // Import the version package for global version variable
 )

 func main() {
@@ -13,15 +13,15 @@ import (
 	"net/url"
 	"os"
 	"regexp"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/adaptive"
-	"s1d3sw1ped/SteamCache2/vfs/cache"
-	"s1d3sw1ped/SteamCache2/vfs/disk"
-	"s1d3sw1ped/SteamCache2/vfs/gc"
-	"s1d3sw1ped/SteamCache2/vfs/memory"
-	"s1d3sw1ped/SteamCache2/vfs/predictive"
-	"s1d3sw1ped/SteamCache2/vfs/warming"
+	"s1d3sw1ped/steamcache2/steamcache/logger"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/adaptive"
+	"s1d3sw1ped/steamcache2/vfs/cache"
+	"s1d3sw1ped/steamcache2/vfs/disk"
+	"s1d3sw1ped/steamcache2/vfs/gc"
+	"s1d3sw1ped/steamcache2/vfs/memory"
+	"s1d3sw1ped/steamcache2/vfs/predictive"
+	"s1d3sw1ped/steamcache2/vfs/warming"
 	"strconv"
 	"strings"
 	"sync"

vfs/cache/cache.go (vendored)

@@ -3,8 +3,8 @@ package cache
 import (
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/vfserror"
 	"sync"
 	"sync/atomic"
 )

@@ -7,12 +7,13 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"s1d3sw1ped/SteamCache2/steamcache/logger"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"s1d3sw1ped/steamcache2/steamcache/logger"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/vfserror"
 	"sort"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/docker/go-units"
@@ -167,58 +168,15 @@ func New(root string, capacity int64) *DiskFS {
 	return d
 }

-// init loads existing files from disk and migrates legacy depot files to sharded structure
+// init loads existing files from disk
 func (d *DiskFS) init() {
 	tstart := time.Now()

-	var depotFiles []string // Track depot files that need migration
-
-	err := filepath.Walk(d.root, func(npath string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if info.IsDir() {
-			return nil
-		}
-
-		d.mu.Lock()
-		// Extract key from sharded path: remove root and convert sharding back
-		// Handle both "./disk" and "disk" root paths
-		rootPath := d.root
-		if strings.HasPrefix(rootPath, "./") {
-			rootPath = rootPath[2:] // Remove "./" prefix
-		}
-		relPath := strings.ReplaceAll(npath[len(rootPath)+1:], "\\", "/")
-
-		// Extract the original key from the sharded path
-		k := d.extractKeyFromPath(relPath)
-
-		fi := vfs.NewFileInfoFromOS(info, k)
-		d.info[k] = fi
-		d.LRU.Add(k, fi)
-
-		// Initialize access time with file modification time
-		fi.UpdateAccessBatched(d.timeUpdater)
-
-		d.size += info.Size()
-
-		// Track depot files for potential migration
-		if strings.HasPrefix(relPath, "depot/") {
-			depotFiles = append(depotFiles, relPath)
-		}
-		d.mu.Unlock()
-		return nil
-	})
-	if err != nil {
-		logger.Logger.Error().Err(err).Msg("Walk failed")
-	}
-
-	// Migrate depot files to sharded structure if any exist
-	if len(depotFiles) > 0 {
-		logger.Logger.Info().Int("count", len(depotFiles)).Msg("Found legacy depot files, starting migration")
-		d.migrateDepotFiles(depotFiles)
-	}
+	// Use concurrent directory scanning for blazing fast initialization
+	fileInfos := d.scanDirectoryConcurrently()
+
+	// Batch process all files to minimize lock contention
+	d.batchProcessFiles(fileInfos)

 	logger.Logger.Info().
 		Str("name", d.Name()).
@@ -230,68 +188,201 @@ func (d *DiskFS) init() {
 		Msg("init")
 }

-// migrateDepotFiles moves legacy depot files to the sharded steam structure
-func (d *DiskFS) migrateDepotFiles(depotFiles []string) {
-	migratedCount := 0
-	errorCount := 0
-
-	for _, relPath := range depotFiles {
-		// Extract the steam key from the depot path
-		steamKey := d.extractKeyFromPath(relPath)
-		if !strings.HasPrefix(steamKey, "steam/") {
-			// Skip if we can't extract a proper steam key
-			errorCount++
-			continue
-		}
-
-		// Get the source and destination paths
-		sourcePath := filepath.Join(d.root, relPath)
-		shardedPath := d.shardPath(steamKey)
-		destPath := filepath.Join(d.root, shardedPath)
-
-		// Create destination directory
-		destDir := filepath.Dir(destPath)
-		if err := os.MkdirAll(destDir, 0755); err != nil {
-			logger.Logger.Error().Err(err).Str("path", destDir).Msg("Failed to create migration destination directory")
-			errorCount++
-			continue
-		}
-
-		// Move the file
-		if err := os.Rename(sourcePath, destPath); err != nil {
-			logger.Logger.Error().Err(err).Str("from", sourcePath).Str("to", destPath).Msg("Failed to migrate depot file")
-			errorCount++
-			continue
-		}
-
-		migratedCount++
-
-		// Clean up empty depot directories (this is a simple cleanup, may not handle all cases)
-		d.cleanupEmptyDepotDirs(filepath.Dir(sourcePath))
-	}
-
-	logger.Logger.Info().
-		Int("migrated", migratedCount).
-		Int("errors", errorCount).
-		Msg("Depot file migration completed")
-}
-
-// cleanupEmptyDepotDirs removes empty depot directories after migration
-func (d *DiskFS) cleanupEmptyDepotDirs(dirPath string) {
-	for dirPath != d.root && strings.HasPrefix(dirPath, filepath.Join(d.root, "depot")) {
-		entries, err := os.ReadDir(dirPath)
-		if err != nil || len(entries) > 0 {
-			break
-		}
-
-		// Directory is empty, remove it
-		if err := os.Remove(dirPath); err != nil {
-			logger.Logger.Error().Err(err).Str("dir", dirPath).Msg("Failed to remove empty depot directory")
-			break
-		}
-
-		// Move up to parent directory
-		dirPath = filepath.Dir(dirPath)
-	}
-}
+// fileInfo represents a file found during directory scanning
+type fileInfo struct {
+	path    string
+	relPath string
+	key     string
+	size    int64
+	modTime time.Time
+	isDepot bool
+}
+
+// scanDirectoryConcurrently performs fast concurrent directory scanning
+func (d *DiskFS) scanDirectoryConcurrently() []fileInfo {
+	// Channel for collecting file information
+	fileChan := make(chan fileInfo, 1000)
+
+	// Progress tracking
+	var totalFiles int64
+	var processedFiles int64
+	progressTicker := time.NewTicker(500 * time.Millisecond)
+	defer progressTicker.Stop()
+
+	// Wait group for workers
+	var wg sync.WaitGroup
+
+	// Start directory scanner
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		defer close(fileChan)
+		d.scanDirectoryRecursive(d.root, fileChan, &totalFiles)
+	}()
+
+	// Collect results with progress reporting
+	var fileInfos []fileInfo
+
+	// Use a separate goroutine to collect results
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		for {
+			select {
+			case fi, ok := <-fileChan:
+				if !ok {
+					return
+				}
+				fileInfos = append(fileInfos, fi)
+				processedFiles++
+			case <-progressTicker.C:
+				if totalFiles > 0 {
+					logger.Logger.Debug().
+						Int64("processed", processedFiles).
+						Int64("total", totalFiles).
+						Float64("progress", float64(processedFiles)/float64(totalFiles)*100).
+						Msg("Directory scan progress")
+				}
+			}
+		}
+	}()
+
+	// Wait for scanning to complete
+	wg.Wait()
+	<-done
+
+	logger.Logger.Info().
+		Int64("files_scanned", processedFiles).
+		Msg("Directory scan completed")
+
+	return fileInfos
+}
+
+// scanDirectoryRecursive performs recursive directory scanning with early termination
+func (d *DiskFS) scanDirectoryRecursive(dirPath string, fileChan chan<- fileInfo, totalFiles *int64) {
+	// Use ReadDir for faster directory listing (no stat calls)
+	entries, err := os.ReadDir(dirPath)
+	if err != nil {
+		return
+	}
+
+	// Count files first for progress tracking
+	fileCount := 0
+	for _, entry := range entries {
+		if !entry.IsDir() {
+			fileCount++
+		}
+	}
+	atomic.AddInt64(totalFiles, int64(fileCount))
+
+	// Process entries concurrently with limited workers
+	semaphore := make(chan struct{}, 8) // Limit concurrent processing
+	var wg sync.WaitGroup
+
+	for _, entry := range entries {
+		entryPath := filepath.Join(dirPath, entry.Name())
+
+		if entry.IsDir() {
+			// Recursively scan subdirectories
+			wg.Add(1)
+			go func(path string) {
+				defer wg.Done()
+				semaphore <- struct{}{}        // Acquire semaphore
+				defer func() { <-semaphore }() // Release semaphore
+				d.scanDirectoryRecursive(path, fileChan, totalFiles)
+			}(entryPath)
+		} else {
+			// Process file with lazy loading
+			wg.Add(1)
+			go func(path string, name string, entry os.DirEntry) {
+				defer wg.Done()
+				semaphore <- struct{}{}        // Acquire semaphore
+				defer func() { <-semaphore }() // Release semaphore

+				// Extract relative path and key first (no stat call)
+				rootPath := d.root
+				rootPath = strings.TrimPrefix(rootPath, "./")
+				relPath := strings.ReplaceAll(path[len(rootPath)+1:], "\\", "/")
+				key := d.extractKeyFromPath(relPath)
+
+				// Get file info only when needed (lazy loading)
+				info, err := entry.Info()
+				if err != nil {
+					return
+				}
+
+				// Send file info
+				fileChan <- fileInfo{
+					path:    path,
+					relPath: relPath,
+					key:     key,
+					size:    info.Size(),
+					modTime: info.ModTime(),
+					isDepot: false, // No longer tracking depot files
+				}
+			}(entryPath, entry.Name(), entry)
+		}
+	}
+
+	wg.Wait()
+}
+
+// batchProcessFiles processes all files in batches to minimize lock contention
+func (d *DiskFS) batchProcessFiles(fileInfos []fileInfo) {
+	const batchSize = 1000 // Process files in batches
+
+	// Sort files by key for consistent ordering
+	sort.Slice(fileInfos, func(i, j int) bool {
+		return fileInfos[i].key < fileInfos[j].key
+	})
+
+	// Process in batches with progress reporting
+	totalBatches := (len(fileInfos) + batchSize - 1) / batchSize
+	for i := 0; i < len(fileInfos); i += batchSize {
+		end := i + batchSize
+		if end > len(fileInfos) {
+			end = len(fileInfos)
+		}
+
+		batch := fileInfos[i:end]
+		d.processBatch(batch)
+
+		// Log progress every 10 batches
+		if (i/batchSize+1)%10 == 0 || i+batchSize >= len(fileInfos) {
+			logger.Logger.Debug().
+				Int("batch", i/batchSize+1).
+				Int("total_batches", totalBatches).
+				Int("files_processed", end).
+				Int("total_files", len(fileInfos)).
+				Msg("Batch processing progress")
+		}
+	}
+}
+
+// processBatch processes a batch of files with a single lock acquisition
+func (d *DiskFS) processBatch(batch []fileInfo) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	for _, fi := range batch {
+		// Create FileInfo from batch data
+		fileInfo := &vfs.FileInfo{
+			Key:         fi.key,
+			Size:        fi.size,
+			CTime:       fi.modTime,
+			ATime:       fi.modTime,
+			AccessCount: 1,
+		}
+
+		// Add to maps
+		d.info[fi.key] = fileInfo
+		d.LRU.Add(fi.key, fileInfo)
+
+		// Initialize access time
+		fileInfo.UpdateAccessBatched(d.timeUpdater)
+
+		// Update total size
+		d.size += fi.size
+	}
+}

@@ -4,9 +4,9 @@ package gc
 import (
 	"context"
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs"
-	"s1d3sw1ped/SteamCache2/vfs/disk"
-	"s1d3sw1ped/SteamCache2/vfs/memory"
+	"s1d3sw1ped/steamcache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs/disk"
+	"s1d3sw1ped/steamcache2/vfs/memory"
 	"sync"
 	"sync/atomic"
 	"time"

@@ -1,7 +1,7 @@
 package memory

 import (
-	"s1d3sw1ped/SteamCache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs"
 	"sync"
 	"sync/atomic"
 	"time"

@@ -5,8 +5,8 @@ import (
 	"bytes"
 	"container/list"
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs/types"
-	"s1d3sw1ped/SteamCache2/vfs/vfserror"
+	"s1d3sw1ped/steamcache2/vfs/types"
+	"s1d3sw1ped/steamcache2/vfs/vfserror"
 	"sort"
 	"strings"
 	"sync"

@@ -3,7 +3,7 @@ package vfs
 import (
 	"io"
-	"s1d3sw1ped/SteamCache2/vfs/types"
+	"s1d3sw1ped/steamcache2/vfs/types"
 )

 // VFS defines the interface for virtual file systems

@@ -2,7 +2,7 @@ package warming
 import (
 	"context"
-	"s1d3sw1ped/SteamCache2/vfs"
+	"s1d3sw1ped/steamcache2/vfs"
 	"sync"
 	"sync/atomic"
 	"time"