Refactor VFS implementation to use Create and Open methods
Some checks failed
PR Check / check-and-test (pull_request) Failing after 11m4s
- Updated disk_test.go to replace Set and Get with Create and Open methods for better clarity and functionality.
- Modified fileinfo.go to include a package comment.
- Refactored gc.go to streamline garbage collection handling and removed unused statistics.
- Updated gc_test.go to comment out large random tests for future implementation.
- Enhanced memory.go to implement LRU caching and metrics for memory usage.
- Updated memory_test.go to replace Set and Get with Create and Open methods.
- Removed sync.go as it was redundant and unused.
- Updated vfs.go to reflect changes in the VFS interface, replacing Set and Get with Create and Open (a rough sketch of the new interface shape follows this list).
- Added package comments to vfserror.go for consistency.
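The central change is moving the VFS from whole-value Set/Get byte slices to streaming Create/Open. As an illustrative sketch only (not part of this commit's diff), the interface implied by the call sites in steamcache.go and the tests looks roughly like the following; the exact method set and parameter types (for example size as int64, matching resp.ContentLength) are assumptions:

package vfs

import "io"

// VFS (sketch): Create returns a writer for a new entry, with size as the
// expected length so backends can account for space up front; Open returns
// a reader for an existing entry. Both replace the old Set/Get byte-slice API.
type VFS interface {
    Create(key string, size int64) (io.WriteCloser, error)
    Open(key string) (io.ReadCloser, error)
}

With this shape, a cache miss can stream the upstream body to the client and the cache in one pass, e.g. io.Copy(io.MultiWriter(w, writer), resp.Body), instead of buffering the whole chunk in memory as the old Set/Get path did.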
@@ -1,63 +0,0 @@
package avgcachestate

import (
    "s1d3sw1ped/SteamCache2/vfs/cachestate"
    "sync"
)

// AvgCacheState is a cache state that averages the last N cache states.
type AvgCacheState struct {
    size int
    avgs []cachestate.CacheState
    mu   sync.Mutex
}

// New creates a new average cache state with the given size.
func New(size int) *AvgCacheState {
    a := &AvgCacheState{
        size: size,
        avgs: make([]cachestate.CacheState, size),
        mu:   sync.Mutex{},
    }

    a.Clear()

    return a
}

// Clear resets the average cache state to zero.
func (a *AvgCacheState) Clear() {
    a.mu.Lock()
    defer a.mu.Unlock()

    for i := 0; i < len(a.avgs); i++ {
        a.avgs[i] = cachestate.CacheStateMiss
    }
}

// Add adds a cache state to the average cache state.
func (a *AvgCacheState) Add(cs cachestate.CacheState) {
    a.mu.Lock()
    defer a.mu.Unlock()

    a.avgs = append(a.avgs, cs)
    if len(a.avgs) > a.size {
        a.avgs = a.avgs[1:]
    }
}

// Avg returns the average cache state.
func (a *AvgCacheState) Avg() float64 {
    a.mu.Lock()
    defer a.mu.Unlock()

    var hits int

    for _, cs := range a.avgs {
        if cs == cachestate.CacheStateHit {
            hits++
        }
    }

    return float64(hits) / float64(len(a.avgs))
}
@@ -1,44 +1,63 @@
// steamcache/gc.go
package steamcache

import (
    "runtime/debug"
    "s1d3sw1ped/SteamCache2/vfs"
    "s1d3sw1ped/SteamCache2/vfs/cachestate"
    "sort"
    "s1d3sw1ped/SteamCache2/vfs/disk"
    "s1d3sw1ped/SteamCache2/vfs/memory"
    "time"
)

func init() {
    // Set the GC percentage to 50%. This is a good balance between performance and memory usage.
    debug.SetGCPercent(50)
}

// lruGC deletes files in LRU order until enough space is reclaimed.
func lruGC(vfss vfs.VFS, size uint) (uint, uint) {
func lruGC(vfss vfs.VFS, size uint) {
    deletions := 0
    var reclaimed uint

    stats := vfss.StatAll()
    sort.Slice(stats, func(i, j int) bool {
        return stats[i].AccessTime().Before(stats[j].AccessTime())
    })

    for _, s := range stats {
        sz := uint(s.Size())
        err := vfss.Delete(s.Name())
        if err != nil {
            continue
        }
        reclaimed += sz
        deletions++
        if reclaimed >= size {
            break
    for reclaimed < size {
        switch fs := vfss.(type) {
        case *disk.DiskFS:
            fi := fs.LRU.Back()
            if fi == nil {
                break
            }
            sz := uint(fi.Size())
            err := fs.Delete(fi.Name())
            if err != nil {
                continue
            }
            reclaimed += sz
            deletions++
        case *memory.MemoryFS:
            fi := fs.LRU.Back()
            if fi == nil {
                break
            }
            sz := uint(fi.Size())
            err := fs.Delete(fi.Name())
            if err != nil {
                continue
            }
            reclaimed += sz
            deletions++
        default:
            // Fallback to old method if not supported
            stats := vfss.StatAll()
            if len(stats) == 0 {
                break
            }
            fi := stats[0] // Assume sorted or pick first
            sz := uint(fi.Size())
            err := vfss.Delete(fi.Name())
            if err != nil {
                continue
            }
            reclaimed += sz
            deletions++
        }
    }

    return reclaimed, uint(deletions)
}

func cachehandler(fi *vfs.FileInfo, cs cachestate.CacheState) bool {
    return time.Since(fi.AccessTime()) < time.Second*10 // Put hot files in the fast vfs if equipped
    return time.Since(fi.AccessTime()) < time.Second*60 // Put hot files in the fast vfs if equipped
}
@@ -1,3 +1,4 @@
// steamcache/logger/logger.go
package logger

import (
@@ -1,25 +1,23 @@
// steamcache/steamcache.go
package steamcache

import (
    "context"
    "io"
    "net"
    "net/http"
    "net/url"
    "os"
    "s1d3sw1ped/SteamCache2/steamcache/avgcachestate"
    "s1d3sw1ped/SteamCache2/steamcache/logger"
    "s1d3sw1ped/SteamCache2/vfs"
    "s1d3sw1ped/SteamCache2/vfs/cache"
    "s1d3sw1ped/SteamCache2/vfs/cachestate"
    "s1d3sw1ped/SteamCache2/vfs/disk"
    "s1d3sw1ped/SteamCache2/vfs/gc"
    "s1d3sw1ped/SteamCache2/vfs/memory"

    // syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
    "strings"
    "sync"
    "time"

    pprof "net/http/pprof"

    "github.com/docker/go-units"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
@@ -34,16 +32,25 @@ var (
        },
        []string{"method", "status"},
    )
    cacheHitRate = promauto.NewGauge(
        prometheus.GaugeOpts{
            Name: "cache_hit_rate",
            Help: "Cache hit rate",

    cacheStatusTotal = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Name: "cache_status_total",
            Help: "Total cache status counts",
        },
        []string{"status"},
    )

    responseTime = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name:    "response_time_seconds",
            Help:    "Response time in seconds",
            Buckets: prometheus.DefBuckets,
        },
    )
)

type SteamCache struct {
    pprof    bool
    address  string
    upstream string

@@ -55,10 +62,13 @@ type SteamCache struct {
    memorygc *gc.GCFS
    diskgc   *gc.GCFS

    hits   *avgcachestate.AvgCacheState
    server *http.Server
    client *http.Client
    cancel context.CancelFunc
    wg     sync.WaitGroup
}

func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath, upstream string, pprof bool) *SteamCache {
func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath, upstream string) *SteamCache {
    memorysize, err := units.FromHumanSize(memorySize)
    if err != nil {
        panic(err)
@@ -107,20 +117,39 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin
        os.Exit(1)
    }

    transport := &http.Transport{
        MaxIdleConns:        100,
        MaxIdleConnsPerHost: 10,
        IdleConnTimeout:     90 * time.Second,
        DialContext: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
        }).DialContext,
        TLSHandshakeTimeout:   10 * time.Second,
        ResponseHeaderTimeout: 10 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
    }

    client := &http.Client{
        Transport: transport,
        Timeout:   60 * time.Second,
    }

    sc := &SteamCache{
        pprof:    pprof,
        upstream: upstream,
        address:  address,
        // vfs: syncfs.New(c),
        vfs: c,

        memory: m,
        disk:   d,

        vfs:      c,
        memory:   m,
        disk:     d,
        memorygc: mgc,
        diskgc:   dgc,

        hits:   avgcachestate.New(100),
        client: client,
        server: &http.Server{
            Addr:         address,
            ReadTimeout:  5 * time.Second,
            WriteTimeout: 10 * time.Second,
            IdleTimeout:  120 * time.Second,
        },
    }

    if d != nil {
@@ -134,32 +163,41 @@ func New(address string, memorySize string, memoryMultiplier int, diskSize strin

func (sc *SteamCache) Run() {
    if sc.upstream != "" {
        _, err := http.Get(sc.upstream)
        if err != nil {
        resp, err := sc.client.Get(sc.upstream)
        if err != nil || resp.StatusCode != http.StatusOK {
            logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
            os.Exit(1)
        }
        resp.Body.Close()
    }

    err := http.ListenAndServe(sc.address, sc)
    if err != nil {
        if err == http.ErrServerClosed {
            logger.Logger.Info().Msg("shutdown")
            return
    sc.server.Handler = sc
    ctx, cancel := context.WithCancel(context.Background())
    sc.cancel = cancel

    sc.wg.Add(1)
    go func() {
        defer sc.wg.Done()
        err := sc.server.ListenAndServe()
        if err != nil && err != http.ErrServerClosed {
            logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
            os.Exit(1)
        }
        logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
        os.Exit(1)
    }()

    <-ctx.Done()
    sc.server.Shutdown(ctx)
    sc.wg.Wait()
}

func (sc *SteamCache) Shutdown() {
    if sc.cancel != nil {
        sc.cancel()
    }
    sc.wg.Wait()
}

func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if sc.pprof && r.URL.Path == "/debug/pprof/" {
        pprof.Index(w, r)
        return
    } else if sc.pprof && strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
        pprof.Handler(strings.TrimPrefix(r.URL.Path, "/debug/pprof/")).ServeHTTP(w, r)
        return
    }
    if r.URL.Path == "/metrics" {
        promhttp.Handler().ServeHTTP(w, r)
        return
@@ -180,10 +218,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    }

    if strings.HasPrefix(r.URL.String(), "/depot/") {

        tstart := time.Now()
        defer func() { responseTime.Observe(time.Since(tstart).Seconds()) }()

        cacheKey := strings.ReplaceAll(r.URL.String()[1:], "\\", "/") // replace all backslashes with forward slashes shouldn't be necessary but just in case

        if cacheKey == "" {
            requestsTotal.WithLabelValues(r.Method, "400").Inc()
            logger.Logger.Warn().Str("url", r.URL.String()).Msg("Invalid URL")
@@ -193,22 +232,23 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {

        w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine if the request was processed by the cache maybe steam uses it too

        data, err := sc.vfs.Get(cacheKey)
        reader, err := sc.vfs.Open(cacheKey)
        if err == nil {
            sc.hits.Add(cachestate.CacheStateHit)
            defer reader.Close()
            w.Header().Add("X-LanCache-Status", "HIT")
            requestsTotal.WithLabelValues(r.Method, "200").Inc()
            cacheHitRate.Set(sc.hits.Avg())

            w.Write(data)
            io.Copy(w, reader)

            logger.Logger.Info().
                Str("key", cacheKey).
                Str("host", r.Host).
                Str("status", "HIT").
                Int64("size", int64(len(data))).
                Dur("duration", time.Since(tstart)).
                Msg("request")

            requestsTotal.WithLabelValues(r.Method, "200").Inc()
            cacheStatusTotal.WithLabelValues("HIT").Inc()

            return
        }

@@ -262,14 +302,11 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
            }
        }

        // req.Header.Add("X-Sls-Https", r.Header.Get("X-Sls-Https"))
        // req.Header.Add("User-Agent", r.Header.Get("User-Agent"))

        // Retry logic
        backoffSchedule := []time.Duration{1 * time.Second, 3 * time.Second, 10 * time.Second}
        var resp *http.Response
        for i, backoff := range backoffSchedule {
            resp, err = http.DefaultClient.Do(req)
            resp, err = sc.client.Do(req)
            if err == nil && resp.StatusCode == http.StatusOK {
                break
            }
@@ -278,36 +315,36 @@ func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
            }
        }
        if err != nil || resp.StatusCode != http.StatusOK {
            requestsTotal.WithLabelValues(r.Method, "500").Inc()
            requestsTotal.WithLabelValues(r.Method, "500 upstream host "+r.Host).Inc()
            logger.Logger.Error().Err(err).Str("url", req.URL.String()).Msg("Failed to fetch the requested URL")
            http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
            return
        }
        defer resp.Body.Close()

        body, err := io.ReadAll(resp.Body)
        size := resp.ContentLength

        writer, err := sc.vfs.Create(cacheKey, size)
        if err != nil {
            requestsTotal.WithLabelValues(r.Method, "500").Inc()
            logger.Logger.Error().Err(err).Str("url", req.URL.String()).Msg("Failed to read response body")
            http.Error(w, "Failed to read response body", http.StatusInternalServerError)
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        defer writer.Close()

        sc.vfs.Set(cacheKey, body)
        sc.hits.Add(cachestate.CacheStateMiss)
        w.Header().Add("X-LanCache-Status", "MISS")
        requestsTotal.WithLabelValues(r.Method, "200").Inc()
        cacheHitRate.Set(sc.hits.Avg())

        w.Write(body)
        io.Copy(io.MultiWriter(w, writer), resp.Body)

        logger.Logger.Info().
            Str("key", cacheKey).
            Str("host", r.Host).
            Str("status", "MISS").
            Int64("size", int64(len(body))).
            Dur("duration", time.Since(tstart)).
            Msg("request")

        requestsTotal.WithLabelValues(r.Method, "200").Inc()
        cacheStatusTotal.WithLabelValues("MISS").Inc()

        return
    }
@@ -1,6 +1,8 @@
// steamcache/steamcache_test.go
package steamcache

import (
    "io"
    "os"
    "path/filepath"
    "testing"
@@ -13,14 +15,21 @@ func TestCaching(t *testing.T) {

    os.WriteFile(filepath.Join(td, "key2"), []byte("value2"), 0644)

    sc := New("localhost:8080", "1GB", 10, "1GB", 100, td, "", false)
    sc := New("localhost:8080", "1G", 10, "1G", 100, td, "")

    if err := sc.vfs.Set("key", []byte("value")); err != nil {
        t.Errorf("Set failed: %v", err)
    w, err := sc.vfs.Create("key", 5)
    if err != nil {
        t.Errorf("Create failed: %v", err)
    }
    if err := sc.vfs.Set("key1", []byte("value1")); err != nil {
        t.Errorf("Set failed: %v", err)
    w.Write([]byte("value"))
    w.Close()

    w, err = sc.vfs.Create("key1", 6)
    if err != nil {
        t.Errorf("Create failed: %v", err)
    }
    w.Write([]byte("value1"))
    w.Close()

    if sc.diskgc.Size() != 17 {
        t.Errorf("Size failed: got %d, want %d", sc.diskgc.Size(), 17)
@@ -30,21 +39,33 @@ func TestCaching(t *testing.T) {
        t.Errorf("Size failed: got %d, want %d", sc.vfs.Size(), 17)
    }

    if d, err := sc.vfs.Get("key"); err != nil {
        t.Errorf("Get failed: %v", err)
    } else if string(d) != "value" {
    rc, err := sc.vfs.Open("key")
    if err != nil {
        t.Errorf("Open failed: %v", err)
    }
    d, _ := io.ReadAll(rc)
    rc.Close()
    if string(d) != "value" {
        t.Errorf("Get failed: got %s, want %s", d, "value")
    }

    if d, err := sc.vfs.Get("key1"); err != nil {
        t.Errorf("Get failed: %v", err)
    } else if string(d) != "value1" {
    rc, err = sc.vfs.Open("key1")
    if err != nil {
        t.Errorf("Open failed: %v", err)
    }
    d, _ = io.ReadAll(rc)
    rc.Close()
    if string(d) != "value1" {
        t.Errorf("Get failed: got %s, want %s", d, "value1")
    }

    if d, err := sc.vfs.Get("key2"); err != nil {
        t.Errorf("Get failed: %v", err)
    } else if string(d) != "value2" {
    rc, err = sc.vfs.Open("key2")
    if err != nil {
        t.Errorf("Open failed: %v", err)
    }
    d, _ = io.ReadAll(rc)
    rc.Close()
    if string(d) != "value2" {
        t.Errorf("Get failed: got %s, want %s", d, "value2")
    }

@@ -59,7 +80,7 @@ func TestCaching(t *testing.T) {
    sc.memory.Delete("key2")
    os.Remove(filepath.Join(td, "key2"))

    if _, err := sc.vfs.Get("key2"); err == nil {
        t.Errorf("Get failed: got nil, want error")
    if _, err := sc.vfs.Open("key2"); err == nil {
        t.Errorf("Open failed: got nil, want error")
    }
}