package steamcache

import (
	"io"
	"net/http"
	"net/url"
	"os"
	"s1d3sw1ped/SteamCache2/steamcache/avgcachestate"
	"s1d3sw1ped/SteamCache2/steamcache/logger"
	"s1d3sw1ped/SteamCache2/vfs"
	"s1d3sw1ped/SteamCache2/vfs/cache"
	"s1d3sw1ped/SteamCache2/vfs/cachestate"
	"s1d3sw1ped/SteamCache2/vfs/disk"
	"s1d3sw1ped/SteamCache2/vfs/gc"
	"s1d3sw1ped/SteamCache2/vfs/memory"
	// syncfs "s1d3sw1ped/SteamCache2/vfs/sync"
	"strings"
	"time"

	pprof "net/http/pprof"

	"github.com/docker/go-units"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	requestsTotal = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total number of HTTP requests",
		},
		[]string{"method", "status"},
	)
	cacheHitRate = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "cache_hit_rate",
			Help: "Cache hit rate",
		},
	)
)

// SteamCache is an HTTP caching proxy for Steam content, backed by a
// memory and/or disk VFS with garbage collection.
type SteamCache struct {
	pprof    bool
	address  string
	upstream string

	vfs      vfs.VFS
	memory   *memory.MemoryFS
	disk     *disk.DiskFS
	memorygc *gc.GCFS
	diskgc   *gc.GCFS

	hits *avgcachestate.AvgCacheState
}

// New builds a SteamCache from human-readable memory and disk sizes (e.g. "1GB"),
// wiring up the memory and disk tiers and their garbage collectors.
func New(address string, memorySize string, memoryMultiplier int, diskSize string, diskMultiplier int, diskPath, upstream string, pprof bool) *SteamCache {
	memorysize, err := units.FromHumanSize(memorySize)
	if err != nil {
		panic(err)
	}
	disksize, err := units.FromHumanSize(diskSize)
	if err != nil {
		panic(err)
	}

	c := cache.New(
		cachehandler,
	)

	var m *memory.MemoryFS
	var mgc *gc.GCFS
	if memorysize > 0 {
		m = memory.New(memorysize)
		mgc = gc.New(m, memoryMultiplier, lruGC)
	}

	var d *disk.DiskFS
	var dgc *gc.GCFS
	if disksize > 0 {
		d = disk.New(diskPath, disksize)
		dgc = gc.New(d, diskMultiplier, lruGC)
	}

	// Configure the cache to match the specified mode (memory only, disk only,
	// or memory and disk) based on the provided sizes.
	if disksize == 0 && memorysize != 0 { // memory only mode - no disk
		c.SetSlow(mgc)
	} else if disksize != 0 && memorysize == 0 { // disk only mode
		c.SetSlow(dgc)
	} else if disksize != 0 && memorysize != 0 { // memory and disk mode
		c.SetFast(mgc)
		c.SetSlow(dgc)
	} else { // neither memory nor disk is a valid configuration
		logger.Logger.Error().Bool("memory", false).Bool("disk", false).Msg("configuration invalid :( exiting")
		os.Exit(1)
	}

	sc := &SteamCache{
		pprof:    pprof,
		upstream: upstream,
		address:  address,
		// vfs:      syncfs.New(c),
		vfs:      c,
		memory:   m,
		disk:     d,
		memorygc: mgc,
		diskgc:   dgc,
		hits:     avgcachestate.New(100),
	}

	// If the existing on-disk data already exceeds the configured capacity,
	// garbage collect down to the limit before serving requests.
	if d != nil {
		if d.Size() > d.Capacity() {
			lruGC(d, uint(d.Size()-d.Capacity()))
		}
	}

	return sc
}

// Run verifies that the upstream server (if configured) is reachable, then
// starts the HTTP server and blocks until it exits.
func (sc *SteamCache) Run() {
	if sc.upstream != "" {
		resp, err := http.Get(sc.upstream)
		if err != nil {
			logger.Logger.Error().Err(err).Str("upstream", sc.upstream).Msg("Failed to connect to upstream server")
			os.Exit(1)
		}
		resp.Body.Close() // the reachability check only needs the connection, not the body
	}

	err := http.ListenAndServe(sc.address, sc)
	if err != nil {
		if err == http.ErrServerClosed {
			logger.Logger.Info().Msg("shutdown")
			return
		}

		logger.Logger.Error().Err(err).Msg("Failed to start SteamCache2")
		os.Exit(1)
	}
}

// ServeHTTP serves cached content when available; on a miss it fetches the
// object from the upstream (or the requested host), caches it, and returns it.
func (sc *SteamCache) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if sc.pprof && r.URL.Path == "/debug/pprof/" {
		pprof.Index(w, r)
		return
	} else if sc.pprof && strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
		pprof.Handler(strings.TrimPrefix(r.URL.Path, "/debug/pprof/")).ServeHTTP(w, r)
		return
	}

	if r.URL.Path == "/metrics" {
		promhttp.Handler().ServeHTTP(w, r)
		return
	}

	if r.Method != http.MethodGet {
		requestsTotal.WithLabelValues(r.Method, "405").Inc()
		http.Error(w, "Only GET method is supported", http.StatusMethodNotAllowed)
		return
	}

	if r.URL.String() == "/lancache-heartbeat" {
		w.Header().Add("X-LanCache-Processed-By", "SteamCache2")
		w.WriteHeader(http.StatusNoContent)
		w.Write(nil)
		return
	}

	tstart := time.Now()

	cacheKey := strings.ReplaceAll(r.URL.String()[1:], "\\", "/") // replace all backslashes with forward slashes; shouldn't be necessary, but just in case
	if cacheKey == "" {
		requestsTotal.WithLabelValues(r.Method, "400").Inc()
		http.Error(w, "Invalid URL", http.StatusBadRequest)
		return
	}

	w.Header().Add("X-LanCache-Processed-By", "SteamCache2") // SteamPrefill uses this header to determine whether the request was processed by the cache; maybe Steam uses it too

	data, err := sc.vfs.Get(cacheKey)
	if err == nil {
		sc.hits.Add(cachestate.CacheStateHit)
		w.Header().Add("X-LanCache-Status", "HIT")
		requestsTotal.WithLabelValues(r.Method, "200").Inc()
		cacheHitRate.Set(sc.hits.Avg())
		w.Write(data)

		logger.Logger.Info().
			Str("key", cacheKey).
			Str("host", r.Host).
			Str("status", "HIT").
			Int64("size", int64(len(data))).
			Dur("duration", time.Since(tstart)).
			Msg("request")
		return
	}

	var req *http.Request
	if sc.upstream != "" { // if an upstream server is configured, proxy the request to the upstream server
		ur, err := url.JoinPath(sc.upstream, r.URL.String())
		if err != nil {
			requestsTotal.WithLabelValues(r.Method, "500").Inc()
			http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
			return
		}

		req, err = http.NewRequest(http.MethodGet, ur, nil)
		if err != nil {
			requestsTotal.WithLabelValues(r.Method, "500").Inc()
			http.Error(w, "Failed to create request", http.StatusInternalServerError)
			return
		}
		req.Host = r.Host
	} else { // if no upstream server is configured, proxy the request to the host specified in the request
		host := r.Host
		if r.Header.Get("X-Sls-Https") == "enable" {
			host = "https://" + host
		} else {
			host = "http://" + host
		}

		ur, err := url.JoinPath(host, r.URL.String())
		if err != nil {
			requestsTotal.WithLabelValues(r.Method, "500").Inc()
			http.Error(w, "Failed to join URL path", http.StatusInternalServerError)
			return
		}

		req, err = http.NewRequest(http.MethodGet, ur, nil)
		if err != nil {
			requestsTotal.WithLabelValues(r.Method, "500").Inc()
			http.Error(w, "Failed to create request", http.StatusInternalServerError)
			return
		}
	}

	// Copy headers from the original request to the new request
	for key, values := range r.Header {
		for _, value := range values {
			req.Header.Add(key, value)
		}
	}
	// req.Header.Add("X-Sls-Https", r.Header.Get("X-Sls-Https"))
	// req.Header.Add("User-Agent", r.Header.Get("User-Agent"))

	// Retry logic
	backoffSchedule := []time.Duration{1 * time.Second, 3 * time.Second, 10 * time.Second}
	var resp *http.Response
	for i, backoff := range backoffSchedule {
		resp, err = http.DefaultClient.Do(req)
		if err == nil && resp.StatusCode == http.StatusOK {
			break
		}
		if err == nil {
			resp.Body.Close() // discard the failed attempt's body so the connection can be reused
		}
		if i < len(backoffSchedule)-1 {
			time.Sleep(backoff)
		}
	}
	if err != nil || resp.StatusCode != http.StatusOK {
		requestsTotal.WithLabelValues(r.Method, "500").Inc()
		http.Error(w, "Failed to fetch the requested URL", http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		requestsTotal.WithLabelValues(r.Method, "500").Inc()
		http.Error(w, "Failed to read response body", http.StatusInternalServerError)
		return
	}

	sc.vfs.Set(cacheKey, body)

	sc.hits.Add(cachestate.CacheStateMiss)
	w.Header().Add("X-LanCache-Status", "MISS")
	requestsTotal.WithLabelValues(r.Method, "200").Inc()
	cacheHitRate.Set(sc.hits.Avg())
	w.Write(body)

	logger.Logger.Info().
		Str("key", cacheKey).
		Str("host", r.Host).
		Str("status", "MISS").
		Int64("size", int64(len(body))).
		Dur("duration", time.Since(tstart)).
		Msg("request")
}
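// Example wiring (an illustrative sketch only; the concrete address, sizes,
// multipliers, and cache path below are placeholder values, not taken from the
// project's actual main package):
//
//	func main() {
//		// 1GB memory tier and 100GB disk tier under /cache, no fixed upstream, pprof disabled
//		sc := steamcache.New(":80", "1GB", 2, "100GB", 2, "/cache", "", false)
//		sc.Run() // blocks serving HTTP until the process exits
//	}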