Enhance error handling and metrics tracking in SteamCache
- Introduced a new error-handling system with custom error types for better context and clarity in error reporting.
- Implemented URL validation to reject invalid requests and improve security.
- Updated cache-key generation functions to return errors, making handling of invalid inputs more robust.
- Added comprehensive metrics tracking for requests, cache hits, misses, and performance, enabling better monitoring and analysis of the caching system.
- Enhanced logging to include detailed metrics and error information for improved debugging and operational insight.
This commit is contained in:
213
steamcache/metrics/metrics.go
Normal file
213
steamcache/metrics/metrics.go
Normal file
@@ -0,0 +1,213 @@
|
||||
// steamcache/metrics/metrics.go
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Metrics tracks various performance and operational metrics.
//
// All exported int64 fields are read and written via sync/atomic (see the
// methods on *Metrics), so they are safe for concurrent use. Because the
// struct contains a sync.RWMutex it must not be copied after first use;
// always pass it around as *Metrics (see NewMetrics).
type Metrics struct {
	// Request metrics — monotonic counters, incremented atomically.
	TotalRequests  int64
	CacheHits      int64
	CacheMisses    int64
	CacheCoalesced int64
	Errors         int64
	RateLimited    int64

	// Performance metrics — running totals, accumulated atomically.
	TotalResponseTime int64 // in nanoseconds
	TotalBytesServed  int64
	TotalBytesCached  int64

	// Cache metrics — the sizes are gauges (atomic.StoreInt64), the hit
	// counts are counters (atomic.AddInt64).
	MemoryCacheSize int64
	DiskCacheSize   int64
	MemoryCacheHits int64
	DiskCacheHits   int64

	// Service metrics — per-service request counts; the map is guarded by
	// serviceMutex because map access is not atomic.
	ServiceRequests map[string]int64
	serviceMutex    sync.RWMutex

	// Time tracking. StartTime is set once at construction; LastResetTime
	// is rewritten by Reset.
	// NOTE(review): make sure reads and writes of LastResetTime
	// (GetStats / Reset) are synchronized — verify against those methods.
	StartTime     time.Time
	LastResetTime time.Time
}
|
||||
|
||||
// NewMetrics creates a new metrics instance.
//
// All counters start at zero; StartTime and LastResetTime are both stamped
// with the moment of construction.
func NewMetrics() *Metrics {
	m := &Metrics{
		ServiceRequests: make(map[string]int64),
	}
	m.StartTime = time.Now()
	m.LastResetTime = m.StartTime
	return m
}
|
||||
|
||||
// IncrementTotalRequests increments the total request counter.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementTotalRequests() {
	atomic.AddInt64(&m.TotalRequests, 1)
}
|
||||
|
||||
// IncrementCacheHits increments the cache hit counter.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementCacheHits() {
	atomic.AddInt64(&m.CacheHits, 1)
}
|
||||
|
||||
// IncrementCacheMisses increments the cache miss counter.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementCacheMisses() {
	atomic.AddInt64(&m.CacheMisses, 1)
}
|
||||
|
||||
// IncrementCacheCoalesced increments the coalesced request counter.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementCacheCoalesced() {
	atomic.AddInt64(&m.CacheCoalesced, 1)
}
|
||||
|
||||
// IncrementErrors increments the error counter.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementErrors() {
	atomic.AddInt64(&m.Errors, 1)
}
|
||||
|
||||
// IncrementRateLimited increments the rate limited counter.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementRateLimited() {
	atomic.AddInt64(&m.RateLimited, 1)
}
|
||||
|
||||
// AddResponseTime accumulates one request's duration into the running
// response-time total, which is stored in nanoseconds.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) AddResponseTime(d time.Duration) {
	atomic.AddInt64(&m.TotalResponseTime, d.Nanoseconds())
}
|
||||
|
||||
// AddBytesServed adds bytes served to the total.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) AddBytesServed(bytes int64) {
	atomic.AddInt64(&m.TotalBytesServed, bytes)
}
|
||||
|
||||
// AddBytesCached adds bytes cached to the total.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) AddBytesCached(bytes int64) {
	atomic.AddInt64(&m.TotalBytesCached, bytes)
}
|
||||
|
||||
// SetMemoryCacheSize sets the current memory cache size.
// This is a gauge: the value is overwritten, not accumulated.
func (m *Metrics) SetMemoryCacheSize(size int64) {
	atomic.StoreInt64(&m.MemoryCacheSize, size)
}
|
||||
|
||||
// SetDiskCacheSize sets the current disk cache size.
// This is a gauge: the value is overwritten, not accumulated.
func (m *Metrics) SetDiskCacheSize(size int64) {
	atomic.StoreInt64(&m.DiskCacheSize, size)
}
|
||||
|
||||
// IncrementMemoryCacheHits increments memory cache hits.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementMemoryCacheHits() {
	atomic.AddInt64(&m.MemoryCacheHits, 1)
}
|
||||
|
||||
// IncrementDiskCacheHits increments disk cache hits.
// Safe for concurrent use; the add is atomic.
func (m *Metrics) IncrementDiskCacheHits() {
	atomic.AddInt64(&m.DiskCacheHits, 1)
}
|
||||
|
||||
// IncrementServiceRequests increments the request counter for a specific
// service. Safe for concurrent use; the map is guarded by serviceMutex.
//
// The map is allocated lazily so that a zero-value Metrics does not panic
// on first use (writing to a nil map panics); NewMetrics pre-allocates it
// on the normal path, in which case this check is a no-op.
func (m *Metrics) IncrementServiceRequests(service string) {
	m.serviceMutex.Lock()
	defer m.serviceMutex.Unlock()
	if m.ServiceRequests == nil {
		m.ServiceRequests = make(map[string]int64)
	}
	m.ServiceRequests[service]++
}
|
||||
|
||||
// GetServiceRequests returns the number of requests recorded for the given
// service, or zero if the service has never been seen.
// Safe for concurrent use; the map read happens under the read lock.
func (m *Metrics) GetServiceRequests(service string) int64 {
	m.serviceMutex.RLock()
	count := m.ServiceRequests[service]
	m.serviceMutex.RUnlock()
	return count
}
|
||||
|
||||
// GetStats returns a snapshot of current metrics.
//
// Each counter is read with an atomic load, so every individual value is
// valid, but the snapshot is not taken under one global lock: counters may
// drift slightly relative to one another while it is being assembled.
func (m *Metrics) GetStats() *Stats {
	totalRequests := atomic.LoadInt64(&m.TotalRequests)
	cacheHits := atomic.LoadInt64(&m.CacheHits)
	cacheMisses := atomic.LoadInt64(&m.CacheMisses)

	// Hit rate is measured against ALL requests (not hits+misses), so
	// errors and rate-limited requests count against it.
	var hitRate float64
	if totalRequests > 0 {
		hitRate = float64(cacheHits) / float64(totalRequests)
	}

	// Mean response time across all requests; zero when nothing was served.
	var avgResponseTime time.Duration
	if totalRequests > 0 {
		avgResponseTime = time.Duration(atomic.LoadInt64(&m.TotalResponseTime) / totalRequests)
	}

	// Copy the per-service map and read LastResetTime while holding the
	// lock: Reset replaces the map and rewrites LastResetTime, and the
	// previous unlocked read of LastResetTime was a data race.
	m.serviceMutex.RLock()
	serviceRequests := make(map[string]int64, len(m.ServiceRequests))
	for k, v := range m.ServiceRequests {
		serviceRequests[k] = v
	}
	lastReset := m.LastResetTime
	m.serviceMutex.RUnlock()

	return &Stats{
		TotalRequests:    totalRequests,
		CacheHits:        cacheHits,
		CacheMisses:      cacheMisses,
		CacheCoalesced:   atomic.LoadInt64(&m.CacheCoalesced),
		Errors:           atomic.LoadInt64(&m.Errors),
		RateLimited:      atomic.LoadInt64(&m.RateLimited),
		HitRate:          hitRate,
		AvgResponseTime:  avgResponseTime,
		TotalBytesServed: atomic.LoadInt64(&m.TotalBytesServed),
		TotalBytesCached: atomic.LoadInt64(&m.TotalBytesCached),
		MemoryCacheSize:  atomic.LoadInt64(&m.MemoryCacheSize),
		DiskCacheSize:    atomic.LoadInt64(&m.DiskCacheSize),
		MemoryCacheHits:  atomic.LoadInt64(&m.MemoryCacheHits),
		DiskCacheHits:    atomic.LoadInt64(&m.DiskCacheHits),
		ServiceRequests:  serviceRequests,
		Uptime:           time.Since(m.StartTime),
		LastResetTime:    lastReset,
	}
}
|
||||
|
||||
// Reset resets all counters to zero and stamps LastResetTime.
//
// MemoryCacheSize and DiskCacheSize are left untouched (matching prior
// behavior): they are gauges overwritten wholesale by SetMemoryCacheSize /
// SetDiskCacheSize rather than accumulated. StartTime — and therefore the
// Uptime reported by GetStats — is also preserved.
func (m *Metrics) Reset() {
	counters := []*int64{
		&m.TotalRequests,
		&m.CacheHits,
		&m.CacheMisses,
		&m.CacheCoalesced,
		&m.Errors,
		&m.RateLimited,
		&m.TotalResponseTime,
		&m.TotalBytesServed,
		&m.TotalBytesCached,
		&m.MemoryCacheHits,
		&m.DiskCacheHits,
	}
	for _, c := range counters {
		atomic.StoreInt64(c, 0)
	}

	// Swap in a fresh service map and update LastResetTime under the same
	// lock so snapshot readers cannot observe a torn time.Time value (the
	// previous unlocked write of LastResetTime raced with GetStats).
	m.serviceMutex.Lock()
	m.ServiceRequests = make(map[string]int64)
	m.LastResetTime = time.Now()
	m.serviceMutex.Unlock()
}
|
||||
|
||||
// Stats represents a point-in-time snapshot of metrics, as produced by
// Metrics.GetStats. It is a plain value with no internal synchronization;
// the ServiceRequests map is a private copy, so the snapshot is safe to
// read after creation.
type Stats struct {
	TotalRequests  int64
	CacheHits      int64
	CacheMisses    int64
	CacheCoalesced int64
	Errors         int64
	RateLimited    int64
	// HitRate is CacheHits / TotalRequests (0 when there were no requests).
	HitRate float64
	// AvgResponseTime is TotalResponseTime / TotalRequests.
	AvgResponseTime  time.Duration
	TotalBytesServed int64
	TotalBytesCached int64
	MemoryCacheSize  int64
	DiskCacheSize    int64
	MemoryCacheHits  int64
	DiskCacheHits    int64
	// ServiceRequests maps service name to request count.
	ServiceRequests map[string]int64
	// Uptime is the time elapsed since the Metrics instance was created.
	Uptime        time.Duration
	LastResetTime time.Time
}
|
||||
Reference in New Issue
Block a user