Update .gitignore to include log files and database journal files. Modify go.mod to update dependencies for go-sqlite3 and cloud.google.com/go/compute/metadata. Enhance Makefile to include logging options for manager and runner commands. Introduce new job token handling in auth package and implement database migration scripts. Refactor manager and runner components to improve job processing and metadata extraction. Add support for video preview in frontend components and enhance WebSocket management for channel subscriptions.

2026-01-02 13:55:19 -06:00
parent edc8ea160c
commit 94490237fe
44 changed files with 9463 additions and 7875 deletions


@@ -2,26 +2,44 @@ package database
import (
"database/sql"
"embed"
"fmt"
"io/fs"
"log"
"sync"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/sqlite3"
"github.com/golang-migrate/migrate/v4/source/iofs"
_ "github.com/mattn/go-sqlite3"
)
-// DB wraps the database connection with mutex protection
+//go:embed migrations/*.sql
+var migrationsFS embed.FS
+// DB wraps the database connection
+// Note: No mutex needed - there is a single shared *sql.DB pool per process, and SQLite's WAL mode
+// handles concurrent access safely
type DB struct {
-db *sql.DB
-mu sync.Mutex
+db *sql.DB
}
// NewDB creates a new database connection
func NewDB(dbPath string) (*DB, error) {
-db, err := sql.Open("sqlite3", dbPath)
+// Use WAL mode for better concurrency (readers can proceed while a single writer is active)
+// and set a busy timeout so concurrent access waits briefly instead of failing immediately
+db, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_busy_timeout=5000")
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
}
+// Configure connection pool for better concurrency
+// SQLite with WAL mode supports multiple concurrent readers and one writer
+// Increasing pool size allows multiple HTTP requests to query the database simultaneously
+// This prevents blocking when multiple requests come in (e.g., on page refresh)
+db.SetMaxOpenConns(10) // Allow up to 10 concurrent connections
+db.SetMaxIdleConns(5) // Keep 5 idle connections ready
+db.SetConnMaxLifetime(0) // Connections don't expire
if err := db.Ping(); err != nil {
return nil, fmt.Errorf("failed to ping database: %w", err)
}
@@ -31,30 +49,37 @@ func NewDB(dbPath string) (*DB, error) {
return nil, fmt.Errorf("failed to enable foreign keys: %w", err)
}
+// Enable WAL mode explicitly (in case the connection string didn't work)
+if _, err := db.Exec("PRAGMA journal_mode = WAL"); err != nil {
+log.Printf("Warning: Failed to enable WAL mode: %v", err)
+}
database := &DB{db: db}
if err := database.migrate(); err != nil {
return nil, fmt.Errorf("failed to migrate database: %w", err)
}
+// Verify connection is still open after migration
+if err := db.Ping(); err != nil {
+return nil, fmt.Errorf("database connection closed after migration: %w", err)
+}
return database, nil
}
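// Editor's note: illustrative sketch, not part of this commit's diff. Since DSN parameters
// can be silently ignored in some setups, one way to confirm WAL actually took effect is to
// read PRAGMA journal_mode back after opening. verifyWALMode is a hypothetical helper that
// relies only on this file's existing imports.
func verifyWALMode(conn *sql.DB) error {
	var mode string
	// SQLite reports the active journal mode ("wal", "delete", ...) as a single row
	if err := conn.QueryRow("PRAGMA journal_mode").Scan(&mode); err != nil {
		return fmt.Errorf("failed to read journal_mode: %w", err)
	}
	if mode != "wal" {
		return fmt.Errorf("expected journal_mode=wal, got %q", mode)
	}
	return nil
}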
-// With executes a function with mutex-protected access to the database
+// With executes a function with access to the database
// The function receives the underlying *sql.DB connection
+// No mutex needed - the shared connection pool and WAL mode handle concurrency
func (db *DB) With(fn func(*sql.DB) error) error {
-db.mu.Lock()
-defer db.mu.Unlock()
return fn(db.db)
}
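// Editor's note: illustrative sketch, not part of this commit's diff. Typical read-only use
// of With; countPendingJobs is a hypothetical helper, and the jobs/status names follow the
// schema defined elsewhere in this commit.
func countPendingJobs(db *DB) (int, error) {
	var n int
	err := db.With(func(conn *sql.DB) error {
		return conn.QueryRow("SELECT COUNT(*) FROM jobs WHERE status = ?", "pending").Scan(&n)
	})
	return n, err
}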
-// WithTx executes a function within a transaction with mutex protection
+// WithTx executes a function within a transaction
// The function receives a *sql.Tx transaction
// If the function returns an error, the transaction is rolled back
// If the function returns nil, the transaction is committed
+// No mutex needed - the shared connection pool and WAL mode handle concurrency
func (db *DB) WithTx(fn func(*sql.Tx) error) error {
-db.mu.Lock()
-defer db.mu.Unlock()
tx, err := db.db.Begin()
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
@@ -74,234 +99,61 @@ func (db *DB) WithTx(fn func(*sql.Tx) error) error {
return nil
}
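// Editor's note: illustrative sketch, not part of this commit's diff. WithTx keeps a
// multi-statement write atomic: any returned error rolls the whole transaction back.
// markJobFailed is a hypothetical helper; the jobs/tasks columns follow the schema in this commit.
func markJobFailed(db *DB, jobID int64, msg string) error {
	return db.WithTx(func(tx *sql.Tx) error {
		if _, err := tx.Exec("UPDATE jobs SET status = ?, error_message = ? WHERE id = ?", "failed", msg, jobID); err != nil {
			return err // rolls back
		}
		_, err := tx.Exec("UPDATE tasks SET status = ? WHERE job_id = ? AND status = ?", "cancelled", jobID, "pending")
		return err // nil commits, non-nil rolls back
	})
}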
-// migrate runs database migrations
+// migrate runs database migrations using golang-migrate
func (db *DB) migrate() error {
-// SQLite uses INTEGER PRIMARY KEY AUTOINCREMENT instead of sequences
-schema := `
-CREATE TABLE IF NOT EXISTS users (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-email TEXT UNIQUE NOT NULL,
-name TEXT NOT NULL,
-oauth_provider TEXT NOT NULL,
-oauth_id TEXT NOT NULL,
-password_hash TEXT,
-is_admin INTEGER NOT NULL DEFAULT 0,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-UNIQUE(oauth_provider, oauth_id)
-);
-CREATE TABLE IF NOT EXISTS runner_api_keys (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-key_prefix TEXT NOT NULL,
-key_hash TEXT NOT NULL,
-name TEXT NOT NULL,
-description TEXT,
-scope TEXT NOT NULL DEFAULT 'user',
-is_active INTEGER NOT NULL DEFAULT 1,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-created_by INTEGER,
-FOREIGN KEY (created_by) REFERENCES users(id),
-UNIQUE(key_prefix)
-);
-CREATE TABLE IF NOT EXISTS jobs (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-user_id INTEGER NOT NULL,
-job_type TEXT NOT NULL DEFAULT 'render',
-name TEXT NOT NULL,
-status TEXT NOT NULL DEFAULT 'pending',
-progress REAL NOT NULL DEFAULT 0.0,
-frame_start INTEGER,
-frame_end INTEGER,
-output_format TEXT,
-allow_parallel_runners INTEGER NOT NULL DEFAULT 1,
-timeout_seconds INTEGER DEFAULT 86400,
-blend_metadata TEXT,
-retry_count INTEGER NOT NULL DEFAULT 0,
-max_retries INTEGER NOT NULL DEFAULT 3,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-started_at TIMESTAMP,
-completed_at TIMESTAMP,
-error_message TEXT,
-FOREIGN KEY (user_id) REFERENCES users(id)
-);
-CREATE TABLE IF NOT EXISTS runners (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-name TEXT NOT NULL,
-hostname TEXT NOT NULL,
-ip_address TEXT NOT NULL,
-status TEXT NOT NULL DEFAULT 'offline',
-last_heartbeat TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-capabilities TEXT,
-api_key_id INTEGER,
-api_key_scope TEXT NOT NULL DEFAULT 'user',
-priority INTEGER NOT NULL DEFAULT 100,
-fingerprint TEXT,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-FOREIGN KEY (api_key_id) REFERENCES runner_api_keys(id)
-);
-CREATE TABLE IF NOT EXISTS tasks (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-job_id INTEGER NOT NULL,
-runner_id INTEGER,
-frame_start INTEGER NOT NULL,
-frame_end INTEGER NOT NULL,
-status TEXT NOT NULL DEFAULT 'pending',
-output_path TEXT,
-task_type TEXT NOT NULL DEFAULT 'render',
-current_step TEXT,
-retry_count INTEGER NOT NULL DEFAULT 0,
-max_retries INTEGER NOT NULL DEFAULT 3,
-timeout_seconds INTEGER,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-started_at TIMESTAMP,
-completed_at TIMESTAMP,
-error_message TEXT,
-FOREIGN KEY (job_id) REFERENCES jobs(id),
-FOREIGN KEY (runner_id) REFERENCES runners(id)
-);
-CREATE TABLE IF NOT EXISTS job_files (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-job_id INTEGER NOT NULL,
-file_type TEXT NOT NULL,
-file_path TEXT NOT NULL,
-file_name TEXT NOT NULL,
-file_size INTEGER NOT NULL,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-FOREIGN KEY (job_id) REFERENCES jobs(id)
-);
-CREATE TABLE IF NOT EXISTS manager_secrets (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-secret TEXT UNIQUE NOT NULL,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
-);
-CREATE TABLE IF NOT EXISTS task_logs (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-task_id INTEGER NOT NULL,
-runner_id INTEGER,
-log_level TEXT NOT NULL,
-message TEXT NOT NULL,
-step_name TEXT,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-FOREIGN KEY (task_id) REFERENCES tasks(id),
-FOREIGN KEY (runner_id) REFERENCES runners(id)
-);
-CREATE TABLE IF NOT EXISTS task_steps (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-task_id INTEGER NOT NULL,
-step_name TEXT NOT NULL,
-status TEXT NOT NULL DEFAULT 'pending',
-started_at TIMESTAMP,
-completed_at TIMESTAMP,
-duration_ms INTEGER,
-error_message TEXT,
-FOREIGN KEY (task_id) REFERENCES tasks(id)
-);
-CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id);
-CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
-CREATE INDEX IF NOT EXISTS idx_jobs_user_status_created ON jobs(user_id, status, created_at DESC);
-CREATE INDEX IF NOT EXISTS idx_tasks_job_id ON tasks(job_id);
-CREATE INDEX IF NOT EXISTS idx_tasks_runner_id ON tasks(runner_id);
-CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status);
-CREATE INDEX IF NOT EXISTS idx_tasks_job_status ON tasks(job_id, status);
-CREATE INDEX IF NOT EXISTS idx_tasks_started_at ON tasks(started_at);
-CREATE INDEX IF NOT EXISTS idx_job_files_job_id ON job_files(job_id);
-CREATE INDEX IF NOT EXISTS idx_runner_api_keys_prefix ON runner_api_keys(key_prefix);
-CREATE INDEX IF NOT EXISTS idx_runner_api_keys_active ON runner_api_keys(is_active);
-CREATE INDEX IF NOT EXISTS idx_runner_api_keys_created_by ON runner_api_keys(created_by);
-CREATE INDEX IF NOT EXISTS idx_runners_api_key_id ON runners(api_key_id);
-CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_created_at ON task_logs(task_id, created_at);
-CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_id ON task_logs(task_id, id DESC);
-CREATE INDEX IF NOT EXISTS idx_task_logs_runner_id ON task_logs(runner_id);
-CREATE INDEX IF NOT EXISTS idx_task_steps_task_id ON task_steps(task_id);
-CREATE INDEX IF NOT EXISTS idx_runners_last_heartbeat ON runners(last_heartbeat);
-CREATE TABLE IF NOT EXISTS settings (
-key TEXT PRIMARY KEY,
-value TEXT NOT NULL,
-updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
-);
-CREATE TABLE IF NOT EXISTS sessions (
-id INTEGER PRIMARY KEY AUTOINCREMENT,
-session_id TEXT UNIQUE NOT NULL,
-user_id INTEGER NOT NULL,
-email TEXT NOT NULL,
-name TEXT NOT NULL,
-is_admin INTEGER NOT NULL DEFAULT 0,
-expires_at TIMESTAMP NOT NULL,
-created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-FOREIGN KEY (user_id) REFERENCES users(id)
-);
-CREATE INDEX IF NOT EXISTS idx_sessions_session_id ON sessions(session_id);
-CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
-CREATE INDEX IF NOT EXISTS idx_sessions_expires_at ON sessions(expires_at);
-`
-if err := db.With(func(conn *sql.DB) error {
-_, err := conn.Exec(schema)
-return err
-}); err != nil {
-return fmt.Errorf("failed to create schema: %w", err)
+// Create SQLite driver instance
+// Note: We use db.db directly since we're in the same package and this is called during initialization
+driver, err := sqlite3.WithInstance(db.db, &sqlite3.Config{})
+if err != nil {
+return fmt.Errorf("failed to create sqlite3 driver: %w", err)
}
-// Database migrations for schema updates
-// NOTE: Migrations are currently disabled since the database is cleared by 'make cleanup-manager'
-// before running. All schema changes have been rolled into the main schema above.
-// When ready to implement proper migrations for production, uncomment and populate this array.
-// TODO: Implement proper database migration system for production use
-migrations := []string{
-// Future migrations will go here when we implement proper migration handling
+// Create embedded filesystem source
+migrationFS, err := fs.Sub(migrationsFS, "migrations")
+if err != nil {
+return fmt.Errorf("failed to create migration filesystem: %w", err)
}
-for _, migration := range migrations {
-if err := db.With(func(conn *sql.DB) error {
-_, err := conn.Exec(migration)
-return err
-}); err != nil {
-// Log but don't fail - column might already exist or table might not exist yet
-// This is fine for migrations that run after schema creation
-log.Printf("Migration warning: %v", err)
+sourceDriver, err := iofs.New(migrationFS, ".")
+if err != nil {
+return fmt.Errorf("failed to create iofs source driver: %w", err)
}
+// Create migrate instance
+m, err := migrate.NewWithInstance("iofs", sourceDriver, "sqlite3", driver)
+if err != nil {
+return fmt.Errorf("failed to create migrate instance: %w", err)
+}
+// Run migrations
+if err := m.Up(); err != nil {
+// If the error is "no change", that's fine - database is already up to date
+if err == migrate.ErrNoChange {
+log.Printf("Database is already up to date")
+// Don't close migrate instance - it may close the database connection
+// The migrate instance will be garbage collected
+return nil
+}
+// Don't close migrate instance on error either - it may close the DB
+return fmt.Errorf("failed to run migrations: %w", err)
+}
-// Initialize registration_enabled setting (default: true) if it doesn't exist
-var settingCount int
-err := db.With(func(conn *sql.DB) error {
-return conn.QueryRow("SELECT COUNT(*) FROM settings WHERE key = ?", "registration_enabled").Scan(&settingCount)
-})
-if err == nil && settingCount == 0 {
-err = db.With(func(conn *sql.DB) error {
-_, err := conn.Exec("INSERT INTO settings (key, value) VALUES (?, ?)", "registration_enabled", "true")
-return err
-})
-if err != nil {
-// Log but don't fail - setting might have been created by another process
-log.Printf("Note: Could not initialize registration_enabled setting: %v", err)
-}
-}
+// Don't close the migrate instance - with sqlite3.WithInstance, closing it
+// may close the underlying database connection. The migrate instance will
+// be garbage collected when it goes out of scope.
+// If we need to close it later, we can store it in the DB struct and close
+// it when DB.Close() is called, but for now we'll let it be GC'd.
+log.Printf("Database migrations completed successfully")
return nil
}
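// Editor's note: illustrative sketch, not part of this commit's diff. golang-migrate's iofs
// source reads versioned pairs of SQL files from the embedded migrations/ directory, named
// like 000001_initial_schema.up.sql / 000001_initial_schema.down.sql (example names only -
// the actual migration files are not shown in this diff). A quick sanity check that the
// //go:embed directive picked the files up, using only this file's existing imports:
func listEmbeddedMigrations() ([]string, error) {
	// Glob over the embedded FS declared above with //go:embed migrations/*.sql
	return fs.Glob(migrationsFS, "migrations/*.sql")
}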
// Ping checks the database connection
func (db *DB) Ping() error {
-db.mu.Lock()
-defer db.mu.Unlock()
return db.db.Ping()
}
// Close closes the database connection
func (db *DB) Close() error {
-db.mu.Lock()
-defer db.mu.Unlock()
return db.db.Close()
}
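// Editor's note: illustrative sketch, not part of this commit's diff. The comment in migrate()
// suggests an alternative design: keep the *migrate.Migrate instance and close it from Close()
// instead of letting it be GC'd. A minimal sketch of that idea; managedDB and its fields are
// hypothetical and rely only on this file's existing imports.
type managedDB struct {
	db       *sql.DB
	migrator *migrate.Migrate
}

// Close closes the migrator when one was kept (which, per the note above about
// sqlite3.WithInstance, may also close the underlying connection), otherwise the *sql.DB.
func (m *managedDB) Close() error {
	if m.migrator != nil {
		srcErr, dbErr := m.migrator.Close()
		if srcErr != nil {
			return fmt.Errorf("failed to close migration source: %w", srcErr)
		}
		return dbErr
	}
	return m.db.Close()
}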