jiggablend/internal/database/schema.go

package database

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/marcboeker/go-duckdb/v2"
)

// DB wraps the database connection.
type DB struct {
	*sql.DB
}

// NewDB opens a database connection at dbPath and runs schema migrations.
func NewDB(dbPath string) (*DB, error) {
	db, err := sql.Open("duckdb", dbPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open database: %w", err)
	}

	if err := db.Ping(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to ping database: %w", err)
	}

	database := &DB{DB: db}
	if err := database.migrate(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to migrate database: %w", err)
	}

	return database, nil
}
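
// Illustrative usage (a sketch, not code from this package's callers; the path
// and error handling below are assumptions):
//
//	db, err := database.NewDB("data/jiggablend.db")
//	if err != nil {
//		log.Fatalf("failed to open database: %v", err)
//	}
//	defer db.Close()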

// migrate runs database migrations
func (db *DB) migrate() error {
	// Create sequences for auto-incrementing primary keys
	sequences := []string{
		`CREATE SEQUENCE IF NOT EXISTS seq_users_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_jobs_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_runners_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_tasks_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_job_files_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_manager_secrets_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_registration_tokens_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_task_logs_id START 1`,
		`CREATE SEQUENCE IF NOT EXISTS seq_task_steps_id START 1`,
	}

	for _, seq := range sequences {
		if _, err := db.Exec(seq); err != nil {
			return fmt.Errorf("failed to create sequence: %w", err)
		}
	}

	schema := `
	CREATE TABLE IF NOT EXISTS users (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_users_id'),
		email TEXT UNIQUE NOT NULL,
		name TEXT NOT NULL,
		oauth_provider TEXT NOT NULL,
		oauth_id TEXT NOT NULL,
		password_hash TEXT,
		is_admin BOOLEAN NOT NULL DEFAULT false,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
		UNIQUE(oauth_provider, oauth_id)
	);

	CREATE TABLE IF NOT EXISTS jobs (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_jobs_id'),
		user_id BIGINT NOT NULL,
		job_type TEXT NOT NULL DEFAULT 'render',
		name TEXT NOT NULL,
		status TEXT NOT NULL DEFAULT 'pending',
		progress REAL NOT NULL DEFAULT 0.0,
		frame_start INTEGER,
		frame_end INTEGER,
		output_format TEXT,
		allow_parallel_runners BOOLEAN,
		timeout_seconds INTEGER DEFAULT 86400,
		blend_metadata TEXT,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
		started_at TIMESTAMP,
		completed_at TIMESTAMP,
		error_message TEXT,
		FOREIGN KEY (user_id) REFERENCES users(id)
	);

	CREATE TABLE IF NOT EXISTS runners (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_runners_id'),
		name TEXT NOT NULL,
		hostname TEXT NOT NULL,
		ip_address TEXT NOT NULL,
		status TEXT NOT NULL DEFAULT 'offline',
		last_heartbeat TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
		capabilities TEXT,
		registration_token TEXT,
		runner_secret TEXT,
		manager_secret TEXT,
		verified BOOLEAN NOT NULL DEFAULT false,
		priority INTEGER NOT NULL DEFAULT 100,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
	);

	CREATE TABLE IF NOT EXISTS tasks (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_tasks_id'),
		job_id BIGINT NOT NULL,
		runner_id BIGINT,
		frame_start INTEGER NOT NULL,
		frame_end INTEGER NOT NULL,
		status TEXT NOT NULL DEFAULT 'pending',
		output_path TEXT,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
		started_at TIMESTAMP,
		completed_at TIMESTAMP,
		error_message TEXT
	);

	CREATE TABLE IF NOT EXISTS job_files (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_job_files_id'),
		job_id BIGINT NOT NULL,
		file_type TEXT NOT NULL,
		file_path TEXT NOT NULL,
		file_name TEXT NOT NULL,
		file_size BIGINT NOT NULL,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
	);

	CREATE TABLE IF NOT EXISTS manager_secrets (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_manager_secrets_id'),
		secret TEXT UNIQUE NOT NULL,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
	);

	CREATE TABLE IF NOT EXISTS registration_tokens (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_registration_tokens_id'),
		token TEXT UNIQUE NOT NULL,
		expires_at TIMESTAMP NOT NULL,
		used BOOLEAN NOT NULL DEFAULT false,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
		created_by BIGINT,
		FOREIGN KEY (created_by) REFERENCES users(id)
	);

	CREATE TABLE IF NOT EXISTS task_logs (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_task_logs_id'),
		task_id BIGINT NOT NULL,
		runner_id BIGINT,
		log_level TEXT NOT NULL,
		message TEXT NOT NULL,
		step_name TEXT,
		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
	);

	CREATE TABLE IF NOT EXISTS task_steps (
		id BIGINT PRIMARY KEY DEFAULT nextval('seq_task_steps_id'),
		task_id BIGINT NOT NULL,
		step_name TEXT NOT NULL,
		status TEXT NOT NULL DEFAULT 'pending',
		started_at TIMESTAMP,
		completed_at TIMESTAMP,
		duration_ms INTEGER,
		error_message TEXT
	);

	CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id);
	CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
	CREATE INDEX IF NOT EXISTS idx_jobs_user_status_created ON jobs(user_id, status, created_at DESC);
	CREATE INDEX IF NOT EXISTS idx_tasks_job_id ON tasks(job_id);
	CREATE INDEX IF NOT EXISTS idx_tasks_runner_id ON tasks(runner_id);
	CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status);
	CREATE INDEX IF NOT EXISTS idx_tasks_job_status ON tasks(job_id, status);
	CREATE INDEX IF NOT EXISTS idx_tasks_started_at ON tasks(started_at);
	CREATE INDEX IF NOT EXISTS idx_job_files_job_id ON job_files(job_id);
	CREATE INDEX IF NOT EXISTS idx_registration_tokens_token ON registration_tokens(token);
	CREATE INDEX IF NOT EXISTS idx_registration_tokens_expires_at ON registration_tokens(expires_at);
	CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_created_at ON task_logs(task_id, created_at);
	CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_id ON task_logs(task_id, id DESC);
	CREATE INDEX IF NOT EXISTS idx_task_logs_runner_id ON task_logs(runner_id);
	CREATE INDEX IF NOT EXISTS idx_task_steps_task_id ON task_steps(task_id);
	CREATE INDEX IF NOT EXISTS idx_runners_last_heartbeat ON runners(last_heartbeat);

	CREATE TABLE IF NOT EXISTS settings (
		key TEXT PRIMARY KEY,
		value TEXT NOT NULL,
		updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
	);
	`

	if _, err := db.Exec(schema); err != nil {
		return fmt.Errorf("failed to create schema: %w", err)
	}

	// Migrate existing tables to add new columns
	migrations := []string{
		// Add is_admin to users if it doesn't exist
		`ALTER TABLE users ADD COLUMN IF NOT EXISTS is_admin BOOLEAN NOT NULL DEFAULT false`,
		// Add new columns to runners if they don't exist
		`ALTER TABLE runners ADD COLUMN IF NOT EXISTS registration_token TEXT`,
		`ALTER TABLE runners ADD COLUMN IF NOT EXISTS runner_secret TEXT`,
		`ALTER TABLE runners ADD COLUMN IF NOT EXISTS manager_secret TEXT`,
		`ALTER TABLE runners ADD COLUMN IF NOT EXISTS verified BOOLEAN NOT NULL DEFAULT false`,
		`ALTER TABLE runners ADD COLUMN IF NOT EXISTS priority INTEGER NOT NULL DEFAULT 100`,
		// Add allow_parallel_runners to jobs if it doesn't exist
		`ALTER TABLE jobs ADD COLUMN IF NOT EXISTS allow_parallel_runners BOOLEAN NOT NULL DEFAULT true`,
		// Add timeout_seconds to jobs if it doesn't exist
		`ALTER TABLE jobs ADD COLUMN IF NOT EXISTS timeout_seconds INTEGER DEFAULT 86400`,
		// Add blend_metadata to jobs if it doesn't exist
		`ALTER TABLE jobs ADD COLUMN IF NOT EXISTS blend_metadata TEXT`,
		// Add job_type to jobs if it doesn't exist
		`ALTER TABLE jobs ADD COLUMN IF NOT EXISTS job_type TEXT DEFAULT 'render'`,
		// Add task_type to tasks if it doesn't exist
		`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS task_type TEXT DEFAULT 'render'`,
		// Add new columns to tasks if they don't exist
		`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS current_step TEXT`,
		`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS retry_count INTEGER DEFAULT 0`,
		`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS max_retries INTEGER DEFAULT 3`,
		`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS timeout_seconds INTEGER`,
		// Add updated_at columns for ETag support
		`ALTER TABLE jobs ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`,
		`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`,
		// Migrate file_size from INTEGER to BIGINT to support large files (>2GB).
		// DuckDB doesn't support direct ALTER COLUMN TYPE, so we use a workaround:
		//   1. Add new column as BIGINT
		//   2. Copy data from old column
		//   3. Drop old column
		//   4. Rename new column
		// Note: This will only run if the column exists and is INTEGER
		`ALTER TABLE job_files ADD COLUMN IF NOT EXISTS file_size_new BIGINT`,
		`UPDATE job_files SET file_size_new = CAST(file_size AS BIGINT) WHERE file_size_new IS NULL`,
		`ALTER TABLE job_files DROP COLUMN IF EXISTS file_size`,
		`ALTER TABLE job_files RENAME COLUMN file_size_new TO file_size`,
	}

	for _, migration := range migrations {
		// DuckDB supports IF NOT EXISTS for ALTER TABLE, so these are safe to re-run.
		// Errors are not fatal: a column might already exist, a table might not exist
		// yet, or the file_size migration might already have been applied. Log and
		// continue rather than failing startup.
		if _, err := db.Exec(migration); err != nil {
			log.Printf("Note: migration statement skipped: %v", err)
		}
	}

	// Initialize the registration_enabled setting (default: true) if it doesn't exist
	var settingCount int
	err := db.QueryRow("SELECT COUNT(*) FROM settings WHERE key = ?", "registration_enabled").Scan(&settingCount)
	if err == nil && settingCount == 0 {
		_, err = db.Exec("INSERT INTO settings (key, value) VALUES (?, ?)", "registration_enabled", "true")
		if err != nil {
			// Log but don't fail - the setting might have been created by another process
			log.Printf("Note: Could not initialize registration_enabled setting: %v", err)
		}
	}

	return nil
}

// Close closes the database connection.
func (db *DB) Close() error {
	return db.DB.Close()
}
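
// Reading a value back from the settings table is a single-row lookup. A sketch of
// a hypothetical helper follows (GetSetting is not part of this file; it only
// illustrates how the table created above is meant to be queried):
//
//	func (db *DB) GetSetting(key string) (string, error) {
//		var value string
//		err := db.QueryRow("SELECT value FROM settings WHERE key = ?", key).Scan(&value)
//		if err != nil {
//			return "", fmt.Errorf("failed to read setting %q: %w", key, err)
//		}
//		return value, nil
//	}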