Update .gitignore to include log files and database journal files. Modify go.mod to update dependencies for go-sqlite3 and cloud.google.com/go/compute/metadata. Enhance Makefile to include logging options for manager and runner commands. Introduce new job token handling in auth package and implement database migration scripts. Refactor manager and runner components to improve job processing and metadata extraction. Add support for video preview in frontend components and enhance WebSocket management for channel subscriptions.
.gitignore (vendored, +4)
@@ -27,7 +27,11 @@ go.work
jiggablend.db
jiggablend.db.wal
jiggablend.db-shm
jiggablend.db-journal

# Log files
*.log
logs/
# Secrets and configuration
runner-secrets.json
runner-secrets-*.json
Makefile (8 changes)
@@ -36,20 +36,20 @@ run: cleanup build init-test
 	@echo "Starting manager and runner in parallel..."
 	@echo "Press Ctrl+C to stop both..."
 	@trap 'kill $$MANAGER_PID $$RUNNER_PID 2>/dev/null; exit' INT TERM; \
-	bin/jiggablend manager & \
+	bin/jiggablend manager -l manager.log & \
 	MANAGER_PID=$$!; \
 	sleep 2; \
-	bin/jiggablend runner --api-key=jk_r0_test_key_123456789012345678901234567890 & \
+	bin/jiggablend runner -l runner.log --api-key=jk_r0_test_key_123456789012345678901234567890 & \
 	RUNNER_PID=$$!; \
 	wait $$MANAGER_PID $$RUNNER_PID

 # Run manager server
 run-manager: cleanup-manager build init-test
-	bin/jiggablend manager
+	bin/jiggablend manager -l manager.log

 # Run runner
 run-runner: cleanup-runner build
-	bin/jiggablend runner --api-key=jk_r0_test_key_123456789012345678901234567890
+	bin/jiggablend runner -l runner.log --api-key=jk_r0_test_key_123456789012345678901234567890

 # Initialize for testing (first run setup)
 init-test: build
@@ -6,11 +6,11 @@ import (
 	"os/exec"
 	"strings"

-	"jiggablend/internal/api"
 	"jiggablend/internal/auth"
 	"jiggablend/internal/config"
 	"jiggablend/internal/database"
 	"jiggablend/internal/logger"
+	manager "jiggablend/internal/manager"
 	"jiggablend/internal/storage"

 	"github.com/spf13/cobra"
@@ -117,8 +117,16 @@ func runManager(cmd *cobra.Command, args []string) {
 	}
 	logger.Info("Blender is available")

-	// Create API server
-	server, err := api.NewServer(db, cfg, authHandler, storageHandler)
+	// Check if ImageMagick is available
+	if err := checkImageMagickAvailable(); err != nil {
+		logger.Fatalf("ImageMagick is not available: %v\n"+
+			"The manager requires ImageMagick to be installed and in PATH for EXR preview conversion.\n"+
+			"Please install ImageMagick and ensure 'magick' or 'convert' command is accessible.", err)
+	}
+	logger.Info("ImageMagick is available")
+
+	// Create manager server
+	server, err := manager.NewManager(db, cfg, authHandler, storageHandler)
 	if err != nil {
 		logger.Fatalf("Failed to create server: %v", err)
 	}
@@ -150,3 +158,20 @@ func checkBlenderAvailable() error {
 	}
 	return nil
 }

+func checkImageMagickAvailable() error {
+	// Try 'magick' first (ImageMagick 7+)
+	cmd := exec.Command("magick", "--version")
+	output, err := cmd.CombinedOutput()
+	if err == nil {
+		return nil
+	}
+
+	// Fall back to 'convert' (ImageMagick 6 or legacy mode)
+	cmd = exec.Command("convert", "--version")
+	output, err = cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("failed to run 'magick --version' or 'convert --version': %w (output: %s)", err, string(output))
+	}
+	return nil
+}
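Reviewer note: for context, the EXR preview conversion the manager needs ImageMagick for can be invoked along these lines. This is an illustrative sketch only — the manager's actual conversion code is not part of this diff, and the colorspace flag and file names are assumptions, not taken from the codebase.

package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Hypothetical invocation: convert an EXR render to a PNG preview
	// using the 'magick' CLI (ImageMagick 7+).
	cmd := exec.Command("magick", "frame_0800.exr", "-colorspace", "sRGB", "frame_0800.png")
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("conversion failed: %v (output: %s)", err, out)
	}
	fmt.Println("wrote frame_0800.png")
}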
@@ -36,6 +36,7 @@ func init() {
 	runnerCmd.Flags().StringP("log-file", "l", "", "Log file path (truncated on start, if not set logs only to stdout)")
 	runnerCmd.Flags().String("log-level", "info", "Log level (debug, info, warn, error)")
 	runnerCmd.Flags().BoolP("verbose", "v", false, "Enable verbose logging (same as --log-level=debug)")
+	runnerCmd.Flags().Duration("poll-interval", 5*time.Second, "Job polling interval")

 	// Bind flags to viper with JIGGABLEND_ prefix
 	runnerViper.SetEnvPrefix("JIGGABLEND")
@@ -49,6 +50,7 @@ func init() {
 	runnerViper.BindPFlag("log_file", runnerCmd.Flags().Lookup("log-file"))
 	runnerViper.BindPFlag("log_level", runnerCmd.Flags().Lookup("log-level"))
 	runnerViper.BindPFlag("verbose", runnerCmd.Flags().Lookup("verbose"))
+	runnerViper.BindPFlag("poll_interval", runnerCmd.Flags().Lookup("poll-interval"))
 }

 func runRunner(cmd *cobra.Command, args []string) {
@@ -60,14 +62,15 @@ func runRunner(cmd *cobra.Command, args []string) {
 	logFile := runnerViper.GetString("log_file")
 	logLevel := runnerViper.GetString("log_level")
 	verbose := runnerViper.GetBool("verbose")
+	pollInterval := runnerViper.GetDuration("poll_interval")

-	var client *runner.Client
+	var r *runner.Runner

 	defer func() {
-		if r := recover(); r != nil {
-			logger.Errorf("Runner panicked: %v", r)
-			if client != nil {
-				client.CleanupWorkspace()
+		if rec := recover(); rec != nil {
+			logger.Errorf("Runner panicked: %v", rec)
+			if r != nil {
+				r.Cleanup()
 			}
 			os.Exit(1)
 		}
@@ -77,7 +80,7 @@ func runRunner(cmd *cobra.Command, args []string) {
 		hostname, _ = os.Hostname()
 	}

-	// Generate unique runner ID
+	// Generate unique runner ID suffix
 	runnerIDStr := generateShortID()

 	// Generate runner name with ID if not provided
@@ -114,23 +117,24 @@ func runRunner(cmd *cobra.Command, args []string) {
 		logger.Infof("Logging to file: %s", logFile)
 	}

-	client = runner.NewClient(managerURL, name, hostname)
+	// Create runner
+	r = runner.New(managerURL, name, hostname)
+
+	// Check for required tools early to fail fast
+	if err := r.CheckRequiredTools(); err != nil {
+		logger.Fatalf("Required tool check failed: %v", err)
+	}

 	// Clean up orphaned workspace directories
-	client.CleanupWorkspace()
+	r.Cleanup()

-	// Probe capabilities
+	// Probe capabilities and log them
 	logger.Debug("Probing runner capabilities...")
-	client.ProbeCapabilities()
-	capabilities := client.GetCapabilities()
+	capabilities := r.ProbeCapabilities()
 	capList := []string{}
 	for cap, value := range capabilities {
 		if enabled, ok := value.(bool); ok && enabled {
 			capList = append(capList, cap)
 		} else if count, ok := value.(int); ok && count > 0 {
 			capList = append(capList, fmt.Sprintf("%s=%d", cap, count))
+		} else if count, ok := value.(float64); ok && count > 0 {
+			capList = append(capList, fmt.Sprintf("%s=%.0f", cap, count))
 		}
 	}
 	if len(capList) > 0 {
@@ -154,7 +158,7 @@ func runRunner(cmd *cobra.Command, args []string) {

 	for {
 		var err error
-		runnerID, _, _, err = client.Register(apiKey)
+		runnerID, err = r.Register(apiKey)
 		if err == nil {
 			logger.Infof("Registered runner with ID: %d", runnerID)
 			break
@@ -178,14 +182,6 @@ func runRunner(cmd *cobra.Command, args []string) {
 		}
 	}

-	// Start WebSocket connection
-	go client.ConnectWebSocketWithReconnect()
-
-	// Start heartbeat loop
-	go client.HeartbeatLoop()
-
-	logger.Info("Runner started, connecting to manager via WebSocket...")
-
 	// Signal handlers
 	sigChan := make(chan os.Signal, 1)
 	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
@@ -193,13 +189,14 @@ func runRunner(cmd *cobra.Command, args []string) {
 	go func() {
 		sig := <-sigChan
 		logger.Infof("Received signal: %v, killing all processes and cleaning up...", sig)
-		client.KillAllProcesses()
-		client.CleanupWorkspace()
+		r.KillAllProcesses()
+		r.Cleanup()
 		os.Exit(0)
 	}()

-	// Block forever
-	select {}
+	// Start polling for jobs
+	logger.Infof("Runner started, polling for jobs (interval: %v)...", pollInterval)
+	r.Start(pollInterval)
 }

 func generateShortID() string {
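Reviewer note: the runner now polls for jobs instead of holding a WebSocket open. The runner.Runner internals are not shown in this diff; the sketch below illustrates only the generic polling pattern behind an entry point like r.Start(pollInterval). All names (fetchNextJob, runJob) are hypothetical stand-ins, not the actual API.

package main

import (
	"fmt"
	"time"
)

// pollLoop wakes on a fixed interval, asks the manager for work, and runs
// whatever it gets. fetchNextJob/runJob are placeholders for illustration.
func pollLoop(pollInterval time.Duration) {
	ticker := time.NewTicker(pollInterval)
	defer ticker.Stop()
	for range ticker.C {
		job, ok, err := fetchNextJob()
		if err != nil {
			fmt.Println("poll failed:", err)
			continue // transient error; try again on the next tick
		}
		if !ok {
			continue // no work available
		}
		runJob(job)
	}
}

func fetchNextJob() (string, bool, error) { return "", false, nil }
func runJob(job string)                   { fmt.Println("running", job) }

func main() { pollLoop(5 * time.Second) }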
examples/frame_0800.exr (new binary file, not shown)
examples/frame_0800.png (new binary file, 24 MiB)
go.mod (9 changes)
@@ -4,10 +4,9 @@ go 1.25.4

 require (
 	github.com/go-chi/chi/v5 v5.2.3
-	github.com/go-chi/cors v1.2.2
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/websocket v1.5.3
-	github.com/mattn/go-sqlite3 v1.14.22
+	github.com/mattn/go-sqlite3 v1.14.32
 	github.com/spf13/cobra v1.10.1
 	github.com/spf13/viper v1.21.0
 	golang.org/x/crypto v0.45.0
@@ -15,10 +14,14 @@ require (
 )

 require (
-	cloud.google.com/go/compute/metadata v0.3.0 // indirect
+	cloud.google.com/go/compute/metadata v0.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/fsnotify/fsnotify v1.9.0 // indirect
+	github.com/go-chi/cors v1.2.2 // indirect
 	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+	github.com/golang-migrate/migrate/v4 v4.19.0 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
go.sum (11 changes)
@@ -1,5 +1,7 @@
 cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
 cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -13,12 +15,19 @@ github.com/go-chi/cors v1.2.2 h1:Jmey33TE+b+rB7fT8MUy1u0I4L+NARQlK6LhzKPSyQE=
 github.com/go-chi/cors v1.2.2/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
 github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
 github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/golang-migrate/migrate/v4 v4.19.0 h1:RcjOnCGz3Or6HQYEJ/EEVLfWnmw9KnoigPSjzhCuaSE=
+github.com/golang-migrate/migrate/v4 v4.19.0/go.mod h1:9dyEcu+hO+G9hPSw8AIg50yg622pXJsoHItQnDGZkI0=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
 github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -27,6 +36,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
+github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
 github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
(One file diff suppressed because it is too large.)
internal/auth/jobtoken.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package auth

import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"time"
)

// JobTokenDuration is the validity period for job tokens
const JobTokenDuration = 1 * time.Hour

// JobTokenClaims represents the claims in a job token
type JobTokenClaims struct {
	JobID    int64 `json:"job_id"`
	RunnerID int64 `json:"runner_id"`
	TaskID   int64 `json:"task_id"`
	Exp      int64 `json:"exp"` // Unix timestamp
}

// jobTokenSecret is the secret used to sign job tokens
// Generated once at startup and kept in memory
var jobTokenSecret []byte

func init() {
	// Generate a random secret for signing job tokens
	// This means tokens are invalidated on server restart, which is acceptable
	// for short-lived job tokens
	jobTokenSecret = make([]byte, 32)
	if _, err := rand.Read(jobTokenSecret); err != nil {
		panic(fmt.Sprintf("failed to generate job token secret: %v", err))
	}
}

// GenerateJobToken creates a new job token for a specific job/runner/task combination
func GenerateJobToken(jobID, runnerID, taskID int64) (string, error) {
	claims := JobTokenClaims{
		JobID:    jobID,
		RunnerID: runnerID,
		TaskID:   taskID,
		Exp:      time.Now().Add(JobTokenDuration).Unix(),
	}

	// Encode claims to JSON
	claimsJSON, err := json.Marshal(claims)
	if err != nil {
		return "", fmt.Errorf("failed to marshal claims: %w", err)
	}

	// Create HMAC signature
	h := hmac.New(sha256.New, jobTokenSecret)
	h.Write(claimsJSON)
	signature := h.Sum(nil)

	// Combine claims and signature: base64(claims).base64(signature)
	token := base64.RawURLEncoding.EncodeToString(claimsJSON) + "." +
		base64.RawURLEncoding.EncodeToString(signature)

	return token, nil
}

// ValidateJobToken validates a job token and returns the claims if valid
func ValidateJobToken(token string) (*JobTokenClaims, error) {
	// Split token into claims and signature
	var claimsB64, sigB64 string
	dotIdx := -1
	for i := len(token) - 1; i >= 0; i-- {
		if token[i] == '.' {
			dotIdx = i
			break
		}
	}
	if dotIdx == -1 {
		return nil, fmt.Errorf("invalid token format")
	}
	claimsB64 = token[:dotIdx]
	sigB64 = token[dotIdx+1:]

	// Decode claims
	claimsJSON, err := base64.RawURLEncoding.DecodeString(claimsB64)
	if err != nil {
		return nil, fmt.Errorf("invalid token encoding: %w", err)
	}

	// Decode signature
	signature, err := base64.RawURLEncoding.DecodeString(sigB64)
	if err != nil {
		return nil, fmt.Errorf("invalid signature encoding: %w", err)
	}

	// Verify signature
	h := hmac.New(sha256.New, jobTokenSecret)
	h.Write(claimsJSON)
	expectedSig := h.Sum(nil)
	if !hmac.Equal(signature, expectedSig) {
		return nil, fmt.Errorf("invalid signature")
	}

	// Parse claims
	var claims JobTokenClaims
	if err := json.Unmarshal(claimsJSON, &claims); err != nil {
		return nil, fmt.Errorf("invalid claims: %w", err)
	}

	// Check expiration
	if time.Now().Unix() > claims.Exp {
		return nil, fmt.Errorf("token expired")
	}

	return &claims, nil
}
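Reviewer note: usage is a simple round trip — the manager mints a token when it hands a task to a runner and validates it when the runner calls back. A minimal sketch, assuming it runs inside the jiggablend module (the job/runner/task IDs are made up):

package main

import (
	"fmt"
	"log"

	"jiggablend/internal/auth"
)

func main() {
	// Mint a token binding job 42 / runner 7 / task 99; it expires after JobTokenDuration.
	token, err := auth.GenerateJobToken(42, 7, 99)
	if err != nil {
		log.Fatal(err)
	}

	// Later, verify the token a runner presents and recover the claims.
	claims, err := auth.ValidateJobToken(token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("job=%d runner=%d task=%d\n", claims.JobID, claims.RunnerID, claims.TaskID)
}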
internal/database/migrations/000001_initial_schema.down.sql (new file, 36 lines)
@@ -0,0 +1,36 @@
-- Drop indexes
DROP INDEX IF EXISTS idx_sessions_expires_at;
DROP INDEX IF EXISTS idx_sessions_user_id;
DROP INDEX IF EXISTS idx_sessions_session_id;
DROP INDEX IF EXISTS idx_runners_last_heartbeat;
DROP INDEX IF EXISTS idx_task_steps_task_id;
DROP INDEX IF EXISTS idx_task_logs_runner_id;
DROP INDEX IF EXISTS idx_task_logs_task_id_id;
DROP INDEX IF EXISTS idx_task_logs_task_id_created_at;
DROP INDEX IF EXISTS idx_runners_api_key_id;
DROP INDEX IF EXISTS idx_runner_api_keys_created_by;
DROP INDEX IF EXISTS idx_runner_api_keys_active;
DROP INDEX IF EXISTS idx_runner_api_keys_prefix;
DROP INDEX IF EXISTS idx_job_files_job_id;
DROP INDEX IF EXISTS idx_tasks_started_at;
DROP INDEX IF EXISTS idx_tasks_job_status;
DROP INDEX IF EXISTS idx_tasks_status;
DROP INDEX IF EXISTS idx_tasks_runner_id;
DROP INDEX IF EXISTS idx_tasks_job_id;
DROP INDEX IF EXISTS idx_jobs_user_status_created;
DROP INDEX IF EXISTS idx_jobs_status;
DROP INDEX IF EXISTS idx_jobs_user_id;

-- Drop tables (order matters due to foreign keys)
DROP TABLE IF EXISTS sessions;
DROP TABLE IF EXISTS settings;
DROP TABLE IF EXISTS task_steps;
DROP TABLE IF EXISTS task_logs;
DROP TABLE IF EXISTS manager_secrets;
DROP TABLE IF EXISTS job_files;
DROP TABLE IF EXISTS tasks;
DROP TABLE IF EXISTS runners;
DROP TABLE IF EXISTS jobs;
DROP TABLE IF EXISTS runner_api_keys;
DROP TABLE IF EXISTS users;
internal/database/migrations/000001_initial_schema.up.sql (new file, 184 lines)
@@ -0,0 +1,184 @@
-- Enable foreign keys for SQLite
PRAGMA foreign_keys = ON;

-- Users table
CREATE TABLE users (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    email TEXT UNIQUE NOT NULL,
    name TEXT NOT NULL,
    oauth_provider TEXT NOT NULL,
    oauth_id TEXT NOT NULL,
    password_hash TEXT,
    is_admin INTEGER NOT NULL DEFAULT 0,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    UNIQUE(oauth_provider, oauth_id)
);

-- Runner API keys table
CREATE TABLE runner_api_keys (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    key_prefix TEXT NOT NULL,
    key_hash TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    scope TEXT NOT NULL DEFAULT 'user',
    is_active INTEGER NOT NULL DEFAULT 1,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    created_by INTEGER,
    FOREIGN KEY (created_by) REFERENCES users(id),
    UNIQUE(key_prefix)
);

-- Jobs table
CREATE TABLE jobs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    user_id INTEGER NOT NULL,
    job_type TEXT NOT NULL DEFAULT 'render',
    name TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'pending',
    progress REAL NOT NULL DEFAULT 0.0,
    frame_start INTEGER,
    frame_end INTEGER,
    output_format TEXT,
    blend_metadata TEXT,
    retry_count INTEGER NOT NULL DEFAULT 0,
    max_retries INTEGER NOT NULL DEFAULT 3,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    started_at TIMESTAMP,
    completed_at TIMESTAMP,
    error_message TEXT,
    assigned_runner_id INTEGER,
    FOREIGN KEY (user_id) REFERENCES users(id)
);

-- Runners table
CREATE TABLE runners (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    hostname TEXT NOT NULL,
    ip_address TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'offline',
    last_heartbeat TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    capabilities TEXT,
    api_key_id INTEGER,
    api_key_scope TEXT NOT NULL DEFAULT 'user',
    priority INTEGER NOT NULL DEFAULT 100,
    fingerprint TEXT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (api_key_id) REFERENCES runner_api_keys(id)
);

-- Tasks table
CREATE TABLE tasks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    job_id INTEGER NOT NULL,
    runner_id INTEGER,
    frame INTEGER NOT NULL,
    status TEXT NOT NULL DEFAULT 'pending',
    output_path TEXT,
    task_type TEXT NOT NULL DEFAULT 'render',
    current_step TEXT,
    retry_count INTEGER NOT NULL DEFAULT 0,
    max_retries INTEGER NOT NULL DEFAULT 3,
    runner_failure_count INTEGER NOT NULL DEFAULT 0,
    timeout_seconds INTEGER,
    condition TEXT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    started_at TIMESTAMP,
    completed_at TIMESTAMP,
    error_message TEXT,
    FOREIGN KEY (job_id) REFERENCES jobs(id),
    FOREIGN KEY (runner_id) REFERENCES runners(id)
);

-- Job files table
CREATE TABLE job_files (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    job_id INTEGER NOT NULL,
    file_type TEXT NOT NULL,
    file_path TEXT NOT NULL,
    file_name TEXT NOT NULL,
    file_size INTEGER NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (job_id) REFERENCES jobs(id)
);

-- Manager secrets table
CREATE TABLE manager_secrets (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    secret TEXT UNIQUE NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Task logs table
CREATE TABLE task_logs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    task_id INTEGER NOT NULL,
    runner_id INTEGER,
    log_level TEXT NOT NULL,
    message TEXT NOT NULL,
    step_name TEXT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (task_id) REFERENCES tasks(id),
    FOREIGN KEY (runner_id) REFERENCES runners(id)
);

-- Task steps table
CREATE TABLE task_steps (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    task_id INTEGER NOT NULL,
    step_name TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'pending',
    started_at TIMESTAMP,
    completed_at TIMESTAMP,
    duration_ms INTEGER,
    error_message TEXT,
    FOREIGN KEY (task_id) REFERENCES tasks(id)
);

-- Settings table
CREATE TABLE settings (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Sessions table
CREATE TABLE sessions (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    session_id TEXT UNIQUE NOT NULL,
    user_id INTEGER NOT NULL,
    email TEXT NOT NULL,
    name TEXT NOT NULL,
    is_admin INTEGER NOT NULL DEFAULT 0,
    expires_at TIMESTAMP NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (user_id) REFERENCES users(id)
);

-- Indexes
CREATE INDEX idx_jobs_user_id ON jobs(user_id);
CREATE INDEX idx_jobs_status ON jobs(status);
CREATE INDEX idx_jobs_user_status_created ON jobs(user_id, status, created_at DESC);
CREATE INDEX idx_tasks_job_id ON tasks(job_id);
CREATE INDEX idx_tasks_runner_id ON tasks(runner_id);
CREATE INDEX idx_tasks_status ON tasks(status);
CREATE INDEX idx_tasks_job_status ON tasks(job_id, status);
CREATE INDEX idx_tasks_started_at ON tasks(started_at);
CREATE INDEX idx_job_files_job_id ON job_files(job_id);
CREATE INDEX idx_runner_api_keys_prefix ON runner_api_keys(key_prefix);
CREATE INDEX idx_runner_api_keys_active ON runner_api_keys(is_active);
CREATE INDEX idx_runner_api_keys_created_by ON runner_api_keys(created_by);
CREATE INDEX idx_runners_api_key_id ON runners(api_key_id);
CREATE INDEX idx_task_logs_task_id_created_at ON task_logs(task_id, created_at);
CREATE INDEX idx_task_logs_task_id_id ON task_logs(task_id, id DESC);
CREATE INDEX idx_task_logs_runner_id ON task_logs(runner_id);
CREATE INDEX idx_task_steps_task_id ON task_steps(task_id);
CREATE INDEX idx_runners_last_heartbeat ON runners(last_heartbeat);
CREATE INDEX idx_sessions_session_id ON sessions(session_id);
CREATE INDEX idx_sessions_user_id ON sessions(user_id);
CREATE INDEX idx_sessions_expires_at ON sessions(expires_at);

-- Initialize registration_enabled setting
INSERT INTO settings (key, value, updated_at) VALUES ('registration_enabled', 'true', CURRENT_TIMESTAMP);
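Reviewer note: for context, the composite index idx_jobs_user_status_created is shaped for dashboard-style listings. The sketch below is illustrative only — the query text is not from this diff — but its WHERE and ORDER BY match the index column-for-column:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "jiggablend.db?_journal_mode=WAL&_busy_timeout=5000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// user_id and status as equality filters, created_at DESC for ordering:
	// exactly the shape idx_jobs_user_status_created serves.
	rows, err := db.Query(
		`SELECT id, name, progress FROM jobs
		 WHERE user_id = ? AND status = ?
		 ORDER BY created_at DESC`, 1, "running")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var (
			id       int64
			name     string
			progress float64
		)
		if err := rows.Scan(&id, &name, &progress); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, name, progress)
	}
}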
@@ -2,26 +2,44 @@ package database

 import (
 	"database/sql"
+	"embed"
 	"fmt"
+	"io/fs"
 	"log"
-	"sync"

+	"github.com/golang-migrate/migrate/v4"
+	"github.com/golang-migrate/migrate/v4/database/sqlite3"
+	"github.com/golang-migrate/migrate/v4/source/iofs"
 	_ "github.com/mattn/go-sqlite3"
 )

-// DB wraps the database connection with mutex protection
+//go:embed migrations/*.sql
+var migrationsFS embed.FS
+
+// DB wraps the database connection
+// Note: No mutex needed - we only have one connection per process and SQLite with WAL mode
+// handles concurrent access safely
 type DB struct {
-	db *sql.DB
-	mu sync.Mutex
+	db *sql.DB
 }

 // NewDB creates a new database connection
 func NewDB(dbPath string) (*DB, error) {
-	db, err := sql.Open("sqlite3", dbPath)
+	// Use WAL mode for better concurrency (allows readers and writers simultaneously)
+	// Add timeout and busy handler for better concurrent access
+	db, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_busy_timeout=5000")
 	if err != nil {
 		return nil, fmt.Errorf("failed to open database: %w", err)
 	}

+	// Configure connection pool for better concurrency
+	// SQLite with WAL mode supports multiple concurrent readers and one writer
+	// Increasing pool size allows multiple HTTP requests to query the database simultaneously
+	// This prevents blocking when multiple requests come in (e.g., on page refresh)
+	db.SetMaxOpenConns(10)   // Allow up to 10 concurrent connections
+	db.SetMaxIdleConns(5)    // Keep 5 idle connections ready
+	db.SetConnMaxLifetime(0) // Connections don't expire
+
 	if err := db.Ping(); err != nil {
 		return nil, fmt.Errorf("failed to ping database: %w", err)
 	}
@@ -31,30 +49,37 @@ func NewDB(dbPath string) (*DB, error) {
 		return nil, fmt.Errorf("failed to enable foreign keys: %w", err)
 	}

+	// Enable WAL mode explicitly (in case the connection string didn't work)
+	if _, err := db.Exec("PRAGMA journal_mode = WAL"); err != nil {
+		log.Printf("Warning: Failed to enable WAL mode: %v", err)
+	}
+
 	database := &DB{db: db}
 	if err := database.migrate(); err != nil {
 		return nil, fmt.Errorf("failed to migrate database: %w", err)
 	}

+	// Verify connection is still open after migration
+	if err := db.Ping(); err != nil {
+		return nil, fmt.Errorf("database connection closed after migration: %w", err)
+	}
+
 	return database, nil
 }

-// With executes a function with mutex-protected access to the database
+// With executes a function with access to the database
 // The function receives the underlying *sql.DB connection
+// No mutex needed - single connection + WAL mode handles concurrency
 func (db *DB) With(fn func(*sql.DB) error) error {
-	db.mu.Lock()
-	defer db.mu.Unlock()
 	return fn(db.db)
 }

-// WithTx executes a function within a transaction with mutex protection
+// WithTx executes a function within a transaction
 // The function receives a *sql.Tx transaction
 // If the function returns an error, the transaction is rolled back
 // If the function returns nil, the transaction is committed
+// No mutex needed - single connection + WAL mode handles concurrency
 func (db *DB) WithTx(fn func(*sql.Tx) error) error {
-	db.mu.Lock()
-	defer db.mu.Unlock()
-
 	tx, err := db.db.Begin()
 	if err != nil {
 		return fmt.Errorf("failed to begin transaction: %w", err)
@@ -74,234 +99,61 @@ func (db *DB) WithTx(fn func(*sql.Tx) error) error {
 	return nil
 }

-// migrate runs database migrations
+// migrate runs database migrations using golang-migrate
 func (db *DB) migrate() error {
-	// SQLite uses INTEGER PRIMARY KEY AUTOINCREMENT instead of sequences
-	schema := `
-	CREATE TABLE IF NOT EXISTS users (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		email TEXT UNIQUE NOT NULL,
-		name TEXT NOT NULL,
-		oauth_provider TEXT NOT NULL,
-		oauth_id TEXT NOT NULL,
-		password_hash TEXT,
-		is_admin INTEGER NOT NULL DEFAULT 0,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		UNIQUE(oauth_provider, oauth_id)
-	);
-
-	CREATE TABLE IF NOT EXISTS runner_api_keys (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		key_prefix TEXT NOT NULL,
-		key_hash TEXT NOT NULL,
-		name TEXT NOT NULL,
-		description TEXT,
-		scope TEXT NOT NULL DEFAULT 'user',
-		is_active INTEGER NOT NULL DEFAULT 1,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		created_by INTEGER,
-		FOREIGN KEY (created_by) REFERENCES users(id),
-		UNIQUE(key_prefix)
-	);
-
-	CREATE TABLE IF NOT EXISTS jobs (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		user_id INTEGER NOT NULL,
-		job_type TEXT NOT NULL DEFAULT 'render',
-		name TEXT NOT NULL,
-		status TEXT NOT NULL DEFAULT 'pending',
-		progress REAL NOT NULL DEFAULT 0.0,
-		frame_start INTEGER,
-		frame_end INTEGER,
-		output_format TEXT,
-		allow_parallel_runners INTEGER NOT NULL DEFAULT 1,
-		timeout_seconds INTEGER DEFAULT 86400,
-		blend_metadata TEXT,
-		retry_count INTEGER NOT NULL DEFAULT 0,
-		max_retries INTEGER NOT NULL DEFAULT 3,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		started_at TIMESTAMP,
-		completed_at TIMESTAMP,
-		error_message TEXT,
-		FOREIGN KEY (user_id) REFERENCES users(id)
-	);
-
-	CREATE TABLE IF NOT EXISTS runners (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		name TEXT NOT NULL,
-		hostname TEXT NOT NULL,
-		ip_address TEXT NOT NULL,
-		status TEXT NOT NULL DEFAULT 'offline',
-		last_heartbeat TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		capabilities TEXT,
-		api_key_id INTEGER,
-		api_key_scope TEXT NOT NULL DEFAULT 'user',
-		priority INTEGER NOT NULL DEFAULT 100,
-		fingerprint TEXT,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		FOREIGN KEY (api_key_id) REFERENCES runner_api_keys(id)
-	);
-
-	CREATE TABLE IF NOT EXISTS tasks (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		job_id INTEGER NOT NULL,
-		runner_id INTEGER,
-		frame_start INTEGER NOT NULL,
-		frame_end INTEGER NOT NULL,
-		status TEXT NOT NULL DEFAULT 'pending',
-		output_path TEXT,
-		task_type TEXT NOT NULL DEFAULT 'render',
-		current_step TEXT,
-		retry_count INTEGER NOT NULL DEFAULT 0,
-		max_retries INTEGER NOT NULL DEFAULT 3,
-		timeout_seconds INTEGER,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		started_at TIMESTAMP,
-		completed_at TIMESTAMP,
-		error_message TEXT,
-		FOREIGN KEY (job_id) REFERENCES jobs(id),
-		FOREIGN KEY (runner_id) REFERENCES runners(id)
-	);
-
-	CREATE TABLE IF NOT EXISTS job_files (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		job_id INTEGER NOT NULL,
-		file_type TEXT NOT NULL,
-		file_path TEXT NOT NULL,
-		file_name TEXT NOT NULL,
-		file_size INTEGER NOT NULL,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		FOREIGN KEY (job_id) REFERENCES jobs(id)
-	);
-
-	CREATE TABLE IF NOT EXISTS manager_secrets (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		secret TEXT UNIQUE NOT NULL,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
-	);
-
-	CREATE TABLE IF NOT EXISTS task_logs (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		task_id INTEGER NOT NULL,
-		runner_id INTEGER,
-		log_level TEXT NOT NULL,
-		message TEXT NOT NULL,
-		step_name TEXT,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		FOREIGN KEY (task_id) REFERENCES tasks(id),
-		FOREIGN KEY (runner_id) REFERENCES runners(id)
-	);
-
-	CREATE TABLE IF NOT EXISTS task_steps (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		task_id INTEGER NOT NULL,
-		step_name TEXT NOT NULL,
-		status TEXT NOT NULL DEFAULT 'pending',
-		started_at TIMESTAMP,
-		completed_at TIMESTAMP,
-		duration_ms INTEGER,
-		error_message TEXT,
-		FOREIGN KEY (task_id) REFERENCES tasks(id)
-	);
-
-	CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id);
-	CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
-	CREATE INDEX IF NOT EXISTS idx_jobs_user_status_created ON jobs(user_id, status, created_at DESC);
-	CREATE INDEX IF NOT EXISTS idx_tasks_job_id ON tasks(job_id);
-	CREATE INDEX IF NOT EXISTS idx_tasks_runner_id ON tasks(runner_id);
-	CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status);
-	CREATE INDEX IF NOT EXISTS idx_tasks_job_status ON tasks(job_id, status);
-	CREATE INDEX IF NOT EXISTS idx_tasks_started_at ON tasks(started_at);
-	CREATE INDEX IF NOT EXISTS idx_job_files_job_id ON job_files(job_id);
-	CREATE INDEX IF NOT EXISTS idx_runner_api_keys_prefix ON runner_api_keys(key_prefix);
-	CREATE INDEX IF NOT EXISTS idx_runner_api_keys_active ON runner_api_keys(is_active);
-	CREATE INDEX IF NOT EXISTS idx_runner_api_keys_created_by ON runner_api_keys(created_by);
-	CREATE INDEX IF NOT EXISTS idx_runners_api_key_id ON runners(api_key_id);
-	CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_created_at ON task_logs(task_id, created_at);
-	CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_id ON task_logs(task_id, id DESC);
-	CREATE INDEX IF NOT EXISTS idx_task_logs_runner_id ON task_logs(runner_id);
-	CREATE INDEX IF NOT EXISTS idx_task_steps_task_id ON task_steps(task_id);
-	CREATE INDEX IF NOT EXISTS idx_runners_last_heartbeat ON runners(last_heartbeat);
-
-	CREATE TABLE IF NOT EXISTS settings (
-		key TEXT PRIMARY KEY,
-		value TEXT NOT NULL,
-		updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
-	);
-
-	CREATE TABLE IF NOT EXISTS sessions (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		session_id TEXT UNIQUE NOT NULL,
-		user_id INTEGER NOT NULL,
-		email TEXT NOT NULL,
-		name TEXT NOT NULL,
-		is_admin INTEGER NOT NULL DEFAULT 0,
-		expires_at TIMESTAMP NOT NULL,
-		created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-		FOREIGN KEY (user_id) REFERENCES users(id)
-	);
-
-	CREATE INDEX IF NOT EXISTS idx_sessions_session_id ON sessions(session_id);
-	CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
-	CREATE INDEX IF NOT EXISTS idx_sessions_expires_at ON sessions(expires_at);
-	`
-
-	if err := db.With(func(conn *sql.DB) error {
-		_, err := conn.Exec(schema)
-		return err
-	}); err != nil {
-		return fmt.Errorf("failed to create schema: %w", err)
+	// Create SQLite driver instance
+	// Note: We use db.db directly since we're in the same package and this is called during initialization
+	driver, err := sqlite3.WithInstance(db.db, &sqlite3.Config{})
+	if err != nil {
+		return fmt.Errorf("failed to create sqlite3 driver: %w", err)
 	}

-	// Database migrations for schema updates
-	// NOTE: Migrations are currently disabled since the database is cleared by 'make cleanup-manager'
-	// before running. All schema changes have been rolled into the main schema above.
-	// When ready to implement proper migrations for production, uncomment and populate this array.
-	// TODO: Implement proper database migration system for production use
-	migrations := []string{
-		// Future migrations will go here when we implement proper migration handling
+	// Create embedded filesystem source
+	migrationFS, err := fs.Sub(migrationsFS, "migrations")
+	if err != nil {
+		return fmt.Errorf("failed to create migration filesystem: %w", err)
 	}

-	for _, migration := range migrations {
-		if err := db.With(func(conn *sql.DB) error {
-			_, err := conn.Exec(migration)
-			return err
-		}); err != nil {
-			// Log but don't fail - column might already exist or table might not exist yet
-			// This is fine for migrations that run after schema creation
-			log.Printf("Migration warning: %v", err)
+	sourceDriver, err := iofs.New(migrationFS, ".")
+	if err != nil {
+		return fmt.Errorf("failed to create iofs source driver: %w", err)
 	}

+	// Create migrate instance
+	m, err := migrate.NewWithInstance("iofs", sourceDriver, "sqlite3", driver)
+	if err != nil {
+		return fmt.Errorf("failed to create migrate instance: %w", err)
+	}
+
+	// Run migrations
+	if err := m.Up(); err != nil {
+		// If the error is "no change", that's fine - database is already up to date
+		if err == migrate.ErrNoChange {
+			log.Printf("Database is already up to date")
+			// Don't close migrate instance - it may close the database connection
+			// The migrate instance will be garbage collected
+			return nil
+		}
+		// Don't close migrate instance on error either - it may close the DB
+		return fmt.Errorf("failed to run migrations: %w", err)
+	}

-	// Initialize registration_enabled setting (default: true) if it doesn't exist
-	var settingCount int
-	err := db.With(func(conn *sql.DB) error {
-		return conn.QueryRow("SELECT COUNT(*) FROM settings WHERE key = ?", "registration_enabled").Scan(&settingCount)
-	})
-	if err == nil && settingCount == 0 {
-		err = db.With(func(conn *sql.DB) error {
-			_, err := conn.Exec("INSERT INTO settings (key, value) VALUES (?, ?)", "registration_enabled", "true")
-			return err
-		})
-		if err != nil {
-			// Log but don't fail - setting might have been created by another process
-			log.Printf("Note: Could not initialize registration_enabled setting: %v", err)
-		}
-	}
+	// Don't close the migrate instance - with sqlite3.WithInstance, closing it
+	// may close the underlying database connection. The migrate instance will
+	// be garbage collected when it goes out of scope.
+	// If we need to close it later, we can store it in the DB struct and close
+	// it when DB.Close() is called, but for now we'll let it be GC'd.
+
+	log.Printf("Database migrations completed successfully")
 	return nil
 }

 // Ping checks the database connection
 func (db *DB) Ping() error {
-	db.mu.Lock()
-	defer db.mu.Unlock()
 	return db.db.Ping()
 }

 // Close closes the database connection
 func (db *DB) Close() error {
-	db.mu.Lock()
-	defer db.mu.Unlock()
 	return db.db.Close()
 }
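Reviewer note: a quick sketch of the With/WithTx API from a caller's side, assuming code inside the jiggablend module. Table and column names are taken from the schema above; the specific IDs and updates are illustrative, not from this diff.

package main

import (
	"database/sql"
	"log"

	"jiggablend/internal/database"
)

func main() {
	db, err := database.NewDB("jiggablend.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Atomically mark a task and its job as running; returning an error
	// rolls both updates back, returning nil commits them.
	err = db.WithTx(func(tx *sql.Tx) error {
		if _, err := tx.Exec(
			`UPDATE tasks SET status = 'running', started_at = CURRENT_TIMESTAMP WHERE id = ?`, 99); err != nil {
			return err
		}
		_, err := tx.Exec(`UPDATE jobs SET status = 'running' WHERE id = ?`, 42)
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
}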
@@ -11,7 +11,7 @@ import (
 )

 // handleGenerateRunnerAPIKey generates a new runner API key
-func (s *Server) handleGenerateRunnerAPIKey(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleGenerateRunnerAPIKey(w http.ResponseWriter, r *http.Request) {
 	userID, err := getUserID(r)
 	if err != nil {
 		s.respondError(w, http.StatusUnauthorized, err.Error())
@@ -62,7 +62,7 @@ func (s *Server) handleGenerateRunnerAPIKey(w http.ResponseWriter, r *http.Reque
 }

 // handleListRunnerAPIKeys lists all runner API keys
-func (s *Server) handleListRunnerAPIKeys(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleListRunnerAPIKeys(w http.ResponseWriter, r *http.Request) {
 	keys, err := s.secrets.ListRunnerAPIKeys()
 	if err != nil {
 		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to list API keys: %v", err))
@@ -90,7 +90,7 @@ func (s *Server) handleListRunnerAPIKeys(w http.ResponseWriter, r *http.Request)
 }

 // handleRevokeRunnerAPIKey revokes a runner API key
-func (s *Server) handleRevokeRunnerAPIKey(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleRevokeRunnerAPIKey(w http.ResponseWriter, r *http.Request) {
 	keyID, err := parseID(r, "id")
 	if err != nil {
 		s.respondError(w, http.StatusBadRequest, err.Error())
@@ -106,7 +106,7 @@ func (s *Server) handleRevokeRunnerAPIKey(w http.ResponseWriter, r *http.Request
 }

 // handleDeleteRunnerAPIKey deletes a runner API key
-func (s *Server) handleDeleteRunnerAPIKey(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleDeleteRunnerAPIKey(w http.ResponseWriter, r *http.Request) {
 	keyID, err := parseID(r, "id")
 	if err != nil {
 		s.respondError(w, http.StatusBadRequest, err.Error())
@@ -122,7 +122,7 @@ func (s *Server) handleDeleteRunnerAPIKey(w http.ResponseWriter, r *http.Request
 }

 // handleVerifyRunner manually verifies a runner
-func (s *Server) handleVerifyRunner(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleVerifyRunner(w http.ResponseWriter, r *http.Request) {
 	runnerID, err := parseID(r, "id")
 	if err != nil {
 		s.respondError(w, http.StatusBadRequest, err.Error())
@@ -153,7 +153,7 @@ func (s *Server) handleVerifyRunner(w http.ResponseWriter, r *http.Request) {
 }

 // handleDeleteRunner removes a runner
-func (s *Server) handleDeleteRunner(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleDeleteRunner(w http.ResponseWriter, r *http.Request) {
 	runnerID, err := parseID(r, "id")
 	if err != nil {
 		s.respondError(w, http.StatusBadRequest, err.Error())
@@ -184,15 +184,15 @@ func (s *Server) handleDeleteRunner(w http.ResponseWriter, r *http.Request) {
 }

 // handleListRunnersAdmin lists all runners with admin details
-func (s *Server) handleListRunnersAdmin(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleListRunnersAdmin(w http.ResponseWriter, r *http.Request) {
 	var rows *sql.Rows
 	err := s.db.With(func(conn *sql.DB) error {
 		var err error
 		rows, err = conn.Query(
 			`SELECT id, name, hostname, status, last_heartbeat, capabilities,
			        api_key_id, api_key_scope, priority, created_at
			 FROM runners ORDER BY created_at DESC`,
 		)
 		return err
 	})
 	if err != nil {
@@ -201,15 +201,6 @@ func (s *Server) handleListRunnersAdmin(w http.ResponseWriter, r *http.Request)
 	}
 	defer rows.Close()

-	// Get the set of currently connected runners via WebSocket
-	// This is the source of truth for online status
-	s.runnerConnsMu.RLock()
-	connectedRunners := make(map[int64]bool)
-	for runnerID := range s.runnerConns {
-		connectedRunners[runnerID] = true
-	}
-	s.runnerConnsMu.RUnlock()
-
 	runners := []map[string]interface{}{}
 	for rows.Next() {
 		var runner types.Runner
@@ -226,21 +217,13 @@ func (s *Server) handleListRunnersAdmin(w http.ResponseWriter, r *http.Request)
 			return
 		}

-		// Override status based on actual WebSocket connection state
-		// The WebSocket connection is the source of truth for runner status
-		actualStatus := runner.Status
-		if connectedRunners[runner.ID] {
-			actualStatus = types.RunnerStatusOnline
-		} else if runner.Status == types.RunnerStatusOnline {
-			// Database says online but not connected via WebSocket - mark as offline
-			actualStatus = types.RunnerStatusOffline
-		}
-
+		// In polling model, database status is the source of truth
+		// Runners update their status when they poll for jobs
 		runners = append(runners, map[string]interface{}{
 			"id":             runner.ID,
 			"name":           runner.Name,
 			"hostname":       runner.Hostname,
-			"status":         actualStatus,
+			"status":         runner.Status,
 			"last_heartbeat": runner.LastHeartbeat,
 			"capabilities":   runner.Capabilities,
 			"api_key_id":     apiKeyID.Int64,
@@ -254,7 +237,7 @@ func (s *Server) handleListRunnersAdmin(w http.ResponseWriter, r *http.Request)
 }

 // handleListUsers lists all users
-func (s *Server) handleListUsers(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleListUsers(w http.ResponseWriter, r *http.Request) {
 	// Get first user ID to mark it in the response
 	firstUserID, err := s.auth.GetFirstUserID()
 	if err != nil {
@@ -266,9 +249,9 @@ func (s *Server) handleListUsers(w http.ResponseWriter, r *http.Request) {
 	err = s.db.With(func(conn *sql.DB) error {
 		var err error
 		rows, err = conn.Query(
 			`SELECT id, email, name, oauth_provider, is_admin, created_at
			 FROM users ORDER BY created_at DESC`,
 		)
 		return err
 	})
 	if err != nil {
@@ -315,7 +298,7 @@ func (s *Server) handleListUsers(w http.ResponseWriter, r *http.Request) {
 }

 // handleGetUserJobs gets all jobs for a specific user
-func (s *Server) handleGetUserJobs(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleGetUserJobs(w http.ResponseWriter, r *http.Request) {
 	userID, err := parseID(r, "id")
 	if err != nil {
 		s.respondError(w, http.StatusBadRequest, err.Error())
@@ -336,11 +319,11 @@ func (s *Server) handleGetUserJobs(w http.ResponseWriter, r *http.Request) {
 	err = s.db.With(func(conn *sql.DB) error {
 		var err error
 		rows, err = conn.Query(
 			`SELECT id, user_id, job_type, name, status, progress, frame_start, frame_end, output_format,
-			        allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message
+			        blend_metadata, created_at, started_at, completed_at, error_message
			 FROM jobs WHERE user_id = ? ORDER BY created_at DESC`,
 			userID,
 		)
 		return err
 	})
 	if err != nil {
@@ -358,11 +341,9 @@ func (s *Server) handleGetUserJobs(w http.ResponseWriter, r *http.Request) {
 		var errorMessage sql.NullString
 		var frameStart, frameEnd sql.NullInt64
 		var outputFormat sql.NullString
-		var allowParallelRunners sql.NullBool

 		err := rows.Scan(
 			&job.ID, &job.UserID, &jobType, &job.Name, &job.Status, &job.Progress,
-			&frameStart, &frameEnd, &outputFormat, &allowParallelRunners, &job.TimeoutSeconds,
+			&frameStart, &frameEnd, &outputFormat,
 			&blendMetadataJSON, &job.CreatedAt, &startedAt, &completedAt, &errorMessage,
 		)
 		if err != nil {
@@ -382,9 +363,6 @@ func (s *Server) handleGetUserJobs(w http.ResponseWriter, r *http.Request) {
 		if outputFormat.Valid {
 			job.OutputFormat = &outputFormat.String
 		}
-		if allowParallelRunners.Valid {
-			job.AllowParallelRunners = &allowParallelRunners.Bool
-		}
 		if startedAt.Valid {
 			job.StartedAt = &startedAt.Time
 		}
@@ -408,7 +386,7 @@ func (s *Server) handleGetUserJobs(w http.ResponseWriter, r *http.Request) {
 }

 // handleGetRegistrationEnabled gets the registration enabled setting
-func (s *Server) handleGetRegistrationEnabled(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleGetRegistrationEnabled(w http.ResponseWriter, r *http.Request) {
 	enabled, err := s.auth.IsRegistrationEnabled()
 	if err != nil {
 		s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to get registration setting: %v", err))
@@ -418,7 +396,7 @@ func (s *Server) handleGetRegistrationEnabled(w http.ResponseWriter, r *http.Req
 }

 // handleSetRegistrationEnabled sets the registration enabled setting
-func (s *Server) handleSetRegistrationEnabled(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleSetRegistrationEnabled(w http.ResponseWriter, r *http.Request) {
 	var req struct {
 		Enabled bool `json:"enabled"`
 	}
@@ -436,7 +414,7 @@ func (s *Server) handleSetRegistrationEnabled(w http.ResponseWriter, r *http.Req
 }

 // handleSetUserAdminStatus sets a user's admin status (admin only)
-func (s *Server) handleSetUserAdminStatus(w http.ResponseWriter, r *http.Request) {
+func (s *Manager) handleSetUserAdminStatus(w http.ResponseWriter, r *http.Request) {
 	targetUserID, err := parseID(r, "id")
 	if err != nil {
 		s.respondError(w, http.StatusBadRequest, err.Error())
831
internal/manager/blender.go
Normal file
831
internal/manager/blender.go
Normal file
@@ -0,0 +1,831 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
BlenderDownloadBaseURL = "https://download.blender.org/release/"
|
||||
BlenderVersionCacheTTL = 1 * time.Hour
|
||||
)
|
||||
|
||||
// BlenderVersion represents a parsed Blender version
|
||||
type BlenderVersion struct {
|
||||
Major int `json:"major"`
|
||||
Minor int `json:"minor"`
|
||||
Patch int `json:"patch"`
|
||||
Full string `json:"full"` // e.g., "4.2.3"
|
||||
DirName string `json:"dir_name"` // e.g., "Blender4.2"
|
||||
Filename string `json:"filename"` // e.g., "blender-4.2.3-linux-x64.tar.xz"
|
||||
URL string `json:"url"` // Full download URL
|
||||
}
|
||||
|
||||
// BlenderVersionCache caches available Blender versions
|
||||
type BlenderVersionCache struct {
|
||||
versions []BlenderVersion
|
||||
fetchedAt time.Time
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
var blenderVersionCache = &BlenderVersionCache{}
|
||||
|
||||
// FetchBlenderVersions fetches available Blender versions from download.blender.org
|
||||
// Returns versions sorted by version number (newest first)
|
||||
func (s *Manager) FetchBlenderVersions() ([]BlenderVersion, error) {
|
||||
// Check cache first
|
||||
blenderVersionCache.mu.RLock()
|
||||
if time.Since(blenderVersionCache.fetchedAt) < BlenderVersionCacheTTL && len(blenderVersionCache.versions) > 0 {
|
||||
versions := make([]BlenderVersion, len(blenderVersionCache.versions))
|
||||
copy(versions, blenderVersionCache.versions)
|
||||
blenderVersionCache.mu.RUnlock()
|
||||
return versions, nil
|
||||
}
|
||||
blenderVersionCache.mu.RUnlock()
|
||||
|
||||
// Fetch from website with timeout
|
||||
client := &http.Client{
|
||||
Timeout: WSWriteDeadline,
|
||||
}
|
||||
resp, err := client.Get(BlenderDownloadBaseURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch blender releases: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed to fetch blender releases: status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response: %w", err)
|
||||
}
|
||||
|
||||
// Parse directory listing for Blender version folders
|
||||
// Looking for patterns like href="Blender4.2/" or href="Blender3.6/"
|
||||
dirPattern := regexp.MustCompile(`href="Blender(\d+)\.(\d+)/"`)
|
||||
log.Printf("Fetching Blender versions from %s", BlenderDownloadBaseURL)
|
||||
matches := dirPattern.FindAllStringSubmatch(string(body), -1)
|
||||
|
||||
// Fetch sub-versions concurrently to speed up the process
|
||||
type versionResult struct {
|
||||
versions []BlenderVersion
|
||||
err error
|
||||
}
|
||||
results := make(chan versionResult, len(matches))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, match := range matches {
|
||||
if len(match) < 3 {
|
||||
continue
|
||||
}
|
||||
|
||||
major := 0
|
||||
minor := 0
|
||||
fmt.Sscanf(match[1], "%d", &major)
|
||||
fmt.Sscanf(match[2], "%d", &minor)
|
||||
|
||||
// Skip very old versions (pre-2.80)
|
||||
if major < 2 || (major == 2 && minor < 80) {
|
||||
continue
|
||||
}
|
||||
|
||||
dirName := fmt.Sprintf("Blender%d.%d", major, minor)
|
||||
|
||||
// Fetch the specific version directory concurrently
|
||||
wg.Add(1)
|
||||
go func(dn string, maj, min int) {
|
||||
defer wg.Done()
|
||||
subVersions, err := fetchSubVersions(dn, maj, min)
|
||||
results <- versionResult{versions: subVersions, err: err}
|
||||
}(dirName, major, minor)
|
||||
}
|
||||
|
||||
// Close results channel when all goroutines complete
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(results)
|
||||
}()
|
||||
|
||||
var versions []BlenderVersion
|
||||
for result := range results {
|
||||
if result.err != nil {
|
||||
log.Printf("Warning: failed to fetch sub-versions: %v", result.err)
|
||||
continue
|
||||
}
|
||||
versions = append(versions, result.versions...)
|
||||
}
|
||||
|
||||
// Sort by version (newest first)
|
||||
sort.Slice(versions, func(i, j int) bool {
|
||||
if versions[i].Major != versions[j].Major {
|
||||
return versions[i].Major > versions[j].Major
|
||||
}
|
||||
if versions[i].Minor != versions[j].Minor {
|
||||
return versions[i].Minor > versions[j].Minor
|
||||
}
|
||||
return versions[i].Patch > versions[j].Patch
|
||||
})
|
||||
|
||||
// Update cache
|
||||
blenderVersionCache.mu.Lock()
|
||||
blenderVersionCache.versions = versions
|
||||
blenderVersionCache.fetchedAt = time.Now()
|
||||
blenderVersionCache.mu.Unlock()
|
||||
|
||||
return versions, nil
|
||||
}
|
||||
|
||||
// fetchSubVersions fetches specific version files from a Blender release directory
func fetchSubVersions(dirName string, major, minor int) ([]BlenderVersion, error) {
    url := BlenderDownloadBaseURL + dirName + "/"
    client := &http.Client{
        Timeout: WSWriteDeadline,
    }
    resp, err := client.Get(url)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("status %d", resp.StatusCode)
    }

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }

    // Look for linux 64-bit tar.xz/bz2 files
    // Various naming conventions across versions:
    // - Modern (2.93+): blender-4.2.3-linux-x64.tar.xz
    // - 2.83 early: blender-2.83.0-linux64.tar.xz
    // - 2.80-2.82: blender-2.80-linux-glibc217-x86_64.tar.bz2
    // Skip: rc versions, alpha/beta, i686 (32-bit)
    filePatterns := []*regexp.Regexp{
        // Modern format: blender-X.Y.Z-linux-x64.tar.xz
        regexp.MustCompile(`blender-(\d+)\.(\d+)\.(\d+)-linux-x64\.tar\.(xz|bz2)`),
        // Older format: blender-X.Y.Z-linux64.tar.xz
        regexp.MustCompile(`blender-(\d+)\.(\d+)\.(\d+)-linux64\.tar\.(xz|bz2)`),
        // glibc format: blender-X.Y.Z-linux-glibc217-x86_64.tar.bz2 (prefer glibc217 for compatibility)
        regexp.MustCompile(`blender-(\d+)\.(\d+)\.(\d+)-linux-glibc217-x86_64\.tar\.(xz|bz2)`),
    }

    var versions []BlenderVersion
    seen := make(map[string]bool)

    for _, filePattern := range filePatterns {
        matches := filePattern.FindAllStringSubmatch(string(body), -1)

        for _, match := range matches {
            if len(match) < 5 {
                continue
            }

            patch := 0
            fmt.Sscanf(match[3], "%d", &patch)

            full := fmt.Sprintf("%d.%d.%d", major, minor, patch)
            if seen[full] {
                continue
            }
            seen[full] = true

            filename := match[0]
            versions = append(versions, BlenderVersion{
                Major:    major,
                Minor:    minor,
                Patch:    patch,
                Full:     full,
                DirName:  dirName,
                Filename: filename,
                URL:      url + filename,
            })
        }
    }

    return versions, nil
}
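// Worked example (illustrative): a listing entry
// "blender-4.2.3-linux-x64.tar.xz" in directory "Blender4.2" produces
//
//	BlenderVersion{
//		Major: 4, Minor: 2, Patch: 3, Full: "4.2.3",
//		DirName:  "Blender4.2",
//		Filename: "blender-4.2.3-linux-x64.tar.xz",
//		URL:      BlenderDownloadBaseURL + "Blender4.2/blender-4.2.3-linux-x64.tar.xz",
//	}
//
// The seen map deduplicates when the same X.Y.Z matches more than one pattern.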
// GetLatestBlenderForMajorMinor returns the latest patch version for a given major.minor
// If exact match not found, uses fuzzy matching to find the closest available version
func (s *Manager) GetLatestBlenderForMajorMinor(major, minor int) (*BlenderVersion, error) {
    versions, err := s.FetchBlenderVersions()
    if err != nil {
        return nil, err
    }

    if len(versions) == 0 {
        return nil, fmt.Errorf("no blender versions available")
    }

    // Try exact match first - find the highest patch for this major.minor
    var exactMatch *BlenderVersion
    for i := range versions {
        v := &versions[i]
        if v.Major == major && v.Minor == minor {
            if exactMatch == nil || v.Patch > exactMatch.Patch {
                exactMatch = v
            }
        }
    }
    if exactMatch != nil {
        log.Printf("Found Blender %d.%d.%d for requested %d.%d", exactMatch.Major, exactMatch.Minor, exactMatch.Patch, major, minor)
        return exactMatch, nil
    }

    // Fuzzy matching: find closest version
    // Priority: same major with closest minor > closest major
    log.Printf("No exact match for Blender %d.%d, using fuzzy matching", major, minor)

    var bestMatch *BlenderVersion
    bestScore := -1000000 // Large negative number

    for i := range versions {
        v := &versions[i]
        score := 0

        if v.Major == major {
            // Same major version - prefer this
            score = 10000

            // Prefer lower minor versions (more stable/compatible)
            // but not too far back
            minorDiff := minor - v.Minor
            if minorDiff >= 0 {
                // v.Minor <= minor (older or same) - prefer closer
                score += 1000 - minorDiff*10
            } else {
                // v.Minor > minor (newer) - less preferred but acceptable
                score += 500 + minorDiff*10
            }

            // Higher patch is better
            score += v.Patch
        } else {
            // Different major - less preferred
            majorDiff := major - v.Major
            if majorDiff > 0 {
                // v.Major < major (older major) - acceptable fallback
                score = 5000 - majorDiff*1000 + v.Minor*10 + v.Patch
            } else {
                // v.Major > major (newer major) - avoid if possible
                score = -majorDiff * 1000
            }
        }

        if score > bestScore {
            bestScore = score
            bestMatch = v
        }
    }

    if bestMatch != nil {
        log.Printf("Fuzzy match: requested %d.%d, using %d.%d.%d", major, minor, bestMatch.Major, bestMatch.Minor, bestMatch.Patch)
        return bestMatch, nil
    }

    return nil, fmt.Errorf("no blender version found for %d.%d", major, minor)
}
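// Worked example of the scoring above (illustrative): a request for 4.5 when
// only 4.2.3 and 3.6.15 exist scores
//
//	4.2.3:  10000 + (1000 - 3*10) + 3 = 10973  (same major, three minors back)
//	3.6.15: 5000 - 1*1000 + 6*10 + 15 = 4075   (one major back)
//
// so the fuzzy match picks 4.2.3.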
// GetBlenderArchivePath returns the path to the cached blender archive for a specific version
// Downloads from blender.org and decompresses to .tar if not already cached
// The manager caches as uncompressed .tar to save decompression time on runners
func (s *Manager) GetBlenderArchivePath(version *BlenderVersion) (string, error) {
    // Base directory for blender archives
    blenderDir := filepath.Join(s.storage.BasePath(), "blender-versions")
    if err := os.MkdirAll(blenderDir, 0755); err != nil {
        return "", fmt.Errorf("failed to create blender directory: %w", err)
    }

    // Cache as uncompressed .tar for faster runner downloads
    // Convert filename like "blender-4.2.3-linux-x64.tar.xz" to "blender-4.2.3-linux-x64.tar"
    tarFilename := version.Filename
    tarFilename = strings.TrimSuffix(tarFilename, ".xz")
    tarFilename = strings.TrimSuffix(tarFilename, ".bz2")
    archivePath := filepath.Join(blenderDir, tarFilename)

    // Check if already cached as .tar
    if _, err := os.Stat(archivePath); err == nil {
        log.Printf("Using cached Blender %s at %s", version.Full, archivePath)
        // Clean up any extracted folders that might exist
        s.cleanupExtractedBlenderFolders(blenderDir, version)
        return archivePath, nil
    }

    // Need to download and decompress
    log.Printf("Downloading Blender %s from %s", version.Full, version.URL)

    client := &http.Client{
        Timeout: 0, // No timeout for large downloads
    }
    resp, err := client.Get(version.URL)
    if err != nil {
        return "", fmt.Errorf("failed to download blender: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return "", fmt.Errorf("failed to download blender: status %d", resp.StatusCode)
    }

    // Download to temp file first
    compressedPath := filepath.Join(blenderDir, "download-"+version.Filename)
    compressedFile, err := os.Create(compressedPath)
    if err != nil {
        return "", fmt.Errorf("failed to create temp file: %w", err)
    }

    if _, err := io.Copy(compressedFile, resp.Body); err != nil {
        compressedFile.Close()
        os.Remove(compressedPath)
        return "", fmt.Errorf("failed to download blender: %w", err)
    }
    compressedFile.Close()

    log.Printf("Downloaded Blender %s, decompressing to .tar...", version.Full)

    // Decompress to .tar
    if err := decompressToTar(compressedPath, archivePath); err != nil {
        os.Remove(compressedPath)
        os.Remove(archivePath)
        return "", fmt.Errorf("failed to decompress blender archive: %w", err)
    }

    // Remove compressed file
    os.Remove(compressedPath)

    // Clean up any extracted folders for this version (if they exist)
    s.cleanupExtractedBlenderFolders(blenderDir, version)

    log.Printf("Blender %s cached at %s", version.Full, archivePath)
    return archivePath, nil
}
// decompressToTar decompresses a .tar.xz or .tar.bz2 file to a plain .tar file
func decompressToTar(compressedPath, tarPath string) error {
    if strings.HasSuffix(compressedPath, ".tar.xz") {
        // Use xz command for decompression
        cmd := exec.Command("xz", "-d", "-k", "-c", compressedPath)
        outFile, err := os.Create(tarPath)
        if err != nil {
            return err
        }
        defer outFile.Close()

        cmd.Stdout = outFile
        if err := cmd.Run(); err != nil {
            return fmt.Errorf("xz decompression failed: %w", err)
        }
        return nil
    } else if strings.HasSuffix(compressedPath, ".tar.bz2") {
        // Use bzip2 for decompression
        inFile, err := os.Open(compressedPath)
        if err != nil {
            return err
        }
        defer inFile.Close()

        bzReader := bzip2.NewReader(inFile)
        outFile, err := os.Create(tarPath)
        if err != nil {
            return err
        }
        defer outFile.Close()

        if _, err := io.Copy(outFile, bzReader); err != nil {
            return fmt.Errorf("bzip2 decompression failed: %w", err)
        }
        return nil
    }

    return fmt.Errorf("unsupported compression format: %s", compressedPath)
}
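// The xz branch above is equivalent to the shell pipeline (illustrative):
//
//	xz -d -k -c blender-4.2.3-linux-x64.tar.xz > blender-4.2.3-linux-x64.tar
//
// -d decompress, -k keep the input file, -c write to stdout.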
// cleanupExtractedBlenderFolders removes any extracted Blender folders for the given version
// This ensures we only keep the .tar file and not extracted folders
func (s *Manager) cleanupExtractedBlenderFolders(blenderDir string, version *BlenderVersion) {
    // Look for folders matching the version (e.g., "4.2.3", "2.83.20")
    versionDirs := []string{
        filepath.Join(blenderDir, version.Full),                                       // e.g., "4.2.3"
        filepath.Join(blenderDir, fmt.Sprintf("%d.%d", version.Major, version.Minor)), // e.g., "4.2"
    }

    for _, dir := range versionDirs {
        if info, err := os.Stat(dir); err == nil && info.IsDir() {
            log.Printf("Removing extracted Blender folder: %s", dir)
            if err := os.RemoveAll(dir); err != nil {
                log.Printf("Warning: failed to remove extracted folder %s: %v", dir, err)
            } else {
                log.Printf("Removed extracted Blender folder: %s", dir)
            }
        }
    }
}
// ParseBlenderVersionFromFile parses the Blender version that a .blend file was saved with
// This reads the file header to determine the version
func ParseBlenderVersionFromFile(blendPath string) (major, minor int, err error) {
    file, err := os.Open(blendPath)
    if err != nil {
        return 0, 0, fmt.Errorf("failed to open blend file: %w", err)
    }
    defer file.Close()

    return ParseBlenderVersionFromReader(file)
}

// ParseBlenderVersionFromReader parses the Blender version from a reader
// Useful for reading from uploaded files without saving to disk first
func ParseBlenderVersionFromReader(r io.ReadSeeker) (major, minor int, err error) {
    // Read the first 12 bytes of the blend file header
    // Format: BLENDER-v<major><minor><patch> or BLENDER_v<major><minor><patch>
    // The header is: "BLENDER" (7 bytes) + pointer size (1 byte: '-' for 64-bit, '_' for 32-bit)
    // + endianness (1 byte: 'v' for little-endian, 'V' for big-endian)
    // + version (3 bytes: e.g., "402" for 4.02)
    header := make([]byte, 12)
    n, err := r.Read(header)
    if err != nil || n < 12 {
        return 0, 0, fmt.Errorf("failed to read blend file header: %w", err)
    }

    // Check for BLENDER magic
    if string(header[:7]) != "BLENDER" {
        // Might be compressed - try to decompress
        r.Seek(0, 0)
        return parseCompressedBlendVersion(r)
    }

    // Parse version from bytes 9-11 (3 digits)
    versionStr := string(header[9:12])
    var vMajor, vMinor int

    // Version format changed in Blender 3.0
    // Pre-3.0: "279" = 2.79, "280" = 2.80
    // 3.0+: "300" = 3.0, "402" = 4.02, "410" = 4.10
    if len(versionStr) == 3 {
        // First digit is major version
        fmt.Sscanf(string(versionStr[0]), "%d", &vMajor)
        // Next two digits are minor version
        fmt.Sscanf(versionStr[1:3], "%d", &vMinor)
    }

    return vMajor, vMinor, nil
}
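// A minimal sketch of the header mapping (illustrative, not part of this
// commit): the 12-byte header "BLENDER-v402" is a 64-bit little-endian file
// with version digits "402", which parse to 4.2 under the 3.0+ scheme.
// strings.NewReader satisfies io.ReadSeeker, so this exercises the real parser:
//
//	func exampleParseHeader() {
//		r := strings.NewReader("BLENDER-v402")
//		major, minor, _ := ParseBlenderVersionFromReader(r)
//		fmt.Printf("%d.%d\n", major, minor) // 4.2
//	}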
// parseCompressedBlendVersion handles gzip and zstd compressed blend files
func parseCompressedBlendVersion(r io.ReadSeeker) (major, minor int, err error) {
    // Check for compression magic bytes
    magic := make([]byte, 4)
    if _, err := r.Read(magic); err != nil {
        return 0, 0, err
    }
    r.Seek(0, 0)

    if magic[0] == 0x1f && magic[1] == 0x8b {
        // gzip compressed
        gzReader, err := gzip.NewReader(r)
        if err != nil {
            return 0, 0, fmt.Errorf("failed to create gzip reader: %w", err)
        }
        defer gzReader.Close()

        header := make([]byte, 12)
        n, err := gzReader.Read(header)
        if err != nil || n < 12 {
            return 0, 0, fmt.Errorf("failed to read compressed blend header: %w", err)
        }

        if string(header[:7]) != "BLENDER" {
            return 0, 0, fmt.Errorf("invalid blend file format")
        }

        versionStr := string(header[9:12])
        var vMajor, vMinor int
        if len(versionStr) == 3 {
            fmt.Sscanf(string(versionStr[0]), "%d", &vMajor)
            fmt.Sscanf(versionStr[1:3], "%d", &vMinor)
        }

        return vMajor, vMinor, nil
    }

    // Check for zstd magic (Blender 3.0+): 0x28 0xB5 0x2F 0xFD
    if magic[0] == 0x28 && magic[1] == 0xb5 && magic[2] == 0x2f && magic[3] == 0xfd {
        return parseZstdBlendVersion(r)
    }

    return 0, 0, fmt.Errorf("unknown blend file format")
}
// parseZstdBlendVersion handles zstd-compressed blend files (Blender 3.0+)
// Uses zstd command line tool since Go doesn't have native zstd support
func parseZstdBlendVersion(r io.ReadSeeker) (major, minor int, err error) {
    r.Seek(0, 0)

    // We need to decompress just enough to read the header
    // Use zstd command to decompress from stdin
    cmd := exec.Command("zstd", "-d", "-c")
    cmd.Stdin = r

    stdout, err := cmd.StdoutPipe()
    if err != nil {
        return 0, 0, fmt.Errorf("failed to create zstd stdout pipe: %w", err)
    }

    if err := cmd.Start(); err != nil {
        return 0, 0, fmt.Errorf("failed to start zstd decompression: %w", err)
    }

    // Read just the header (12 bytes)
    header := make([]byte, 12)
    n, readErr := io.ReadFull(stdout, header)

    // Kill the process early - we only need the header
    cmd.Process.Kill()
    cmd.Wait()

    if readErr != nil || n < 12 {
        return 0, 0, fmt.Errorf("failed to read zstd compressed blend header: %v", readErr)
    }

    if string(header[:7]) != "BLENDER" {
        return 0, 0, fmt.Errorf("invalid blend file format in zstd archive")
    }

    versionStr := string(header[9:12])
    var vMajor, vMinor int
    if len(versionStr) == 3 {
        fmt.Sscanf(string(versionStr[0]), "%d", &vMajor)
        fmt.Sscanf(versionStr[1:3], "%d", &vMinor)
    }

    return vMajor, vMinor, nil
}
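// Shell equivalent of the early-exit read above (illustrative):
//
//	zstd -d -c scene.blend | head -c 12
//
// Killing zstd after the first 12 bytes avoids decompressing the whole file
// just to learn which Blender version saved it.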
// handleGetBlenderVersions returns available Blender versions
func (s *Manager) handleGetBlenderVersions(w http.ResponseWriter, r *http.Request) {
    versions, err := s.FetchBlenderVersions()
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("failed to fetch blender versions: %v", err))
        return
    }

    // Group by major.minor for easier frontend display
    type VersionGroup struct {
        MajorMinor string           `json:"major_minor"`
        Latest     BlenderVersion   `json:"latest"`
        All        []BlenderVersion `json:"all"`
    }

    groups := make(map[string]*VersionGroup)
    for _, v := range versions {
        key := fmt.Sprintf("%d.%d", v.Major, v.Minor)
        if groups[key] == nil {
            groups[key] = &VersionGroup{
                MajorMinor: key,
                Latest:     v, // First one is latest due to sorting
                All:        []BlenderVersion{v},
            }
        } else {
            groups[key].All = append(groups[key].All, v)
        }
    }

    // Convert to slice and sort by version
    var groupedResult []VersionGroup
    for _, g := range groups {
        groupedResult = append(groupedResult, *g)
    }
    sort.Slice(groupedResult, func(i, j int) bool {
        // Parse major.minor for comparison
        var iMaj, iMin, jMaj, jMin int
        fmt.Sscanf(groupedResult[i].MajorMinor, "%d.%d", &iMaj, &iMin)
        fmt.Sscanf(groupedResult[j].MajorMinor, "%d.%d", &jMaj, &jMin)
        if iMaj != jMaj {
            return iMaj > jMaj
        }
        return iMin > jMin
    })

    // Return both flat list and grouped for flexibility
    response := map[string]interface{}{
        "versions": versions,      // Flat list of all versions (newest first)
        "grouped":  groupedResult, // Grouped by major.minor
    }

    s.respondJSON(w, http.StatusOK, response)
}
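// Illustrative response shape (values are examples; the BlenderVersion field
// names depend on that struct's JSON tags, which are defined elsewhere in
// this package):
//
//	{
//	  "versions": [{"major": 4, "minor": 2, "patch": 3, "full": "4.2.3", ...}],
//	  "grouped": [
//	    {"major_minor": "4.2", "latest": {...}, "all": [{...}, {...}]}
//	  ]
//	}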
// handleDownloadBlender serves a cached Blender archive to runners
func (s *Manager) handleDownloadBlender(w http.ResponseWriter, r *http.Request) {
    version := r.URL.Query().Get("version")
    if version == "" {
        s.respondError(w, http.StatusBadRequest, "version parameter required")
        return
    }

    // Parse version string (e.g., "4.2.3" or "4.2")
    var major, minor, patch int
    parts := strings.Split(version, ".")
    if len(parts) < 2 {
        s.respondError(w, http.StatusBadRequest, "invalid version format, expected major.minor or major.minor.patch")
        return
    }

    fmt.Sscanf(parts[0], "%d", &major)
    fmt.Sscanf(parts[1], "%d", &minor)
    if len(parts) >= 3 {
        fmt.Sscanf(parts[2], "%d", &patch)
    }

    // Find the version
    var blenderVersion *BlenderVersion
    if len(parts) >= 3 {
        // Exact patch version requested - find it
        versions, err := s.FetchBlenderVersions()
        if err != nil {
            s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("failed to fetch versions: %v", err))
            return
        }

        for _, v := range versions {
            if v.Major == major && v.Minor == minor && v.Patch == patch {
                blenderVersion = &v
                break
            }
        }

        if blenderVersion == nil {
            s.respondError(w, http.StatusNotFound, fmt.Sprintf("blender version %s not found", version))
            return
        }
    } else {
        // Major.minor only - use helper to get latest patch version
        var err error
        blenderVersion, err = s.GetLatestBlenderForMajorMinor(major, minor)
        if err != nil {
            s.respondError(w, http.StatusNotFound, fmt.Sprintf("blender version %s not found: %v", version, err))
            return
        }
    }

    // Get or download the archive
    archivePath, err := s.GetBlenderArchivePath(blenderVersion)
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get blender archive: %v", err))
        return
    }

    // Serve the file
    file, err := os.Open(archivePath)
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("failed to open archive: %v", err))
        return
    }
    defer file.Close()

    stat, err := file.Stat()
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("failed to stat archive: %v", err))
        return
    }

    // Filename is now .tar (decompressed)
    tarFilename := blenderVersion.Filename
    tarFilename = strings.TrimSuffix(tarFilename, ".xz")
    tarFilename = strings.TrimSuffix(tarFilename, ".bz2")

    w.Header().Set("Content-Type", "application/x-tar")
    w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", tarFilename))
    w.Header().Set("Content-Length", fmt.Sprintf("%d", stat.Size()))
    w.Header().Set("X-Blender-Version", blenderVersion.Full)

    io.Copy(w, file)
}
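// Illustrative request/response (the route prefix comes from setupRoutes and
// the runner auth header is assumed, not shown here):
//
//	GET .../blender/download?version=4.2
//	→ 200 OK
//	  Content-Type: application/x-tar
//	  Content-Disposition: attachment; filename=blender-4.2.3-linux-x64.tar
//	  X-Blender-Version: 4.2.3
//
// Passing "4.2" resolves to the newest cached 4.2.x patch release.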
// Unused functions from extraction - keeping for reference but not needed on manager
var _ = extractBlenderArchive
var _ = extractTarXz
var _ = extractTar

// extractBlenderArchive extracts a blender archive (already decompressed to .tar by GetBlenderArchivePath)
func extractBlenderArchive(archivePath string, version *BlenderVersion, destDir string) error {
    file, err := os.Open(archivePath)
    if err != nil {
        return err
    }
    defer file.Close()

    // The archive is already decompressed to .tar by GetBlenderArchivePath
    // Just extract it directly
    if strings.HasSuffix(archivePath, ".tar") {
        tarReader := tar.NewReader(file)
        return extractTar(tarReader, version, destDir)
    }

    // Fallback for any other format (shouldn't happen with current flow)
    if strings.HasSuffix(archivePath, ".tar.xz") {
        return extractTarXz(archivePath, version, destDir)
    } else if strings.HasSuffix(archivePath, ".tar.bz2") {
        bzReader := bzip2.NewReader(file)
        tarReader := tar.NewReader(bzReader)
        return extractTar(tarReader, version, destDir)
    }

    return fmt.Errorf("unsupported archive format: %s", archivePath)
}
// extractTarXz extracts a tar.xz archive using tar's xz support (-J)
func extractTarXz(archivePath string, version *BlenderVersion, destDir string) error {
    versionDir := filepath.Join(destDir, version.Full)
    if err := os.MkdirAll(versionDir, 0755); err != nil {
        return err
    }

    cmd := exec.Command("tar", "-xJf", archivePath, "-C", versionDir, "--strip-components=1")
    output, err := cmd.CombinedOutput()
    if err != nil {
        return fmt.Errorf("tar extraction failed: %v, output: %s", err, string(output))
    }

    return nil
}
// extractTar extracts files from a tar reader
func extractTar(tarReader *tar.Reader, version *BlenderVersion, destDir string) error {
    versionDir := filepath.Join(destDir, version.Full)
    if err := os.MkdirAll(versionDir, 0755); err != nil {
        return err
    }

    stripPrefix := ""

    for {
        header, err := tarReader.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            return err
        }

        if stripPrefix == "" {
            parts := strings.SplitN(header.Name, "/", 2)
            if len(parts) > 0 {
                stripPrefix = parts[0] + "/"
            }
        }

        name := strings.TrimPrefix(header.Name, stripPrefix)
        if name == "" {
            continue
        }

        targetPath := filepath.Join(versionDir, name)

        switch header.Typeflag {
        case tar.TypeDir:
            if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
                return err
            }
        case tar.TypeReg:
            if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
                return err
            }
            outFile, err := os.OpenFile(targetPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode))
            if err != nil {
                return err
            }
            if _, err := io.Copy(outFile, tarReader); err != nil {
                outFile.Close()
                return err
            }
            outFile.Close()
        case tar.TypeSymlink:
            if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
                return err
            }
            if err := os.Symlink(header.Linkname, targetPath); err != nil {
                return err
            }
        }
    }

    return nil
}
File diff suppressed because it is too large
@@ -9,6 +9,7 @@ import (
    "log"
    "net/http"
    "os"
    "os/exec"
    "path/filepath"
    "runtime"
    "strconv"
@@ -37,12 +38,10 @@ const (
    WSWriteDeadline = 10 * time.Second

    // Task timeouts
    DefaultTaskTimeout     = 300   // 5 minutes for frame rendering
    VideoGenerationTimeout = 86400 // 24 hours for video generation
    DefaultJobTimeout      = 86400 // 24 hours
    RenderTimeout      = 60 * 60      // 1 hour for frame rendering
    VideoEncodeTimeout = 60 * 60 * 24 // 24 hours for encoding

    // Limits
    MaxFrameRange            = 10000
    MaxUploadSize            = 50 << 30 // 50 GB
    RunnerHeartbeatTimeout   = 90 * time.Second
    TaskDistributionInterval = 10 * time.Second
@@ -52,8 +51,8 @@ const (
    SessionCookieMaxAge = 86400 // 24 hours
)

// Server represents the API server
type Server struct {
// Manager represents the manager server
type Manager struct {
    db   *database.DB
    cfg  *config.Config
    auth *authpkg.Auth
@@ -62,14 +61,9 @@ type Server struct {
    router *chi.Mux

    // WebSocket connections
    wsUpgrader    websocket.Upgrader
    runnerConns   map[int64]*websocket.Conn
    runnerConnsMu sync.RWMutex
    // Mutexes for each runner connection to serialize writes
    runnerConnsWriteMu   map[int64]*sync.Mutex
    runnerConnsWriteMuMu sync.RWMutex
    wsUpgrader websocket.Upgrader

    // DEPRECATED: Old WebSocket connection maps (kept for backwards compatibility)
    // DEPRECATED: Old frontend WebSocket connection maps (kept for backwards compatibility)
    // These will be removed in a future release. Use clientConns instead.
    frontendConns   map[string]*websocket.Conn // key: "jobId:taskId"
    frontendConnsMu sync.RWMutex
@@ -82,18 +76,25 @@ type Server struct {
    jobConnsWriteMu   map[string]*sync.Mutex
    jobConnsWriteMuMu sync.RWMutex

    // Per-job runner WebSocket connections (polling-based flow)
    // Key is "job-{jobId}-task-{taskId}"
    runnerJobConns          map[string]*websocket.Conn
    runnerJobConnsMu        sync.RWMutex
    runnerJobConnsWriteMu   map[string]*sync.Mutex
    runnerJobConnsWriteMuMu sync.RWMutex

    // Throttling for progress updates (per job)
    progressUpdateTimes   map[int64]time.Time // key: jobID
    progressUpdateTimesMu sync.RWMutex
    // Throttling for task status updates (per task)
    taskUpdateTimes   map[int64]time.Time // key: taskID
    taskUpdateTimesMu sync.RWMutex
    // Task distribution serialization
    taskDistMu sync.Mutex // Mutex to prevent concurrent distribution

    // Client WebSocket connections (new unified WebSocket)
    clientConns map[int64]*ClientConnection // key: userID
    // Key is "userID:connID" to support multiple tabs per user
    clientConns   map[string]*ClientConnection
    clientConnsMu sync.RWMutex
    connIDCounter uint64 // Atomic counter for generating unique connection IDs

    // Upload session tracking
    uploadSessions map[string]*UploadSession // sessionId -> session info
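// Illustrative sketch (not part of this diff) of how the "userID:connID" key
// can be derived from the atomic counter declared above; the actual handler
// may differ:
//
//	id := atomic.AddUint64(&s.connIDCounter, 1)
//	key := fmt.Sprintf("%d:%d", userID, id)
//	s.clientConnsMu.Lock()
//	s.clientConns[key] = &ClientConnection{Conn: conn, UserID: userID, ConnID: key}
//	s.clientConnsMu.Unlock()
//
// Keying by connection rather than by user lets one user hold several tabs open.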
@@ -110,6 +111,7 @@ type Server struct {
type ClientConnection struct {
    Conn          *websocket.Conn
    UserID        int64
    ConnID        string // Unique connection ID (userID:connID)
    IsAdmin       bool
    Subscriptions map[string]bool // channel -> subscribed
    SubsMu        sync.RWMutex    // Protects Subscriptions map
@@ -126,14 +128,14 @@ type UploadSession struct {
    CreatedAt time.Time
}

// NewServer creates a new API server
func NewServer(db *database.DB, cfg *config.Config, auth *authpkg.Auth, storage *storage.Storage) (*Server, error) {
// NewManager creates a new manager server
func NewManager(db *database.DB, cfg *config.Config, auth *authpkg.Auth, storage *storage.Storage) (*Manager, error) {
    secrets, err := authpkg.NewSecrets(db, cfg)
    if err != nil {
        return nil, fmt.Errorf("failed to initialize secrets: %w", err)
    }

    s := &Server{
    s := &Manager{
        db:   db,
        cfg:  cfg,
        auth: auth,
@@ -146,9 +148,7 @@ func NewServer(db *database.DB, cfg *config.Config, auth *authpkg.Auth, storage
            ReadBufferSize:  1024,
            WriteBufferSize: 1024,
        },
        runnerConns:        make(map[int64]*websocket.Conn),
        runnerConnsWriteMu: make(map[int64]*sync.Mutex),
        // DEPRECATED: Initialize old WebSocket maps for backward compatibility
        // DEPRECATED: Initialize old frontend WebSocket maps for backward compatibility
        frontendConns:        make(map[string]*websocket.Conn),
        frontendConnsWriteMu: make(map[string]*sync.Mutex),
        jobListConns:         make(map[int64]*websocket.Conn),
@@ -156,8 +156,17 @@ func NewServer(db *database.DB, cfg *config.Config, auth *authpkg.Auth, storage
        jobConnsWriteMu:     make(map[string]*sync.Mutex),
        progressUpdateTimes: make(map[int64]time.Time),
        taskUpdateTimes:     make(map[int64]time.Time),
        clientConns:         make(map[int64]*ClientConnection),
        clientConns:         make(map[string]*ClientConnection),
        uploadSessions:      make(map[string]*UploadSession),
        // Per-job runner WebSocket connections
        runnerJobConns:          make(map[string]*websocket.Conn),
        runnerJobConnsWriteMu:   make(map[string]*sync.Mutex),
        runnerJobConnsWriteMuMu: sync.RWMutex{}, // Initialize the new field
    }

    // Check for required external tools
    if err := s.checkRequiredTools(); err != nil {
        return nil, err
    }

    s.setupMiddleware()
@@ -171,6 +180,23 @@ func NewServer(db *database.DB, cfg *config.Config, auth *authpkg.Auth, storage
    return s, nil
}

// checkRequiredTools verifies that required external tools are available
func (s *Manager) checkRequiredTools() error {
    // Check for zstd (required for zstd-compressed blend files)
    if err := exec.Command("zstd", "--version").Run(); err != nil {
        return fmt.Errorf("zstd not found - required for compressed blend file support. Install with: apt install zstd")
    }
    log.Printf("Found zstd for compressed blend file support")

    // Check for xz (required for decompressing blender archives)
    if err := exec.Command("xz", "--version").Run(); err != nil {
        return fmt.Errorf("xz not found - required for decompressing blender archives. Install with: apt install xz-utils")
    }
    log.Printf("Found xz for blender archive decompression")

    return nil
}
// checkWebSocketOrigin validates WebSocket connection origins
// In production mode, only allows same-origin connections or configured allowed origins
func checkWebSocketOrigin(r *http.Request) bool {
@@ -323,7 +349,7 @@ func rateLimitMiddleware(limiter *RateLimiter) func(http.Handler) http.Handler {
}

// setupMiddleware configures middleware
func (s *Server) setupMiddleware() {
func (s *Manager) setupMiddleware() {
    s.router.Use(middleware.Logger)
    s.router.Use(middleware.Recoverer)
    // Note: Timeout middleware is NOT applied globally to avoid conflicts with WebSocket connections
@@ -416,7 +442,7 @@ func (w *gzipResponseWriter) WriteHeader(statusCode int) {
}

// setupRoutes configures routes
func (s *Server) setupRoutes() {
func (s *Manager) setupRoutes() {
    // Health check endpoint (unauthenticated)
    s.router.Get("/api/health", s.handleHealthCheck)

@@ -457,13 +483,13 @@ func (s *Server) setupRoutes() {
    r.Get("/{id}/files/count", s.handleGetJobFilesCount)
    r.Get("/{id}/context", s.handleListContextArchive)
    r.Get("/{id}/files/{fileId}/download", s.handleDownloadJobFile)
    r.Get("/{id}/files/{fileId}/preview-exr", s.handlePreviewEXR)
    r.Get("/{id}/video", s.handleStreamVideo)
    r.Get("/{id}/metadata", s.handleGetJobMetadata)
    r.Get("/{id}/tasks", s.handleListJobTasks)
    r.Get("/{id}/tasks/summary", s.handleListJobTasksSummary)
    r.Post("/{id}/tasks/batch", s.handleBatchGetTasks)
    r.Get("/{id}/tasks/{taskId}/logs", s.handleGetTaskLogs)
    // Old WebSocket route removed - use client WebSocket with subscriptions instead
    r.Get("/{id}/tasks/{taskId}/steps", s.handleGetTaskSteps)
    r.Post("/{id}/tasks/{taskId}/retry", s.handleRetryTask)
    // WebSocket route for unified client WebSocket
@@ -510,38 +536,40 @@ func (s *Server) setupRoutes() {
    // Registration doesn't require auth (uses token)
    r.With(middleware.Timeout(60*time.Second)).Post("/register", s.handleRegisterRunner)

    // WebSocket endpoint (auth handled in handler) - no timeout middleware
    r.Get("/ws", s.handleRunnerWebSocket)
    // Polling-based endpoints (auth handled in handlers)
    r.Get("/workers/{id}/next-job", s.handleNextJob)

    // File operations still use HTTP (WebSocket not suitable for large files)
    // Per-job endpoints with job_token auth (no middleware, auth in handler)
    r.Get("/jobs/{jobId}/ws", s.handleRunnerJobWebSocket)
    r.Get("/jobs/{jobId}/context.tar", s.handleDownloadJobContextWithToken)
    r.Post("/jobs/{jobId}/upload", s.handleUploadFileWithToken)

    // Runner API endpoints (uses API key auth)
    r.Group(func(r chi.Router) {
        r.Use(func(next http.Handler) http.Handler {
            return http.HandlerFunc(s.runnerAuthMiddleware(next.ServeHTTP))
        })
        r.Get("/ping", s.handleRunnerPing)
        r.Post("/tasks/{id}/progress", s.handleUpdateTaskProgress)
        r.Post("/tasks/{id}/steps", s.handleUpdateTaskStep)
        r.Get("/jobs/{jobId}/context.tar", s.handleDownloadJobContext)
        r.Get("/files/{jobId}/{fileName}", s.handleDownloadFileForRunner)
        r.Post("/files/{jobId}/upload", s.handleUploadFileFromRunner)
        r.Get("/jobs/{jobId}/status", s.handleGetJobStatusForRunner)
        r.Get("/blender/download", s.handleDownloadBlender)
        r.Get("/jobs/{jobId}/files", s.handleGetJobFilesForRunner)
        r.Get("/jobs/{jobId}/metadata", s.handleGetJobMetadataForRunner)
        r.Post("/jobs/{jobId}/metadata", s.handleSubmitMetadata)
        r.Get("/files/{jobId}/{fileName}", s.handleDownloadFileForRunner)
    })
})

// Blender versions API (public, for job submission page)
s.router.Get("/api/blender/versions", s.handleGetBlenderVersions)

// Serve static files (embedded React app with SPA fallback)
s.router.Handle("/*", web.SPAHandler())
}
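// Three runner-facing auth models are visible in the routes above: a one-time
// registration token (POST /register), a per-job job_token on the
// /jobs/{jobId}/... endpoints, and an API key enforced by runnerAuthMiddleware
// for the inner group. Illustrative call with the test key from the Makefile
// (exact host, path prefix, and header name are assumptions):
//
//	curl -H "Authorization: Bearer jk_r0_test_key_123456789012345678901234567890" \
//	     "http://localhost:8080/api/runners/ping"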
// ServeHTTP implements http.Handler
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func (s *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    s.router.ServeHTTP(w, r)
}

// JSON response helpers
func (s *Server) respondJSON(w http.ResponseWriter, status int, data interface{}) {
func (s *Manager) respondJSON(w http.ResponseWriter, status int, data interface{}) {
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(status)
    if err := json.NewEncoder(w).Encode(data); err != nil {
@@ -549,7 +577,7 @@ func (s *Server) respondJSON(w http.ResponseWriter, status int, data interface{}
    }
}

func (s *Server) respondError(w http.ResponseWriter, status int, message string) {
func (s *Manager) respondError(w http.ResponseWriter, status int, message string) {
    s.respondJSON(w, status, map[string]string{"error": message})
}
@@ -573,7 +601,7 @@ func createSessionCookie(sessionID string) *http.Cookie {
}

// handleHealthCheck returns server health status
func (s *Server) handleHealthCheck(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleHealthCheck(w http.ResponseWriter, r *http.Request) {
    // Check database connectivity
    dbHealthy := true
    if err := s.db.Ping(); err != nil {
@@ -581,10 +609,14 @@ func (s *Server) handleHealthCheck(w http.ResponseWriter, r *http.Request) {
        log.Printf("Health check: database ping failed: %v", err)
    }

    // Count connected runners
    s.runnerConnsMu.RLock()
    runnerCount := len(s.runnerConns)
    s.runnerConnsMu.RUnlock()
    // Count online runners (based on recent heartbeat)
    var runnerCount int
    s.db.With(func(conn *sql.DB) error {
        return conn.QueryRow(
            `SELECT COUNT(*) FROM runners WHERE status = ?`,
            types.RunnerStatusOnline,
        ).Scan(&runnerCount)
    })

    // Count connected clients
    s.clientConnsMu.RLock()
@@ -624,7 +656,7 @@ func (s *Server) handleHealthCheck(w http.ResponseWriter, r *http.Request) {
}
// Auth handlers
func (s *Server) handleGoogleLogin(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleGoogleLogin(w http.ResponseWriter, r *http.Request) {
    url, err := s.auth.GoogleLoginURL()
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, err.Error())
@@ -633,7 +665,7 @@ func (s *Server) handleGoogleLogin(w http.ResponseWriter, r *http.Request) {
    http.Redirect(w, r, url, http.StatusFound)
}

func (s *Server) handleGoogleCallback(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleGoogleCallback(w http.ResponseWriter, r *http.Request) {
    code := r.URL.Query().Get("code")
    if code == "" {
        s.respondError(w, http.StatusBadRequest, "Missing code parameter")
@@ -657,7 +689,7 @@ func (s *Server) handleGoogleCallback(w http.ResponseWriter, r *http.Request) {
    http.Redirect(w, r, "/", http.StatusFound)
}

func (s *Server) handleDiscordLogin(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleDiscordLogin(w http.ResponseWriter, r *http.Request) {
    url, err := s.auth.DiscordLoginURL()
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, err.Error())
@@ -666,7 +698,7 @@ func (s *Server) handleDiscordLogin(w http.ResponseWriter, r *http.Request) {
    http.Redirect(w, r, url, http.StatusFound)
}

func (s *Server) handleDiscordCallback(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleDiscordCallback(w http.ResponseWriter, r *http.Request) {
    code := r.URL.Query().Get("code")
    if code == "" {
        s.respondError(w, http.StatusBadRequest, "Missing code parameter")
@@ -690,7 +722,7 @@ func (s *Server) handleDiscordCallback(w http.ResponseWriter, r *http.Request) {
    http.Redirect(w, r, "/", http.StatusFound)
}

func (s *Server) handleLogout(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleLogout(w http.ResponseWriter, r *http.Request) {
    cookie, err := r.Cookie("session_id")
    if err == nil {
        s.auth.DeleteSession(cookie.Value)
@@ -712,7 +744,7 @@ func (s *Server) handleLogout(w http.ResponseWriter, r *http.Request) {
    s.respondJSON(w, http.StatusOK, map[string]string{"message": "Logged out"})
}

func (s *Server) handleGetMe(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleGetMe(w http.ResponseWriter, r *http.Request) {
    cookie, err := r.Cookie("session_id")
    if err != nil {
        log.Printf("Authentication failed: missing session cookie in /auth/me")
@@ -735,7 +767,7 @@ func (s *Server) handleGetMe(w http.ResponseWriter, r *http.Request) {
    })
}

func (s *Server) handleGetAuthProviders(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleGetAuthProviders(w http.ResponseWriter, r *http.Request) {
    s.respondJSON(w, http.StatusOK, map[string]bool{
        "google":  s.auth.IsGoogleOAuthConfigured(),
        "discord": s.auth.IsDiscordOAuthConfigured(),
@@ -743,13 +775,13 @@ func (s *Server) handleGetAuthProviders(w http.ResponseWriter, r *http.Request)
    })
}

func (s *Server) handleLocalLoginAvailable(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleLocalLoginAvailable(w http.ResponseWriter, r *http.Request) {
    s.respondJSON(w, http.StatusOK, map[string]bool{
        "available": s.auth.IsLocalLoginEnabled(),
    })
}

func (s *Server) handleLocalRegister(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleLocalRegister(w http.ResponseWriter, r *http.Request) {
    var req struct {
        Email string `json:"email"`
        Name  string `json:"name"`
@@ -791,7 +823,7 @@ func (s *Server) handleLocalRegister(w http.ResponseWriter, r *http.Request) {
    })
}

func (s *Server) handleLocalLogin(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleLocalLogin(w http.ResponseWriter, r *http.Request) {
    var req struct {
        Username string `json:"username"`
        Password string `json:"password"`
@@ -828,7 +860,7 @@ func (s *Server) handleLocalLogin(w http.ResponseWriter, r *http.Request) {
    })
}

func (s *Server) handleChangePassword(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleChangePassword(w http.ResponseWriter, r *http.Request) {
    userID, err := getUserID(r)
    if err != nil {
        s.respondError(w, http.StatusUnauthorized, err.Error())
@@ -902,7 +934,7 @@ func parseID(r *http.Request, param string) (int64, error) {
}
// StartBackgroundTasks starts background goroutines for error recovery
func (s *Server) StartBackgroundTasks() {
func (s *Manager) StartBackgroundTasks() {
    go s.recoverStuckTasks()
    go s.cleanupOldRenderJobs()
    go s.cleanupOldTempDirectories()
@@ -910,100 +942,63 @@ func (s *Server) StartBackgroundTasks() {
    go s.cleanupOldUploadSessions()
}

// recoverRunnersOnStartup checks for runners marked as online but not actually connected
// This runs once on startup to handle manager restarts where we lose track of connections
func (s *Server) recoverRunnersOnStartup() {
    // Wait a short time for runners to reconnect after manager restart
    // This gives runners a chance to reconnect before we mark them as dead
    time.Sleep(5 * time.Second)
// recoverRunnersOnStartup marks runners as offline on startup
// In the polling model, runners will update their status when they poll for jobs
func (s *Manager) recoverRunnersOnStartup() {
    log.Printf("Recovering runners on startup: marking all as offline...")

    log.Printf("Recovering runners on startup: checking for disconnected runners...")

    var onlineRunnerIDs []int64
    // Mark all runners as offline - they'll be marked online when they poll
    var runnersAffected int64
    err := s.db.With(func(conn *sql.DB) error {
        rows, err := conn.Query(
            `SELECT id FROM runners WHERE status = ?`,
            types.RunnerStatusOnline,
        result, err := conn.Exec(
            `UPDATE runners SET status = ? WHERE status = ?`,
            types.RunnerStatusOffline, types.RunnerStatusOnline,
        )
        if err != nil {
            return err
        }
        defer rows.Close()

        for rows.Next() {
            var runnerID int64
            if err := rows.Scan(&runnerID); err == nil {
                onlineRunnerIDs = append(onlineRunnerIDs, runnerID)
            }
        }
        runnersAffected, _ = result.RowsAffected()
        return nil
    })

    if err != nil {
        log.Printf("Failed to query online runners on startup: %v", err)
        log.Printf("Failed to mark runners as offline on startup: %v", err)
        return
    }

    if len(onlineRunnerIDs) == 0 {
        log.Printf("No runners marked as online on startup")
        return
    if runnersAffected > 0 {
        log.Printf("Marked %d runners as offline on startup", runnersAffected)
    }

    log.Printf("Found %d runners marked as online, checking actual connections...", len(onlineRunnerIDs))

    // Check which runners are actually connected
    s.runnerConnsMu.RLock()
    deadRunnerIDs := make([]int64, 0)
    for _, runnerID := range onlineRunnerIDs {
        if _, connected := s.runnerConns[runnerID]; !connected {
            deadRunnerIDs = append(deadRunnerIDs, runnerID)
    // Reset any running tasks that were assigned to runners
    // They will be picked up by runners when they poll
    var tasksAffected int64
    err = s.db.With(func(conn *sql.DB) error {
        result, err := conn.Exec(
            `UPDATE tasks SET runner_id = NULL, status = ?, started_at = NULL
             WHERE status = ?`,
            types.TaskStatusPending, types.TaskStatusRunning,
        )
        if err != nil {
            return err
        }
    }
    s.runnerConnsMu.RUnlock()

    if len(deadRunnerIDs) == 0 {
        log.Printf("All runners marked as online are actually connected")
        tasksAffected, _ = result.RowsAffected()
        return nil
    })
    if err != nil {
        log.Printf("Failed to reset running tasks on startup: %v", err)
        return
    }

    log.Printf("Found %d runners marked as online but not connected, redistributing their tasks...", len(deadRunnerIDs))

    // Redistribute tasks for disconnected runners
    for _, runnerID := range deadRunnerIDs {
        log.Printf("Recovering runner %d: redistributing tasks and marking as offline", runnerID)
        s.redistributeRunnerTasks(runnerID)

        // Mark runner as offline
        s.db.With(func(conn *sql.DB) error {
            _, _ = conn.Exec(
                `UPDATE runners SET status = ?, last_heartbeat = ? WHERE id = ?`,
                types.RunnerStatusOffline, time.Now(), runnerID,
            )
            return nil
        })
    if tasksAffected > 0 {
        log.Printf("Reset %d running tasks to pending on startup", tasksAffected)
    }

    log.Printf("Startup recovery complete: redistributed tasks from %d disconnected runners", len(deadRunnerIDs))

    // Trigger task distribution to assign recovered tasks to available runners
    s.triggerTaskDistribution()
}
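// Net effect of the startup recovery above, as plain SQL (illustrative; the
// literal strings stand in for the types.* status constants):
//
//	UPDATE runners SET status = 'offline' WHERE status = 'online';
//	UPDATE tasks SET runner_id = NULL, status = 'pending', started_at = NULL
//	       WHERE status = 'running';
//
// Runners re-mark themselves online the next time they poll for work.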
// recoverStuckTasks periodically checks for dead runners and stuck tasks
func (s *Server) recoverStuckTasks() {
    ticker := time.NewTicker(10 * time.Second)
func (s *Manager) recoverStuckTasks() {
    ticker := time.NewTicker(TaskDistributionInterval)
    defer ticker.Stop()

    // Also distribute tasks every 10 seconds (reduced frequency since we have event-driven distribution)
    distributeTicker := time.NewTicker(10 * time.Second)
    defer distributeTicker.Stop()

    go func() {
        for range distributeTicker.C {
            s.triggerTaskDistribution()
        }
    }()

    for range ticker.C {
        func() {
            defer func() {
@@ -1012,37 +1007,28 @@ func (s *Server) recoverStuckTasks() {
                }
            }()

            // Find dead runners (no heartbeat for 90 seconds)
            // But only mark as dead if they're not actually connected via WebSocket
            // Find dead runners (no heartbeat for configured timeout)
            // In polling model, heartbeat is updated when runner polls for jobs
            var deadRunnerIDs []int64
            var stillConnectedIDs []int64
            cutoffTime := time.Now().Add(-RunnerHeartbeatTimeout)
            err := s.db.With(func(conn *sql.DB) error {
                rows, err := conn.Query(
                    `SELECT id FROM runners
                     WHERE last_heartbeat < datetime('now', '-90 seconds')
                     WHERE last_heartbeat < ?
                     AND status = ?`,
                    types.RunnerStatusOnline,
                    cutoffTime, types.RunnerStatusOnline,
                )
                if err != nil {
                    return err
                }
                defer rows.Close()

                s.runnerConnsMu.RLock()
                for rows.Next() {
                    var runnerID int64
                    if err := rows.Scan(&runnerID); err == nil {
                        // Only mark as dead if not actually connected via WebSocket
                        // The WebSocket connection is the source of truth
                        if _, stillConnected := s.runnerConns[runnerID]; !stillConnected {
                            deadRunnerIDs = append(deadRunnerIDs, runnerID)
                        } else {
                            // Runner is still connected but heartbeat is stale - update it
                            stillConnectedIDs = append(stillConnectedIDs, runnerID)
                        }
                        deadRunnerIDs = append(deadRunnerIDs, runnerID)
                    }
                }
                s.runnerConnsMu.RUnlock()
                return nil
            })
            if err != nil {
@@ -1050,27 +1036,9 @@ func (s *Server) recoverStuckTasks() {
                return
            }

            // Update heartbeat for runners that are still connected but have stale heartbeats
            // This ensures the database stays in sync with actual connection state
            for _, runnerID := range stillConnectedIDs {
                s.db.With(func(conn *sql.DB) error {
                    _, _ = conn.Exec(
                        `UPDATE runners SET last_heartbeat = ?, status = ? WHERE id = ?`,
                        time.Now(), types.RunnerStatusOnline, runnerID,
                    )
                    return nil
                })
            }

            if len(deadRunnerIDs) == 0 {
                // Check for task timeouts
                s.recoverTaskTimeouts()
                return
            }

            // Reset tasks assigned to dead runners
            for _, runnerID := range deadRunnerIDs {
                s.redistributeRunnerTasks(runnerID)
                s.resetRunnerTasks(runnerID)

                // Mark runner as offline
                s.db.With(func(conn *sql.DB) error {
@@ -1084,31 +1052,29 @@ func (s *Server) recoverStuckTasks() {

            // Check for task timeouts
            s.recoverTaskTimeouts()

            // Distribute newly recovered tasks
            s.triggerTaskDistribution()
        }()
    }
}
// recoverTaskTimeouts handles tasks that have exceeded their timeout
func (s *Server) recoverTaskTimeouts() {
// Timeouts are treated as runner failures (not task failures) and retry indefinitely
func (s *Manager) recoverTaskTimeouts() {
    // Find tasks running longer than their timeout
    var tasks []struct {
        taskID         int64
        jobID          int64
        runnerID       sql.NullInt64
        retryCount     int
        maxRetries     int
        timeoutSeconds sql.NullInt64
        startedAt      time.Time
    }

    err := s.db.With(func(conn *sql.DB) error {
        rows, err := conn.Query(
            `SELECT t.id, t.runner_id, t.retry_count, t.max_retries, t.timeout_seconds, t.started_at
            `SELECT t.id, t.job_id, t.runner_id, t.timeout_seconds, t.started_at
             FROM tasks t
             WHERE t.status = ?
             AND t.started_at IS NOT NULL
             AND (t.completed_at IS NULL OR t.completed_at < datetime('now', '-30 seconds'))
             AND (t.timeout_seconds IS NULL OR
                  (julianday('now') - julianday(t.started_at)) * 86400 > t.timeout_seconds)`,
            types.TaskStatusRunning,
@@ -1121,13 +1087,12 @@ func (s *Server) recoverTaskTimeouts() {
        for rows.Next() {
            var task struct {
                taskID         int64
                jobID          int64
                runnerID       sql.NullInt64
                retryCount     int
                maxRetries     int
                timeoutSeconds sql.NullInt64
                startedAt      time.Time
            }
            err := rows.Scan(&task.taskID, &task.runnerID, &task.retryCount, &task.maxRetries, &task.timeoutSeconds, &task.startedAt)
            err := rows.Scan(&task.taskID, &task.jobID, &task.runnerID, &task.timeoutSeconds, &task.startedAt)
            if err != nil {
                log.Printf("Failed to scan task row in recoverTaskTimeouts: %v", err)
                continue
@@ -1143,8 +1108,7 @@ func (s *Server) recoverTaskTimeouts() {

    for _, task := range tasks {
        taskID := task.taskID
        retryCount := task.retryCount
        maxRetries := task.maxRetries
        jobID := task.jobID
        timeoutSeconds := task.timeoutSeconds
        startedAt := task.startedAt

@@ -1159,51 +1123,60 @@ func (s *Server) recoverTaskTimeouts() {
            continue
        }

        if retryCount >= maxRetries {
            // Mark as failed
            err = s.db.With(func(conn *sql.DB) error {
                _, err := conn.Exec(`UPDATE tasks SET status = ? WHERE id = ?`, types.TaskStatusFailed, taskID)
                if err != nil {
                    return err
                }
                _, err = conn.Exec(`UPDATE tasks SET error_message = ? WHERE id = ?`, "Task timeout exceeded, max retries reached", taskID)
                if err != nil {
                    return err
                }
                _, err = conn.Exec(`UPDATE tasks SET runner_id = NULL WHERE id = ?`, taskID)
                return err
            })
        // Timeouts are runner failures - always reset to pending and increment runner_failure_count
        // This does NOT count against retry_count (which is for actual task failures like Blender crashes)
        err = s.db.With(func(conn *sql.DB) error {
            _, err := conn.Exec(`UPDATE tasks SET status = ? WHERE id = ?`, types.TaskStatusPending, taskID)
            if err != nil {
                log.Printf("Failed to mark task %d as failed: %v", taskID, err)
            }
        } else {
            // Reset to pending
            err = s.db.With(func(conn *sql.DB) error {
                _, err := conn.Exec(`UPDATE tasks SET status = ? WHERE id = ?`, types.TaskStatusPending, taskID)
                if err != nil {
                    return err
                }
                _, err = conn.Exec(`UPDATE tasks SET runner_id = NULL WHERE id = ?`, taskID)
                if err != nil {
                    return err
                }
                _, err = conn.Exec(`UPDATE tasks SET current_step = NULL WHERE id = ?`, taskID)
                if err != nil {
                    return err
                }
                _, err = conn.Exec(`UPDATE tasks SET retry_count = retry_count + 1 WHERE id = ?`, taskID)
                return err
            })
            if err == nil {
                // Add log entry using the helper function
                s.logTaskEvent(taskID, nil, types.LogLevelWarn, fmt.Sprintf("Task timeout exceeded, resetting (retry %d/%d)", retryCount+1, maxRetries), "")
            }
            _, err = conn.Exec(`UPDATE tasks SET runner_id = NULL WHERE id = ?`, taskID)
            if err != nil {
                return err
            }
            _, err = conn.Exec(`UPDATE tasks SET current_step = NULL WHERE id = ?`, taskID)
            if err != nil {
                return err
            }
            _, err = conn.Exec(`UPDATE tasks SET started_at = NULL WHERE id = ?`, taskID)
            if err != nil {
                return err
            }
            _, err = conn.Exec(`UPDATE tasks SET runner_failure_count = runner_failure_count + 1 WHERE id = ?`, taskID)
            if err != nil {
                return err
            }
            // Clear steps and logs for fresh retry
            _, err = conn.Exec(`DELETE FROM task_steps WHERE task_id = ?`, taskID)
            if err != nil {
                return err
            }
            _, err = conn.Exec(`DELETE FROM task_logs WHERE task_id = ?`, taskID)
            return err
        })
        if err == nil {
            // Broadcast task reset to clients (includes steps_cleared and logs_cleared flags)
            s.broadcastTaskUpdate(jobID, taskID, "task_reset", map[string]interface{}{
                "status":        types.TaskStatusPending,
                "runner_id":     nil,
                "current_step":  nil,
                "started_at":    nil,
                "steps_cleared": true,
                "logs_cleared":  true,
            })

            // Update job status
            s.updateJobStatusFromTasks(jobID)

            log.Printf("Reset timed out task %d: %v", taskID, err)
        } else {
            log.Printf("Failed to reset timed out task %d: %v", taskID, err)
        }
    }
}
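// The timeout predicate above measures elapsed seconds in SQLite
// (illustrative breakdown):
//
//	(julianday('now') - julianday(t.started_at)) * 86400 > t.timeout_seconds
//
// julianday returns fractional days, so the difference times 86400 is the
// task's age in seconds. Tracking runner_failure_count separately from
// retry_count means a slow or vanished runner never burns a task's retry budget.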
// cleanupOldTempDirectories periodically cleans up old temporary directories
func (s *Server) cleanupOldTempDirectories() {
func (s *Manager) cleanupOldTempDirectories() {
    // Run cleanup every hour
    ticker := time.NewTicker(1 * time.Hour)
    defer ticker.Stop()
@@ -1217,7 +1190,7 @@ func (s *Server) cleanupOldTempDirectories() {
}

// cleanupOldTempDirectoriesOnce removes temp directories older than 1 hour
func (s *Server) cleanupOldTempDirectoriesOnce() {
func (s *Manager) cleanupOldTempDirectoriesOnce() {
    defer func() {
        if r := recover(); r != nil {
            log.Printf("Panic in cleanupOldTempDirectories: %v", r)
@@ -1285,7 +1258,7 @@ func (s *Server) cleanupOldTempDirectoriesOnce() {
}

// cleanupOldUploadSessions periodically cleans up abandoned upload sessions
func (s *Server) cleanupOldUploadSessions() {
func (s *Manager) cleanupOldUploadSessions() {
    // Run cleanup every 10 minutes
    ticker := time.NewTicker(10 * time.Minute)
    defer ticker.Stop()
@@ -1299,7 +1272,7 @@ func (s *Server) cleanupOldUploadSessions() {
}

// cleanupOldUploadSessionsOnce removes upload sessions older than 1 hour
func (s *Server) cleanupOldUploadSessionsOnce() {
func (s *Manager) cleanupOldUploadSessionsOnce() {
    defer func() {
        if r := recover(); r != nil {
            log.Printf("Panic in cleanupOldUploadSessions: %v", r)
@@ -19,121 +19,8 @@ import (
    "jiggablend/pkg/types"
)

// handleSubmitMetadata handles metadata submission from runner
func (s *Server) handleSubmitMetadata(w http.ResponseWriter, r *http.Request) {
    jobID, err := parseID(r, "jobId")
    if err != nil {
        s.respondError(w, http.StatusBadRequest, err.Error())
        return
    }

    // Get runner ID from context (set by runnerAuthMiddleware)
    runnerID, ok := r.Context().Value(runnerIDContextKey).(int64)
    if !ok {
        s.respondError(w, http.StatusUnauthorized, "runner_id not found in context")
        return
    }

    var metadata types.BlendMetadata
    if err := json.NewDecoder(r.Body).Decode(&metadata); err != nil {
        s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Invalid metadata JSON: %v", err))
        return
    }

    // Verify job exists
    var jobUserID int64
    err = s.db.With(func(conn *sql.DB) error {
        return conn.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&jobUserID)
    })
    if err == sql.ErrNoRows {
        s.respondError(w, http.StatusNotFound, "Job not found")
        return
    }
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify job: %v", err))
        return
    }

    // Find the metadata extraction task for this job
    // First try to find task assigned to this runner, then fall back to any metadata task for this job
    var taskID int64
    err = s.db.With(func(conn *sql.DB) error {
        err := conn.QueryRow(
            `SELECT id FROM tasks WHERE job_id = ? AND task_type = ? AND runner_id = ?`,
            jobID, types.TaskTypeMetadata, runnerID,
        ).Scan(&taskID)
        if err == sql.ErrNoRows {
            // Fall back to any metadata task for this job (in case assignment changed)
            err = conn.QueryRow(
                `SELECT id FROM tasks WHERE job_id = ? AND task_type = ? ORDER BY created_at DESC LIMIT 1`,
                jobID, types.TaskTypeMetadata,
            ).Scan(&taskID)
            if err == sql.ErrNoRows {
                return fmt.Errorf("metadata extraction task not found")
            }
            if err != nil {
                return err
            }
            // Update the task to be assigned to this runner if it wasn't already
            // (best-effort; a failure here is not fatal)
            conn.Exec(
                `UPDATE tasks SET runner_id = ? WHERE id = ? AND runner_id IS NULL`,
                runnerID, taskID,
            )
        }
        return err
    })
    if err != nil {
        if err.Error() == "metadata extraction task not found" {
            s.respondError(w, http.StatusNotFound, "Metadata extraction task not found")
            return
        }
        s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to find task: %v", err))
        return
    }

    // Convert metadata to JSON
    metadataJSON, err := json.Marshal(metadata)
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, "Failed to marshal metadata")
        return
    }

    // Update job with metadata
    err = s.db.With(func(conn *sql.DB) error {
        _, err := conn.Exec(
            `UPDATE jobs SET blend_metadata = ? WHERE id = ?`,
            string(metadataJSON), jobID,
        )
        return err
    })
    if err != nil {
        s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to update job metadata: %v", err))
        return
    }

    // Mark task as completed
    err = s.db.With(func(conn *sql.DB) error {
        _, err := conn.Exec(`UPDATE tasks SET status = ? WHERE id = ?`, types.TaskStatusCompleted, taskID)
        if err != nil {
            return err
        }
        _, err = conn.Exec(`UPDATE tasks SET completed_at = CURRENT_TIMESTAMP WHERE id = ?`, taskID)
        return err
    })
    if err != nil {
        log.Printf("Failed to mark metadata task as completed: %v", err)
    } else {
        // Update job status and progress after metadata task completes
        s.updateJobStatusFromTasks(jobID)
    }

    log.Printf("Metadata extracted for job %d: frame_start=%d, frame_end=%d", jobID, metadata.FrameStart, metadata.FrameEnd)

    s.respondJSON(w, http.StatusOK, map[string]string{"message": "Metadata submitted successfully"})
}

// handleGetJobMetadata retrieves metadata for a job
func (s *Server) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) {
func (s *Manager) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) {
    userID, err := getUserID(r)
    if err != nil {
        s.respondError(w, http.StatusUnauthorized, err.Error())
@@ -151,9 +38,9 @@ func (s *Server) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) {
    var blendMetadataJSON sql.NullString
    err = s.db.With(func(conn *sql.DB) error {
        return conn.QueryRow(
            `SELECT user_id, blend_metadata FROM jobs WHERE id = ?`,
            jobID,
        ).Scan(&jobUserID, &blendMetadataJSON)
    })
    if err == sql.ErrNoRows {
        s.respondError(w, http.StatusNotFound, "Job not found")
@@ -184,7 +71,7 @@ func (s *Server) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) {

// extractMetadataFromContext extracts metadata from the blend file in a context archive
// Returns the extracted metadata or an error
func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata, error) {
func (s *Manager) extractMetadataFromContext(jobID int64) (*types.BlendMetadata, error) {
    contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar")

    // Check if context exists
@@ -310,7 +197,7 @@ func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata,
}

// extractTar extracts a tar archive to a destination directory
func (s *Server) extractTar(tarPath, destDir string) error {
func (s *Manager) extractTar(tarPath, destDir string) error {
    log.Printf("Extracting tar archive: %s -> %s", tarPath, destDir)

    // Ensure destination directory exists
@@ -355,7 +242,8 @@ func (s *Server) extractTar(tarPath, destDir string) error {
    }

    // Write file
    if header.Typeflag == tar.TypeReg {
    switch header.Typeflag {
    case tar.TypeReg:
        outFile, err := os.Create(target)
        if err != nil {
            return fmt.Errorf("failed to create file: %w", err)
@@ -367,7 +255,7 @@ func (s *Server) extractTar(tarPath, destDir string) error {
        }
        outFile.Close()
        fileCount++
    } else if header.Typeflag == tar.TypeDir {
    case tar.TypeDir:
        dirCount++
    }
}
2501 internal/manager/runners.go Normal file
File diff suppressed because it is too large
333 internal/runner/api/jobconn.go Normal file
@@ -0,0 +1,333 @@
package api

import (
    "fmt"
    "log"
    "strings"
    "sync"
    "time"

    "jiggablend/pkg/types"

    "github.com/gorilla/websocket"
)

// JobConnection wraps a WebSocket connection for job communication.
type JobConnection struct {
    conn          *websocket.Conn
    writeMu       sync.Mutex
    stopPing      chan struct{}
    stopHeartbeat chan struct{}
    isConnected   bool
    connMu        sync.RWMutex
}

// NewJobConnection creates a new job connection wrapper.
func NewJobConnection() *JobConnection {
    return &JobConnection{}
}

// Connect establishes a WebSocket connection for a job (no runnerID needed).
func (j *JobConnection) Connect(managerURL, jobPath, jobToken string) error {
    wsPath := jobPath + "/ws"
    wsURL := strings.Replace(managerURL, "http://", "ws://", 1)
    wsURL = strings.Replace(wsURL, "https://", "wss://", 1)
    wsURL += wsPath

    log.Printf("Connecting to job WebSocket: %s", wsPath)

    dialer := websocket.Dialer{
        HandshakeTimeout: 10 * time.Second,
    }
    conn, _, err := dialer.Dial(wsURL, nil)
    if err != nil {
        return fmt.Errorf("failed to connect job WebSocket: %w", err)
    }

    j.conn = conn

    // Send auth message
    authMsg := map[string]interface{}{
        "type":      "auth",
        "job_token": jobToken,
    }
    if err := conn.WriteJSON(authMsg); err != nil {
        conn.Close()
        return fmt.Errorf("failed to send auth: %w", err)
    }

    // Wait for auth_ok
    conn.SetReadDeadline(time.Now().Add(30 * time.Second))
    var authResp map[string]string
    if err := conn.ReadJSON(&authResp); err != nil {
        conn.Close()
        return fmt.Errorf("failed to read auth response: %w", err)
    }
    if authResp["type"] == "error" {
        conn.Close()
        return fmt.Errorf("auth failed: %s", authResp["message"])
    }
    if authResp["type"] != "auth_ok" {
        conn.Close()
        return fmt.Errorf("unexpected auth response: %s", authResp["type"])
    }

    // Clear read deadline after auth
    conn.SetReadDeadline(time.Time{})

    // Set up ping/pong handler for keepalive
    conn.SetPongHandler(func(string) error {
        conn.SetReadDeadline(time.Now().Add(90 * time.Second))
        return nil
    })

    // Start ping goroutine
    j.stopPing = make(chan struct{})
    j.connMu.Lock()
    j.isConnected = true
    j.connMu.Unlock()
    go j.pingLoop()

    // Start WebSocket heartbeat goroutine
    j.stopHeartbeat = make(chan struct{})
    go j.heartbeatLoop()

    return nil
}

// pingLoop sends periodic pings to keep the WebSocket connection alive.
func (j *JobConnection) pingLoop() {
    defer func() {
        if rec := recover(); rec != nil {
            log.Printf("Ping loop panicked: %v", rec)
        }
    }()

    ticker := time.NewTicker(30 * time.Second)
    defer ticker.Stop()

    for {
        select {
        case <-j.stopPing:
            return
        case <-ticker.C:
            j.writeMu.Lock()
            if j.conn != nil {
                deadline := time.Now().Add(10 * time.Second)
                if err := j.conn.WriteControl(websocket.PingMessage, []byte{}, deadline); err != nil {
                    log.Printf("Failed to send ping, closing connection: %v", err)
                    j.connMu.Lock()
                    j.isConnected = false
                    if j.conn != nil {
                        j.conn.Close()
                        j.conn = nil
                    }
                    j.connMu.Unlock()
                }
            }
            j.writeMu.Unlock()
        }
    }
}

// Heartbeat sends a heartbeat message over WebSocket to keep runner online.
func (j *JobConnection) Heartbeat() {
    if j.conn == nil {
        return
    }

    j.writeMu.Lock()
    defer j.writeMu.Unlock()

    msg := map[string]interface{}{
        "type":      "runner_heartbeat",
        "timestamp": time.Now().Unix(),
    }

    if err := j.conn.WriteJSON(msg); err != nil {
        log.Printf("Failed to send WebSocket heartbeat: %v", err)
        // Handle connection failure
        j.connMu.Lock()
        j.isConnected = false
        if j.conn != nil {
            j.conn.Close()
            j.conn = nil
        }
        j.connMu.Unlock()
    }
}

// heartbeatLoop sends periodic heartbeat messages over WebSocket.
func (j *JobConnection) heartbeatLoop() {
    defer func() {
        if rec := recover(); rec != nil {
            log.Printf("WebSocket heartbeat loop panicked: %v", rec)
        }
    }()

    ticker := time.NewTicker(30 * time.Second)
    defer ticker.Stop()

    for {
        select {
        case <-j.stopHeartbeat:
            return
        case <-ticker.C:
            j.Heartbeat()
        }
    }
}

// Close closes the WebSocket connection.
func (j *JobConnection) Close() {
    j.connMu.Lock()
    j.isConnected = false
    j.connMu.Unlock()

    // Stop heartbeat goroutine
    if j.stopHeartbeat != nil {
        close(j.stopHeartbeat)
        j.stopHeartbeat = nil
    }

    // Stop ping goroutine
    if j.stopPing != nil {
        close(j.stopPing)
        j.stopPing = nil
    }

    if j.conn != nil {
        j.conn.Close()
        j.conn = nil
    }
}

// IsConnected returns true if the connection is established.
func (j *JobConnection) IsConnected() bool {
    j.connMu.RLock()
    defer j.connMu.RUnlock()
    return j.isConnected && j.conn != nil
}

// Log sends a log entry to the manager.
func (j *JobConnection) Log(taskID int64, level types.LogLevel, message string) {
    if j.conn == nil {
        return
    }

    j.writeMu.Lock()
    defer j.writeMu.Unlock()

    msg := map[string]interface{}{
        "type": "log_entry",
        "data": map[string]interface{}{
            "task_id":   taskID,
            "log_level": string(level),
            "message":   message,
        },
        "timestamp": time.Now().Unix(),
    }
    if err := j.conn.WriteJSON(msg); err != nil {
        log.Printf("Failed to send job log, connection may be broken: %v", err)
        // Close the connection on write error
        j.connMu.Lock()
        j.isConnected = false
        if j.conn != nil {
            j.conn.Close()
            j.conn = nil
        }
        j.connMu.Unlock()
    }
}

// Progress sends a progress update to the manager.
func (j *JobConnection) Progress(taskID int64, progress float64) {
    if j.conn == nil {
        return
    }

    j.writeMu.Lock()
    defer j.writeMu.Unlock()

    msg := map[string]interface{}{
        "type": "progress",
        "data": map[string]interface{}{
            "task_id":  taskID,
            "progress": progress,
        },
        "timestamp": time.Now().Unix(),
    }
    if err := j.conn.WriteJSON(msg); err != nil {
        log.Printf("Failed to send job progress, connection may be broken: %v", err)
        // Close the connection on write error
        j.connMu.Lock()
        j.isConnected = false
        if j.conn != nil {
            j.conn.Close()
            j.conn = nil
        }
        j.connMu.Unlock()
    }
}

// OutputUploaded notifies that an output file was uploaded.
func (j *JobConnection) OutputUploaded(taskID int64, fileName string) {
    if j.conn == nil {
        return
    }

    j.writeMu.Lock()
    defer j.writeMu.Unlock()

    msg := map[string]interface{}{
        "type": "output_uploaded",
        "data": map[string]interface{}{
            "task_id":   taskID,
            "file_name": fileName,
        },
        "timestamp": time.Now().Unix(),
    }
    if err := j.conn.WriteJSON(msg); err != nil {
        log.Printf("Failed to send output uploaded, connection may be broken: %v", err)
        // Close the connection on write error
        j.connMu.Lock()
        j.isConnected = false
        if j.conn != nil {
            j.conn.Close()
            j.conn = nil
        }
        j.connMu.Unlock()
    }
}

// Complete sends task completion to the manager.
func (j *JobConnection) Complete(taskID int64, success bool, errorMsg error) {
    if j.conn == nil {
        log.Printf("Cannot send task complete: WebSocket connection is nil")
        return
    }

    j.writeMu.Lock()
    defer j.writeMu.Unlock()

    // Send the error as its message string; marshaling an error value
    // directly would serialize as an empty JSON object.
    var errVal interface{}
    if errorMsg != nil {
        errVal = errorMsg.Error()
    }

    msg := map[string]interface{}{
        "type": "task_complete",
        "data": map[string]interface{}{
            "task_id": taskID,
            "success": success,
            "error":   errVal,
        },
        "timestamp": time.Now().Unix(),
    }
    if err := j.conn.WriteJSON(msg); err != nil {
        log.Printf("Failed to send task complete, connection may be broken: %v", err)
        // Close the connection on write error
        j.connMu.Lock()
        j.isConnected = false
        if j.conn != nil {
            j.conn.Close()
            j.conn = nil
        }
        j.connMu.Unlock()
    }
}
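For orientation, here is a minimal sketch (not part of this commit) of how a runner might drive JobConnection for a single task. The manager URL, job path, token, and task ID below are placeholder values, and the error-handling policy is an assumption.

package main

import (
    "log"

    "jiggablend/internal/runner/api"
    "jiggablend/pkg/types"
)

func main() {
    // Placeholder values; a real runner receives the path and token from the next-job endpoint.
    jc := api.NewJobConnection()
    if err := jc.Connect("http://localhost:8080", "/api/runner/jobs/42", "job-token-placeholder"); err != nil {
        log.Fatalf("connect: %v", err)
    }
    defer jc.Close()

    // Stream logs and progress while the task runs; both are fire-and-forget.
    jc.Log(7, types.LogLevelInfo, "starting render")
    jc.Progress(7, 0.5)

    // Report completion; a nil error marks success.
    jc.Complete(7, true, nil)
}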
421 internal/runner/api/manager.go Normal file
@@ -0,0 +1,421 @@
// Package api provides HTTP and WebSocket communication with the manager server.
package api

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "strings"
    "time"

    "jiggablend/pkg/types"
)

// ManagerClient handles all HTTP communication with the manager server.
type ManagerClient struct {
    baseURL    string
    apiKey     string
    runnerID   int64
    httpClient *http.Client // Standard timeout for quick requests
    longClient *http.Client // No timeout for large file transfers
}

// NewManagerClient creates a new manager client.
func NewManagerClient(baseURL string) *ManagerClient {
    return &ManagerClient{
        baseURL:    strings.TrimSuffix(baseURL, "/"),
        httpClient: &http.Client{Timeout: 30 * time.Second},
        longClient: &http.Client{Timeout: 0}, // No timeout for large transfers
    }
}

// SetCredentials sets the API key and runner ID after registration.
func (m *ManagerClient) SetCredentials(runnerID int64, apiKey string) {
    m.runnerID = runnerID
    m.apiKey = apiKey
}

// GetRunnerID returns the registered runner ID.
func (m *ManagerClient) GetRunnerID() int64 {
    return m.runnerID
}

// GetAPIKey returns the API key.
func (m *ManagerClient) GetAPIKey() string {
    return m.apiKey
}

// GetBaseURL returns the base URL.
func (m *ManagerClient) GetBaseURL() string {
    return m.baseURL
}

// Request performs an authenticated HTTP request with standard timeout.
func (m *ManagerClient) Request(method, path string, body []byte) (*http.Response, error) {
    return m.doRequest(method, path, body, m.httpClient)
}

// RequestLong performs an authenticated HTTP request with no timeout.
// Use for large file uploads/downloads.
func (m *ManagerClient) RequestLong(method, path string, body []byte) (*http.Response, error) {
    return m.doRequest(method, path, body, m.longClient)
}

func (m *ManagerClient) doRequest(method, path string, body []byte, client *http.Client) (*http.Response, error) {
    if m.apiKey == "" {
        return nil, fmt.Errorf("not authenticated")
    }

    fullURL := m.baseURL + path
    req, err := http.NewRequest(method, fullURL, bytes.NewReader(body))
    if err != nil {
        return nil, err
    }

    req.Header.Set("Authorization", "Bearer "+m.apiKey)
    if len(body) > 0 {
        req.Header.Set("Content-Type", "application/json")
    }

    return client.Do(req)
}

// RequestWithToken performs an authenticated HTTP request using a specific token.
func (m *ManagerClient) RequestWithToken(method, path, token string, body []byte) (*http.Response, error) {
    return m.doRequestWithToken(method, path, token, body, m.httpClient)
}

// RequestLongWithToken performs a long-running request with a specific token.
func (m *ManagerClient) RequestLongWithToken(method, path, token string, body []byte) (*http.Response, error) {
    return m.doRequestWithToken(method, path, token, body, m.longClient)
}

func (m *ManagerClient) doRequestWithToken(method, path, token string, body []byte, client *http.Client) (*http.Response, error) {
    fullURL := m.baseURL + path
    req, err := http.NewRequest(method, fullURL, bytes.NewReader(body))
    if err != nil {
        return nil, err
    }

    req.Header.Set("Authorization", "Bearer "+token)
    if len(body) > 0 {
        req.Header.Set("Content-Type", "application/json")
    }

    return client.Do(req)
}

// RegisterRequest is the request body for runner registration.
type RegisterRequest struct {
    Name         string `json:"name"`
    Hostname     string `json:"hostname"`
    Capabilities string `json:"capabilities"`
    APIKey       string `json:"api_key"`
    Fingerprint  string `json:"fingerprint,omitempty"`
}

// RegisterResponse is the response from runner registration.
type RegisterResponse struct {
    ID int64 `json:"id"`
}

// Register registers the runner with the manager.
func (m *ManagerClient) Register(name, hostname string, capabilities map[string]interface{}, registrationToken, fingerprint string) (int64, error) {
    capsJSON, err := json.Marshal(capabilities)
    if err != nil {
        return 0, fmt.Errorf("failed to marshal capabilities: %w", err)
    }

    reqBody := RegisterRequest{
        Name:         name,
        Hostname:     hostname,
        Capabilities: string(capsJSON),
        APIKey:       registrationToken,
    }

    // Only send fingerprint for non-fixed API keys
    if !strings.HasPrefix(registrationToken, "jk_r0_") {
        reqBody.Fingerprint = fingerprint
    }

    body, _ := json.Marshal(reqBody)
    resp, err := m.httpClient.Post(
        m.baseURL+"/api/runner/register",
        "application/json",
        bytes.NewReader(body),
    )
    if err != nil {
        return 0, fmt.Errorf("connection error: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusCreated {
        bodyBytes, _ := io.ReadAll(resp.Body)
        errorBody := string(bodyBytes)

        // Check for token-related errors (should not retry)
        if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusBadRequest {
            errorLower := strings.ToLower(errorBody)
            if strings.Contains(errorLower, "invalid") ||
                strings.Contains(errorLower, "expired") ||
                strings.Contains(errorLower, "already used") ||
                strings.Contains(errorLower, "token") {
                return 0, fmt.Errorf("token error: %s", errorBody)
            }
        }

        return 0, fmt.Errorf("registration failed (status %d): %s", resp.StatusCode, errorBody)
    }

    var result RegisterResponse
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        return 0, fmt.Errorf("failed to decode response: %w", err)
    }

    m.runnerID = result.ID
    m.apiKey = registrationToken

    return result.ID, nil
}

// NextJobResponse represents the response from the next-job endpoint.
type NextJobResponse struct {
    JobToken string          `json:"job_token"`
    JobPath  string          `json:"job_path"`
    Task     NextJobTaskInfo `json:"task"`
}

// NextJobTaskInfo contains task information from the next-job response.
type NextJobTaskInfo struct {
    TaskID   int64                `json:"task_id"`
    JobID    int64                `json:"job_id"`
    JobName  string               `json:"job_name"`
    Frame    int                  `json:"frame"`
    TaskType string               `json:"task_type"`
    Metadata *types.BlendMetadata `json:"metadata,omitempty"`
}

// PollNextJob polls the manager for the next available job.
// Returns nil, nil if no job is available.
func (m *ManagerClient) PollNextJob() (*NextJobResponse, error) {
    if m.runnerID == 0 || m.apiKey == "" {
        return nil, fmt.Errorf("runner not authenticated")
    }

    path := fmt.Sprintf("/api/runner/workers/%d/next-job", m.runnerID)
    resp, err := m.Request("GET", path, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to poll for job: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode == http.StatusNoContent {
        return nil, nil // No job available
    }

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(resp.Body)
        return nil, fmt.Errorf("unexpected status %d: %s", resp.StatusCode, string(body))
    }

    var job NextJobResponse
    if err := json.NewDecoder(resp.Body).Decode(&job); err != nil {
        return nil, fmt.Errorf("failed to decode job response: %w", err)
    }

    return &job, nil
}

// DownloadContext downloads the job context tar file.
func (m *ManagerClient) DownloadContext(contextPath, jobToken string) (io.ReadCloser, error) {
    resp, err := m.RequestLongWithToken("GET", contextPath, jobToken, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to download context: %w", err)
    }

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        return nil, fmt.Errorf("context download failed with status %d: %s", resp.StatusCode, string(body))
    }

    return resp.Body, nil
}

// UploadFile uploads a file to the manager.
func (m *ManagerClient) UploadFile(uploadPath, jobToken, filePath string) error {
    file, err := os.Open(filePath)
    if err != nil {
        return fmt.Errorf("failed to open file: %w", err)
    }
    defer file.Close()

    // Create multipart form
    body := &bytes.Buffer{}
    writer := multipart.NewWriter(body)
    part, err := writer.CreateFormFile("file", filepath.Base(filePath))
    if err != nil {
        return fmt.Errorf("failed to create form file: %w", err)
    }
    if _, err := io.Copy(part, file); err != nil {
        return fmt.Errorf("failed to copy file to form: %w", err)
    }
    writer.Close()

    fullURL := m.baseURL + uploadPath
    req, err := http.NewRequest("POST", fullURL, body)
    if err != nil {
        return err
    }
    req.Header.Set("Authorization", "Bearer "+jobToken)
    req.Header.Set("Content-Type", writer.FormDataContentType())

    resp, err := m.longClient.Do(req)
    if err != nil {
        return fmt.Errorf("failed to upload file: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
        respBody, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(respBody))
    }

    return nil
}

// GetJobMetadata retrieves job metadata from the manager.
func (m *ManagerClient) GetJobMetadata(jobID int64) (*types.BlendMetadata, error) {
    path := fmt.Sprintf("/api/runner/jobs/%d/metadata?runner_id=%d", jobID, m.runnerID)
    resp, err := m.Request("GET", path, nil)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode == http.StatusNotFound {
        return nil, nil // No metadata found
    }
    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(resp.Body)
        return nil, fmt.Errorf("failed to get job metadata: %s", string(body))
    }

    var metadata types.BlendMetadata
    if err := json.NewDecoder(resp.Body).Decode(&metadata); err != nil {
        return nil, err
    }

    return &metadata, nil
}

// JobFile represents a file associated with a job.
type JobFile struct {
    ID       int64  `json:"id"`
    JobID    int64  `json:"job_id"`
    FileType string `json:"file_type"`
    FilePath string `json:"file_path"`
    FileName string `json:"file_name"`
    FileSize int64  `json:"file_size"`
}

// GetJobFiles retrieves the list of files for a job.
func (m *ManagerClient) GetJobFiles(jobID int64) ([]JobFile, error) {
    path := fmt.Sprintf("/api/runner/jobs/%d/files?runner_id=%d", jobID, m.runnerID)
    resp, err := m.Request("GET", path, nil)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(resp.Body)
        return nil, fmt.Errorf("failed to get job files: %s", string(body))
    }

    var files []JobFile
    if err := json.NewDecoder(resp.Body).Decode(&files); err != nil {
        return nil, err
    }

    return files, nil
}

// DownloadFrame downloads a frame file from the manager.
func (m *ManagerClient) DownloadFrame(jobID int64, fileName, destPath string) error {
    encodedFileName := url.PathEscape(fileName)
    path := fmt.Sprintf("/api/runner/files/%d/%s?runner_id=%d", jobID, encodedFileName, m.runnerID)
    resp, err := m.RequestLong("GET", path, nil)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("download failed: %s", string(body))
    }

    file, err := os.Create(destPath)
    if err != nil {
        return err
    }
    defer file.Close()

    _, err = io.Copy(file, resp.Body)
    return err
}

// SubmitMetadata submits extracted metadata to the manager.
func (m *ManagerClient) SubmitMetadata(jobID int64, metadata types.BlendMetadata) error {
    metadataJSON, err := json.Marshal(metadata)
    if err != nil {
        return fmt.Errorf("failed to marshal metadata: %w", err)
    }

    path := fmt.Sprintf("/api/runner/jobs/%d/metadata?runner_id=%d", jobID, m.runnerID)
    fullURL := m.baseURL + path
    req, err := http.NewRequest("POST", fullURL, bytes.NewReader(metadataJSON))
    if err != nil {
        return fmt.Errorf("failed to create request: %w", err)
    }

    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("Authorization", "Bearer "+m.apiKey)

    resp, err := m.httpClient.Do(req)
    if err != nil {
        return fmt.Errorf("failed to submit metadata: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("metadata submission failed: %s", string(body))
    }

    return nil
}

// DownloadBlender downloads a Blender version from the manager.
func (m *ManagerClient) DownloadBlender(version string) (io.ReadCloser, error) {
    path := fmt.Sprintf("/api/runner/blender/download?version=%s&runner_id=%d", version, m.runnerID)
    resp, err := m.RequestLong("GET", path, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to download blender from manager: %w", err)
    }

    if resp.StatusCode != http.StatusOK {
        body, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        return nil, fmt.Errorf("failed to download blender: status %d, body: %s", resp.StatusCode, string(body))
    }

    return resp.Body, nil
}
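As a rough usage sketch (again, not part of the diff), a runner's poll loop built on ManagerClient could look like this; the registration token and capability map are placeholders, and the sleep interval is an arbitrary choice.

package main

import (
    "log"
    "time"

    "jiggablend/internal/runner/api"
)

func main() {
    client := api.NewManagerClient("http://localhost:8080") // placeholder URL

    caps := map[string]interface{}{"blender": true} // placeholder capabilities
    if _, err := client.Register("runner-1", "host-1", caps, "registration-token-placeholder", ""); err != nil {
        log.Fatalf("register: %v", err)
    }

    for {
        job, err := client.PollNextJob()
        if err != nil {
            log.Printf("poll: %v", err)
        } else if job != nil {
            log.Printf("got task %d (%s) for job %d", job.Task.TaskID, job.Task.TaskType, job.Task.JobID)
            // Fetch the context via client.DownloadContext(...) using job.JobPath
            // and job.JobToken, then execute the task.
        }
        time.Sleep(2 * time.Second)
    }
}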
87 internal/runner/blender/binary.go Normal file
@@ -0,0 +1,87 @@
// Package blender handles Blender binary management and execution.
package blender

import (
    "fmt"
    "log"
    "os"
    "path/filepath"

    "jiggablend/internal/runner/api"
    "jiggablend/internal/runner/workspace"
)

// Manager handles Blender binary downloads and management.
type Manager struct {
    manager      *api.ManagerClient
    workspaceDir string
}

// NewManager creates a new Blender manager.
func NewManager(managerClient *api.ManagerClient, workspaceDir string) *Manager {
    return &Manager{
        manager:      managerClient,
        workspaceDir: workspaceDir,
    }
}

// GetBinaryPath returns the path to the Blender binary for a specific version.
// Downloads from manager and extracts if not already present.
func (m *Manager) GetBinaryPath(version string) (string, error) {
    blenderDir := filepath.Join(m.workspaceDir, "blender-versions")
    if err := os.MkdirAll(blenderDir, 0755); err != nil {
        return "", fmt.Errorf("failed to create blender directory: %w", err)
    }

    // Check if already installed - look for version folder first
    versionDir := filepath.Join(blenderDir, version)
    binaryPath := filepath.Join(versionDir, "blender")

    // Check if version folder exists and contains the binary
    if versionInfo, err := os.Stat(versionDir); err == nil && versionInfo.IsDir() {
        // Version folder exists, check if binary is present
        if binaryInfo, err := os.Stat(binaryPath); err == nil {
            // Verify it's actually a file (not a directory)
            if !binaryInfo.IsDir() {
                log.Printf("Found existing Blender %s installation at %s", version, binaryPath)
                return binaryPath, nil
            }
        }
        // Version folder exists but binary is missing - might be incomplete installation
        log.Printf("Version folder %s exists but binary not found, will re-download", versionDir)
    }

    // Download from manager
    log.Printf("Downloading Blender %s from manager", version)

    reader, err := m.manager.DownloadBlender(version)
    if err != nil {
        return "", err
    }
    defer reader.Close()

    // Manager serves pre-decompressed .tar files - extract directly
    log.Printf("Extracting Blender %s...", version)
    if err := workspace.ExtractTarStripPrefix(reader, versionDir); err != nil {
        return "", fmt.Errorf("failed to extract blender: %w", err)
    }

    // Verify binary exists
    if _, err := os.Stat(binaryPath); err != nil {
        return "", fmt.Errorf("blender binary not found after extraction")
    }

    log.Printf("Blender %s installed at %s", version, binaryPath)
    return binaryPath, nil
}

// GetBinaryForJob returns the Blender binary path for a job.
// Uses the version from metadata or falls back to system blender.
func (m *Manager) GetBinaryForJob(version string) (string, error) {
    if version == "" {
        return "blender", nil // System blender
    }

    return m.GetBinaryPath(version)
}
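A short, hedged example of resolving a binary through this manager; it assumes the ManagerClient is already registered and the manager can serve the requested version, and the URL and workspace path are placeholders.

package main

import (
    "log"

    "jiggablend/internal/runner/api"
    "jiggablend/internal/runner/blender"
)

func main() {
    client := api.NewManagerClient("http://localhost:8080")   // placeholder URL; must be registered first
    bm := blender.NewManager(client, "/tmp/runner-workspace") // placeholder workspace

    // An empty version string falls back to the system blender on PATH.
    bin, err := bm.GetBinaryForJob("4.2")
    if err != nil {
        log.Fatalf("blender: %v", err)
    }
    log.Printf("using blender binary: %s", bin)
}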
100 internal/runner/blender/logfilter.go Normal file
@@ -0,0 +1,100 @@
package blender

import (
    "regexp"
    "strings"

    "jiggablend/pkg/types"
)

// FilterLog checks if a Blender log line should be filtered or downgraded.
// Returns (shouldFilter, logLevel) - if shouldFilter is true, the log should be skipped.
func FilterLog(line string) (shouldFilter bool, logLevel types.LogLevel) {
    trimmed := strings.TrimSpace(line)

    // Filter out empty lines
    if trimmed == "" {
        return true, types.LogLevelInfo
    }

    // Filter out separator lines
    if trimmed == "--------------------------------------------------------------------" ||
        (strings.HasPrefix(trimmed, "-----") && strings.Contains(trimmed, "----")) {
        return true, types.LogLevelInfo
    }

    // Filter out trace headers
    upperLine := strings.ToUpper(trimmed)
    upperOriginal := strings.ToUpper(line)

    if trimmed == "Trace:" ||
        trimmed == "Depth Type Name" ||
        trimmed == "----- ---- ----" ||
        line == "Depth Type Name" ||
        line == "----- ---- ----" ||
        (strings.Contains(upperLine, "DEPTH") && strings.Contains(upperLine, "TYPE") && strings.Contains(upperLine, "NAME")) ||
        (strings.Contains(upperOriginal, "DEPTH") && strings.Contains(upperOriginal, "TYPE") && strings.Contains(upperOriginal, "NAME")) ||
        strings.Contains(line, "Depth Type Name") ||
        strings.Contains(line, "----- ---- ----") ||
        strings.HasPrefix(trimmed, "-----") ||
        regexp.MustCompile(`^[-]+\s+[-]+\s+[-]+$`).MatchString(trimmed) {
        return true, types.LogLevelInfo
    }

    // Completely filter out dependency graph messages (they're just noise)
    dependencyGraphPatterns := []string{
        "Failed to add relation",
        "Could not find op_from",
        "OperationKey",
        "find_node_operation: Failed for",
        "BONE_DONE",
        "component name:",
        "operation code:",
        "rope_ctrl_rot_",
    }

    for _, pattern := range dependencyGraphPatterns {
        if strings.Contains(line, pattern) {
            return true, types.LogLevelInfo
        }
    }

    // Filter out animation system warnings (invalid drivers are common and harmless)
    animationSystemPatterns := []string{
        "BKE_animsys_eval_driver: invalid driver",
        "bke.anim_sys",
        "rotation_quaternion[",
        "constraints[",
        ".influence[0]",
        "pose.bones[",
    }

    for _, pattern := range animationSystemPatterns {
        if strings.Contains(line, pattern) {
            return true, types.LogLevelInfo
        }
    }

    // Filter out modifier warnings (common when vertices change)
    modifierPatterns := []string{
        "BKE_modifier_set_error",
        "bke.modifier",
        "Vertices changed from",
        "Modifier:",
    }

    for _, pattern := range modifierPatterns {
        if strings.Contains(line, pattern) {
            return true, types.LogLevelInfo
        }
    }

    // Filter out lines that are just numbers or trace depth indicators
    // Pattern: number, word, word (e.g., "1 Object timer_box_franck")
    if matched, _ := regexp.MatchString(`^\d+\s+\w+\s+\w+`, trimmed); matched {
        return true, types.LogLevelInfo
    }

    return false, types.LogLevelInfo
}
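To illustrate the filter's behavior, a small sketch feeding it a few representative lines (the sample Blender output is fabricated for the example):

package main

import (
    "fmt"

    "jiggablend/internal/runner/blender"
)

func main() {
    lines := []string{
        "Fra:12 Mem:512.00M | Rendering 3/8 samples", // kept: real render progress
        "BKE_animsys_eval_driver: invalid driver",    // filtered: animation-system noise
        "",                                           // filtered: empty line
    }
    for _, l := range lines {
        skip, level := blender.FilterLog(l)
        fmt.Printf("skip=%v level=%s line=%q\n", skip, level, l)
    }
}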
143 internal/runner/blender/version.go Normal file
@@ -0,0 +1,143 @@
package blender

import (
    "compress/gzip"
    "fmt"
    "io"
    "os"
    "os/exec"
)

// ParseVersionFromFile parses the Blender version that a .blend file was saved with.
// Returns major and minor version numbers.
func ParseVersionFromFile(blendPath string) (major, minor int, err error) {
    file, err := os.Open(blendPath)
    if err != nil {
        return 0, 0, fmt.Errorf("failed to open blend file: %w", err)
    }
    defer file.Close()

    // Read the first 12 bytes of the blend file header
    // Format: BLENDER-v<major><minor><patch> or BLENDER_v<major><minor><patch>
    // The header is: "BLENDER" (7 bytes) + pointer size (1 byte: '-' for 64-bit, '_' for 32-bit)
    // + endianness (1 byte: 'v' for little-endian, 'V' for big-endian)
    // + version (3 bytes: e.g., "402" for 4.02)
    header := make([]byte, 12)
    n, err := file.Read(header)
    if err != nil || n < 12 {
        return 0, 0, fmt.Errorf("failed to read blend file header: %w", err)
    }

    // Check for BLENDER magic
    if string(header[:7]) != "BLENDER" {
        // Might be compressed - try to decompress
        file.Seek(0, 0)
        return parseCompressedVersion(file)
    }

    // Parse version from bytes 9-11 (3 digits)
    versionStr := string(header[9:12])

    // Version format changed in Blender 3.0
    // Pre-3.0: "279" = 2.79, "280" = 2.80
    // 3.0+: "300" = 3.0, "402" = 4.02, "410" = 4.10
    if len(versionStr) == 3 {
        // First digit is major version
        fmt.Sscanf(string(versionStr[0]), "%d", &major)
        // Next two digits are minor version
        fmt.Sscanf(versionStr[1:3], "%d", &minor)
    }

    return major, minor, nil
}

// parseCompressedVersion handles gzip and zstd compressed blend files.
func parseCompressedVersion(file *os.File) (major, minor int, err error) {
    magic := make([]byte, 4)
    if _, err := file.Read(magic); err != nil {
        return 0, 0, err
    }
    file.Seek(0, 0)

    if magic[0] == 0x1f && magic[1] == 0x8b {
        // gzip compressed
        gzReader, err := gzip.NewReader(file)
        if err != nil {
            return 0, 0, fmt.Errorf("failed to create gzip reader: %w", err)
        }
        defer gzReader.Close()

        header := make([]byte, 12)
        n, err := gzReader.Read(header)
        if err != nil || n < 12 {
            return 0, 0, fmt.Errorf("failed to read compressed blend header: %w", err)
        }

        if string(header[:7]) != "BLENDER" {
            return 0, 0, fmt.Errorf("invalid blend file format")
        }

        versionStr := string(header[9:12])
        if len(versionStr) == 3 {
            fmt.Sscanf(string(versionStr[0]), "%d", &major)
            fmt.Sscanf(versionStr[1:3], "%d", &minor)
        }

        return major, minor, nil
    }

    // Check for zstd magic (Blender 3.0+): 0x28 0xB5 0x2F 0xFD
    if magic[0] == 0x28 && magic[1] == 0xb5 && magic[2] == 0x2f && magic[3] == 0xfd {
        return parseZstdVersion(file)
    }

    return 0, 0, fmt.Errorf("unknown blend file format")
}

// parseZstdVersion handles zstd-compressed blend files (Blender 3.0+).
// Uses zstd command line tool since Go doesn't have native zstd support.
func parseZstdVersion(file *os.File) (major, minor int, err error) {
    file.Seek(0, 0)

    cmd := exec.Command("zstd", "-d", "-c")
    cmd.Stdin = file

    stdout, err := cmd.StdoutPipe()
    if err != nil {
        return 0, 0, fmt.Errorf("failed to create zstd stdout pipe: %w", err)
    }

    if err := cmd.Start(); err != nil {
        return 0, 0, fmt.Errorf("failed to start zstd decompression: %w", err)
    }

    // Read just the header (12 bytes)
    header := make([]byte, 12)
    n, readErr := io.ReadFull(stdout, header)

    // Kill the process early - we only need the header
    cmd.Process.Kill()
    cmd.Wait()

    if readErr != nil || n < 12 {
        return 0, 0, fmt.Errorf("failed to read zstd compressed blend header: %v", readErr)
    }

    if string(header[:7]) != "BLENDER" {
        return 0, 0, fmt.Errorf("invalid blend file format in zstd archive")
    }

    versionStr := string(header[9:12])
    if len(versionStr) == 3 {
        fmt.Sscanf(string(versionStr[0]), "%d", &major)
        fmt.Sscanf(versionStr[1:3], "%d", &minor)
    }

    return major, minor, nil
}

// VersionString returns a formatted version string like "4.2".
func VersionString(major, minor int) string {
    return fmt.Sprintf("%d.%d", major, minor)
}
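To make the header layout concrete, this snippet decodes the version digits from a fabricated 12-byte header exactly the way the parser above does:

package main

import "fmt"

func main() {
    // Fabricated header for a 64-bit little-endian file saved by Blender 4.2:
    // "BLENDER" + '-' (64-bit) + 'v' (little-endian) + "402" (version digits).
    header := []byte("BLENDER-v402")

    var major, minor int
    versionStr := string(header[9:12])              // "402"
    fmt.Sscanf(string(versionStr[0]), "%d", &major) // first digit -> 4
    fmt.Sscanf(versionStr[1:3], "%d", &minor)       // last digits -> 2
    fmt.Printf("Blender %d.%d\n", major, minor)     // Blender 4.2
}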
File diff suppressed because it is too large
71 internal/runner/encoding/encoder.go Normal file
@@ -0,0 +1,71 @@
// Package encoding handles video encoding with software encoders.
package encoding

import (
    "os/exec"
)

// Encoder represents a video encoder.
type Encoder interface {
    Name() string
    Codec() string
    Available() bool
    BuildCommand(config *EncodeConfig) *exec.Cmd
}

// EncodeConfig holds configuration for video encoding.
type EncodeConfig struct {
    InputPattern string  // Input file pattern (e.g., "frame_%04d.exr")
    OutputPath   string  // Output file path
    StartFrame   int     // Starting frame number
    FrameRate    float64 // Frame rate
    WorkDir      string  // Working directory
    UseAlpha     bool    // Whether to preserve alpha channel
    TwoPass      bool    // Whether to use 2-pass encoding
    SourceFormat string  // Source format: "exr" or "png" (defaults to "exr")
    PreserveHDR  bool    // Whether to preserve HDR range for EXR (uses HLG with bt709 primaries)
}

// Selector selects the software encoder.
type Selector struct {
    h264Encoders []Encoder
    av1Encoders  []Encoder
    vp9Encoders  []Encoder
}

// NewSelector creates a new encoder selector with software encoders.
func NewSelector() *Selector {
    s := &Selector{}
    s.detectEncoders()
    return s
}

func (s *Selector) detectEncoders() {
    // Use software encoding only - reliable and avoids hardware-specific colorspace issues
    s.h264Encoders = []Encoder{
        &SoftwareEncoder{codec: "libx264"},
    }

    s.av1Encoders = []Encoder{
        &SoftwareEncoder{codec: "libaom-av1"},
    }

    s.vp9Encoders = []Encoder{
        &SoftwareEncoder{codec: "libvpx-vp9"},
    }
}

// SelectH264 returns the software H.264 encoder.
func (s *Selector) SelectH264() Encoder {
    return &SoftwareEncoder{codec: "libx264"}
}

// SelectAV1 returns the software AV1 encoder.
func (s *Selector) SelectAV1() Encoder {
    return &SoftwareEncoder{codec: "libaom-av1"}
}

// SelectVP9 returns the software VP9 encoder.
func (s *Selector) SelectVP9() Encoder {
    return &SoftwareEncoder{codec: "libvpx-vp9"}
}
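A hedged usage sketch tying the selector and config together (paths and settings below are placeholders, and it assumes an ffmpeg build with zscale support is on PATH):

package main

import (
    "log"

    "jiggablend/internal/runner/encoding"
)

func main() {
    sel := encoding.NewSelector()
    enc := sel.SelectH264() // software libx264

    cfg := &encoding.EncodeConfig{
        InputPattern: "frame_%04d.exr", // placeholder pattern
        OutputPath:   "preview.mp4",
        StartFrame:   1,
        FrameRate:    24.0,
        WorkDir:      "/tmp/render",
        SourceFormat: "exr",
        PreserveHDR:  false, // encode the EXRs as SDR bt709
    }

    cmd := enc.BuildCommand(cfg)
    if err := cmd.Run(); err != nil {
        log.Fatalf("ffmpeg failed: %v", err)
    }
}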
270 internal/runner/encoding/encoders.go Normal file
@@ -0,0 +1,270 @@
|
||||
package encoding
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// CRFH264 is the Constant Rate Factor for H.264 encoding (lower = higher quality, range 0-51)
|
||||
CRFH264 = 15
|
||||
// CRFAV1 is the Constant Rate Factor for AV1 encoding (lower = higher quality, range 0-63)
|
||||
CRFAV1 = 30
|
||||
// CRFVP9 is the Constant Rate Factor for VP9 encoding (lower = higher quality, range 0-63)
|
||||
CRFVP9 = 30
|
||||
)
|
||||
|
||||
// tonemapFilter returns the appropriate filter for EXR input.
|
||||
// For HDR preservation: converts linear RGB (EXR) to bt2020 YUV with HLG transfer function
|
||||
// Uses zscale to properly convert colorspace from linear RGB to bt2020 YUV while preserving HDR range
|
||||
// Step 1: Ensure format is gbrpf32le (linear RGB)
|
||||
// Step 2: Convert transfer function from linear to HLG (arib-std-b67) with bt2020 primaries/matrix
|
||||
// Step 3: Convert to YUV format
|
||||
func tonemapFilter(useAlpha bool) string {
|
||||
// Convert from linear RGB (gbrpf32le) to HLG with bt709 primaries to match PNG appearance
|
||||
// Based on best practices: convert linear RGB directly to HLG with bt709 primaries
|
||||
// This matches PNG color appearance (bt709 primaries) while preserving HDR range (HLG transfer)
|
||||
// zscale uses numeric values:
|
||||
// primaries: 1=bt709 (matches PNG), 9=bt2020
|
||||
// matrix: 1=bt709, 9=bt2020nc, 0=gbr (RGB input)
|
||||
// transfer: 8=linear, 18=arib-std-b67 (HLG)
|
||||
// Direct conversion: linear RGB -> HLG with bt709 primaries -> bt2020 YUV (for wider gamut metadata)
|
||||
// The bt709 primaries in the conversion match PNG, but we set bt2020 in metadata for HDR displays
|
||||
// Convert linear RGB to sRGB first, then convert to HLG
|
||||
// This approach: linear -> sRGB -> HLG -> bt2020
|
||||
// Fixes red tint by using sRGB conversion, preserves HDR range with HLG
|
||||
filter := "format=gbrpf32le,zscale=transferin=8:transfer=13:primariesin=1:primaries=1:matrixin=0:matrix=1:rangein=full:range=full,zscale=transferin=13:transfer=18:primariesin=1:primaries=9:matrixin=1:matrix=9:rangein=full:range=full"
|
||||
if useAlpha {
|
||||
return filter + ",format=yuva420p10le"
|
||||
}
|
||||
return filter + ",format=yuv420p10le"
|
||||
}
|
||||
|
||||
// SoftwareEncoder implements software encoding (libx264, libaom-av1, libvpx-vp9).
|
||||
type SoftwareEncoder struct {
|
||||
codec string
|
||||
}
|
||||
|
||||
func (e *SoftwareEncoder) Name() string { return "software" }
|
||||
func (e *SoftwareEncoder) Codec() string { return e.codec }
|
||||
|
||||
func (e *SoftwareEncoder) Available() bool {
|
||||
return true // Software encoding is always available
|
||||
}
|
||||
|
||||
func (e *SoftwareEncoder) BuildCommand(config *EncodeConfig) *exec.Cmd {
|
||||
// Use HDR pixel formats for EXR, SDR for PNG
|
||||
var pixFmt string
|
||||
var colorPrimaries, colorTrc, colorspace string
|
||||
if config.SourceFormat == "png" {
|
||||
// PNG: SDR format
|
||||
pixFmt = "yuv420p"
|
||||
if config.UseAlpha {
|
||||
pixFmt = "yuva420p"
|
||||
}
|
||||
colorPrimaries = "bt709"
|
||||
colorTrc = "bt709"
|
||||
colorspace = "bt709"
|
||||
} else {
|
||||
// EXR: Use HDR encoding if PreserveHDR is true, otherwise SDR (like PNG)
|
||||
if config.PreserveHDR {
|
||||
// HDR: Use HLG transfer with bt709 primaries to preserve HDR range while matching PNG color
|
||||
pixFmt = "yuv420p10le" // 10-bit to preserve HDR range
|
||||
if config.UseAlpha {
|
||||
pixFmt = "yuva420p10le"
|
||||
}
|
||||
colorPrimaries = "bt709" // bt709 primaries to match PNG color appearance
|
||||
colorTrc = "arib-std-b67" // HLG transfer function - preserves HDR range, works on SDR displays
|
||||
colorspace = "bt709" // bt709 colorspace to match PNG
|
||||
} else {
|
||||
// SDR: Treat as SDR (like PNG) - encode as bt709
|
||||
pixFmt = "yuv420p"
|
||||
if config.UseAlpha {
|
||||
pixFmt = "yuva420p"
|
||||
}
|
||||
colorPrimaries = "bt709"
|
||||
colorTrc = "bt709"
|
||||
colorspace = "bt709"
|
||||
}
|
||||
}
|
||||
|
||||
var codecArgs []string
|
||||
switch e.codec {
|
||||
case "libaom-av1":
|
||||
codecArgs = []string{"-crf", strconv.Itoa(CRFAV1), "-b:v", "0", "-tiles", "2x2", "-g", "240"}
|
||||
case "libvpx-vp9":
|
||||
// VP9 supports alpha and HDR, use good quality settings
|
||||
codecArgs = []string{"-crf", strconv.Itoa(CRFVP9), "-b:v", "0", "-row-mt", "1", "-g", "240"}
|
||||
default:
|
||||
// H.264: Use High 10 profile for HDR EXR (10-bit), High profile for SDR
|
||||
if config.SourceFormat != "png" && config.PreserveHDR {
|
||||
codecArgs = []string{"-preset", "veryslow", "-crf", strconv.Itoa(CRFH264), "-profile:v", "high10", "-level", "5.2", "-tune", "film", "-keyint_min", "24", "-g", "240", "-bf", "2", "-refs", "4"}
|
||||
} else {
|
||||
codecArgs = []string{"-preset", "veryslow", "-crf", strconv.Itoa(CRFH264), "-profile:v", "high", "-level", "5.2", "-tune", "film", "-keyint_min", "24", "-g", "240", "-bf", "2", "-refs", "4"}
|
||||
}
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-y",
|
||||
"-f", "image2",
|
||||
"-start_number", fmt.Sprintf("%d", config.StartFrame),
|
||||
"-framerate", fmt.Sprintf("%.2f", config.FrameRate),
|
||||
"-i", config.InputPattern,
|
||||
"-c:v", e.codec,
|
||||
"-pix_fmt", pixFmt,
|
||||
"-r", fmt.Sprintf("%.2f", config.FrameRate),
|
||||
"-color_primaries", colorPrimaries,
|
||||
"-color_trc", colorTrc,
|
||||
"-colorspace", colorspace,
|
||||
"-color_range", "tv",
|
||||
}
|
||||
|
||||
// Add video filter for EXR: convert linear RGB based on HDR setting
|
||||
// PNG doesn't need any filter as it's already in sRGB
|
||||
if config.SourceFormat != "png" {
|
||||
var vf string
|
||||
if config.PreserveHDR {
|
||||
// HDR: Convert linear RGB -> sRGB -> HLG with bt709 primaries
|
||||
// This preserves HDR range while matching PNG color appearance
|
||||
vf = "format=gbrpf32le,zscale=transferin=8:transfer=13:primariesin=1:primaries=1:matrixin=0:matrix=1:rangein=full:range=full,zscale=transferin=13:transfer=18:primariesin=1:primaries=1:matrixin=1:matrix=1:rangein=full:range=full"
|
||||
if config.UseAlpha {
|
||||
vf += ",format=yuva420p10le"
|
||||
} else {
|
||||
vf += ",format=yuv420p10le"
|
||||
}
|
||||
} else {
|
||||
// SDR: Convert linear RGB (EXR) to sRGB (bt709) - simple conversion like Krita does
|
||||
// zscale: linear (8) -> sRGB (13) with bt709 primaries/matrix
|
||||
vf = "format=gbrpf32le,zscale=transferin=8:transfer=13:primariesin=1:primaries=1:matrixin=0:matrix=1:rangein=full:range=full"
|
||||
if config.UseAlpha {
|
||||
vf += ",format=yuva420p"
|
||||
} else {
|
||||
vf += ",format=yuv420p"
|
||||
}
|
||||
}
|
||||
args = append(args, "-vf", vf)
|
||||
}
|
||||
args = append(args, codecArgs...)
|
||||
|
||||
if config.TwoPass {
|
||||
// For 2-pass, this builds pass 2 command
|
||||
args = append(args, "-pass", "2")
|
||||
}
|
||||
|
||||
args = append(args, config.OutputPath)
|
||||
|
||||
if config.TwoPass {
|
||||
log.Printf("Build Software Pass 2 command: ffmpeg %s", strings.Join(args, " "))
|
||||
} else {
|
||||
log.Printf("Build Software command: ffmpeg %s", strings.Join(args, " "))
|
||||
}
|
||||
cmd := exec.Command("ffmpeg", args...)
|
||||
cmd.Dir = config.WorkDir
|
||||
return cmd
|
||||
}
|
||||
// BuildPass1Command builds the first pass command for 2-pass encoding.
func (e *SoftwareEncoder) BuildPass1Command(config *EncodeConfig) *exec.Cmd {
	// Use HDR pixel formats for EXR, SDR for PNG
	var pixFmt string
	var colorPrimaries, colorTrc, colorspace string
	if config.SourceFormat == "png" {
		// PNG: SDR format
		pixFmt = "yuv420p"
		if config.UseAlpha {
			pixFmt = "yuva420p"
		}
		colorPrimaries = "bt709"
		colorTrc = "bt709"
		colorspace = "bt709"
	} else {
		// EXR: Use HDR encoding if PreserveHDR is true, otherwise SDR (like PNG)
		if config.PreserveHDR {
			// HDR: Use HLG transfer with bt709 primaries to preserve HDR range while matching PNG color
			pixFmt = "yuv420p10le" // 10-bit to preserve HDR range
			if config.UseAlpha {
				pixFmt = "yuva420p10le"
			}
			colorPrimaries = "bt709"  // bt709 primaries to match PNG color appearance
			colorTrc = "arib-std-b67" // HLG transfer function - preserves HDR range, works on SDR displays
			colorspace = "bt709"      // bt709 colorspace to match PNG
		} else {
			// SDR: Treat as SDR (like PNG) - encode as bt709
			pixFmt = "yuv420p"
			if config.UseAlpha {
				pixFmt = "yuva420p"
			}
			colorPrimaries = "bt709"
			colorTrc = "bt709"
			colorspace = "bt709"
		}
	}

	var codecArgs []string
	switch e.codec {
	case "libaom-av1":
		codecArgs = []string{"-crf", strconv.Itoa(CRFAV1), "-b:v", "0", "-tiles", "2x2", "-g", "240"}
	case "libvpx-vp9":
		// VP9 supports alpha and HDR; use good quality settings
		codecArgs = []string{"-crf", strconv.Itoa(CRFVP9), "-b:v", "0", "-row-mt", "1", "-g", "240"}
	default:
		// H.264: Use High 10 profile for HDR EXR (10-bit), High profile for SDR
		if config.SourceFormat != "png" && config.PreserveHDR {
			codecArgs = []string{"-preset", "veryslow", "-crf", strconv.Itoa(CRFH264), "-profile:v", "high10", "-level", "5.2", "-tune", "film", "-keyint_min", "24", "-g", "240", "-bf", "2", "-refs", "4"}
		} else {
			codecArgs = []string{"-preset", "veryslow", "-crf", strconv.Itoa(CRFH264), "-profile:v", "high", "-level", "5.2", "-tune", "film", "-keyint_min", "24", "-g", "240", "-bf", "2", "-refs", "4"}
		}
	}

	args := []string{
		"-y",
		"-f", "image2",
		"-start_number", fmt.Sprintf("%d", config.StartFrame),
		"-framerate", fmt.Sprintf("%.2f", config.FrameRate),
		"-i", config.InputPattern,
		"-c:v", e.codec,
		"-pix_fmt", pixFmt,
		"-r", fmt.Sprintf("%.2f", config.FrameRate),
		"-color_primaries", colorPrimaries,
		"-color_trc", colorTrc,
		"-colorspace", colorspace,
		"-color_range", "tv",
	}

	// Add a video filter for EXR: convert linear RGB based on the HDR setting.
	// PNG needs no filter as it is already in sRGB.
	if config.SourceFormat != "png" {
		var vf string
		if config.PreserveHDR {
			// HDR: Convert linear RGB -> sRGB -> HLG with bt709 primaries.
			// This preserves HDR range while matching PNG color appearance.
			vf = "format=gbrpf32le,zscale=transferin=8:transfer=13:primariesin=1:primaries=1:matrixin=0:matrix=1:rangein=full:range=full,zscale=transferin=13:transfer=18:primariesin=1:primaries=1:matrixin=1:matrix=1:rangein=full:range=full"
			if config.UseAlpha {
				vf += ",format=yuva420p10le"
			} else {
				vf += ",format=yuv420p10le"
			}
		} else {
			// SDR: Convert linear RGB (EXR) to sRGB (bt709) - a simple conversion like Krita does.
			// zscale: linear (8) -> sRGB (13) with bt709 primaries/matrix
			vf = "format=gbrpf32le,zscale=transferin=8:transfer=13:primariesin=1:primaries=1:matrixin=0:matrix=1:rangein=full:range=full"
			if config.UseAlpha {
				vf += ",format=yuva420p"
			} else {
				vf += ",format=yuv420p"
			}
		}
		args = append(args, "-vf", vf)
	}

	args = append(args, codecArgs...)
	args = append(args, "-pass", "1", "-f", "null", "/dev/null")

	log.Printf("Build Software Pass 1 command: ffmpeg %s", strings.Join(args, " "))
	cmd := exec.Command("ffmpeg", args...)
	cmd.Dir = config.WorkDir
	return cmd
}
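Taken together, a caller drives two-pass encoding by running the pass 1 command first and then the regular command with TwoPass set. A minimal sketch against the SoftwareEncoder API above (runTwoPass is a hypothetical helper, error handling trimmed):

// runTwoPass sketches how the two commands compose; both passes must share
// the same WorkDir so pass 2 can read the ffmpeg2pass-0.log stats file.
func runTwoPass(e *SoftwareEncoder, cfg *EncodeConfig) error {
	cfg.TwoPass = true
	// Pass 1 analyzes content and discards its video output (-f null /dev/null).
	if err := e.BuildPass1Command(cfg).Run(); err != nil {
		return fmt.Errorf("pass 1 failed: %w", err)
	}
	// Pass 2 reads the stats file and writes the real output file.
	return e.BuildCommand(cfg).Run()
}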
980 internal/runner/encoding/encoders_test.go Normal file
@@ -0,0 +1,980 @@
package encoding

import (
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
)

func TestSoftwareEncoder_BuildCommand_H264_EXR(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      true,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildCommand(config)
	if cmd == nil {
		t.Fatal("BuildCommand returned nil")
	}

	if !strings.Contains(cmd.Path, "ffmpeg") {
		t.Errorf("Expected command path to contain 'ffmpeg', got '%s'", cmd.Path)
	}

	if cmd.Dir != "/tmp" {
		t.Errorf("Expected work dir '/tmp', got '%s'", cmd.Dir)
	}

	args := cmd.Args[1:] // Skip "ffmpeg"
	argsStr := strings.Join(args, " ")

	// Check required arguments
	checks := []struct {
		name     string
		expected string
	}{
		{"-y flag", "-y"},
		{"image2 format", "-f image2"},
		{"start number", "-start_number 1"},
		{"framerate", "-framerate 24.00"},
		{"input pattern", "-i frame_%04d.exr"},
		{"codec", "-c:v libx264"},
		{"pixel format", "-pix_fmt yuv420p"}, // EXR now treated as SDR (like PNG)
		{"frame rate", "-r 24.00"},
		{"color primaries", "-color_primaries bt709"}, // EXR now uses bt709 (SDR)
		{"color trc", "-color_trc bt709"},             // EXR now uses bt709 (SDR)
		{"colorspace", "-colorspace bt709"},
		{"color range", "-color_range tv"},
		{"video filter", "-vf"},
		{"preset", "-preset veryslow"},
		{"crf", "-crf 15"},
		{"profile", "-profile:v high"}, // EXR now uses high profile (SDR)
		{"pass 2", "-pass 2"},
		{"output path", "output.mp4"},
	}

	for _, check := range checks {
		if !strings.Contains(argsStr, check.expected) {
			t.Errorf("Missing expected argument: %s", check.expected)
		}
	}

	// Verify the filter is present for EXR (linear RGB to sRGB conversion, like Krita does)
	if !strings.Contains(argsStr, "format=gbrpf32le") {
		t.Error("Expected format conversion filter for EXR source, but not found")
	}
	if !strings.Contains(argsStr, "zscale=transferin=8:transfer=13") {
		t.Error("Expected linear to sRGB conversion for EXR source, but not found")
	}
}

func TestSoftwareEncoder_BuildCommand_H264_PNG(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.png",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      true,
		SourceFormat: "png",
	}

	cmd := encoder.BuildCommand(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// PNG should NOT have a video filter
	if strings.Contains(argsStr, "-vf") {
		t.Error("PNG source should not have video filter, but -vf was found")
	}

	// Should still have all other required args
	if !strings.Contains(argsStr, "-c:v libx264") {
		t.Error("Missing codec argument")
	}
}

func TestSoftwareEncoder_BuildCommand_AV1_WithAlpha(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libaom-av1"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   100,
		FrameRate:    30.0,
		WorkDir:      "/tmp",
		UseAlpha:     true,
		TwoPass:      true,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildCommand(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Check alpha-specific settings
	if !strings.Contains(argsStr, "-pix_fmt yuva420p") {
		t.Error("Expected yuva420p pixel format for alpha, but not found")
	}

	// Check AV1-specific arguments
	av1Checks := []string{
		"-c:v libaom-av1",
		"-crf 30",
		"-b:v 0",
		"-tiles 2x2",
		"-g 240",
	}

	for _, check := range av1Checks {
		if !strings.Contains(argsStr, check) {
			t.Errorf("Missing AV1 argument: %s", check)
		}
	}

	// Check tonemap filter includes alpha format
	if !strings.Contains(argsStr, "format=yuva420p") {
		t.Error("Expected tonemap filter to output yuva420p for alpha, but not found")
	}
}

func TestSoftwareEncoder_BuildCommand_VP9(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libvpx-vp9"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.webm",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     true,
		TwoPass:      true,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildCommand(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Check VP9-specific arguments
	vp9Checks := []string{
		"-c:v libvpx-vp9",
		"-crf 30",
		"-b:v 0",
		"-row-mt 1",
		"-g 240",
	}

	for _, check := range vp9Checks {
		if !strings.Contains(argsStr, check) {
			t.Errorf("Missing VP9 argument: %s", check)
		}
	}
}

func TestSoftwareEncoder_BuildPass1Command(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      true,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildPass1Command(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Pass 1 should have -pass 1 and output to null
	if !strings.Contains(argsStr, "-pass 1") {
		t.Error("Pass 1 command should include '-pass 1'")
	}

	if !strings.Contains(argsStr, "-f null") {
		t.Error("Pass 1 command should include '-f null'")
	}

	if !strings.Contains(argsStr, "/dev/null") {
		t.Error("Pass 1 command should output to /dev/null")
	}

	// Should NOT have the output path
	if strings.Contains(argsStr, "output.mp4") {
		t.Error("Pass 1 command should not include output path")
	}
}

func TestSoftwareEncoder_BuildPass1Command_AV1(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libaom-av1"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      true,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildPass1Command(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Pass 1 should have -pass 1 and output to null
	if !strings.Contains(argsStr, "-pass 1") {
		t.Error("Pass 1 command should include '-pass 1'")
	}

	if !strings.Contains(argsStr, "-f null") {
		t.Error("Pass 1 command should include '-f null'")
	}

	if !strings.Contains(argsStr, "/dev/null") {
		t.Error("Pass 1 command should output to /dev/null")
	}

	// Check AV1-specific arguments in pass 1
	av1Checks := []string{
		"-c:v libaom-av1",
		"-crf 30",
		"-b:v 0",
		"-tiles 2x2",
		"-g 240",
	}

	for _, check := range av1Checks {
		if !strings.Contains(argsStr, check) {
			t.Errorf("Missing AV1 argument in pass 1: %s", check)
		}
	}
}

func TestSoftwareEncoder_BuildPass1Command_VP9(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libvpx-vp9"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.webm",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      true,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildPass1Command(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Pass 1 should have -pass 1 and output to null
	if !strings.Contains(argsStr, "-pass 1") {
		t.Error("Pass 1 command should include '-pass 1'")
	}

	if !strings.Contains(argsStr, "-f null") {
		t.Error("Pass 1 command should include '-f null'")
	}

	if !strings.Contains(argsStr, "/dev/null") {
		t.Error("Pass 1 command should output to /dev/null")
	}

	// Check VP9-specific arguments in pass 1
	vp9Checks := []string{
		"-c:v libvpx-vp9",
		"-crf 30",
		"-b:v 0",
		"-row-mt 1",
		"-g 240",
	}

	for _, check := range vp9Checks {
		if !strings.Contains(argsStr, check) {
			t.Errorf("Missing VP9 argument in pass 1: %s", check)
		}
	}
}

func TestSoftwareEncoder_BuildCommand_NoTwoPass(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      false,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildCommand(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Should NOT have the -pass flag when TwoPass is false
	if strings.Contains(argsStr, "-pass") {
		t.Error("Command should not include -pass flag when TwoPass is false")
	}
}

func TestSelector_SelectH264(t *testing.T) {
	selector := NewSelector()
	encoder := selector.SelectH264()

	if encoder == nil {
		t.Fatal("SelectH264 returned nil")
	}

	if encoder.Codec() != "libx264" {
		t.Errorf("Expected codec 'libx264', got '%s'", encoder.Codec())
	}

	if encoder.Name() != "software" {
		t.Errorf("Expected name 'software', got '%s'", encoder.Name())
	}
}

func TestSelector_SelectAV1(t *testing.T) {
	selector := NewSelector()
	encoder := selector.SelectAV1()

	if encoder == nil {
		t.Fatal("SelectAV1 returned nil")
	}

	if encoder.Codec() != "libaom-av1" {
		t.Errorf("Expected codec 'libaom-av1', got '%s'", encoder.Codec())
	}
}

func TestSelector_SelectVP9(t *testing.T) {
	selector := NewSelector()
	encoder := selector.SelectVP9()

	if encoder == nil {
		t.Fatal("SelectVP9 returned nil")
	}

	if encoder.Codec() != "libvpx-vp9" {
		t.Errorf("Expected codec 'libvpx-vp9', got '%s'", encoder.Codec())
	}
}

func TestTonemapFilter_WithAlpha(t *testing.T) {
	filter := tonemapFilter(true)

	// The filter should convert from gbrpf32le to yuva420p10le with a proper colorspace conversion
	if !strings.Contains(filter, "yuva420p10le") {
		t.Error("Tonemap filter with alpha should output yuva420p10le format for HDR")
	}

	if !strings.Contains(filter, "gbrpf32le") {
		t.Error("Tonemap filter should start with gbrpf32le format")
	}

	// Should use zscale for the colorspace conversion from linear RGB
	if !strings.Contains(filter, "zscale") {
		t.Error("Tonemap filter should use zscale for colorspace conversion")
	}

	// Check for the HLG transfer function (numeric value 18 or string arib-std-b67)
	if !strings.Contains(filter, "transfer=18") && !strings.Contains(filter, "transfer=arib-std-b67") {
		t.Error("Tonemap filter should use HLG transfer function (18 or arib-std-b67)")
	}
}

func TestTonemapFilter_WithoutAlpha(t *testing.T) {
	filter := tonemapFilter(false)

	// The filter should convert from gbrpf32le to yuv420p10le with a proper colorspace conversion
	if !strings.Contains(filter, "yuv420p10le") {
		t.Error("Tonemap filter without alpha should output yuv420p10le format for HDR")
	}

	if strings.Contains(filter, "yuva420p") {
		t.Error("Tonemap filter without alpha should not output yuva420p format")
	}

	if !strings.Contains(filter, "gbrpf32le") {
		t.Error("Tonemap filter should start with gbrpf32le format")
	}

	// Should use zscale for the colorspace conversion from linear RGB
	if !strings.Contains(filter, "zscale") {
		t.Error("Tonemap filter should use zscale for colorspace conversion")
	}

	// Check for the HLG transfer function (numeric value 18 or string arib-std-b67)
	if !strings.Contains(filter, "transfer=18") && !strings.Contains(filter, "transfer=arib-std-b67") {
		t.Error("Tonemap filter should use HLG transfer function (18 or arib-std-b67)")
	}
}

func TestSoftwareEncoder_Available(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	if !encoder.Available() {
		t.Error("Software encoder should always be available")
	}
}

func TestEncodeConfig_DefaultSourceFormat(t *testing.T) {
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      false,
		// SourceFormat not set; it should default to the empty string (treated as EXR)
	}

	encoder := &SoftwareEncoder{codec: "libx264"}
	cmd := encoder.BuildCommand(config)
	args := strings.Join(cmd.Args[1:], " ")

	// Should still have the tonemap filter when SourceFormat is empty (defaults to EXR behavior)
	if !strings.Contains(args, "-vf") {
		t.Error("Empty SourceFormat should default to EXR behavior with tonemap filter")
	}
}

func TestCommandOrder(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      true,
		SourceFormat: "exr",
	}

	cmd := encoder.BuildCommand(config)
	args := cmd.Args[1:]

	// Verify argument order: input should come before codec
	inputIdx := -1
	codecIdx := -1
	vfIdx := -1

	for i, arg := range args {
		if arg == "-i" && i+1 < len(args) && args[i+1] == "frame_%04d.exr" {
			inputIdx = i
		}
		if arg == "-c:v" && i+1 < len(args) && args[i+1] == "libx264" {
			codecIdx = i
		}
		if arg == "-vf" {
			vfIdx = i
		}
	}

	if inputIdx == -1 {
		t.Fatal("Input pattern not found in command")
	}
	if codecIdx == -1 {
		t.Fatal("Codec not found in command")
	}
	if vfIdx == -1 {
		t.Fatal("Video filter not found in command")
	}

	// Input should come before codec
	if inputIdx >= codecIdx {
		t.Error("Input pattern should come before codec in command")
	}

	// The video filter should come after the input; in practice it follows the codec
	// and colorspace metadata but precedes the codec-specific args.
	if vfIdx <= inputIdx {
		t.Error("Video filter should come after input")
	}
}

func TestCommand_ColorspaceMetadata(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      false,
		SourceFormat: "exr",
		PreserveHDR:  false, // SDR encoding
	}

	cmd := encoder.BuildCommand(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Verify all SDR colorspace metadata is present for EXR (SDR encoding)
	colorspaceArgs := []string{
		"-color_primaries bt709", // EXR uses bt709 (SDR)
		"-color_trc bt709",       // EXR uses bt709 (SDR)
		"-colorspace bt709",
		"-color_range tv",
	}

	for _, arg := range colorspaceArgs {
		if !strings.Contains(argsStr, arg) {
			t.Errorf("Missing colorspace metadata: %s", arg)
		}
	}

	// Verify the SDR pixel format
	if !strings.Contains(argsStr, "-pix_fmt yuv420p") {
		t.Error("SDR encoding should use yuv420p pixel format")
	}

	// Verify the H.264 high profile (not high10)
	if !strings.Contains(argsStr, "-profile:v high") {
		t.Error("SDR encoding should use high profile")
	}
	if strings.Contains(argsStr, "-profile:v high10") {
		t.Error("SDR encoding should not use high10 profile")
	}
}

func TestCommand_HDR_ColorspaceMetadata(t *testing.T) {
	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: "frame_%04d.exr",
		OutputPath:   "output.mp4",
		StartFrame:   1,
		FrameRate:    24.0,
		WorkDir:      "/tmp",
		UseAlpha:     false,
		TwoPass:      false,
		SourceFormat: "exr",
		PreserveHDR:  true, // HDR encoding
	}

	cmd := encoder.BuildCommand(config)
	args := cmd.Args[1:]
	argsStr := strings.Join(args, " ")

	// Verify all HDR colorspace metadata is present for EXR (HDR encoding)
	colorspaceArgs := []string{
		"-color_primaries bt709",  // bt709 primaries to match PNG color appearance
		"-color_trc arib-std-b67", // HLG transfer function for HDR/SDR compatibility
		"-colorspace bt709",       // bt709 colorspace to match PNG
		"-color_range tv",
	}

	for _, arg := range colorspaceArgs {
		if !strings.Contains(argsStr, arg) {
			t.Errorf("Missing HDR colorspace metadata: %s", arg)
		}
	}

	// Verify the HDR pixel format (10-bit)
	if !strings.Contains(argsStr, "-pix_fmt yuv420p10le") {
		t.Error("HDR encoding should use yuv420p10le pixel format")
	}

	// Verify the H.264 high10 profile (for 10-bit)
	if !strings.Contains(argsStr, "-profile:v high10") {
		t.Error("HDR encoding should use high10 profile")
	}

	// Verify the HDR filter chain (linear -> sRGB -> HLG)
	if !strings.Contains(argsStr, "-vf") {
		t.Fatal("HDR encoding should have video filter")
	}
	vfIdx := -1
	for i, arg := range args {
		if arg == "-vf" && i+1 < len(args) {
			vfIdx = i + 1
			break
		}
	}
	if vfIdx == -1 {
		t.Fatal("Video filter not found")
	}
	filter := args[vfIdx]
	if !strings.Contains(filter, "transfer=18") {
		t.Error("HDR filter should convert to HLG (transfer=18)")
	}
	if !strings.Contains(filter, "yuv420p10le") {
		t.Error("HDR filter should output yuv420p10le format")
	}
}

// Integration tests using example files
func TestIntegration_Encode_EXR_H264(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Check if the example file exists
	exampleDir := filepath.Join("..", "..", "..", "examples")
	exrFile := filepath.Join(exampleDir, "frame_0800.exr")
	if _, err := os.Stat(exrFile); os.IsNotExist(err) {
		t.Skipf("Example file not found: %s", exrFile)
	}

	// Get absolute paths
	workspaceRoot, err := filepath.Abs(filepath.Join("..", "..", ".."))
	if err != nil {
		t.Fatalf("Failed to get workspace root: %v", err)
	}
	exampleDirAbs, err := filepath.Abs(exampleDir)
	if err != nil {
		t.Fatalf("Failed to get example directory: %v", err)
	}
	tmpDir := filepath.Join(workspaceRoot, "tmp")
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		t.Fatalf("Failed to create tmp directory: %v", err)
	}

	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: filepath.Join(exampleDirAbs, "frame_%04d.exr"),
		OutputPath:   filepath.Join(tmpDir, "test_exr_h264.mp4"),
		StartFrame:   800,
		FrameRate:    24.0,
		WorkDir:      tmpDir,
		UseAlpha:     false,
		TwoPass:      false, // Use single pass for faster testing
		SourceFormat: "exr",
	}

	// Build and run the command
	cmd := encoder.BuildCommand(config)
	if cmd == nil {
		t.Fatal("BuildCommand returned nil")
	}

	// Capture stderr to see what went wrong
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Errorf("FFmpeg command failed: %v\nCommand output: %s", err, string(output))
		return
	}

	// Verify the output file was created
	if _, err := os.Stat(config.OutputPath); os.IsNotExist(err) {
		t.Errorf("Output file was not created: %s\nCommand output: %s", config.OutputPath, string(output))
	} else {
		t.Logf("Successfully created output file: %s", config.OutputPath)
		// Verify the file has content
		info, _ := os.Stat(config.OutputPath)
		if info.Size() == 0 {
			t.Errorf("Output file was created but is empty\nCommand output: %s", string(output))
		} else {
			t.Logf("Output file size: %d bytes", info.Size())
		}
	}
}

func TestIntegration_Encode_PNG_H264(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Check if the example file exists
	exampleDir := filepath.Join("..", "..", "..", "examples")
	pngFile := filepath.Join(exampleDir, "frame_0800.png")
	if _, err := os.Stat(pngFile); os.IsNotExist(err) {
		t.Skipf("Example file not found: %s", pngFile)
	}

	// Get absolute paths
	workspaceRoot, err := filepath.Abs(filepath.Join("..", "..", ".."))
	if err != nil {
		t.Fatalf("Failed to get workspace root: %v", err)
	}
	exampleDirAbs, err := filepath.Abs(exampleDir)
	if err != nil {
		t.Fatalf("Failed to get example directory: %v", err)
	}
	tmpDir := filepath.Join(workspaceRoot, "tmp")
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		t.Fatalf("Failed to create tmp directory: %v", err)
	}

	encoder := &SoftwareEncoder{codec: "libx264"}
	config := &EncodeConfig{
		InputPattern: filepath.Join(exampleDirAbs, "frame_%04d.png"),
		OutputPath:   filepath.Join(tmpDir, "test_png_h264.mp4"),
		StartFrame:   800,
		FrameRate:    24.0,
		WorkDir:      tmpDir,
		UseAlpha:     false,
		TwoPass:      false, // Use single pass for faster testing
		SourceFormat: "png",
	}

	// Build and run the command
	cmd := encoder.BuildCommand(config)
	if cmd == nil {
		t.Fatal("BuildCommand returned nil")
	}

	// Verify no video filter is used for PNG
	argsStr := strings.Join(cmd.Args, " ")
	if strings.Contains(argsStr, "-vf") {
		t.Error("PNG encoding should not use video filter, but -vf was found in command")
	}

	// Run the command
	cmdOutput, err := cmd.CombinedOutput()
	if err != nil {
		t.Errorf("FFmpeg command failed: %v\nCommand output: %s", err, string(cmdOutput))
		return
	}

	// Verify the output file was created
	if _, err := os.Stat(config.OutputPath); os.IsNotExist(err) {
		t.Errorf("Output file was not created: %s\nCommand output: %s", config.OutputPath, string(cmdOutput))
	} else {
		t.Logf("Successfully created output file: %s", config.OutputPath)
		info, _ := os.Stat(config.OutputPath)
		if info.Size() == 0 {
			t.Error("Output file was created but is empty")
		} else {
			t.Logf("Output file size: %d bytes", info.Size())
		}
	}
}

func TestIntegration_Encode_EXR_VP9(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Check if the example file exists
	exampleDir := filepath.Join("..", "..", "..", "examples")
	exrFile := filepath.Join(exampleDir, "frame_0800.exr")
	if _, err := os.Stat(exrFile); os.IsNotExist(err) {
		t.Skipf("Example file not found: %s", exrFile)
	}

	// Check if the VP9 encoder is available
	checkCmd := exec.Command("ffmpeg", "-hide_banner", "-encoders")
	checkOutput, err := checkCmd.CombinedOutput()
	if err != nil || !strings.Contains(string(checkOutput), "libvpx-vp9") {
		t.Skip("VP9 encoder (libvpx-vp9) not available in ffmpeg")
	}

	// Get absolute paths
	workspaceRoot, err := filepath.Abs(filepath.Join("..", "..", ".."))
	if err != nil {
		t.Fatalf("Failed to get workspace root: %v", err)
	}
	exampleDirAbs, err := filepath.Abs(exampleDir)
	if err != nil {
		t.Fatalf("Failed to get example directory: %v", err)
	}
	tmpDir := filepath.Join(workspaceRoot, "tmp")
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		t.Fatalf("Failed to create tmp directory: %v", err)
	}

	encoder := &SoftwareEncoder{codec: "libvpx-vp9"}
	config := &EncodeConfig{
		InputPattern: filepath.Join(exampleDirAbs, "frame_%04d.exr"),
		OutputPath:   filepath.Join(tmpDir, "test_exr_vp9.webm"),
		StartFrame:   800,
		FrameRate:    24.0,
		WorkDir:      tmpDir,
		UseAlpha:     false,
		TwoPass:      false, // Use single pass for faster testing
		SourceFormat: "exr",
	}

	// Build and run the command
	cmd := encoder.BuildCommand(config)
	if cmd == nil {
		t.Fatal("BuildCommand returned nil")
	}

	// Capture stderr to see what went wrong
	output, err := cmd.CombinedOutput()
	if err != nil {
		t.Errorf("FFmpeg command failed: %v\nCommand output: %s", err, string(output))
		return
	}

	// Verify the output file was created
	if _, err := os.Stat(config.OutputPath); os.IsNotExist(err) {
		t.Errorf("Output file was not created: %s\nCommand output: %s", config.OutputPath, string(output))
	} else {
		t.Logf("Successfully created output file: %s", config.OutputPath)
		// Verify the file has content
		info, _ := os.Stat(config.OutputPath)
		if info.Size() == 0 {
			t.Errorf("Output file was created but is empty\nCommand output: %s", string(output))
		} else {
			t.Logf("Output file size: %d bytes", info.Size())
		}
	}
}

func TestIntegration_Encode_EXR_AV1(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Check if the example file exists
	exampleDir := filepath.Join("..", "..", "..", "examples")
	exrFile := filepath.Join(exampleDir, "frame_0800.exr")
	if _, err := os.Stat(exrFile); os.IsNotExist(err) {
		t.Skipf("Example file not found: %s", exrFile)
	}

	// Check if the AV1 encoder is available
	checkCmd := exec.Command("ffmpeg", "-hide_banner", "-encoders")
	output, err := checkCmd.CombinedOutput()
	if err != nil || !strings.Contains(string(output), "libaom-av1") {
		t.Skip("AV1 encoder (libaom-av1) not available in ffmpeg")
	}

	// Get absolute paths
	workspaceRoot, err := filepath.Abs(filepath.Join("..", "..", ".."))
	if err != nil {
		t.Fatalf("Failed to get workspace root: %v", err)
	}
	exampleDirAbs, err := filepath.Abs(exampleDir)
	if err != nil {
		t.Fatalf("Failed to get example directory: %v", err)
	}
	tmpDir := filepath.Join(workspaceRoot, "tmp")
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		t.Fatalf("Failed to create tmp directory: %v", err)
	}

	encoder := &SoftwareEncoder{codec: "libaom-av1"}
	config := &EncodeConfig{
		InputPattern: filepath.Join(exampleDirAbs, "frame_%04d.exr"),
		OutputPath:   filepath.Join(tmpDir, "test_exr_av1.mp4"),
		StartFrame:   800,
		FrameRate:    24.0,
		WorkDir:      tmpDir,
		UseAlpha:     false,
		TwoPass:      false,
		SourceFormat: "exr",
	}

	// Build and run the command
	cmd := encoder.BuildCommand(config)
	cmdOutput, err := cmd.CombinedOutput()
	if err != nil {
		t.Errorf("FFmpeg command failed: %v\nCommand output: %s", err, string(cmdOutput))
		return
	}

	// Verify the output file was created
	if _, err := os.Stat(config.OutputPath); os.IsNotExist(err) {
		t.Errorf("Output file was not created: %s\nCommand output: %s", config.OutputPath, string(cmdOutput))
	} else {
		t.Logf("Successfully created AV1 output file: %s", config.OutputPath)
		info, _ := os.Stat(config.OutputPath)
		if info.Size() == 0 {
			t.Errorf("Output file was created but is empty\nCommand output: %s", string(cmdOutput))
		} else {
			t.Logf("Output file size: %d bytes", info.Size())
		}
	}
}

func TestIntegration_Encode_EXR_VP9_WithAlpha(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Check if the example file exists
	exampleDir := filepath.Join("..", "..", "..", "examples")
	exrFile := filepath.Join(exampleDir, "frame_0800.exr")
	if _, err := os.Stat(exrFile); os.IsNotExist(err) {
		t.Skipf("Example file not found: %s", exrFile)
	}

	// Check if the VP9 encoder is available
	checkCmd := exec.Command("ffmpeg", "-hide_banner", "-encoders")
	output, err := checkCmd.CombinedOutput()
	if err != nil || !strings.Contains(string(output), "libvpx-vp9") {
		t.Skip("VP9 encoder (libvpx-vp9) not available in ffmpeg")
	}

	// Get absolute paths
	workspaceRoot, err := filepath.Abs(filepath.Join("..", "..", ".."))
	if err != nil {
		t.Fatalf("Failed to get workspace root: %v", err)
	}
	exampleDirAbs, err := filepath.Abs(exampleDir)
	if err != nil {
		t.Fatalf("Failed to get example directory: %v", err)
	}
	tmpDir := filepath.Join(workspaceRoot, "tmp")
	if err := os.MkdirAll(tmpDir, 0755); err != nil {
		t.Fatalf("Failed to create tmp directory: %v", err)
	}

	encoder := &SoftwareEncoder{codec: "libvpx-vp9"}
	config := &EncodeConfig{
		InputPattern: filepath.Join(exampleDirAbs, "frame_%04d.exr"),
		OutputPath:   filepath.Join(tmpDir, "test_exr_vp9_alpha.webm"),
		StartFrame:   800,
		FrameRate:    24.0,
		WorkDir:      tmpDir,
		UseAlpha:     true,  // Test with alpha
		TwoPass:      false, // Use single pass for faster testing
		SourceFormat: "exr",
	}

	// Build and run the command
	cmd := encoder.BuildCommand(config)
	if cmd == nil {
		t.Fatal("BuildCommand returned nil")
	}

	// Capture stderr to see what went wrong
	cmdOutput, err := cmd.CombinedOutput()
	if err != nil {
		t.Errorf("FFmpeg command failed: %v\nCommand output: %s", err, string(cmdOutput))
		return
	}

	// Verify the output file was created
	if _, err := os.Stat(config.OutputPath); os.IsNotExist(err) {
		t.Errorf("Output file was not created: %s\nCommand output: %s", config.OutputPath, string(cmdOutput))
	} else {
		t.Logf("Successfully created VP9 output file with alpha: %s", config.OutputPath)
		info, _ := os.Stat(config.OutputPath)
		if info.Size() == 0 {
			t.Errorf("Output file was created but is empty\nCommand output: %s", string(cmdOutput))
		} else {
			t.Logf("Output file size: %d bytes", info.Size())
		}
	}
}

// Helper function to copy files
func copyFile(src, dst string) error {
	data, err := os.ReadFile(src)
	if err != nil {
		return err
	}
	return os.WriteFile(dst, data, 0644)
}
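The tests above split into fast unit checks and ffmpeg-backed integration tests; the integration tests skip themselves under short mode or when the example frames are missing, so `go test -short ./...` stays hermetic while something like `go test ./internal/runner/encoding -run TestIntegration -v` (an illustrative invocation) exercises the real encoders.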
361 internal/runner/runner.go Normal file
@@ -0,0 +1,361 @@
// Package runner provides the Jiggablend render runner.
package runner

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"log"
	"net"
	"os"
	"os/exec"
	"strings"
	"sync"
	"time"

	"jiggablend/internal/runner/api"
	"jiggablend/internal/runner/blender"
	"jiggablend/internal/runner/encoding"
	"jiggablend/internal/runner/tasks"
	"jiggablend/internal/runner/workspace"
	"jiggablend/pkg/executils"
	"jiggablend/pkg/types"
)

// Runner is the main render runner.
type Runner struct {
	id       int64
	name     string
	hostname string

	manager   *api.ManagerClient
	workspace *workspace.Manager
	blender   *blender.Manager
	encoder   *encoding.Selector
	processes *executils.ProcessTracker

	processors map[string]tasks.Processor
	stopChan   chan struct{}

	fingerprint   string
	fingerprintMu sync.RWMutex
}

// New creates a new runner.
func New(managerURL, name, hostname string) *Runner {
	manager := api.NewManagerClient(managerURL)

	r := &Runner{
		name:       name,
		hostname:   hostname,
		manager:    manager,
		processes:  executils.NewProcessTracker(),
		stopChan:   make(chan struct{}),
		processors: make(map[string]tasks.Processor),
	}

	// Generate fingerprint
	r.generateFingerprint()

	return r
}

// CheckRequiredTools verifies that required external tools are available.
func (r *Runner) CheckRequiredTools() error {
	if err := exec.Command("zstd", "--version").Run(); err != nil {
		return fmt.Errorf("zstd not found - required for compressed blend file support. Install with: apt install zstd")
	}
	log.Printf("Found zstd for compressed blend file support")

	if err := exec.Command("xvfb-run", "--help").Run(); err != nil {
		return fmt.Errorf("xvfb-run not found - required for headless Blender rendering. Install with: apt install xvfb")
	}
	log.Printf("Found xvfb-run for headless rendering (Blender without the -b option)")
	return nil
}

var cachedCapabilities map[string]interface{}

// ProbeCapabilities detects hardware capabilities.
func (r *Runner) ProbeCapabilities() map[string]interface{} {
	if cachedCapabilities != nil {
		return cachedCapabilities
	}

	caps := make(map[string]interface{})

	// Record whether ffmpeg is available for encoding work
	caps["ffmpeg"] = exec.Command("ffmpeg", "-version").Run() == nil

	cachedCapabilities = caps
	return caps
}

// Register registers the runner with the manager.
func (r *Runner) Register(apiKey string) (int64, error) {
	caps := r.ProbeCapabilities()

	id, err := r.manager.Register(r.name, r.hostname, caps, apiKey, r.GetFingerprint())
	if err != nil {
		return 0, err
	}

	r.id = id

	// Initialize workspace after registration
	r.workspace = workspace.NewManager(r.name)

	// Initialize blender manager
	r.blender = blender.NewManager(r.manager, r.workspace.BaseDir())

	// Initialize encoder selector
	r.encoder = encoding.NewSelector()

	// Register task processors
	r.processors["render"] = tasks.NewRenderProcessor()
	r.processors["encode"] = tasks.NewEncodeProcessor()

	return id, nil
}

// Start starts the job polling loop.
func (r *Runner) Start(pollInterval time.Duration) {
	log.Printf("Starting job polling loop (interval: %v)", pollInterval)

	for {
		select {
		case <-r.stopChan:
			log.Printf("Stopping job polling loop")
			return
		default:
		}

		log.Printf("Polling for next job (runner ID: %d)", r.id)
		job, err := r.manager.PollNextJob()
		if err != nil {
			log.Printf("Error polling for job: %v", err)
			time.Sleep(pollInterval)
			continue
		}

		if job == nil {
			log.Printf("No job available, sleeping for %v", pollInterval)
			time.Sleep(pollInterval)
			continue
		}

		log.Printf("Received job assignment: task=%d, job=%d, type=%s",
			job.Task.TaskID, job.Task.JobID, job.Task.TaskType)

		if err := r.executeJob(job); err != nil {
			log.Printf("Error processing job: %v", err)
		}
	}
}

// Stop stops the runner.
func (r *Runner) Stop() {
	close(r.stopChan)
}

// KillAllProcesses kills all running processes.
func (r *Runner) KillAllProcesses() {
	log.Printf("Killing all running processes...")
	killedCount := r.processes.KillAll()

	// Allocated encoder devices are released by the encoding package itself;
	// no explicit device-pool cleanup is needed here.

	log.Printf("Killed %d process(es)", killedCount)
}

// Cleanup removes the workspace directory.
func (r *Runner) Cleanup() {
	if r.workspace != nil {
		r.workspace.Cleanup()
	}
}

// executeJob handles a job using a per-job WebSocket connection.
func (r *Runner) executeJob(job *api.NextJobResponse) (err error) {
	// Recover from panics to prevent runner process crashes during task execution
	defer func() {
		if rec := recover(); rec != nil {
			log.Printf("Task execution panicked: %v", rec)
			err = fmt.Errorf("task execution panicked: %v", rec)
		}
	}()

	// Connect to the job WebSocket (no runnerID needed - authentication handles it)
	jobConn := api.NewJobConnection()
	if err := jobConn.Connect(r.manager.GetBaseURL(), job.JobPath, job.JobToken); err != nil {
		return fmt.Errorf("failed to connect job WebSocket: %w", err)
	}
	defer jobConn.Close()

	log.Printf("Job WebSocket authenticated for task %d", job.Task.TaskID)

	// Create task context
	workDir := r.workspace.JobDir(job.Task.JobID)
	ctx := tasks.NewContext(
		job.Task.TaskID,
		job.Task.JobID,
		job.Task.JobName,
		job.Task.Frame,
		job.Task.TaskType,
		workDir,
		job.JobToken,
		job.Task.Metadata,
		r.manager,
		jobConn,
		r.workspace,
		r.blender,
		r.encoder,
		r.processes,
	)

	ctx.Info(fmt.Sprintf("Task assignment received (job: %d, type: %s)",
		job.Task.JobID, job.Task.TaskType))

	// Get the processor for the task type
	processor, ok := r.processors[job.Task.TaskType]
	if !ok {
		return fmt.Errorf("unknown task type: %s", job.Task.TaskType)
	}

	// Process the task
	var processErr error
	switch job.Task.TaskType {
	case "render":
		// Render needs an explicit upload step: the render task does not upload
		// its frames itself, so we do it manually here.
		// TODO: consider making this work like the encode task.
		// Download context
		contextPath := job.JobPath + "/context.tar"
		if err := r.downloadContext(job.Task.JobID, contextPath, job.JobToken); err != nil {
			jobConn.Log(job.Task.TaskID, types.LogLevelError, fmt.Sprintf("Failed to download context: %v", err))
			jobConn.Complete(job.Task.TaskID, false, fmt.Errorf("failed to download context: %v", err))
			return fmt.Errorf("failed to download context: %w", err)
		}
		processErr = processor.Process(ctx)
		if processErr == nil {
			processErr = r.uploadOutputs(ctx, job)
		}
	case "encode":
		// Encode uploads the finished video itself, so no separate upload step is needed.
		processErr = processor.Process(ctx)
	default:
		return fmt.Errorf("unknown task type: %s", job.Task.TaskType)
	}

	if processErr != nil {
		ctx.Error(fmt.Sprintf("Task failed: %v", processErr))
		ctx.Complete(false, processErr)
		return processErr
	}

	ctx.Complete(true, nil)
	return nil
}

func (r *Runner) downloadContext(jobID int64, contextPath, jobToken string) error {
	reader, err := r.manager.DownloadContext(contextPath, jobToken)
	if err != nil {
		return err
	}
	defer reader.Close()

	jobDir := r.workspace.JobDir(jobID)
	return workspace.ExtractTar(reader, jobDir)
}

func (r *Runner) uploadOutputs(ctx *tasks.Context, job *api.NextJobResponse) error {
	outputDir := ctx.WorkDir + "/output"
	uploadPath := fmt.Sprintf("/api/runner/jobs/%d/upload", job.Task.JobID)

	entries, err := os.ReadDir(outputDir)
	if err != nil {
		return fmt.Errorf("failed to read output directory: %w", err)
	}

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		filePath := outputDir + "/" + entry.Name()
		if err := r.manager.UploadFile(uploadPath, job.JobToken, filePath); err != nil {
			log.Printf("Failed to upload %s: %v", filePath, err)
		} else {
			ctx.OutputUploaded(entry.Name())
		}
	}

	return nil
}

// generateFingerprint creates a unique hardware fingerprint.
func (r *Runner) generateFingerprint() {
	r.fingerprintMu.Lock()
	defer r.fingerprintMu.Unlock()

	var components []string
	components = append(components, r.hostname)

	if machineID, err := os.ReadFile("/etc/machine-id"); err == nil {
		components = append(components, strings.TrimSpace(string(machineID)))
	}

	if productUUID, err := os.ReadFile("/sys/class/dmi/id/product_uuid"); err == nil {
		components = append(components, strings.TrimSpace(string(productUUID)))
	}

	if macAddr, err := r.getMACAddress(); err == nil {
		components = append(components, macAddr)
	}

	// Fall back to PID and timestamp if only the hostname was collected
	if len(components) <= 1 {
		components = append(components, fmt.Sprintf("%d", os.Getpid()))
		components = append(components, fmt.Sprintf("%d", time.Now().Unix()))
	}

	h := sha256.New()
	for _, comp := range components {
		h.Write([]byte(comp))
		h.Write([]byte{0}) // NUL separator keeps component boundaries unambiguous
	}

	r.fingerprint = hex.EncodeToString(h.Sum(nil))
}

func (r *Runner) getMACAddress() (string, error) {
	interfaces, err := net.Interfaces()
	if err != nil {
		return "", err
	}

	for _, iface := range interfaces {
		if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 {
			continue
		}
		if len(iface.HardwareAddr) == 0 {
			continue
		}
		return iface.HardwareAddr.String(), nil
	}

	return "", fmt.Errorf("no suitable network interface found")
}

// GetFingerprint returns the runner's hardware fingerprint.
func (r *Runner) GetFingerprint() string {
	r.fingerprintMu.RLock()
	defer r.fingerprintMu.RUnlock()
	return r.fingerprint
}

// GetID returns the runner ID.
func (r *Runner) GetID() int64 {
	return r.id
}
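For orientation, a command-line entry point might wire the runner up roughly like this. This is a minimal sketch against the exported API above; the manager URL, runner name, API-key source, and poll interval are all illustrative assumptions, not the actual cmd/ entry point:

package main

import (
	"log"
	"os"
	"time"

	"jiggablend/internal/runner"
)

func main() {
	hostname, _ := os.Hostname()
	// Placeholder URL, name, and key source for illustration only.
	r := runner.New("http://localhost:8080", "runner-1", hostname)
	if err := r.CheckRequiredTools(); err != nil {
		log.Fatal(err)
	}
	if _, err := r.Register(os.Getenv("RUNNER_API_KEY")); err != nil {
		log.Fatalf("register failed: %v", err)
	}
	defer r.Cleanup()
	r.Start(5 * time.Second) // blocks, polling for jobs until Stop is called
}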
588 internal/runner/tasks/encode.go Normal file
@@ -0,0 +1,588 @@
package tasks

import (
	"bufio"
	"errors"
	"fmt"
	"log"
	"math"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"sort"
	"strings"

	"jiggablend/internal/runner/encoding"
)

// EncodeProcessor handles encode tasks.
type EncodeProcessor struct{}

// NewEncodeProcessor creates a new encode processor.
func NewEncodeProcessor() *EncodeProcessor {
	return &EncodeProcessor{}
}

// Process executes an encode task.
func (p *EncodeProcessor) Process(ctx *Context) error {
	ctx.Info(fmt.Sprintf("Starting encode task: job %d", ctx.JobID))
	log.Printf("Processing encode task %d for job %d", ctx.TaskID, ctx.JobID)

	// Create a temporary work directory
	workDir, err := ctx.Workspace.CreateVideoDir(ctx.JobID)
	if err != nil {
		return fmt.Errorf("failed to create work directory: %w", err)
	}
	defer func() {
		if err := ctx.Workspace.CleanupVideoDir(ctx.JobID); err != nil {
			log.Printf("Warning: Failed to cleanup encode work directory: %v", err)
		}
	}()

	// Get output format and frame rate
	outputFormat := ctx.GetOutputFormat()
	if outputFormat == "" {
		outputFormat = "EXR_264_MP4"
	}
	frameRate := ctx.GetFrameRate()

	ctx.Info(fmt.Sprintf("Encode: detected output format '%s'", outputFormat))
	ctx.Info(fmt.Sprintf("Encode: using frame rate %.2f fps", frameRate))

	// Get job files
	files, err := ctx.Manager.GetJobFiles(ctx.JobID)
	if err != nil {
		ctx.Error(fmt.Sprintf("Failed to get job files: %v", err))
		return fmt.Errorf("failed to get job files: %w", err)
	}

	ctx.Info(fmt.Sprintf("GetJobFiles returned %d total files for job %d", len(files), ctx.JobID))

	// Log all files for debugging
	for _, file := range files {
		ctx.Info(fmt.Sprintf("File: %s (type: %s, size: %d)", file.FileName, file.FileType, file.FileSize))
	}

	// Source format: all current encode output formats consume EXR frames
	sourceFormat := "exr"
	fileExt := ".exr"

	// Find and deduplicate frame files (EXR or PNG)
	frameFileSet := make(map[string]bool)
	var frameFilesList []string
	for _, file := range files {
		if file.FileType == "output" && strings.HasSuffix(strings.ToLower(file.FileName), fileExt) {
			// Deduplicate by filename
			if !frameFileSet[file.FileName] {
				frameFileSet[file.FileName] = true
				frameFilesList = append(frameFilesList, file.FileName)
			}
		}
	}

	if len(frameFilesList) == 0 {
		// Log why no files matched (deduplicate for error reporting)
		outputFileSet := make(map[string]bool)
		frameFilesOtherTypeSet := make(map[string]bool)
		var outputFiles []string
		var frameFilesOtherType []string

		for _, file := range files {
			if file.FileType == "output" {
				if !outputFileSet[file.FileName] {
					outputFileSet[file.FileName] = true
					outputFiles = append(outputFiles, file.FileName)
				}
			}
			if strings.HasSuffix(strings.ToLower(file.FileName), fileExt) {
				key := fmt.Sprintf("%s (type: %s)", file.FileName, file.FileType)
				if !frameFilesOtherTypeSet[key] {
					frameFilesOtherTypeSet[key] = true
					frameFilesOtherType = append(frameFilesOtherType, key)
				}
			}
		}
		ctx.Error(fmt.Sprintf("no %s frame files found for encode: found %d total files, %d unique output files, %d unique %s files (with other types)", strings.ToUpper(fileExt[1:]), len(files), len(outputFiles), len(frameFilesOtherType), strings.ToUpper(fileExt[1:])))
		if len(outputFiles) > 0 {
			ctx.Error(fmt.Sprintf("Output files found: %v", outputFiles))
		}
		if len(frameFilesOtherType) > 0 {
			ctx.Error(fmt.Sprintf("%s files with wrong type: %v", strings.ToUpper(fileExt[1:]), frameFilesOtherType))
		}
		return fmt.Errorf("no %s frame files found for encode", strings.ToUpper(fileExt[1:]))
	}

	ctx.Info(fmt.Sprintf("Found %d %s frames for encode", len(frameFilesList), strings.ToUpper(fileExt[1:])))

	// Download frames
	ctx.Info(fmt.Sprintf("Downloading %d %s frames for encode...", len(frameFilesList), strings.ToUpper(fileExt[1:])))

	var frameFiles []string
	for i, fileName := range frameFilesList {
		ctx.Info(fmt.Sprintf("Downloading frame %d/%d: %s", i+1, len(frameFilesList), fileName))
		framePath := filepath.Join(workDir, fileName)
		if err := ctx.Manager.DownloadFrame(ctx.JobID, fileName, framePath); err != nil {
			ctx.Error(fmt.Sprintf("Failed to download %s frame %s: %v", strings.ToUpper(fileExt[1:]), fileName, err))
			log.Printf("Failed to download %s frame for encode %s: %v", strings.ToUpper(fileExt[1:]), fileName, err)
			continue
		}
		ctx.Info(fmt.Sprintf("Successfully downloaded frame %d/%d: %s", i+1, len(frameFilesList), fileName))
		frameFiles = append(frameFiles, framePath)
	}

	if len(frameFiles) == 0 {
		err := fmt.Errorf("failed to download any %s frames for encode", strings.ToUpper(fileExt[1:]))
		ctx.Error(err.Error())
		return err
	}

	sort.Strings(frameFiles)
	ctx.Info(fmt.Sprintf("Downloaded %d frames", len(frameFiles)))

	// Check if EXR files have an alpha channel and HDR content (only for EXR source format)
	hasAlpha := false
	hasHDR := false
	if sourceFormat == "exr" {
		// Check the first frame for alpha channel and HDR using ffprobe
		firstFrame := frameFiles[0]
		hasAlpha = detectAlphaChannel(ctx, firstFrame)
		if hasAlpha {
			ctx.Info("Detected alpha channel in EXR files")
		} else {
			ctx.Info("No alpha channel detected in EXR files")
		}

		hasHDR = detectHDR(ctx, firstFrame)
		if hasHDR {
			ctx.Info("Detected HDR content in EXR files")
		} else {
			ctx.Info("No HDR content detected in EXR files (SDR range)")
		}
	}

	// Generate video.
	// Use alpha if the user explicitly enabled it or the source has an alpha
	// channel, and the codec supports alpha (AV1 or VP9).
	preserveAlpha := ctx.ShouldPreserveAlpha()
	useAlpha := (preserveAlpha || hasAlpha) && (outputFormat == "EXR_AV1_MP4" || outputFormat == "EXR_VP9_WEBM")
	if (preserveAlpha || hasAlpha) && outputFormat == "EXR_264_MP4" {
		ctx.Warn("Alpha channel requested/detected but H.264 does not support alpha. Consider using EXR_AV1_MP4 or EXR_VP9_WEBM to preserve alpha.")
	}
	if preserveAlpha && !hasAlpha {
		ctx.Warn("Alpha preservation requested but no alpha channel detected in EXR files.")
	}
	if useAlpha {
		if preserveAlpha && hasAlpha {
			ctx.Info("Alpha preservation enabled: Using alpha channel encoding")
		} else if hasAlpha {
			ctx.Info("Alpha channel detected - automatically enabling alpha encoding")
		}
	}
	var outputExt string
	switch outputFormat {
	case "EXR_VP9_WEBM":
		outputExt = "webm"
		ctx.Info("Encoding WebM video with VP9 codec (with alpha channel and HDR support)...")
	case "EXR_AV1_MP4":
		outputExt = "mp4"
		ctx.Info("Encoding MP4 video with AV1 codec (with alpha channel)...")
	default:
		outputExt = "mp4"
		ctx.Info("Encoding MP4 video with H.264 codec...")
	}

	outputVideo := filepath.Join(workDir, fmt.Sprintf("output_%d.%s", ctx.JobID, outputExt))

	// Build the input pattern, e.g. "frame_0800.exr" -> "frame_%04d.exr" with start number 800
	firstFrame := frameFiles[0]
	baseName := filepath.Base(firstFrame)
	re := regexp.MustCompile(`_(\d+)\.`)
	var pattern string
	var startNumber int
	frameNumStr := re.FindStringSubmatch(baseName)
	if len(frameNumStr) > 1 {
		pattern = re.ReplaceAllString(baseName, "_%04d.")
		fmt.Sscanf(frameNumStr[1], "%d", &startNumber)
	} else {
		startNumber = extractFrameNumber(baseName)
		pattern = strings.Replace(baseName, fmt.Sprintf("%d", startNumber), "%04d", 1)
	}
	patternPath := filepath.Join(workDir, pattern)

	// Select the encoder and build the command (software encoding only)
	var encoder encoding.Encoder
	switch outputFormat {
	case "EXR_AV1_MP4":
		encoder = ctx.Encoder.SelectAV1()
	case "EXR_VP9_WEBM":
		encoder = ctx.Encoder.SelectVP9()
	default:
		encoder = ctx.Encoder.SelectH264()
	}

ctx.Info(fmt.Sprintf("Using encoder: %s (%s)", encoder.Name(), encoder.Codec()))
|
||||
|
||||
// All software encoders use 2-pass for optimal quality
|
||||
ctx.Info("Starting 2-pass encode for optimal quality...")
|
||||
|
||||
// Pass 1
|
||||
ctx.Info("Pass 1/2: Analyzing content for optimal encode...")
|
||||
softEncoder := encoder.(*encoding.SoftwareEncoder)
|
||||
// Use HDR if: user explicitly enabled it OR HDR content was detected
|
||||
preserveHDR := (ctx.ShouldPreserveHDR() || hasHDR) && sourceFormat == "exr"
|
||||
if hasHDR && !ctx.ShouldPreserveHDR() {
|
||||
ctx.Info("HDR content detected - automatically enabling HDR preservation")
|
||||
}
|
||||
pass1Cmd := softEncoder.BuildPass1Command(&encoding.EncodeConfig{
|
||||
InputPattern: patternPath,
|
||||
OutputPath: outputVideo,
|
||||
StartFrame: startNumber,
|
||||
FrameRate: frameRate,
|
||||
WorkDir: workDir,
|
||||
UseAlpha: useAlpha,
|
||||
TwoPass: true,
|
||||
SourceFormat: sourceFormat,
|
||||
PreserveHDR: preserveHDR,
|
||||
})
|
||||
if err := pass1Cmd.Run(); err != nil {
|
||||
ctx.Warn(fmt.Sprintf("Pass 1 completed (warnings expected): %v", err))
|
||||
}
|
||||
|
||||
// Pass 2
|
||||
ctx.Info("Pass 2/2: Encoding with optimal quality...")
|
||||
|
||||
preserveHDR = (ctx.ShouldPreserveHDR() || hasHDR) && sourceFormat == "exr"
|
||||
if preserveHDR {
|
||||
if hasHDR && !ctx.ShouldPreserveHDR() {
|
||||
ctx.Info("HDR preservation enabled (auto-detected): Using HLG transfer with bt709 primaries")
|
||||
} else {
|
||||
ctx.Info("HDR preservation enabled: Using HLG transfer with bt709 primaries")
|
||||
}
|
||||
}
|
||||
|
||||
config := &encoding.EncodeConfig{
|
||||
InputPattern: patternPath,
|
||||
OutputPath: outputVideo,
|
||||
StartFrame: startNumber,
|
||||
FrameRate: frameRate,
|
||||
WorkDir: workDir,
|
||||
UseAlpha: useAlpha,
|
||||
TwoPass: true, // Software encoding always uses 2-pass for quality
|
||||
SourceFormat: sourceFormat,
|
||||
PreserveHDR: preserveHDR,
|
||||
}
|
||||
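	// Illustrative sketch (assumption, not shown in this commit): a 2-pass
	// software H.264 invocation pair that BuildPass1Command/BuildCommand
	// could produce would look roughly like:
	//   ffmpeg -framerate R -start_number N -i frame_%04d.exr \
	//          -c:v libx264 -pass 1 -an -f null /dev/null
	//   ffmpeg -framerate R -start_number N -i frame_%04d.exr \
	//          -c:v libx264 -pass 2 output_<job>.mp4
	// Only the pass-log cleanup below (ffmpeg2pass-0.log*) is confirmed by
	// the diff; the exact flags live in the encoding package.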
	cmd := encoder.BuildCommand(config)
	if cmd == nil {
		return errors.New("failed to build encode command")
	}

	// Set up pipes
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to create stdout pipe: %w", err)
	}

	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		return fmt.Errorf("failed to create stderr pipe: %w", err)
	}

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start encode command: %w", err)
	}

	ctx.Processes.Track(ctx.TaskID, cmd)
	defer ctx.Processes.Untrack(ctx.TaskID)

	// Stream stdout
	stdoutDone := make(chan bool)
	go func() {
		defer close(stdoutDone)
		scanner := bufio.NewScanner(stdoutPipe)
		for scanner.Scan() {
			line := scanner.Text()
			if line != "" {
				ctx.Info(line)
			}
		}
	}()

	// Stream stderr
	stderrDone := make(chan bool)
	go func() {
		defer close(stderrDone)
		scanner := bufio.NewScanner(stderrPipe)
		for scanner.Scan() {
			line := scanner.Text()
			if line != "" {
				ctx.Warn(line)
			}
		}
	}()

	err = cmd.Wait()
	<-stdoutDone
	<-stderrDone

	if err != nil {
		var errMsg string
		if exitErr, ok := err.(*exec.ExitError); ok {
			if exitErr.ExitCode() == 137 {
				errMsg = "FFmpeg was killed due to excessive memory usage (OOM)"
			} else {
				errMsg = fmt.Sprintf("ffmpeg encoding failed: %v", err)
			}
		} else {
			errMsg = fmt.Sprintf("ffmpeg encoding failed: %v", err)
		}

		if sizeErr := checkFFmpegSizeError(errMsg); sizeErr != nil {
			ctx.Error(sizeErr.Error())
			return sizeErr
		}

		ctx.Error(errMsg)
		return errors.New(errMsg)
	}

	// Verify output
	if _, err := os.Stat(outputVideo); os.IsNotExist(err) {
		err := fmt.Errorf("%s video file not created: %s", outputExt, outputVideo)
		ctx.Error(err.Error())
		return err
	}

	// Clean up 2-pass log files
	os.Remove(filepath.Join(workDir, "ffmpeg2pass-0.log"))
	os.Remove(filepath.Join(workDir, "ffmpeg2pass-0.log.mbtree"))

	ctx.Info(fmt.Sprintf("%s video encoded successfully", strings.ToUpper(outputExt)))

	// Upload video
	ctx.Info(fmt.Sprintf("Uploading encoded %s video...", strings.ToUpper(outputExt)))

	uploadPath := fmt.Sprintf("/api/runner/jobs/%d/upload", ctx.JobID)
	if err := ctx.Manager.UploadFile(uploadPath, ctx.JobToken, outputVideo); err != nil {
		ctx.Error(fmt.Sprintf("Failed to upload %s: %v", strings.ToUpper(outputExt), err))
		return fmt.Errorf("failed to upload %s: %w", strings.ToUpper(outputExt), err)
	}

	ctx.Info(fmt.Sprintf("Successfully uploaded %s: %s", strings.ToUpper(outputExt), filepath.Base(outputVideo)))

	log.Printf("Successfully generated and uploaded %s for job %d: %s", strings.ToUpper(outputExt), ctx.JobID, filepath.Base(outputVideo))
	return nil
}

// detectAlphaChannel checks if an EXR file has an alpha channel using ffprobe
func detectAlphaChannel(ctx *Context, filePath string) bool {
	// Use ffprobe to check pixel format and stream properties
	// EXR files with alpha will have formats like gbrapf32le (RGBA) vs gbrpf32le (RGB)
	cmd := exec.Command("ffprobe",
		"-v", "error",
		"-select_streams", "v:0",
		"-show_entries", "stream=pix_fmt:stream=codec_name",
		"-of", "default=noprint_wrappers=1",
		filePath,
	)

	output, err := cmd.Output()
	if err != nil {
		// If ffprobe fails, assume no alpha (conservative approach)
		ctx.Warn(fmt.Sprintf("Failed to detect alpha channel in %s: %v", filepath.Base(filePath), err))
		return false
	}

	outputStr := string(output)
	// Check pixel format - EXR with alpha typically has 'a' in the format name (e.g., gbrapf32le)
	// Also check for formats that explicitly indicate alpha
	hasAlpha := strings.Contains(outputStr, "pix_fmt=gbrap") ||
		strings.Contains(outputStr, "pix_fmt=rgba") ||
		strings.Contains(outputStr, "pix_fmt=yuva") ||
		strings.Contains(outputStr, "pix_fmt=abgr")

	if hasAlpha {
		ctx.Info(fmt.Sprintf("Detected alpha channel in EXR file: %s", filepath.Base(filePath)))
	}

	return hasAlpha
}

// detectHDR checks if an EXR file contains HDR content using ffprobe
func detectHDR(ctx *Context, filePath string) bool {
	// First, check if the pixel format supports HDR (32-bit float)
	cmd := exec.Command("ffprobe",
		"-v", "error",
		"-select_streams", "v:0",
		"-show_entries", "stream=pix_fmt",
		"-of", "default=noprint_wrappers=1:nokey=1",
		filePath,
	)

	output, err := cmd.Output()
	if err != nil {
		// If ffprobe fails, assume no HDR (conservative approach)
		ctx.Warn(fmt.Sprintf("Failed to detect HDR in %s: %v", filepath.Base(filePath), err))
		return false
	}

	pixFmt := strings.TrimSpace(string(output))
	// EXR files with 32-bit float formats (gbrpf32le, gbrapf32le) can contain HDR
	isFloat32 := strings.Contains(pixFmt, "f32") // also covers "f32le"

	if !isFloat32 {
		// Not a float format, definitely not HDR
		return false
	}

	// For 32-bit float EXR, sample pixels to check if values exceed SDR range (> 1.0)
	// Use ffmpeg to extract pixel statistics - check max pixel values
	// This is more efficient than sampling individual pixels
	cmd = exec.Command("ffmpeg",
		"-v", "error",
		"-i", filePath,
		"-vf", "signalstats",
		"-f", "null",
		"-",
	)

	output, err = cmd.CombinedOutput()
	if err != nil {
		// If stats extraction fails, try sampling a few pixels directly
		return detectHDRBySampling(ctx, filePath)
	}

	// Check output for max pixel values
	outputStr := string(output)
	if strings.Contains(outputStr, "MAX") {
		// signalstats reports YUV statistics, which do not map cleanly onto
		// EXR's linear RGB channels, so defer to direct pixel sampling
		return detectHDRBySampling(ctx, filePath)
	}

	// Fallback to pixel sampling. Note that every branch above also ends in
	// detectHDRBySampling, so sampling is the effective detector here and
	// the signalstats run only serves as a probe.
	return detectHDRBySampling(ctx, filePath)
}

// detectHDRBySampling samples pixels from multiple regions to detect HDR content
func detectHDRBySampling(ctx *Context, filePath string) bool {
	// Sample multiple 10x10 regions from different parts of the image
	// This gives us better coverage than a single sample
	sampleRegions := []string{
		"crop=10:10:iw/4:ih/4",     // Top-left quadrant
		"crop=10:10:iw*3/4:ih/4",   // Top-right quadrant
		"crop=10:10:iw/4:ih*3/4",   // Bottom-left quadrant
		"crop=10:10:iw*3/4:ih*3/4", // Bottom-right quadrant
		"crop=10:10:iw/2:ih/2",     // Center
	}

	for _, region := range sampleRegions {
		cmd := exec.Command("ffmpeg",
			"-v", "error",
			"-i", filePath,
			"-vf", fmt.Sprintf("%s,scale=1:1", region),
			"-f", "rawvideo",
			"-pix_fmt", "gbrpf32le",
			"-",
		)

		output, err := cmd.Output()
		if err != nil {
			continue // Skip this region if sampling fails
		}

		// Parse the float32 values (4 bytes per float, 3 channels RGB)
		if len(output) >= 12 { // At least 3 floats (RGB) = 12 bytes
			for i := 0; i < len(output)-11; i += 12 {
				// Read RGB values (little-endian float32)
				r := float32FromBytes(output[i : i+4])
				g := float32FromBytes(output[i+4 : i+8])
				b := float32FromBytes(output[i+8 : i+12])

				// Check if any channel exceeds 1.0 (SDR range)
				if r > 1.0 || g > 1.0 || b > 1.0 {
					maxVal := max(r, max(g, b))
					ctx.Info(fmt.Sprintf("Detected HDR content in EXR file: %s (max value: %.2f)", filepath.Base(filePath), maxVal))
					return true
				}
			}
		}
	}

	// If we sampled multiple regions and none exceed 1.0, it's likely SDR content
	// But since it's 32-bit float format, user can still manually enable HDR if needed
	return false
}

// float32FromBytes converts 4 bytes (little-endian) to float32
func float32FromBytes(bytes []byte) float32 {
	if len(bytes) < 4 {
		return 0
	}
	bits := uint32(bytes[0]) | uint32(bytes[1])<<8 | uint32(bytes[2])<<16 | uint32(bytes[3])<<24
	return math.Float32frombits(bits)
}

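// Illustrative alternative (not part of the commit): the manual bit assembly
// above is equivalent to the standard library's little-endian decode:
//
//	bits := binary.LittleEndian.Uint32(bytes) // "encoding/binary"
//	return math.Float32frombits(bits)
//
// Behavior is identical for 4-byte slices; the hand-rolled version merely
// avoids the extra import.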
// max returns the maximum of two float32 values
func max(a, b float32) float32 {
	if a > b {
		return a
	}
	return b
}

func extractFrameNumber(filename string) int {
	parts := strings.Split(filepath.Base(filename), "_")
	if len(parts) < 2 {
		return 0
	}
	framePart := strings.Split(parts[1], ".")[0]
	var frameNum int
	fmt.Sscanf(framePart, "%d", &frameNum)
	return frameNum
}

func checkFFmpegSizeError(output string) error {
	outputLower := strings.ToLower(output)

	if strings.Contains(outputLower, "hardware does not support encoding at size") {
		constraintsMatch := regexp.MustCompile(`constraints:\s*width\s+(\d+)-(\d+)\s+height\s+(\d+)-(\d+)`).FindStringSubmatch(output)
		if len(constraintsMatch) == 5 {
			return fmt.Errorf("video frame size is outside hardware encoder limits. Hardware requires: width %s-%s, height %s-%s",
				constraintsMatch[1], constraintsMatch[2], constraintsMatch[3], constraintsMatch[4])
		}
		return fmt.Errorf("video frame size is outside hardware encoder limits")
	}

	if strings.Contains(outputLower, "picture size") && strings.Contains(outputLower, "is invalid") {
		sizeMatch := regexp.MustCompile(`picture size\s+(\d+)x(\d+)`).FindStringSubmatch(output)
		if len(sizeMatch) == 3 {
			return fmt.Errorf("invalid video frame size: %sx%s", sizeMatch[1], sizeMatch[2])
		}
		return fmt.Errorf("invalid video frame size")
	}

	if strings.Contains(outputLower, "error while opening encoder") &&
		(strings.Contains(outputLower, "width") || strings.Contains(outputLower, "height") || strings.Contains(outputLower, "size")) {
		sizeMatch := regexp.MustCompile(`at size\s+(\d+)x(\d+)`).FindStringSubmatch(output)
		if len(sizeMatch) == 3 {
			return fmt.Errorf("hardware encoder cannot encode frame size %sx%s", sizeMatch[1], sizeMatch[2])
		}
		return fmt.Errorf("hardware encoder error: frame size may be invalid")
	}

	if strings.Contains(outputLower, "invalid") &&
		(strings.Contains(outputLower, "width") || strings.Contains(outputLower, "height") || strings.Contains(outputLower, "dimension")) {
		return fmt.Errorf("invalid frame dimensions detected")
	}

	return nil
}

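// Illustrative behavior (the stderr text is an assumption about typical
// ffmpeg output, not taken from this commit): given a message such as
//   "hardware does not support encoding at size ... constraints: width 128-4096 height 128-4096"
// checkFFmpegSizeError returns an error naming those bounds, while output
// matching none of the patterns returns nil so the caller surfaces the
// original ffmpeg error instead.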
156
internal/runner/tasks/processor.go
Normal file
@@ -0,0 +1,156 @@
// Package tasks provides task processing implementations.
package tasks

import (
	"jiggablend/internal/runner/api"
	"jiggablend/internal/runner/blender"
	"jiggablend/internal/runner/encoding"
	"jiggablend/internal/runner/workspace"
	"jiggablend/pkg/executils"
	"jiggablend/pkg/types"
)

// Processor handles a specific task type.
type Processor interface {
	Process(ctx *Context) error
}

// Context provides task execution context.
type Context struct {
	TaskID   int64
	JobID    int64
	JobName  string
	Frame    int
	TaskType string
	WorkDir  string
	JobToken string
	Metadata *types.BlendMetadata

	Manager   *api.ManagerClient
	JobConn   *api.JobConnection
	Workspace *workspace.Manager
	Blender   *blender.Manager
	Encoder   *encoding.Selector
	Processes *executils.ProcessTracker
}

// NewContext creates a new task context.
func NewContext(
	taskID, jobID int64,
	jobName string,
	frame int,
	taskType string,
	workDir string,
	jobToken string,
	metadata *types.BlendMetadata,
	manager *api.ManagerClient,
	jobConn *api.JobConnection,
	ws *workspace.Manager,
	blenderMgr *blender.Manager,
	encoder *encoding.Selector,
	processes *executils.ProcessTracker,
) *Context {
	return &Context{
		TaskID:    taskID,
		JobID:     jobID,
		JobName:   jobName,
		Frame:     frame,
		TaskType:  taskType,
		WorkDir:   workDir,
		JobToken:  jobToken,
		Metadata:  metadata,
		Manager:   manager,
		JobConn:   jobConn,
		Workspace: ws,
		Blender:   blenderMgr,
		Encoder:   encoder,
		Processes: processes,
	}
}

// Log sends a log entry to the manager.
func (c *Context) Log(level types.LogLevel, message string) {
	if c.JobConn != nil {
		c.JobConn.Log(c.TaskID, level, message)
	}
}

// Info logs an info message.
func (c *Context) Info(message string) {
	c.Log(types.LogLevelInfo, message)
}

// Warn logs a warning message.
func (c *Context) Warn(message string) {
	c.Log(types.LogLevelWarn, message)
}

// Error logs an error message.
func (c *Context) Error(message string) {
	c.Log(types.LogLevelError, message)
}

// Progress sends a progress update.
func (c *Context) Progress(progress float64) {
	if c.JobConn != nil {
		c.JobConn.Progress(c.TaskID, progress)
	}
}

// OutputUploaded notifies that an output file was uploaded.
func (c *Context) OutputUploaded(fileName string) {
	if c.JobConn != nil {
		c.JobConn.OutputUploaded(c.TaskID, fileName)
	}
}

// Complete sends task completion.
func (c *Context) Complete(success bool, errorMsg error) {
	if c.JobConn != nil {
		c.JobConn.Complete(c.TaskID, success, errorMsg)
	}
}

// GetOutputFormat returns the output format from metadata, or "PNG" by default.
func (c *Context) GetOutputFormat() string {
	if c.Metadata != nil && c.Metadata.RenderSettings.OutputFormat != "" {
		return c.Metadata.RenderSettings.OutputFormat
	}
	return "PNG"
}

// GetFrameRate returns the frame rate from metadata, or 24 fps by default.
func (c *Context) GetFrameRate() float64 {
	if c.Metadata != nil && c.Metadata.RenderSettings.FrameRate > 0 {
		return c.Metadata.RenderSettings.FrameRate
	}
	return 24.0
}

// GetBlenderVersion returns the Blender version from metadata.
func (c *Context) GetBlenderVersion() string {
	if c.Metadata != nil {
		return c.Metadata.BlenderVersion
	}
	return ""
}

// ShouldUnhideObjects returns whether to unhide objects.
func (c *Context) ShouldUnhideObjects() bool {
	return c.Metadata != nil && c.Metadata.UnhideObjects != nil && *c.Metadata.UnhideObjects
}

// ShouldEnableExecution returns whether to enable auto-execution.
func (c *Context) ShouldEnableExecution() bool {
	return c.Metadata != nil && c.Metadata.EnableExecution != nil && *c.Metadata.EnableExecution
}

// ShouldPreserveHDR returns whether to preserve HDR range for EXR encoding.
func (c *Context) ShouldPreserveHDR() bool {
	return c.Metadata != nil && c.Metadata.PreserveHDR != nil && *c.Metadata.PreserveHDR
}

// ShouldPreserveAlpha returns whether to preserve alpha channel for EXR encoding.
func (c *Context) ShouldPreserveAlpha() bool {
	return c.Metadata != nil && c.Metadata.PreserveAlpha != nil && *c.Metadata.PreserveAlpha
}

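// Illustrative sketch (not part of the commit): a minimal Processor wired to
// the Context helpers above. The "noop" task type is hypothetical and only
// shows the intended call pattern.
type noopProcessor struct{}

func (noopProcessor) Process(ctx *Context) error {
	ctx.Info("noop task: nothing to render")
	ctx.Progress(100)
	return nil
}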
301
internal/runner/tasks/render.go
Normal file
@@ -0,0 +1,301 @@
package tasks

import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"jiggablend/internal/runner/blender"
	"jiggablend/internal/runner/workspace"
	"jiggablend/pkg/scripts"
	"jiggablend/pkg/types"
)

// RenderProcessor handles render tasks.
type RenderProcessor struct{}

// NewRenderProcessor creates a new render processor.
func NewRenderProcessor() *RenderProcessor {
	return &RenderProcessor{}
}

// Process executes a render task.
func (p *RenderProcessor) Process(ctx *Context) error {
	ctx.Info(fmt.Sprintf("Starting task: job %d, frame %d, format: %s",
		ctx.JobID, ctx.Frame, ctx.GetOutputFormat()))
	log.Printf("Processing task %d: job %d, frame %d", ctx.TaskID, ctx.JobID, ctx.Frame)

	// Find .blend file
	blendFile, err := workspace.FindFirstBlendFile(ctx.WorkDir)
	if err != nil {
		return fmt.Errorf("failed to find blend file: %w", err)
	}

	// Get Blender binary
	blenderBinary := "blender"
	if version := ctx.GetBlenderVersion(); version != "" {
		ctx.Info(fmt.Sprintf("Job requires Blender %s", version))
		binaryPath, err := ctx.Blender.GetBinaryPath(version)
		if err != nil {
			ctx.Warn(fmt.Sprintf("Could not get Blender %s, using system blender: %v", version, err))
		} else {
			blenderBinary = binaryPath
			ctx.Info(fmt.Sprintf("Using Blender binary: %s", blenderBinary))
		}
	} else {
		ctx.Info("No Blender version specified, using system blender")
	}

	// Create output directory
	outputDir := filepath.Join(ctx.WorkDir, "output")
	if err := os.MkdirAll(outputDir, 0755); err != nil {
		return fmt.Errorf("failed to create output directory: %w", err)
	}

	// Create home directory for Blender inside workspace
	blenderHome := filepath.Join(ctx.WorkDir, "home")
	if err := os.MkdirAll(blenderHome, 0755); err != nil {
		return fmt.Errorf("failed to create Blender home directory: %w", err)
	}

	// Determine render format
	outputFormat := ctx.GetOutputFormat()
	renderFormat := outputFormat
	if outputFormat == "EXR_264_MP4" || outputFormat == "EXR_AV1_MP4" || outputFormat == "EXR_VP9_WEBM" {
		renderFormat = "EXR" // Render frames as EXR for maximum quality before encoding
	}

	// Create render script
	if err := p.createRenderScript(ctx, renderFormat); err != nil {
		return err
	}

	// Render
	ctx.Info(fmt.Sprintf("Starting Blender render for frame %d...", ctx.Frame))
	if err := p.runBlender(ctx, blenderBinary, blendFile, outputDir, renderFormat, blenderHome); err != nil {
		ctx.Error(fmt.Sprintf("Blender render failed: %v", err))
		return err
	}

	// Verify output
	if _, err := p.findOutputFile(ctx, outputDir, renderFormat); err != nil {
		ctx.Error(fmt.Sprintf("Output verification failed: %v", err))
		return err
	}
	ctx.Info(fmt.Sprintf("Blender render completed for frame %d", ctx.Frame))

	return nil
}

func (p *RenderProcessor) createRenderScript(ctx *Context, renderFormat string) error {
	formatFilePath := filepath.Join(ctx.WorkDir, "output_format.txt")
	renderSettingsFilePath := filepath.Join(ctx.WorkDir, "render_settings.json")

	// Build unhide code conditionally
	unhideCode := ""
	if ctx.ShouldUnhideObjects() {
		unhideCode = scripts.UnhideObjects
	}

	// Load template and replace placeholders
	scriptContent := scripts.RenderBlenderTemplate
	scriptContent = strings.ReplaceAll(scriptContent, "{{UNHIDE_CODE}}", unhideCode)
	scriptContent = strings.ReplaceAll(scriptContent, "{{FORMAT_FILE_PATH}}", fmt.Sprintf("%q", formatFilePath))
	scriptContent = strings.ReplaceAll(scriptContent, "{{RENDER_SETTINGS_FILE}}", fmt.Sprintf("%q", renderSettingsFilePath))

	scriptPath := filepath.Join(ctx.WorkDir, "enable_gpu.py")
	if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
		errMsg := fmt.Sprintf("failed to create GPU enable script: %v", err)
		ctx.Error(errMsg)
		return errors.New(errMsg)
	}

	// Write output format
	outputFormat := ctx.GetOutputFormat()
	ctx.Info(fmt.Sprintf("Writing output format '%s' to format file", outputFormat))
	if err := os.WriteFile(formatFilePath, []byte(outputFormat), 0644); err != nil {
		errMsg := fmt.Sprintf("failed to create format file: %v", err)
		ctx.Error(errMsg)
		return errors.New(errMsg)
	}

	// Write render settings if available
	if ctx.Metadata != nil && ctx.Metadata.RenderSettings.EngineSettings != nil {
		settingsJSON, err := json.Marshal(ctx.Metadata.RenderSettings)
		if err == nil {
			if err := os.WriteFile(renderSettingsFilePath, settingsJSON, 0644); err != nil {
				ctx.Warn(fmt.Sprintf("Failed to write render settings file: %v", err))
			}
		}
	}

	return nil
}

func (p *RenderProcessor) runBlender(ctx *Context, blenderBinary, blendFile, outputDir, renderFormat, blenderHome string) error {
	scriptPath := filepath.Join(ctx.WorkDir, "enable_gpu.py")

	args := []string{"-b", blendFile, "--python", scriptPath}
	if ctx.ShouldEnableExecution() {
		args = append(args, "--enable-autoexec")
	}

	// Output pattern
	outputPattern := filepath.Join(outputDir, fmt.Sprintf("frame_####.%s", strings.ToLower(renderFormat)))
	outputAbsPattern, _ := filepath.Abs(outputPattern)
	args = append(args, "-o", outputAbsPattern)

	args = append(args, "-f", fmt.Sprintf("%d", ctx.Frame))

	// Wrap with xvfb-run
	xvfbArgs := []string{"-a", "-s", "-screen 0 800x600x24", blenderBinary}
	xvfbArgs = append(xvfbArgs, args...)
	cmd := exec.Command("xvfb-run", xvfbArgs...)
	cmd.Dir = ctx.WorkDir
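	// Illustrative expansion (for readability only, an assumption about a
	// concrete run): for frame 7 the resulting invocation is roughly
	//   xvfb-run -a -s "-screen 0 800x600x24" <blender> -b scene.blend \
	//     --python enable_gpu.py -o /abs/output/frame_####.exr -f 7
	// Blender substitutes #### with the zero-padded frame number on save.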

	// Set up environment with custom HOME directory
	env := os.Environ()
	// Remove existing HOME if present and add our custom one
	newEnv := make([]string, 0, len(env)+1)
	for _, e := range env {
		if !strings.HasPrefix(e, "HOME=") {
			newEnv = append(newEnv, e)
		}
	}
	newEnv = append(newEnv, fmt.Sprintf("HOME=%s", blenderHome))
	cmd.Env = newEnv

	// Set up pipes
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to create stdout pipe: %w", err)
	}

	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		return fmt.Errorf("failed to create stderr pipe: %w", err)
	}

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start blender: %w", err)
	}

	// Track process
	ctx.Processes.Track(ctx.TaskID, cmd)
	defer ctx.Processes.Untrack(ctx.TaskID)

	// Stream stdout
	stdoutDone := make(chan bool)
	go func() {
		defer close(stdoutDone)
		scanner := bufio.NewScanner(stdoutPipe)
		for scanner.Scan() {
			line := scanner.Text()
			if line != "" {
				shouldFilter, logLevel := blender.FilterLog(line)
				if !shouldFilter {
					ctx.Log(logLevel, line)
				}
			}
		}
	}()

	// Stream stderr
	stderrDone := make(chan bool)
	go func() {
		defer close(stderrDone)
		scanner := bufio.NewScanner(stderrPipe)
		for scanner.Scan() {
			line := scanner.Text()
			if line != "" {
				shouldFilter, logLevel := blender.FilterLog(line)
				if !shouldFilter {
					// Promote plain info lines arriving on stderr to warnings
					if logLevel == types.LogLevelInfo {
						logLevel = types.LogLevelWarn
					}
					ctx.Log(logLevel, line)
				}
			}
		}
	}()

	// Wait for completion
	err = cmd.Wait()
	<-stdoutDone
	<-stderrDone

	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			if exitErr.ExitCode() == 137 {
				return errors.New("Blender was killed due to excessive memory usage (OOM)")
			}
		}
		return fmt.Errorf("blender failed: %w", err)
	}

	return nil
}

func (p *RenderProcessor) findOutputFile(ctx *Context, outputDir, renderFormat string) (string, error) {
	entries, err := os.ReadDir(outputDir)
	if err != nil {
		return "", fmt.Errorf("failed to read output directory: %w", err)
	}

	ctx.Info("Checking output directory for files...")

	// Try exact match first
	expectedFile := filepath.Join(outputDir, fmt.Sprintf("frame_%04d.%s", ctx.Frame, strings.ToLower(renderFormat)))
	if _, err := os.Stat(expectedFile); err == nil {
		ctx.Info(fmt.Sprintf("Found output file: %s", filepath.Base(expectedFile)))
		return expectedFile, nil
	}

	// Try without zero padding
	altFile := filepath.Join(outputDir, fmt.Sprintf("frame_%d.%s", ctx.Frame, strings.ToLower(renderFormat)))
	if _, err := os.Stat(altFile); err == nil {
		ctx.Info(fmt.Sprintf("Found output file: %s", filepath.Base(altFile)))
		return altFile, nil
	}

	// Try just the zero-padded frame number
	altFile2 := filepath.Join(outputDir, fmt.Sprintf("%04d.%s", ctx.Frame, strings.ToLower(renderFormat)))
	if _, err := os.Stat(altFile2); err == nil {
		ctx.Info(fmt.Sprintf("Found output file: %s", filepath.Base(altFile2)))
		return altFile2, nil
	}

	// Search through all files
	for _, entry := range entries {
		if !entry.IsDir() {
			fileName := entry.Name()
			if strings.Contains(fileName, "%04d") || strings.Contains(fileName, "%d") {
				ctx.Warn(fmt.Sprintf("Skipping file with literal pattern: %s", fileName))
				continue
			}
			frameStr := fmt.Sprintf("%d", ctx.Frame)
			frameStrPadded := fmt.Sprintf("%04d", ctx.Frame)
			if strings.Contains(fileName, frameStrPadded) ||
				(strings.Contains(fileName, frameStr) && strings.HasSuffix(strings.ToLower(fileName), strings.ToLower(renderFormat))) {
				outputFile := filepath.Join(outputDir, fileName)
				ctx.Info(fmt.Sprintf("Found output file: %s", fileName))
				return outputFile, nil
			}
		}
	}

	// Not found
	fileList := []string{}
	for _, entry := range entries {
		if !entry.IsDir() {
			fileList = append(fileList, entry.Name())
		}
	}
	return "", fmt.Errorf("output file not found: %s\nFiles in output directory: %v", expectedFile, fileList)
}

146
internal/runner/workspace/archive.go
Normal file
@@ -0,0 +1,146 @@
package workspace

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// ExtractTar extracts a tar archive from a reader to a directory.
func ExtractTar(reader io.Reader, destDir string) error {
	if err := os.MkdirAll(destDir, 0755); err != nil {
		return fmt.Errorf("failed to create destination directory: %w", err)
	}

	tarReader := tar.NewReader(reader)

	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("failed to read tar header: %w", err)
		}

		// Sanitize path to prevent directory traversal
		targetPath := filepath.Join(destDir, header.Name)
		if !strings.HasPrefix(filepath.Clean(targetPath), filepath.Clean(destDir)+string(os.PathSeparator)) {
			return fmt.Errorf("invalid file path in tar: %s", header.Name)
		}

		switch header.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
				return fmt.Errorf("failed to create directory: %w", err)
			}

		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
				return fmt.Errorf("failed to create parent directory: %w", err)
			}

			outFile, err := os.Create(targetPath)
			if err != nil {
				return fmt.Errorf("failed to create file: %w", err)
			}

			if _, err := io.Copy(outFile, tarReader); err != nil {
				outFile.Close()
				return fmt.Errorf("failed to write file: %w", err)
			}
			outFile.Close()

			if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
				log.Printf("Warning: failed to set file permissions: %v", err)
			}
		}
	}

	return nil
}

// ExtractTarStripPrefix extracts a tar archive, stripping the top-level directory.
// Useful for Blender archives like "blender-4.2.3-linux-x64/".
func ExtractTarStripPrefix(reader io.Reader, destDir string) error {
	if err := os.MkdirAll(destDir, 0755); err != nil {
		return err
	}

	tarReader := tar.NewReader(reader)
	stripPrefix := ""

	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}

		// Determine the strip prefix from the first entry (e.g., "blender-4.2.3-linux-x64/")
		if stripPrefix == "" {
			parts := strings.SplitN(header.Name, "/", 2)
			if len(parts) > 0 {
				stripPrefix = parts[0] + "/"
			}
		}

		// Strip the top-level directory
		name := strings.TrimPrefix(header.Name, stripPrefix)
		if name == "" {
			continue
		}

		targetPath := filepath.Join(destDir, name)

		switch header.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
				return err
			}

		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
				return err
			}
			outFile, err := os.OpenFile(targetPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(header.Mode))
			if err != nil {
				return err
			}
			if _, err := io.Copy(outFile, tarReader); err != nil {
				outFile.Close()
				return err
			}
			outFile.Close()

		case tar.TypeSymlink:
			if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {
				return err
			}
			os.Remove(targetPath) // Remove existing symlink if present
			if err := os.Symlink(header.Linkname, targetPath); err != nil {
				return err
			}
		}
	}

	return nil
}
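// Illustrative usage (not part of the commit): both extractors consume a raw
// tar stream, so compressed archives must be decompressed by the caller
// first, e.g. for a hypothetical .tar.gz download:
//
//	f, _ := os.Open("blender-4.2.3-linux-x64.tar.gz")
//	defer f.Close()
//	gz, _ := gzip.NewReader(f) // "compress/gzip"
//	err := ExtractTarStripPrefix(gz, destDir)
//
// Official Linux Blender builds ship as .tar.xz, which would need an xz
// decompressor in front of the reader instead.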

// ExtractTarFile extracts a tar file to a directory.
func ExtractTarFile(tarPath, destDir string) error {
	file, err := os.Open(tarPath)
	if err != nil {
		return fmt.Errorf("failed to open tar file: %w", err)
	}
	defer file.Close()

	return ExtractTar(file, destDir)
}

217
internal/runner/workspace/workspace.go
Normal file
@@ -0,0 +1,217 @@
// Package workspace manages runner workspace directories.
package workspace

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// Manager handles workspace directory operations.
type Manager struct {
	baseDir    string
	runnerName string
}

// NewManager creates a new workspace manager.
func NewManager(runnerName string) *Manager {
	m := &Manager{
		runnerName: sanitizeName(runnerName),
	}
	m.init()
	return m
}

func sanitizeName(name string) string {
	name = strings.ReplaceAll(name, " ", "_")
	name = strings.ReplaceAll(name, "/", "_")
	name = strings.ReplaceAll(name, "\\", "_")
	name = strings.ReplaceAll(name, ":", "_")
	return name
}

func (m *Manager) init() {
	// Prefer the current directory if available, otherwise use temp
	baseDir := os.TempDir()
	if cwd, err := os.Getwd(); err == nil {
		baseDir = cwd
	}

	m.baseDir = filepath.Join(baseDir, "jiggablend-workspaces", m.runnerName)
	if err := os.MkdirAll(m.baseDir, 0755); err != nil {
		log.Printf("Warning: Failed to create workspace directory %s: %v", m.baseDir, err)
		// Fall back to the temp directory
		m.baseDir = filepath.Join(os.TempDir(), "jiggablend-workspaces", m.runnerName)
		if err := os.MkdirAll(m.baseDir, 0755); err != nil {
			log.Printf("Error: Failed to create fallback workspace directory: %v", err)
			// Last resort
			m.baseDir = filepath.Join(os.TempDir(), "jiggablend-runner")
			os.MkdirAll(m.baseDir, 0755)
		}
	}
	log.Printf("Runner workspace initialized at: %s", m.baseDir)
}

// BaseDir returns the base workspace directory.
func (m *Manager) BaseDir() string {
	return m.baseDir
}

// JobDir returns the directory for a specific job.
func (m *Manager) JobDir(jobID int64) string {
	return filepath.Join(m.baseDir, fmt.Sprintf("job-%d", jobID))
}

// VideoDir returns the directory used for video encoding.
func (m *Manager) VideoDir(jobID int64) string {
	return filepath.Join(m.baseDir, fmt.Sprintf("job-%d-video", jobID))
}

// BlenderDir returns the directory for Blender installations.
func (m *Manager) BlenderDir() string {
	return filepath.Join(m.baseDir, "blender-versions")
}

// CreateJobDir creates and returns the job directory.
func (m *Manager) CreateJobDir(jobID int64) (string, error) {
	dir := m.JobDir(jobID)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return "", fmt.Errorf("failed to create job directory: %w", err)
	}
	return dir, nil
}

// CreateVideoDir creates and returns the encode directory.
func (m *Manager) CreateVideoDir(jobID int64) (string, error) {
	dir := m.VideoDir(jobID)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return "", fmt.Errorf("failed to create video directory: %w", err)
	}
	return dir, nil
}

// CleanupJobDir removes a job directory.
func (m *Manager) CleanupJobDir(jobID int64) error {
	return os.RemoveAll(m.JobDir(jobID))
}

// CleanupVideoDir removes an encode directory.
func (m *Manager) CleanupVideoDir(jobID int64) error {
	return os.RemoveAll(m.VideoDir(jobID))
}

// Cleanup removes the entire workspace directory.
func (m *Manager) Cleanup() {
	if m.baseDir != "" {
		log.Printf("Cleaning up workspace directory: %s", m.baseDir)
		if err := os.RemoveAll(m.baseDir); err != nil {
			log.Printf("Warning: Failed to remove workspace directory %s: %v", m.baseDir, err)
		} else {
			log.Printf("Successfully removed workspace directory: %s", m.baseDir)
		}
	}

	// Also clean up any orphaned jiggablend directories
	cleanupOrphanedWorkspaces()
}

// cleanupOrphanedWorkspaces removes any jiggablend workspace directories
// that might be left behind from previous runs or crashes.
func cleanupOrphanedWorkspaces() {
	log.Printf("Cleaning up orphaned jiggablend workspace directories...")

	dirsToCheck := []string{".", os.TempDir()}
	for _, baseDir := range dirsToCheck {
		workspaceDir := filepath.Join(baseDir, "jiggablend-workspaces")
		if _, err := os.Stat(workspaceDir); err == nil {
			log.Printf("Removing orphaned workspace directory: %s", workspaceDir)
			if err := os.RemoveAll(workspaceDir); err != nil {
				log.Printf("Warning: Failed to remove workspace directory %s: %v", workspaceDir, err)
			} else {
				log.Printf("Successfully removed workspace directory: %s", workspaceDir)
			}
		}
	}
}

// isBlendFile reports whether name is a .blend scene file. Blender save
// files (.blend1, .blend2, ...) do not end in ".blend", so the suffix check
// alone excludes them; the original per-character digit scan after the
// suffix match could never fire and has been folded into this helper.
func isBlendFile(name string) bool {
	return strings.HasSuffix(strings.ToLower(name), ".blend")
}

// FindBlendFiles finds all .blend files in a directory, returning paths
// relative to dir and skipping Blender save files (.blend1, .blend2, ...).
func FindBlendFiles(dir string) ([]string, error) {
	var blendFiles []string

	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && isBlendFile(info.Name()) {
			relPath, _ := filepath.Rel(dir, path)
			blendFiles = append(blendFiles, relPath)
		}
		return nil
	})

	return blendFiles, err
}

// FindFirstBlendFile finds the first .blend file in a directory.
func FindFirstBlendFile(dir string) (string, error) {
	var blendFile string

	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && isBlendFile(info.Name()) {
			blendFile = path
			return filepath.SkipAll
		}
		return nil
	})

	if err != nil {
		return "", err
	}
	if blendFile == "" {
		return "", fmt.Errorf("no .blend file found in %s", dir)
	}
	return blendFile, nil
}

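// Illustrative lifecycle sketch (not part of the commit): how a runner might
// use the workspace manager around a single job.
//
//	ws := workspace.NewManager("runner 1") // name sanitized to "runner_1"
//	dir, err := ws.CreateJobDir(42)        // .../jiggablend-workspaces/runner_1/job-42
//	if err == nil {
//		defer ws.CleanupJobDir(42)
//		// download the job context into dir, render, upload outputs...
//	}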
BIN
jiggablend
Binary file not shown.
@@ -339,12 +339,8 @@ object_count = len(scene.objects)
material_count = len(bpy.data.materials)

# Extract Blender version info
# bpy.app.version gives the current running Blender version
# For the file's saved version, we check bpy.data.version (version the file was saved with)
blender_version = {
    "current": bpy.app.version_string,  # Version of Blender running this script
    "file_saved_with": ".".join(map(str, bpy.data.version)) if hasattr(bpy.data, 'version') else None,  # Version file was saved with
}
# bpy.data.version gives the version the file was saved with
blender_version = ".".join(map(str, bpy.data.version)) if hasattr(bpy.data, 'version') else bpy.app.version_string

# Build metadata dictionary
metadata = {
@@ -12,6 +12,52 @@ try:
except Exception as e:
    print(f"Warning: Could not make paths relative: {e}")

# Auto-enable addons from blender_addons folder in context
# Supports .zip files (installed via Blender API) and already-extracted addons
blend_dir = os.path.dirname(bpy.data.filepath) if bpy.data.filepath else os.getcwd()
addons_dir = os.path.join(blend_dir, "blender_addons")

if os.path.isdir(addons_dir):
    print(f"Found blender_addons folder: {addons_dir}")

    for item in os.listdir(addons_dir):
        item_path = os.path.join(addons_dir, item)

        try:
            if item.endswith('.zip'):
                # Install and enable zip addon using Blender's API
                bpy.ops.preferences.addon_install(filepath=item_path)
                # Get module name from zip (usually the folder name inside)
                import zipfile
                with zipfile.ZipFile(item_path, 'r') as zf:
                    # Find the top-level module name
                    names = zf.namelist()
                    if names:
                        module_name = names[0].split('/')[0]
                        if module_name.endswith('.py'):
                            module_name = module_name[:-3]
                        bpy.ops.preferences.addon_enable(module=module_name)
                        print(f" Installed and enabled addon: {module_name}")

            elif item.endswith('.py') and not item.startswith('__'):
                # Single-file addon
                bpy.ops.preferences.addon_install(filepath=item_path)
                module_name = item[:-3]
                bpy.ops.preferences.addon_enable(module=module_name)
                print(f" Installed and enabled addon: {module_name}")

            elif os.path.isdir(item_path) and os.path.exists(os.path.join(item_path, '__init__.py')):
                # Multi-file addon directory - add to path and enable
                if addons_dir not in sys.path:
                    sys.path.insert(0, addons_dir)
                bpy.ops.preferences.addon_enable(module=item)
                print(f" Enabled addon: {item}")

        except Exception as e:
            print(f" Error with addon {item}: {e}")
else:
    print(f"No blender_addons folder found at: {addons_dir}")

{{UNHIDE_CODE}}
# Read output format from file (created by Go code)
format_file_path = {{FORMAT_FILE_PATH}}
@@ -53,10 +99,10 @@ print(f"Blend file output format: {current_output_format}")
if output_format_override:
    print(f"Overriding output format from '{current_output_format}' to '{output_format_override}'")
    # Map common format names to Blender's format constants
    # For video formats (EXR_264_MP4, EXR_AV1_MP4), we render as EXR frames first
    # For video formats, we render as appropriate frame format first
    format_to_use = output_format_override.upper()
    if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4']:
        format_to_use = 'EXR'  # Render as EXR for video formats
    if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4', 'EXR_VP9_WEBM']:
        format_to_use = 'EXR'  # Render as EXR for EXR video formats

    format_map = {
        'PNG': 'PNG',
@@ -32,22 +32,20 @@ const (

// Job represents a render job
type Job struct {
	ID                   int64          `json:"id"`
	UserID               int64          `json:"user_id"`
	JobType              JobType        `json:"job_type"` // "render"
	Name                 string         `json:"name"`
	Status               JobStatus      `json:"status"`
	Progress             float64        `json:"progress"` // 0.0 to 100.0
	FrameStart           *int           `json:"frame_start,omitempty"` // Only for render jobs
	FrameEnd             *int           `json:"frame_end,omitempty"`   // Only for render jobs
	OutputFormat         *string        `json:"output_format,omitempty"` // Only for render jobs - PNG, JPEG, EXR, etc.
	AllowParallelRunners *bool          `json:"allow_parallel_runners,omitempty"` // Only for render jobs
	TimeoutSeconds       int            `json:"timeout_seconds"` // Job-level timeout (24 hours default)
	BlendMetadata        *BlendMetadata `json:"blend_metadata,omitempty"` // Extracted metadata from blend file
	CreatedAt            time.Time      `json:"created_at"`
	StartedAt            *time.Time     `json:"started_at,omitempty"`
	CompletedAt          *time.Time     `json:"completed_at,omitempty"`
	ErrorMessage         string         `json:"error_message,omitempty"`
	ID            int64          `json:"id"`
	UserID        int64          `json:"user_id"`
	JobType       JobType        `json:"job_type"` // "render"
	Name          string         `json:"name"`
	Status        JobStatus      `json:"status"`
	Progress      float64        `json:"progress"` // 0.0 to 100.0
	FrameStart    *int           `json:"frame_start,omitempty"` // Only for render jobs
	FrameEnd      *int           `json:"frame_end,omitempty"`   // Only for render jobs
	OutputFormat  *string        `json:"output_format,omitempty"` // Only for render jobs - PNG, JPEG, EXR, etc.
	BlendMetadata *BlendMetadata `json:"blend_metadata,omitempty"` // Extracted metadata from blend file
	CreatedAt     time.Time      `json:"created_at"`
	StartedAt     *time.Time     `json:"started_at,omitempty"`
	CompletedAt   *time.Time     `json:"completed_at,omitempty"`
	ErrorMessage  string         `json:"error_message,omitempty"`
}

// RunnerStatus represents the status of a runner
@@ -86,9 +84,8 @@ const (
type TaskType string

const (
	TaskTypeRender          TaskType = "render"
	TaskTypeMetadata        TaskType = "metadata"
	TaskTypeVideoGeneration TaskType = "video_generation"
	TaskTypeRender TaskType = "render"
	TaskTypeEncode TaskType = "encode"
)

// Task represents a render task assigned to a runner
@@ -96,8 +93,7 @@ type Task struct {
	ID          int64      `json:"id"`
	JobID       int64      `json:"job_id"`
	RunnerID    *int64     `json:"runner_id,omitempty"`
	FrameStart  int        `json:"frame_start"`
	FrameEnd    int        `json:"frame_end"`
	Frame       int        `json:"frame"`
	TaskType    TaskType   `json:"task_type"`
	Status      TaskStatus `json:"status"`
	CurrentStep string     `json:"current_step,omitempty"`
@@ -132,16 +128,18 @@ type JobFile struct {

// CreateJobRequest represents a request to create a new job
type CreateJobRequest struct {
	JobType              JobType         `json:"job_type"` // "render"
	Name                 string          `json:"name"`
	FrameStart           *int            `json:"frame_start,omitempty"` // Required for render jobs
	FrameEnd             *int            `json:"frame_end,omitempty"`   // Required for render jobs
	OutputFormat         *string         `json:"output_format,omitempty"` // Required for render jobs
	AllowParallelRunners *bool           `json:"allow_parallel_runners,omitempty"` // Optional for render jobs, defaults to true
	RenderSettings       *RenderSettings `json:"render_settings,omitempty"` // Optional: Override blend file render settings
	UploadSessionID      *string         `json:"upload_session_id,omitempty"` // Optional: Session ID from file upload
	UnhideObjects        *bool           `json:"unhide_objects,omitempty"` // Optional: Enable unhide tweaks for objects/collections
	EnableExecution      *bool           `json:"enable_execution,omitempty"` // Optional: Enable auto-execution in Blender (adds --enable-autoexec flag, defaults to false)
	JobType         JobType         `json:"job_type"` // "render"
	Name            string          `json:"name"`
	FrameStart      *int            `json:"frame_start,omitempty"` // Required for render jobs
	FrameEnd        *int            `json:"frame_end,omitempty"`   // Required for render jobs
	OutputFormat    *string         `json:"output_format,omitempty"` // Required for render jobs
	RenderSettings  *RenderSettings `json:"render_settings,omitempty"` // Optional: Override blend file render settings
	UploadSessionID *string         `json:"upload_session_id,omitempty"` // Optional: Session ID from file upload
	UnhideObjects   *bool           `json:"unhide_objects,omitempty"` // Optional: Enable unhide tweaks for objects/collections
	EnableExecution *bool           `json:"enable_execution,omitempty"` // Optional: Enable auto-execution in Blender (adds --enable-autoexec flag, defaults to false)
	BlenderVersion  *string         `json:"blender_version,omitempty"` // Optional: Override Blender version (e.g., "4.2" or "4.2.3")
	PreserveHDR     *bool           `json:"preserve_hdr,omitempty"` // Optional: Preserve HDR range for EXR encoding (uses HLG with bt709 primaries)
	PreserveAlpha   *bool           `json:"preserve_alpha,omitempty"` // Optional: Preserve alpha channel for EXR encoding (requires AV1 or VP9 codec)
}

// UpdateJobProgressRequest represents a request to update job progress
@@ -227,23 +225,26 @@ type TaskLogEntry struct {

// BlendMetadata represents extracted metadata from a blend file
type BlendMetadata struct {
	FrameStart        int               `json:"frame_start"`
	FrameEnd          int               `json:"frame_end"`
	HasNegativeFrames bool              `json:"has_negative_frames"` // True if blend file has negative frame numbers (not supported)
	RenderSettings    RenderSettings    `json:"render_settings"`
	SceneInfo         SceneInfo         `json:"scene_info"`
	MissingFilesInfo  *MissingFilesInfo `json:"missing_files_info,omitempty"`
	UnhideObjects     *bool             `json:"unhide_objects,omitempty"` // Enable unhide tweaks for objects/collections
	EnableExecution   *bool             `json:"enable_execution,omitempty"` // Enable auto-execution in Blender (adds --enable-autoexec flag, defaults to false)
	BlenderVersion    string            `json:"blender_version,omitempty"` // Detected or overridden Blender version (e.g., "4.2" or "4.2.3")
	PreserveHDR       *bool             `json:"preserve_hdr,omitempty"` // Preserve HDR range for EXR encoding (uses HLG with bt709 primaries)
	PreserveAlpha     *bool             `json:"preserve_alpha,omitempty"` // Preserve alpha channel for EXR encoding (requires AV1 or VP9 codec)
}

// MissingFilesInfo represents information about missing files/addons
type MissingFilesInfo struct {
	Checked       bool     `json:"checked"`
	HasMissing    bool     `json:"has_missing"`
	MissingFiles  []string `json:"missing_files,omitempty"`
	MissingAddons []string `json:"missing_addons,omitempty"`
	Error         string   `json:"error,omitempty"`
}

// RenderSettings represents render settings from a blend file

@@ -33,14 +33,16 @@ export default function AdminPanel() {
      }
    },
    message: (data) => {
      // Handle subscription responses
      // Handle subscription responses - update both local refs and wsManager
      if (data.type === 'subscribed' && data.channel) {
        pendingSubscriptionsRef.current.delete(data.channel);
        subscribedChannelsRef.current.add(data.channel);
        wsManager.confirmSubscription(data.channel);
        console.log('Successfully subscribed to channel:', data.channel);
      } else if (data.type === 'subscription_error' && data.channel) {
        pendingSubscriptionsRef.current.delete(data.channel);
        subscribedChannelsRef.current.delete(data.channel);
        wsManager.failSubscription(data.channel);
        console.error('Subscription failed for channel:', data.channel, data.error);
      }

@@ -83,27 +85,22 @@ export default function AdminPanel() {

  const subscribeToRunners = () => {
    const channel = 'runners';
    if (wsManager.getReadyState() !== WebSocket.OPEN) {
      return;
    }
    // Don't subscribe if already subscribed or pending
    if (subscribedChannelsRef.current.has(channel) || pendingSubscriptionsRef.current.has(channel)) {
      return;
    }
    wsManager.send({ type: 'subscribe', channel });
    wsManager.subscribeToChannel(channel);
    subscribedChannelsRef.current.add(channel);
    pendingSubscriptionsRef.current.add(channel);
    console.log('Subscribing to runners channel');
  };

  const unsubscribeFromRunners = () => {
    const channel = 'runners';
    if (wsManager.getReadyState() !== WebSocket.OPEN) {
      return;
    }
    if (!subscribedChannelsRef.current.has(channel)) {
      return; // Not subscribed
    }
    wsManager.send({ type: 'unsubscribe', channel });
    wsManager.unsubscribeFromChannel(channel);
    subscribedChannelsRef.current.delete(channel);
    pendingSubscriptionsRef.current.delete(channel);
    console.log('Unsubscribed from runners channel');

@@ -1,6 +1,6 @@
import { useState } from 'react';

export default function FileExplorer({ files, onDownload, onPreview, isImageFile }) {
export default function FileExplorer({ files, onDownload, onPreview, onVideoPreview, isImageFile }) {
  const [expandedPaths, setExpandedPaths] = useState(new Set()); // Root folder collapsed by default

  // Build directory tree from file paths
@@ -69,19 +69,29 @@ export default function FileExplorer({ files, onDownload, onPreview, isImageFile
    if (item.isFile) {
      const file = item.file;
      const isImage = isImageFile && isImageFile(file.file_name);
      const isVideo = file.file_name.toLowerCase().endsWith('.mp4');
      const sizeMB = (file.file_size / 1024 / 1024).toFixed(2);
      const isArchive = file.file_name.endsWith('.tar') || file.file_name.endsWith('.zip');

      return (
        <div key={fullPath} className="flex items-center justify-between py-1.5 hover:bg-gray-800/50 rounded px-2" style={{ paddingLeft: `${indent + 8}px` }}>
          <div className="flex items-center gap-2 flex-1 min-w-0">
            <span className="text-gray-500 text-sm">{isArchive ? '📦' : '📄'}</span>
            <span className="text-gray-500 text-sm">{isArchive ? '📦' : isVideo ? '🎬' : '📄'}</span>
            <span className="text-gray-200 text-sm truncate" title={item.name}>
              {item.name}
            </span>
            <span className="text-gray-500 text-xs ml-2">{sizeMB} MB</span>
          </div>
          <div className="flex gap-2 ml-4 shrink-0">
            {isVideo && onVideoPreview && (
              <button
                onClick={() => onVideoPreview(file)}
                className="px-2 py-1 bg-purple-600 text-white rounded text-xs hover:bg-purple-500 transition-colors"
                title="Play Video"
              >
                ▶
              </button>
            )}
            {isImage && onPreview && (
              <button
                onClick={() => onPreview(file)}

@@ -12,27 +12,34 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const [contextFiles, setContextFiles] = useState([]);
const [tasks, setTasks] = useState([]);
const [loading, setLoading] = useState(true);
const [videoUrl, setVideoUrl] = useState(null);
// Store steps and logs per task: { taskId: { steps: [], logs: [] } }
const [taskData, setTaskData] = useState({});
// Track which tasks and steps are expanded
// Track which tasks are expanded
const [expandedTasks, setExpandedTasks] = useState(new Set());
const [expandedSteps, setExpandedSteps] = useState(new Set());
const [streaming, setStreaming] = useState(false);
const [previewImage, setPreviewImage] = useState(null); // { url, fileName } or null
const [previewVideo, setPreviewVideo] = useState(null); // { url, fileName } or null
const listenerIdRef = useRef(null); // Listener ID for shared WebSocket
const subscribedChannelsRef = useRef(new Set()); // Track confirmed subscribed channels
const pendingSubscriptionsRef = useRef(new Set()); // Track pending subscriptions (waiting for confirmation)
const logContainerRefs = useRef({}); // Refs for each step's log container
const shouldAutoScrollRefs = useRef({}); // Auto-scroll state per step
const logContainerRefs = useRef({}); // Refs for each task's log container
const shouldAutoScrollRefs = useRef({}); // Auto-scroll state per task
const abortControllerRef = useRef(null); // AbortController for HTTP requests

// Sync job prop to state when it changes
useEffect(() => {
setJobDetails(job);
}, [job.id, job.status, job.progress]);
if (job) {
setJobDetails(job);
}
}, [job?.id, job?.status, job?.progress]);

useEffect(() => {
// Guard against undefined job or job.id
if (!job || !job.id) {
console.warn('JobDetails: job or job.id is undefined, skipping initialization');
return;
}

// Create new AbortController for this effect
abortControllerRef.current = new AbortController();

@@ -73,10 +80,10 @@ export default function JobDetails({ job, onClose, onUpdate }) {
listenerIdRef.current = null;
}
};
}, [job.id]);
}, [job?.id]);

useEffect(() => {
// Update log subscriptions based on expanded tasks (not steps)
// Update log subscriptions based on expanded tasks
updateLogSubscriptions();
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [expandedTasks, tasks.length, jobDetails.status]); // Use tasks.length instead of tasks to avoid unnecessary re-runs
@@ -105,6 +112,12 @@ export default function JobDetails({ job, onClose, onUpdate }) {
}, [taskData]);

const loadDetails = async () => {
// Guard against undefined job or job.id
if (!job || !job.id) {
console.warn('JobDetails: Cannot load details - job or job.id is undefined');
return;
}

try {
setLoading(true);
// Use summary endpoint for tasks initially - much faster
@@ -112,7 +125,7 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const [details, fileList, taskListResult] = await Promise.all([
jobs.get(job.id, { signal }),
jobs.getFiles(job.id, { limit: 50, signal }), // Only load first page of files
jobs.getTasksSummary(job.id, { sort: 'frame_start:asc', signal }), // Get all tasks
jobs.getTasksSummary(job.id, { sort: 'frame:asc', signal }), // Get all tasks
]);

// Check if request was aborted
@@ -139,8 +152,7 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const tasksForDisplay = taskSummaries.map(summary => ({
id: summary.id,
job_id: job.id,
frame_start: summary.frame_start,
frame_end: summary.frame_end,
frame: summary.frame,
status: summary.status,
task_type: summary.task_type,
runner_id: summary.runner_id,
@@ -180,14 +192,6 @@ export default function JobDetails({ job, onClose, onUpdate }) {
});
}

// Check if there's an MP4 output file
const fileArray = Array.isArray(fileData) ? fileData : [];
const mp4File = fileArray.find(
(f) => f.file_type === 'output' && f.file_name && f.file_name.endsWith('.mp4')
);
if (mp4File) {
setVideoUrl(jobs.getVideoUrl(job.id));
}
} catch (error) {
console.error('Failed to load job details:', error);
} finally {
@@ -278,27 +282,17 @@ export default function JobDetails({ job, onClose, onUpdate }) {
};

const subscribe = (channel) => {
if (wsManager.getReadyState() !== WebSocket.OPEN) {
return;
}
// Don't subscribe if already subscribed or pending
if (subscribedChannelsRef.current.has(channel) || pendingSubscriptionsRef.current.has(channel)) {
return; // Already subscribed or subscription pending
}
wsManager.send({ type: 'subscribe', channel });
pendingSubscriptionsRef.current.add(channel); // Mark as pending
// Use wsManager's channel subscription (handles reconnect automatically)
wsManager.subscribeToChannel(channel);
subscribedChannelsRef.current.add(channel);
pendingSubscriptionsRef.current.add(channel);
};

const unsubscribe = (channel) => {
if (wsManager.getReadyState() !== WebSocket.OPEN) {
return;
}
if (!subscribedChannelsRef.current.has(channel)) {
return; // Not subscribed
}
wsManager.send({ type: 'unsubscribe', channel });
// Use wsManager's channel unsubscription
wsManager.unsubscribeFromChannel(channel);
subscribedChannelsRef.current.delete(channel);
console.log('Unsubscribed from channel:', channel);
pendingSubscriptionsRef.current.delete(channel);
};

const unsubscribeAll = () => {
@@ -308,7 +302,8 @@ export default function JobDetails({ job, onClose, onUpdate }) {
};

const updateLogSubscriptions = () => {
if (wsManager.getReadyState() !== WebSocket.OPEN) {
// Guard against undefined job or job.id
if (!job || !job.id) {
return;
}

@@ -326,7 +321,9 @@ export default function JobDetails({ job, onClose, onUpdate }) {

// Subscribe to new channels
shouldSubscribe.forEach(channel => {
subscribe(channel);
if (!subscribedChannelsRef.current.has(channel)) {
subscribe(channel);
}
});

// Unsubscribe from channels that shouldn't be subscribed
@@ -341,23 +338,28 @@ export default function JobDetails({ job, onClose, onUpdate }) {
try {
console.log('JobDetails: Client WebSocket message received:', data.type, data.channel, data);

// Handle subscription responses
// Handle subscription responses - update both local refs and wsManager
if (data.type === 'subscribed' && data.channel) {
pendingSubscriptionsRef.current.delete(data.channel); // Remove from pending
subscribedChannelsRef.current.add(data.channel); // Add to confirmed
pendingSubscriptionsRef.current.delete(data.channel);
subscribedChannelsRef.current.add(data.channel);
wsManager.confirmSubscription(data.channel);
console.log('Successfully subscribed to channel:', data.channel, 'Total subscriptions:', subscribedChannelsRef.current.size);
} else if (data.type === 'subscription_error' && data.channel) {
pendingSubscriptionsRef.current.delete(data.channel); // Remove from pending
subscribedChannelsRef.current.delete(data.channel); // Remove from confirmed (if it was there)
pendingSubscriptionsRef.current.delete(data.channel);
subscribedChannelsRef.current.delete(data.channel);
wsManager.failSubscription(data.channel);
console.error('Subscription failed for channel:', data.channel, data.error);
// If it's the job channel, this is a critical error
if (data.channel === `job:${job.id}`) {
if (job && job.id && data.channel === `job:${job.id}`) {
console.error('Failed to subscribe to job channel - job may not exist or access denied');
}
}

// Handle job channel messages
// Check both explicit channel and job_id match (for backwards compatibility)
// Guard against undefined job.id
if (!job || !job.id) {
return;
}
const isJobChannel = data.channel === `job:${job.id}` ||
(data.job_id === job.id && !data.channel);
if (isJobChannel) {
@@ -449,7 +451,7 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const reloadTasks = async () => {
try {
const signal = abortControllerRef.current?.signal;
const taskListResult = await jobs.getTasksSummary(job.id, { sort: 'frame_start:asc', signal });
const taskListResult = await jobs.getTasksSummary(job.id, { sort: 'frame:asc', signal });

// Check if request was aborted
if (signal?.aborted) {
@@ -465,8 +467,7 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const tasksForDisplay = taskSummaries.map(summary => ({
id: summary.id,
job_id: job.id,
frame_start: summary.frame_start,
frame_end: summary.frame_end,
frame: summary.frame,
status: summary.status,
task_type: summary.task_type,
runner_id: summary.runner_id,
@@ -488,13 +489,62 @@ export default function JobDetails({ job, onClose, onUpdate }) {
}, 100);
return prevArray;
});
} else if (data.type === 'task_reset') {
// Handle task_reset - task was reset to pending, steps and logs were cleared
const taskId = data.task_id || (data.data && (data.data.id || data.data.task_id));
console.log('Task reset received:', { task_id: taskId, data: data.data });

if (!taskId) {
console.warn('task_reset message missing task_id:', data);
return;
}

// Update task in list
setTasks(prev => {
const prevArray = Array.isArray(prev) ? prev : [];
const index = prevArray.findIndex(t => t.id === taskId);

if (index >= 0) {
const updated = [...prevArray];
const oldTask = updated[index];
const newTask = {
...oldTask,
status: data.data?.status || 'pending',
runner_id: null,
current_step: null,
started_at: null,
error_message: data.data?.error_message || null,
retry_count: data.data?.retry_count !== undefined ? data.data.retry_count : oldTask.retry_count,
};
updated[index] = newTask;
console.log('Reset task at index', index, { task_id: taskId, new_task: newTask });
return updated;
}
return prevArray;
});

// Clear steps and logs for this task if flags indicate they were cleared
if (data.data?.steps_cleared || data.data?.logs_cleared) {
setTaskData(prev => {
const current = prev[taskId];
if (!current) return prev;
return {
...prev,
[taskId]: {
steps: data.data?.steps_cleared ? [] : current.steps,
logs: data.data?.logs_cleared ? [] : current.logs,
lastId: 0,
}
};
});
}
} else if (data.type === 'task_added' && data.data) {
// New task was added - reload task summaries to get the new task
console.log('task_added message received, reloading tasks...', data);
const reloadTasks = async () => {
try {
const signal = abortControllerRef.current?.signal;
const taskListResult = await jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame_start:asc', signal });
const taskListResult = await jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame:asc', signal });

// Check if request was aborted
if (signal?.aborted) {
@@ -510,8 +560,7 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const tasksForDisplay = taskSummaries.map(summary => ({
id: summary.id,
job_id: job.id,
frame_start: summary.frame_start,
frame_end: summary.frame_end,
frame: summary.frame,
status: summary.status,
task_type: summary.task_type,
runner_id: summary.runner_id,
@@ -534,7 +583,7 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const reloadTasks = async () => {
try {
const signal = abortControllerRef.current?.signal;
const taskListResult = await jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame_start:asc', signal });
const taskListResult = await jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame:asc', signal });

// Check if request was aborted
if (signal?.aborted) {
@@ -550,8 +599,7 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const tasksForDisplay = taskSummaries.map(summary => ({
id: summary.id,
job_id: job.id,
frame_start: summary.frame_start,
frame_end: summary.frame_end,
frame: summary.frame,
status: summary.status,
task_type: summary.task_type,
runner_id: summary.runner_id,
@@ -738,48 +786,35 @@ export default function JobDetails({ job, onClose, onUpdate }) {
setExpandedTasks(newExpanded);
};

const toggleStep = (taskId, stepName) => {
const key = `${taskId}-${stepName}`;
const newExpanded = new Set(expandedSteps);
if (newExpanded.has(key)) {
newExpanded.delete(key);
} else {
newExpanded.add(key);
// Initialize auto-scroll to true (default: on) when step is first expanded
if (shouldAutoScrollRefs.current[key] === undefined) {
shouldAutoScrollRefs.current[key] = true;
}
}
setExpandedSteps(newExpanded);
};

const toggleAutoScroll = (taskId, stepName) => {
const key = `${taskId}-${stepName}`;
const toggleAutoScroll = (taskId, containerName) => {
const key = `${taskId}-${containerName}`;
// Toggle auto-scroll state (default to true if undefined)
const currentState = shouldAutoScrollRefs.current[key] !== false;
shouldAutoScrollRefs.current[key] = !currentState;
// Force re-render to update button state
setExpandedSteps(new Set(expandedSteps));
// We don't have expandedSteps anymore, so just trigger a re-render by updating a dummy state
setExpandedTasks(new Set(expandedTasks));
};

const handleLogWheel = (taskId, stepName) => {
const key = `${taskId}-${stepName}`;
const handleLogWheel = (taskId, containerName) => {
const key = `${taskId}-${containerName}`;
// Turn off auto-scroll when user scrolls with wheel
if (shouldAutoScrollRefs.current[key] !== false) {
shouldAutoScrollRefs.current[key] = false;
// Force re-render to update button state
setExpandedSteps(new Set(expandedSteps));
setExpandedTasks(new Set(expandedTasks));
}
};

const handleLogClick = (taskId, stepName, e) => {
const handleLogClick = (taskId, containerName, e) => {
// Pause on left or right click
if (e.button === 0 || e.button === 2) {
const key = `${taskId}-${stepName}`;
const key = `${taskId}-${containerName}`;
if (shouldAutoScrollRefs.current[key] !== false) {
shouldAutoScrollRefs.current[key] = false;
// Force re-render to update button state
setExpandedSteps(new Set(expandedSteps));
setExpandedTasks(new Set(expandedTasks));
}
}
};
@@ -838,13 +873,23 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const outputFiles = files.filter((f) => f.file_type === 'output');
const inputFiles = files.filter((f) => f.file_type === 'input');

// Helper to check if a file is an image
// Helper to check if a file is a browser-supported image (or EXR which we convert server-side)
const isImageFile = (fileName) => {
const imageExtensions = ['.png', '.jpg', '.jpeg', '.gif', '.webp', '.bmp', '.svg'];
// Browser-supported image formats + EXR (converted server-side)
const imageExtensions = [
'.png', '.jpg', '.jpeg', '.gif', '.webp', '.bmp', '.svg',
'.ico', '.avif', '.apng', '.jfif', '.pjpeg', '.pjp',
'.exr' // EXR files are converted to PNG server-side
];
const lowerName = fileName.toLowerCase();
return imageExtensions.some(ext => lowerName.endsWith(ext));
};

// Helper to check if a file is an EXR file
const isEXRFile = (fileName) => {
return fileName.toLowerCase().endsWith('.exr');
};

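For orientation, the expected behavior of the two helpers above, shown as illustrative calls (these calls are not part of the diff):

// Illustrative behavior of the helpers above:
isImageFile('frame_0001.EXR'); // true  — matched case-insensitively; previewed via the server-side PNG rendition
isImageFile('frame_0001.mp4'); // false — videos go through the video preview modal instead
isEXRFile('beauty.exr');       // true  — routed to the preview-exr endpoint on click
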
return (
<>
{/* Image Preview Modal */}
@@ -887,6 +932,32 @@ export default function JobDetails({ job, onClose, onUpdate }) {
</div>
)}

{/* Video Preview Modal */}
{previewVideo && (
<div
className="fixed inset-0 bg-black bg-opacity-80 flex items-center justify-center z-[60] p-4"
onClick={() => setPreviewVideo(null)}
>
<div
className="bg-gray-900 rounded-lg shadow-xl max-w-5xl w-full max-h-[95vh] overflow-auto border border-gray-700 relative"
onClick={(e) => e.stopPropagation()}
>
<div className="sticky top-0 bg-gray-900 border-b border-gray-700 px-6 py-4 flex justify-between items-center">
<h3 className="text-xl font-semibold text-gray-100">{previewVideo.fileName}</h3>
<button
onClick={() => setPreviewVideo(null)}
className="text-gray-400 hover:text-gray-200 text-2xl font-bold"
>
×
</button>
</div>
<div className="p-6 bg-black">
<VideoPlayer videoUrl={previewVideo.url} />
</div>
</div>
</div>
)}

<div className="fixed inset-0 bg-black bg-opacity-70 flex items-center justify-center z-50 p-4">
<div className="bg-gray-800 rounded-lg shadow-xl max-w-4xl w-full max-h-[90vh] overflow-y-auto border border-gray-700">
<div className="sticky top-0 bg-gray-800 border-b border-gray-700 px-6 py-4 flex justify-between items-center">
@@ -940,15 +1011,6 @@ export default function JobDetails({ job, onClose, onUpdate }) {
</div>
</div>

{videoUrl && (jobDetails.output_format === 'EXR_264_MP4' || jobDetails.output_format === 'EXR_AV1_MP4') && (
<div>
<h3 className="text-lg font-semibold text-gray-100 mb-3">
Video Preview
</h3>
<VideoPlayer videoUrl={videoUrl} />
</div>
)}

{contextFiles.length > 0 && (
<div>
<h3 className="text-lg font-semibold text-gray-100 mb-3">
@@ -976,9 +1038,15 @@ export default function JobDetails({ job, onClose, onUpdate }) {
files={outputFiles}
onDownload={handleDownload}
onPreview={(file) => {
const imageUrl = jobs.downloadFile(job.id, file.id);
// Use EXR preview endpoint for EXR files, regular download for others
const imageUrl = isEXRFile(file.file_name)
? jobs.previewEXR(job.id, file.id)
: jobs.downloadFile(job.id, file.id);
setPreviewImage({ url: imageUrl, fileName: file.file_name });
}}
onVideoPreview={(file) => {
setPreviewVideo({ url: jobs.getVideoUrl(job.id), fileName: file.file_name });
}}
isImageFile={isImageFile}
/>
</div>
@@ -997,15 +1065,8 @@ export default function JobDetails({ job, onClose, onUpdate }) {
const taskInfo = taskData[task.id] || { steps: [], logs: [] };
const { steps, logs } = taskInfo;

// Group logs by step_name
const logsByStep = {};
logs.forEach(log => {
const stepName = log.step_name || 'general';
if (!logsByStep[stepName]) {
logsByStep[stepName] = [];
}
logsByStep[stepName].push(log);
});
// Sort all logs chronologically (no grouping by step_name)
const sortedLogs = [...logs].sort((a, b) => new Date(a.created_at) - new Date(b.created_at));

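A side note on the new chronological sort: Array.prototype.sort is stable in modern engines, so entries with identical timestamps keep arrival order. If strict same-timestamp ordering ever mattered, an id tie-breaker could be added — a suggestion on our part, not something this change does:

// Hypothetical comparator with an id tie-breaker for equal timestamps:
const byTimeThenId = (a, b) =>
  (new Date(a.created_at) - new Date(b.created_at)) || (a.id - b.id);
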
return (
<div key={task.id} className="bg-gray-900 rounded-lg border border-gray-700">
@@ -1022,9 +1083,9 @@ export default function JobDetails({ job, onClose, onUpdate }) {
{task.status}
</span>
<span className="font-medium text-gray-100">
{task.task_type === 'metadata' ? 'Metadata Extraction' : `Frame ${task.frame_start}${task.frame_end !== task.frame_start ? `-${task.frame_end}` : ''}`}
{task.task_type === 'encode' ? `Encode (${jobDetails.frame_start} - ${jobDetails.frame_end})` : `Frame ${task.frame}`}
</span>
{task.task_type && task.task_type !== 'render' && (
{task.task_type && task.task_type !== 'render' && task.task_type !== 'encode' && (
<span className="text-xs text-gray-400">({task.task_type})</span>
)}
</div>
@@ -1033,153 +1094,46 @@ export default function JobDetails({ job, onClose, onUpdate }) {
</div>
</div>

{/* Task Content (Steps and Logs) */}
{/* Task Content (Continuous Log Stream) */}
{isExpanded && (
<div className="p-4 space-y-3">
{/* General logs (logs without step_name) */}
{logsByStep['general'] && logsByStep['general'].length > 0 && (() => {
const generalKey = `${task.id}-general`;
const isGeneralExpanded = expandedSteps.has(generalKey);
const generalLogs = logsByStep['general'];

return (
<div className="bg-gray-800 rounded-lg border border-gray-700">
<div
onClick={() => toggleStep(task.id, 'general')}
className="flex items-center justify-between p-2 cursor-pointer hover:bg-gray-750 transition-colors"
>
<div className="flex items-center gap-2">
<span className="text-gray-500 text-sm">
{isGeneralExpanded ? '▼' : '▶'}
</span>
<span className="font-medium text-gray-100">General</span>
</div>
<span className="text-xs text-gray-400">
{generalLogs.length} log{generalLogs.length !== 1 ? 's' : ''}
</span>
</div>
{isGeneralExpanded && (
<div className="p-3 border-t border-gray-700">
<div className="flex items-center justify-between mb-2">
<span className="text-sm text-gray-400">Logs</span>
<button
onClick={() => toggleAutoScroll(task.id, 'general')}
className={`px-2 py-1 text-xs rounded ${
shouldAutoScrollRefs.current[generalKey] !== false
? 'bg-green-500/20 text-green-400 hover:bg-green-500/30'
: 'bg-gray-500/20 text-gray-400 hover:bg-gray-500/30'
} transition-colors`}
title={shouldAutoScrollRefs.current[generalKey] !== false ? 'Auto-scroll: ON' : 'Auto-scroll: OFF'}
>
{shouldAutoScrollRefs.current[generalKey] !== false ? '📜 Follow' : '⏸ Paused'}
</button>
{/* Header with auto-scroll */}
<div className="flex items-center justify-between">
<div className="flex items-center gap-1 text-sm text-gray-400">
</div>
<div
ref={el => {
if (el) {
logContainerRefs.current[generalKey] = el;
// Initialize auto-scroll to true (follow logs) when ref is first set
if (shouldAutoScrollRefs.current[generalKey] === undefined) {
shouldAutoScrollRefs.current[generalKey] = true;
}
}
}}
onWheel={() => handleLogWheel(task.id, 'general')}
onMouseDown={(e) => handleLogClick(task.id, 'general', e)}
onContextMenu={(e) => handleLogClick(task.id, 'general', e)}
className="bg-black text-green-400 font-mono text-sm p-3 rounded max-h-64 overflow-y-auto"
>
{generalLogs.map((log) => (
<div
key={log.id}
className={`${getLogLevelColor(log.log_level)} mb-1`}
>
<span className="text-gray-500">
[{new Date(log.created_at).toLocaleTimeString()}]
</span>
<span className="ml-2">{log.message}</span>
</div>
))}
</div>
</div>
)}
</div>
);
})()}

{/* Steps */}
{steps.length > 0 ? (
steps.map((step) => {
const stepKey = `${task.id}-${step.step_name}`;
const isStepExpanded = expandedSteps.has(stepKey);
const stepLogs = logsByStep[step.step_name] || [];

return (
<div key={step.id} className="bg-gray-800 rounded-lg border border-gray-700">
{/* Step Header */}
<div
onClick={() => toggleStep(task.id, step.step_name)}
className="flex items-center justify-between p-2 cursor-pointer hover:bg-gray-750 transition-colors"
>
<div className="flex items-center gap-2">
<span className="text-gray-500 text-sm">
{isStepExpanded ? '▼' : '▶'}
</span>
<span className="text-lg">
{getStepStatusIcon(step.status)}
</span>
<span className="font-medium text-gray-100">{step.step_name}</span>
</div>
<div className="flex items-center gap-3">
{step.duration_ms && (
<span className="text-sm text-gray-400">
{(step.duration_ms / 1000).toFixed(2)}s
</span>
)}
{stepLogs.length > 0 && (
<span className="text-xs text-gray-400">
{stepLogs.length} log{stepLogs.length !== 1 ? 's' : ''}
</span>
)}
</div>
</div>

{/* Step Logs */}
{isStepExpanded && (
<div className="p-3 border-t border-gray-700">
<div className="flex items-center justify-between mb-2">
<span className="text-sm text-gray-400">Logs</span>
<button
onClick={() => toggleAutoScroll(task.id, step.step_name)}
onClick={() => toggleAutoScroll(task.id, 'logs')}
className={`px-2 py-1 text-xs rounded ${
shouldAutoScrollRefs.current[stepKey] !== false
shouldAutoScrollRefs.current[`${task.id}-logs`] !== false
? 'bg-green-500/20 text-green-400 hover:bg-green-500/30'
: 'bg-gray-500/20 text-gray-400 hover:bg-gray-500/30'
} transition-colors`}
title={shouldAutoScrollRefs.current[stepKey] !== false ? 'Auto-scroll: ON' : 'Auto-scroll: OFF'}
title={shouldAutoScrollRefs.current[`${task.id}-logs`] !== false ? 'Auto-scroll: ON' : 'Auto-scroll: OFF'}
>
{shouldAutoScrollRefs.current[stepKey] !== false ? '📜 Follow' : '⏸ Paused'}
{shouldAutoScrollRefs.current[`${task.id}-logs`] !== false ? '📜 Follow' : '⏸ Paused'}
</button>
</div>

{/* Logs */}
<div
ref={el => {
if (el) {
logContainerRefs.current[stepKey] = el;
logContainerRefs.current[`${task.id}-logs`] = el;
// Initialize auto-scroll to true (follow logs) when ref is first set
if (shouldAutoScrollRefs.current[stepKey] === undefined) {
shouldAutoScrollRefs.current[stepKey] = true;
if (shouldAutoScrollRefs.current[`${task.id}-logs`] === undefined) {
shouldAutoScrollRefs.current[`${task.id}-logs`] = true;
}
}
}}
onWheel={() => handleLogWheel(task.id, step.step_name)}
onMouseDown={(e) => handleLogClick(task.id, step.step_name, e)}
onContextMenu={(e) => handleLogClick(task.id, step.step_name, e)}
className="bg-black text-green-400 font-mono text-sm p-3 rounded max-h-64 overflow-y-auto"
onWheel={() => handleLogWheel(task.id, 'logs')}
onMouseDown={(e) => handleLogClick(task.id, 'logs', e)}
onContextMenu={(e) => handleLogClick(task.id, 'logs', e)}
className="bg-black text-green-400 font-mono text-sm p-3 rounded max-h-96 overflow-y-auto"
>
{stepLogs.length === 0 ? (
{sortedLogs.length === 0 ? (
<p className="text-gray-500">No logs yet...</p>
) : (
stepLogs.map((log) => (
sortedLogs.map((log) => (
<div
key={log.id}
className={`${getLogLevelColor(log.log_level)} mb-1`}
@@ -1192,16 +1146,6 @@ export default function JobDetails({ job, onClose, onUpdate }) {
))
)}
</div>
</div>
)}
</div>
);
})
) : (
logsByStep['general'] && logsByStep['general'].length > 0 ? null : (
<p className="text-gray-400 text-sm">No steps yet...</p>
)
)}
</div>
)}
</div>

@@ -12,10 +12,12 @@ export default function JobSubmission({ onSuccess }) {
frame_start: 1,
frame_end: 10,
output_format: 'PNG',
allow_parallel_runners: true,
render_settings: null, // Will contain engine settings
unhide_objects: false, // Unhide objects/collections tweak
enable_execution: false, // Enable auto-execution in Blender
blender_version: '', // Blender version override (empty = auto-detect)
preserve_hdr: false, // Preserve HDR range for EXR encoding
preserve_alpha: false, // Preserve alpha channel for EXR encoding
});
const [showAdvancedSettings, setShowAdvancedSettings] = useState(false);
const [file, setFile] = useState(null);
@@ -32,6 +34,8 @@ export default function JobSubmission({ onSuccess }) {
const [selectedMainBlend, setSelectedMainBlend] = useState('');
const [confirmedMissingFiles, setConfirmedMissingFiles] = useState(false); // Confirmation for missing files
const [uploadTimeRemaining, setUploadTimeRemaining] = useState(null); // Estimated time remaining in seconds
const [blenderVersions, setBlenderVersions] = useState([]); // Available Blender versions from server
const [loadingBlenderVersions, setLoadingBlenderVersions] = useState(false);

// Use refs to track cancellation state across re-renders
const isCancelledRef = useRef(false);
@@ -72,6 +76,25 @@ export default function JobSubmission({ onSuccess }) {
}
};

// Fetch available Blender versions on mount
useEffect(() => {
const fetchBlenderVersions = async () => {
setLoadingBlenderVersions(true);
try {
const response = await fetch('/api/blender/versions');
if (response.ok) {
const data = await response.json();
setBlenderVersions(data.versions || []);
}
} catch (err) {
console.error('Failed to fetch Blender versions:', err);
} finally {
setLoadingBlenderVersions(false);
}
};
fetchBlenderVersions();
}, []);

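The effect above only relies on a versions array whose items carry a full string — that is all the version dropdown later in this file reads. A response of roughly this shape would satisfy it; the concrete values are illustrative, not taken from the diff:

// Assumed shape of GET /api/blender/versions, inferred from the
// `data.versions || []` guard here and the `v.full` usage in the dropdown:
// {
//   "versions": [
//     { "full": "4.2.1" },
//     { "full": "3.6.14" }
//   ]
// }
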
// Connect to shared WebSocket on mount
useEffect(() => {
listenerIdRef.current = wsManager.subscribe('jobsubmission', {
@@ -79,14 +102,16 @@ export default function JobSubmission({ onSuccess }) {
console.log('JobSubmission: Shared WebSocket connected');
},
message: (data) => {
// Handle subscription responses
// Handle subscription responses - update both local refs and wsManager
if (data.type === 'subscribed' && data.channel) {
pendingSubscriptionsRef.current.delete(data.channel);
subscribedChannelsRef.current.add(data.channel);
wsManager.confirmSubscription(data.channel);
console.log('Successfully subscribed to channel:', data.channel);
} else if (data.type === 'subscription_error' && data.channel) {
pendingSubscriptionsRef.current.delete(data.channel);
subscribedChannelsRef.current.delete(data.channel);
wsManager.failSubscription(data.channel);
console.error('Subscription failed for channel:', data.channel, data.error);
// If it's the upload channel we're trying to subscribe to, show error
if (data.channel.startsWith('upload:')) {
@@ -94,52 +119,7 @@ export default function JobSubmission({ onSuccess }) {
}
}

// Handle upload progress messages
if (data.channel && data.channel.startsWith('upload:') && subscribedChannelsRef.current.has(data.channel)) {
if (data.type === 'upload_progress' || data.type === 'processing_status') {
const progress = data.data?.progress || 0;
const status = data.data?.status || 'uploading';
const message = data.data?.message || '';

setUploadProgress(progress);

// Calculate time remaining for upload progress
if (status === 'uploading' && progress > 0 && progress < 100) {
if (!uploadStartTimeRef.current) {
uploadStartTimeRef.current = Date.now();
}
const elapsed = (Date.now() - uploadStartTimeRef.current) / 1000; // seconds
const remaining = (elapsed / progress) * (100 - progress);
setUploadTimeRemaining(remaining);
} else if (status === 'completed' || status === 'error') {
setUploadTimeRemaining(null);
uploadStartTimeRef.current = null;
}

if (status === 'uploading') {
setMetadataStatus('extracting');
} else if (status === 'processing' || status === 'extracting_zip' || status === 'extracting_metadata' || status === 'creating_context') {
setMetadataStatus('processing');
// Reset time remaining for processing phase
setUploadTimeRemaining(null);
} else if (status === 'completed') {
setMetadataStatus('completed');
setIsUploading(false);
setUploadTimeRemaining(null);
uploadStartTimeRef.current = null;
// Unsubscribe from upload channel
unsubscribeFromUploadChannel(data.channel);
} else if (status === 'error') {
setMetadataStatus('error');
setIsUploading(false);
setUploadTimeRemaining(null);
uploadStartTimeRef.current = null;
setError(message || 'Upload/processing failed');
// Unsubscribe from upload channel
unsubscribeFromUploadChannel(data.channel);
}
}
}
// Upload progress is now handled via HTTP response - no WebSocket messages needed
},
error: (error) => {
console.error('JobSubmission: Shared WebSocket error:', error);
@@ -166,13 +146,10 @@ export default function JobSubmission({ onSuccess }) {

// Helper function to unsubscribe from upload channel
const unsubscribeFromUploadChannel = (channel) => {
if (wsManager.getReadyState() !== WebSocket.OPEN) {
return;
}
if (!subscribedChannelsRef.current.has(channel)) {
return; // Not subscribed
}
wsManager.send({ type: 'unsubscribe', channel });
wsManager.unsubscribeFromChannel(channel);
subscribedChannelsRef.current.delete(channel);
pendingSubscriptionsRef.current.delete(channel);
console.log('Unsubscribed from upload channel:', channel);
@@ -180,11 +157,8 @@ export default function JobSubmission({ onSuccess }) {

// Helper function to unsubscribe from all channels
const unsubscribeFromAllChannels = () => {
if (wsManager.getReadyState() !== WebSocket.OPEN) {
return;
}
subscribedChannelsRef.current.forEach(channel => {
wsManager.send({ type: 'unsubscribe', channel });
wsManager.unsubscribeFromChannel(channel);
});
subscribedChannelsRef.current.clear();
pendingSubscriptionsRef.current.clear();
@@ -223,41 +197,41 @@ export default function JobSubmission({ onSuccess }) {
uploadStartTimeRef.current = Date.now();
setMetadataStatus('extracting');

// Upload file to new endpoint (no job required)
// Upload file and get metadata in HTTP response
const result = await jobs.uploadFileForJobCreation(selectedFile, (progress) => {
// XHR progress as fallback, but WebSocket is primary
// Show upload progress during upload
setUploadProgress(progress);
// Calculate time remaining for XHR progress
// Calculate time remaining for upload progress
if (progress > 0 && progress < 100 && uploadStartTimeRef.current) {
const elapsed = (Date.now() - uploadStartTimeRef.current) / 1000; // seconds
const remaining = (elapsed / progress) * (100 - progress);
setUploadTimeRemaining(remaining);
} else if (progress >= 100) {
// Upload complete - switch to processing status
setUploadProgress(100);
setMetadataStatus('processing');
setUploadTimeRemaining(null);
}
}, selectedMainBlend || undefined);

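The time-remaining math in the progress callback above is a simple linear projection. Factored out for clarity (the helper name is ours, for illustration only), it amounts to:

// Linear ETA: if `progress` percent took `elapsed` seconds, the remaining
// (100 - progress) percent is projected to take proportionally as long.
function estimateRemainingSeconds(startedAtMs, progress) {
  if (progress <= 0 || progress >= 100) return null; // no estimate at the endpoints
  const elapsed = (Date.now() - startedAtMs) / 1000;
  return (elapsed / progress) * (100 - progress);
}
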
// Store session ID for later use when creating the job
if (result.session_id) {
setUploadSessionId(result.session_id);

// Subscribe to upload progress channel
if (wsManager.getReadyState() === WebSocket.OPEN) {
const channel = `upload:${result.session_id}`;
wsManager.send({ type: 'subscribe', channel });
// Don't set subscribedUploadChannelRef yet - wait for confirmation
console.log('Subscribing to upload channel:', channel);
}
}

// Check if ZIP extraction found multiple blend files
if (result.zip_extracted && result.blend_files && result.blend_files.length > 1) {
setBlendFiles(result.blend_files);
// Upload and processing complete - metadata is in the response
setIsUploading(false);
setUploadProgress(100);
setUploadTimeRemaining(null);
uploadStartTimeRef.current = null;

// Handle ZIP extraction results - multiple blend files found
if (result.status === 'select_blend' || (result.zip_extracted && result.blend_files && result.blend_files.length > 1)) {
setBlendFiles(result.blend_files || []);
setMetadataStatus('select_blend');
return;
}

// Upload and processing complete
setIsUploading(false);

// If metadata was extracted, use it
if (result.metadata_extracted && result.metadata) {
setMetadata(result.metadata);
@@ -286,6 +260,7 @@ export default function JobSubmission({ onSuccess }) {
...result.metadata.render_settings,
engine_settings: result.metadata.render_settings.engine_settings || {},
} : null,
blender_version: result.metadata.blender_version || prev.blender_version,
}));
} else {
setMetadataStatus('error');
@@ -323,36 +298,30 @@ export default function JobSubmission({ onSuccess }) {

// Re-upload with selected main blend file
const result = await jobs.uploadFileForJobCreation(file, (progress) => {
// XHR progress as fallback, but WebSocket is primary
// Show upload progress during upload
setUploadProgress(progress);
// Calculate time remaining for XHR progress
// Calculate time remaining for upload progress
if (progress > 0 && progress < 100 && uploadStartTimeRef.current) {
const elapsed = (Date.now() - uploadStartTimeRef.current) / 1000; // seconds
const remaining = (elapsed / progress) * (100 - progress);
setUploadTimeRemaining(remaining);
} else if (progress >= 100) {
// Upload complete - switch to processing status
setUploadProgress(100);
setMetadataStatus('processing');
setUploadTimeRemaining(null);
}
}, selectedMainBlend);

setBlendFiles([]);

// Store session ID and subscribe to upload progress
// Store session ID
if (result.session_id) {
setUploadSessionId(result.session_id);

// Subscribe to upload progress channel
if (wsManager.getReadyState() === WebSocket.OPEN) {
const channel = `upload:${result.session_id}`;
// Don't subscribe if already subscribed or pending
if (!subscribedChannelsRef.current.has(channel) && !pendingSubscriptionsRef.current.has(channel)) {
wsManager.send({ type: 'subscribe', channel });
pendingSubscriptionsRef.current.add(channel);
console.log('Subscribing to upload channel:', channel);
}
}
}

// Upload and processing complete
setIsUploading(false);
// Upload and processing complete - metadata is in the response
setIsUploading(false);

// If metadata was extracted, use it
if (result.metadata_extracted && result.metadata) {
@@ -382,6 +351,7 @@ export default function JobSubmission({ onSuccess }) {
...result.metadata.render_settings,
engine_settings: result.metadata.render_settings.engine_settings || {},
} : null,
blender_version: result.metadata.blender_version || prev.blender_version,
}));
} else {
setMetadataStatus('error');
@@ -477,11 +447,13 @@ export default function JobSubmission({ onSuccess }) {
frame_start: parseInt(formData.frame_start),
frame_end: parseInt(formData.frame_end),
output_format: formData.output_format,
allow_parallel_runners: formData.allow_parallel_runners,
render_settings: renderSettings,
upload_session_id: uploadSessionId || undefined, // Pass session ID to move context archive
unhide_objects: formData.unhide_objects || undefined, // Pass unhide toggle
enable_execution: formData.enable_execution || undefined, // Pass enable execution toggle
preserve_hdr: formData.preserve_hdr || undefined, // Pass preserve HDR toggle
preserve_alpha: formData.preserve_alpha || undefined, // Pass preserve alpha toggle
blender_version: formData.blender_version || undefined, // Pass Blender version override
});

// Fetch the full job details
@@ -508,10 +480,12 @@ export default function JobSubmission({ onSuccess }) {
frame_start: 1,
frame_end: 10,
output_format: 'PNG',
allow_parallel_runners: true,
render_settings: null,
unhide_objects: false,
enable_execution: false,
blender_version: '',
preserve_hdr: false,
preserve_alpha: false,
});
setShowAdvancedSettings(false);
formatManuallyChangedRef.current = false;
@@ -534,6 +508,7 @@ export default function JobSubmission({ onSuccess }) {
render_settings: null,
unhide_objects: false,
enable_execution: false,
blender_version: '',
});
setShowAdvancedSettings(false);
setFile(null);
@@ -672,20 +647,9 @@ export default function JobSubmission({ onSuccess }) {
</div>
</div>
) : metadataStatus === 'processing' ? (
<div className="space-y-2">
<div className="flex items-center justify-between text-xs">
<span>Processing file and extracting metadata...</span>
<span>{Math.round(uploadProgress)}%</span>
</div>
<div className="w-full bg-gray-700 rounded-full h-2">
<div
className="bg-orange-500 h-2 rounded-full transition-all duration-300"
style={{ width: `${uploadProgress}%` }}
></div>
</div>
<div className="text-xs text-orange-400/80 mt-1">
This may take a moment for large files...
</div>
<div className="flex items-center gap-2">
<div className="animate-spin rounded-full h-4 w-4 border-b-2 border-orange-500"></div>
<span>Processing file and extracting metadata...</span>
</div>
) : (
<div className="flex items-center gap-2">
@@ -868,20 +832,35 @@ export default function JobSubmission({ onSuccess }) {
<option value="EXR">EXR</option>
<option value="EXR_264_MP4">EXR_264_MP4 (High Quality Video Without Alpha)</option>
<option value="EXR_AV1_MP4">EXR_AV1_MP4 (High Quality Video With Alpha)</option>
<option value="EXR_VP9_WEBM">EXR_VP9_WEBM (High Quality Video With Alpha & HDR)</option>
</select>
</div>

<div className="flex items-center">
<input
type="checkbox"
id="allow_parallel_runners"
checked={formData.allow_parallel_runners}
onChange={(e) => setFormData({ ...formData, allow_parallel_runners: e.target.checked })}
className="h-4 w-4 text-orange-600 focus:ring-orange-500 border-gray-600 bg-gray-900 rounded"
/>
<label htmlFor="allow_parallel_runners" className="ml-2 block text-sm text-gray-300">
Allow multiple runners to work on this job simultaneously
<div>
<label className="block text-sm font-medium text-gray-300 mb-2">
Blender Version
{metadata?.blender_version && (
<span className="text-xs text-gray-400 ml-2">
(detected: {metadata.blender_version})
</span>
)}
</label>
<select
value={formData.blender_version}
onChange={(e) => setFormData({ ...formData, blender_version: e.target.value })}
className="w-full px-4 py-2 bg-gray-900 border border-gray-600 rounded-lg text-gray-100 focus:ring-2 focus:ring-orange-500 focus:border-transparent"
disabled={loadingBlenderVersions}
>
<option value="">Auto-detect from blend file</option>
{blenderVersions.map((v) => (
<option key={v.full} value={v.full}>
{v.full}
</option>
))}
</select>
<p className="mt-1 text-xs text-gray-400">
Override the Blender version used for rendering. Leave as auto-detect to use the version the file was saved with.
</p>
</div>

<div className="p-4 bg-blue-400/20 border border-blue-400/50 rounded-lg">
@@ -920,6 +899,53 @@ export default function JobSubmission({ onSuccess }) {
</div>
</div>

{(formData.output_format === 'EXR_264_MP4' || formData.output_format === 'EXR_AV1_MP4' || formData.output_format === 'EXR_VP9_WEBM') && (
<>
<div className="p-3 bg-blue-400/10 border border-blue-400/30 rounded-lg mb-2">
<p className="text-xs text-blue-400">
<strong>Note:</strong> The preserve options below allow you to explicitly control HDR and alpha preservation. If autodetection finds HDR content or alpha channels in your EXR files, they will be automatically preserved even if these options are unchecked. <strong>Important:</strong> Alpha detection only checks the first frame, so if your render uses transparency later in the sequence, you should explicitly enable the preserve alpha option. HDR detection is not perfect and may miss some HDR content, so if you're certain your render contains HDR content, you should explicitly enable the preserve HDR option.
</p>
</div>
<div className="p-4 bg-blue-400/20 border border-blue-400/50 rounded-lg">
<div className="flex items-center">
<input
type="checkbox"
id="preserve_hdr"
checked={formData.preserve_hdr}
onChange={(e) => setFormData({ ...formData, preserve_hdr: e.target.checked })}
className="h-4 w-4 text-orange-600 focus:ring-orange-500 border-gray-600 bg-gray-900 rounded"
/>
<label htmlFor="preserve_hdr" className="ml-2 block text-sm text-gray-300">
<span className="font-medium">Preserve HDR range</span>
<span className="text-xs text-gray-400 block mt-1">
Explicitly enable HDR preservation with HLG transfer function. Works on both HDR and SDR displays. HDR content will be automatically detected and preserved if present, but detection may miss some content. If you're certain your render contains HDR, enable this option.
</span>
</label>
</div>
</div>
</>
)}

{(formData.output_format === 'EXR_AV1_MP4' || formData.output_format === 'EXR_VP9_WEBM') && (
<div className="p-4 bg-blue-400/20 border border-blue-400/50 rounded-lg">
<div className="flex items-center">
<input
type="checkbox"
id="preserve_alpha"
checked={formData.preserve_alpha}
onChange={(e) => setFormData({ ...formData, preserve_alpha: e.target.checked })}
className="h-4 w-4 text-orange-600 focus:ring-orange-500 border-gray-600 bg-gray-900 rounded"
/>
<label htmlFor="preserve_alpha" className="ml-2 block text-sm text-gray-300">
<span className="font-medium">Preserve alpha channel</span>
<span className="text-xs text-gray-400 block mt-1">
Explicitly enable alpha channel encoding. Only available for AV1 and VP9 codecs. Alpha channels will be automatically detected and preserved if present in the first frame. Enable this if your render uses transparency later in the sequence.
</span>
</label>
</div>
</div>
)}

{metadata && metadataStatus === 'completed' && (
<>
<div className="p-4 bg-green-400/20 border border-green-400/50 rounded-lg text-sm mb-4">

@@ -376,6 +376,10 @@ export const jobs = {
return `${API_BASE}/jobs/${jobId}/files/${fileId}/download`;
},

previewEXR(jobId, fileId) {
return `${API_BASE}/jobs/${jobId}/files/${fileId}/preview-exr`;
},

getVideoUrl(jobId) {
return `${API_BASE}/jobs/${jobId}/video`;
},

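The two new URL builders slot in next to the existing download helper. Assuming API_BASE is '/api' (its value is not shown in this hunk), they would resolve as follows:

// Illustrative output, assuming API_BASE = '/api':
jobs.previewEXR(42, 7); // '/api/jobs/42/files/7/preview-exr' — server-side PNG rendition of an EXR
jobs.getVideoUrl(42);   // '/api/jobs/42/video' — the encoded video preview for the job
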
@@ -10,6 +10,11 @@ class WebSocketManager {
this.isConnecting = false;
this.listenerIdCounter = 0;
this.verboseLogging = false; // Set to true to enable verbose WebSocket logging

// Track server-side channel subscriptions for re-subscription on reconnect
this.serverSubscriptions = new Set(); // Channels we want to be subscribed to
this.confirmedSubscriptions = new Set(); // Channels confirmed by server
this.pendingSubscriptions = new Set(); // Channels waiting for confirmation
}

connect() {
@@ -37,6 +42,10 @@ class WebSocketManager {
console.log('Shared WebSocket connected');
}
this.isConnecting = false;

// Re-subscribe to all channels that were previously subscribed
this.resubscribeToChannels();

this.notifyListeners('open', {});
};

@@ -68,17 +77,24 @@ class WebSocketManager {
}
this.ws = null;
this.isConnecting = false;

// Clear confirmed/pending but keep serverSubscriptions for re-subscription
this.confirmedSubscriptions.clear();
this.pendingSubscriptions.clear();

this.notifyListeners('close', event);

// Always retry connection
if (this.reconnectTimeout) {
clearTimeout(this.reconnectTimeout);
}
this.reconnectTimeout = setTimeout(() => {
if (!this.ws || this.ws.readyState === WebSocket.CLOSED) {
this.connect();
// Always retry connection if we have listeners
if (this.listeners.size > 0) {
if (this.reconnectTimeout) {
clearTimeout(this.reconnectTimeout);
}
}, this.reconnectDelay);
this.reconnectTimeout = setTimeout(() => {
if (!this.ws || this.ws.readyState === WebSocket.CLOSED) {
this.connect();
}
}, this.reconnectDelay);
}
};
} catch (error) {
console.error('Failed to create WebSocket:', error);
@@ -159,6 +175,81 @@ class WebSocketManager {
return this.ws ? this.ws.readyState : WebSocket.CLOSED;
}

// Subscribe to a server-side channel (will be re-subscribed on reconnect)
subscribeToChannel(channel) {
if (this.serverSubscriptions.has(channel)) {
// Already subscribed or pending
return;
}

this.serverSubscriptions.add(channel);

if (this.ws && this.ws.readyState === WebSocket.OPEN) {
if (!this.confirmedSubscriptions.has(channel) && !this.pendingSubscriptions.has(channel)) {
this.pendingSubscriptions.add(channel);
this.send({ type: 'subscribe', channel });
if (this.verboseLogging) {
console.log('WebSocketManager: Subscribing to channel:', channel);
}
}
}
}

// Unsubscribe from a server-side channel (won't be re-subscribed on reconnect)
unsubscribeFromChannel(channel) {
this.serverSubscriptions.delete(channel);
this.confirmedSubscriptions.delete(channel);
this.pendingSubscriptions.delete(channel);

if (this.ws && this.ws.readyState === WebSocket.OPEN) {
this.send({ type: 'unsubscribe', channel });
if (this.verboseLogging) {
console.log('WebSocketManager: Unsubscribing from channel:', channel);
}
}
}

// Mark a channel subscription as confirmed (call this when server confirms)
confirmSubscription(channel) {
this.pendingSubscriptions.delete(channel);
this.confirmedSubscriptions.add(channel);
if (this.verboseLogging) {
console.log('WebSocketManager: Subscription confirmed for channel:', channel);
}
}

// Mark a channel subscription as failed (call this when server rejects)
failSubscription(channel) {
this.pendingSubscriptions.delete(channel);
this.serverSubscriptions.delete(channel);
if (this.verboseLogging) {
console.log('WebSocketManager: Subscription failed for channel:', channel);
}
}

// Check if subscribed to a channel
isSubscribedToChannel(channel) {
return this.confirmedSubscriptions.has(channel);
}

// Re-subscribe to all channels after reconnect
resubscribeToChannels() {
if (this.serverSubscriptions.size === 0) {
return;
}

if (this.verboseLogging) {
console.log('WebSocketManager: Re-subscribing to channels:', Array.from(this.serverSubscriptions));
}

for (const channel of this.serverSubscriptions) {
if (!this.pendingSubscriptions.has(channel)) {
this.pendingSubscriptions.add(channel);
this.send({ type: 'subscribe', channel });
}
}
}

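Taken together, the new methods give components a declarative channel lifecycle: declare interest, relay the server's verdict, and let the manager handle reconnects. A sketch of the intended call pattern from a component's point of view — our reading of the API above, not code from the diff:

// jobId is hypothetical here, purely for illustration.
const jobId = 42;

// Declare interest once; the manager re-sends the subscribe on every reconnect.
wsManager.subscribeToChannel(`job:${jobId}`);

// In the component's message handler, relay the server's verdict back:
//   { type: 'subscribed', channel }         -> wsManager.confirmSubscription(channel)
//   { type: 'subscription_error', channel } -> wsManager.failSubscription(channel)

// Check confirmation state before trusting channel-tagged messages:
if (wsManager.isSubscribedToChannel(`job:${jobId}`)) { /* channel is live */ }

// Withdraw interest; this also removes the channel from the re-subscribe set.
wsManager.unsubscribeFromChannel(`job:${jobId}`);
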
disconnect() {
if (this.reconnectTimeout) {
clearTimeout(this.reconnectTimeout);
@@ -169,6 +260,9 @@ class WebSocketManager {
this.ws = null;
}
this.listeners.clear();
this.serverSubscriptions.clear();
this.confirmedSubscriptions.clear();
this.pendingSubscriptions.clear();
}
}