Enhance logging and context handling in job management. Introduce logger initialization with configurable parameters in the manager and runner commands. Update job context handling to use tar files instead of tar.gz, and implement ETag generation for improved caching. Refactor API endpoints to support the new context file structure and improve error handling in job submissions. Add support for unhide-objects and auto-execution options in job creation requests.
@@ -11,17 +11,34 @@ import (
	"jiggablend/internal/api"
	"jiggablend/internal/auth"
	"jiggablend/internal/database"
	"jiggablend/internal/logger"
	"jiggablend/internal/storage"
)

func main() {
	var (
		port        = flag.String("port", getEnv("PORT", "8080"), "Server port")
		dbPath      = flag.String("db", getEnv("DB_PATH", "jiggablend.db"), "Database path")
		storagePath = flag.String("storage", getEnv("STORAGE_PATH", "./jiggablend-storage"), "Storage path")
		port          = flag.String("port", getEnv("PORT", "8080"), "Server port")
		dbPath        = flag.String("db", getEnv("DB_PATH", "jiggablend.db"), "Database path")
		storagePath   = flag.String("storage", getEnv("STORAGE_PATH", "./jiggablend-storage"), "Storage path")
		logDir        = flag.String("log-dir", getEnv("LOG_DIR", "./logs"), "Log directory")
		logMaxSize    = flag.Int("log-max-size", getEnvInt("LOG_MAX_SIZE", 100), "Maximum log file size in MB before rotation")
		logMaxBackups = flag.Int("log-max-backups", getEnvInt("LOG_MAX_BACKUPS", 5), "Maximum number of rotated log files to keep")
		logMaxAge     = flag.Int("log-max-age", getEnvInt("LOG_MAX_AGE", 30), "Maximum age in days for rotated log files")
	)
	flag.Parse()

	// Initialize logger (writes to both stdout and log file with rotation)
	logDirPath := *logDir
	if err := logger.Init(logDirPath, "manager.log", *logMaxSize, *logMaxBackups, *logMaxAge); err != nil {
		log.Fatalf("Failed to initialize logger: %v", err)
	}
	defer func() {
		if l := logger.GetDefault(); l != nil {
			l.Close()
		}
	}()
	log.Printf("Log rotation configured: max_size=%dMB, max_backups=%d, max_age=%d days", *logMaxSize, *logMaxBackups, *logMaxAge)

	// Initialize database
	db, err := database.NewDB(*dbPath)
	if err != nil {

@@ -86,6 +103,16 @@ func getEnv(key, defaultValue string) string {
	return defaultValue
}

func getEnvInt(key string, defaultValue int) int {
	if value := os.Getenv(key); value != "" {
		var result int
		if _, err := fmt.Sscanf(value, "%d", &result); err == nil {
			return result
		}
	}
	return defaultValue
}

// checkBlenderAvailable checks if Blender is available by running `blender --version`
func checkBlenderAvailable() error {
	cmd := exec.Command("blender", "--version")

@@ -13,6 +13,7 @@ import (
	"syscall"
	"time"

	"jiggablend/internal/logger"
	"jiggablend/internal/runner"
)

@@ -31,6 +32,10 @@ func main() {
		token          = flag.String("token", getEnv("REGISTRATION_TOKEN", ""), "Registration token")
		secretsFile    = flag.String("secrets-file", getEnv("SECRETS_FILE", ""), "Path to secrets file for persistent storage (default: ./runner-secrets.json, or ./runner-secrets-{id}.json if multiple runners)")
		runnerIDSuffix = flag.String("runner-id", getEnv("RUNNER_ID", ""), "Unique runner ID suffix (auto-generated if not provided)")
		logDir         = flag.String("log-dir", getEnv("LOG_DIR", "./logs"), "Log directory")
		logMaxSize     = flag.Int("log-max-size", getEnvInt("LOG_MAX_SIZE", 100), "Maximum log file size in MB before rotation")
		logMaxBackups  = flag.Int("log-max-backups", getEnvInt("LOG_MAX_BACKUPS", 5), "Maximum number of rotated log files to keep")
		logMaxAge      = flag.Int("log-max-age", getEnvInt("LOG_MAX_AGE", 30), "Maximum age in days for rotated log files")
	)
	flag.Parse()

@@ -55,6 +60,22 @@ func main() {
		*name = fmt.Sprintf("%s-%s", *name, runnerIDStr)
	}

	// Initialize logger (writes to both stdout and log file with rotation)
	// Use runner-specific log file name based on the final name
	sanitizedName := strings.ReplaceAll(*name, "/", "_")
	sanitizedName = strings.ReplaceAll(sanitizedName, "\\", "_")
	logFileName := fmt.Sprintf("runner-%s.log", sanitizedName)

	if err := logger.Init(*logDir, logFileName, *logMaxSize, *logMaxBackups, *logMaxAge); err != nil {
		log.Fatalf("Failed to initialize logger: %v", err)
	}
	defer func() {
		if l := logger.GetDefault(); l != nil {
			l.Close()
		}
	}()
	log.Printf("Log rotation configured: max_size=%dMB, max_backups=%d, max_age=%d days", *logMaxSize, *logMaxBackups, *logMaxAge)

	// Set default secrets file if not provided - always use current directory
	if *secretsFile == "" {
		if *runnerIDSuffix != "" || getEnv("RUNNER_ID", "") != "" {

@@ -210,6 +231,16 @@ func getEnv(key, defaultValue string) string {
	return defaultValue
}

func getEnvInt(key string, defaultValue int) int {
	if value := os.Getenv(key); value != "" {
		var result int
		if _, err := fmt.Sscanf(value, "%d", &result); err == nil {
			return result
		}
	}
	return defaultValue
}

// generateShortID generates a short random ID (8 hex characters)
func generateShortID() string {
	bytes := make([]byte, 4)

go.mod (1 changed line)
@@ -10,6 +10,7 @@ require (
	github.com/marcboeker/go-duckdb/v2 v2.4.3
	golang.org/x/crypto v0.45.0
	golang.org/x/oauth2 v0.33.0
	gopkg.in/natefinch/lumberjack.v2 v2.2.1
)

require (

go.sum (2 changed lines)
@@ -82,5 +82,7 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

internal/api/jobs.go (1729 changed lines)
@@ -4,8 +4,8 @@ import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"database/sql"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"jiggablend/pkg/scripts"
|
||||
"jiggablend/pkg/types"
|
||||
)
|
||||
|
||||
@@ -169,22 +170,26 @@ func (s *Server) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) {
|
||||
// extractMetadataFromContext extracts metadata from the blend file in a context archive
|
||||
// Returns the extracted metadata or an error
|
||||
func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata, error) {
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz")
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar")
|
||||
|
||||
// Check if context exists
|
||||
if _, err := os.Stat(contextPath); err != nil {
|
||||
return nil, fmt.Errorf("context archive not found: %w", err)
|
||||
}
|
||||
|
||||
// Create temporary directory for extraction
|
||||
tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-metadata-%d-*", jobID))
|
||||
// Create temporary directory for extraction under storage base path
|
||||
tmpDir, err := s.storage.TempDir(fmt.Sprintf("jiggablend-metadata-%d-*", jobID))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmpDir); err != nil {
|
||||
log.Printf("Warning: Failed to clean up temp directory %s: %v", tmpDir, err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Extract context archive
|
||||
if err := s.extractTarGz(contextPath, tmpDir); err != nil {
|
||||
if err := s.extractTar(contextPath, tmpDir); err != nil {
|
||||
return nil, fmt.Errorf("failed to extract context: %w", err)
|
||||
}
|
||||
|
||||
@@ -228,188 +233,20 @@ func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata,
|
||||
return nil, fmt.Errorf("no .blend file found in context")
|
||||
}
|
||||
|
||||
// Create Python script to extract metadata
|
||||
// Use embedded Python script
|
||||
scriptPath := filepath.Join(tmpDir, "extract_metadata.py")
|
||||
scriptContent := `import bpy
|
||||
import json
|
||||
import sys
|
||||
|
||||
# Make all file paths relative to the blend file location FIRST
|
||||
# This must be done immediately after file load, before any other operations
|
||||
# to prevent Blender from trying to access external files with absolute paths
|
||||
try:
|
||||
bpy.ops.file.make_paths_relative()
|
||||
print("Made all file paths relative to blend file")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not make paths relative: {e}")
|
||||
|
||||
# Check for missing addons that the blend file requires
|
||||
# Blender marks missing addons with "_missing" suffix in preferences
|
||||
missing_files_info = {
|
||||
"checked": False,
|
||||
"has_missing": False,
|
||||
"missing_files": [],
|
||||
"missing_addons": []
|
||||
}
|
||||
|
||||
try:
|
||||
missing = []
|
||||
for mod in bpy.context.preferences.addons:
|
||||
if mod.module.endswith("_missing"):
|
||||
missing.append(mod.module.rsplit("_", 1)[0])
|
||||
|
||||
missing_files_info["checked"] = True
|
||||
if missing:
|
||||
missing_files_info["has_missing"] = True
|
||||
missing_files_info["missing_addons"] = missing
|
||||
print("Missing add-ons required by this .blend:")
|
||||
for name in missing:
|
||||
print(" -", name)
|
||||
else:
|
||||
print("No missing add-ons detected – file is headless-safe")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check for missing addons: {e}")
|
||||
missing_files_info["error"] = str(e)
|
||||
|
||||
# Get scene
|
||||
scene = bpy.context.scene
|
||||
|
||||
# Extract frame range from scene settings
|
||||
frame_start = scene.frame_start
|
||||
frame_end = scene.frame_end
|
||||
|
||||
# Also check for actual animation range (keyframes)
|
||||
# Find the earliest and latest keyframes across all objects
|
||||
animation_start = None
|
||||
animation_end = None
|
||||
|
||||
for obj in scene.objects:
|
||||
if obj.animation_data and obj.animation_data.action:
|
||||
action = obj.animation_data.action
|
||||
if action.fcurves:
|
||||
for fcurve in action.fcurves:
|
||||
if fcurve.keyframe_points:
|
||||
for keyframe in fcurve.keyframe_points:
|
||||
frame = int(keyframe.co[0])
|
||||
if animation_start is None or frame < animation_start:
|
||||
animation_start = frame
|
||||
if animation_end is None or frame > animation_end:
|
||||
animation_end = frame
|
||||
|
||||
# Use animation range if available, otherwise use scene frame range
|
||||
# If scene range seems wrong (start == end), prefer animation range
|
||||
if animation_start is not None and animation_end is not None:
|
||||
if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end):
|
||||
# Use animation range if scene range is invalid or animation extends beyond it
|
||||
frame_start = animation_start
|
||||
frame_end = animation_end
|
||||
|
||||
# Extract render settings
|
||||
render = scene.render
|
||||
resolution_x = render.resolution_x
|
||||
resolution_y = render.resolution_y
|
||||
engine = scene.render.engine.upper()
|
||||
|
||||
# Determine output format from file format
|
||||
output_format = render.image_settings.file_format
|
||||
|
||||
# Extract engine-specific settings
|
||||
engine_settings = {}
|
||||
|
||||
if engine == 'CYCLES':
|
||||
cycles = scene.cycles
|
||||
engine_settings = {
|
||||
"samples": getattr(cycles, 'samples', 128),
|
||||
"use_denoising": getattr(cycles, 'use_denoising', False),
|
||||
"denoising_radius": getattr(cycles, 'denoising_radius', 0),
|
||||
"denoising_strength": getattr(cycles, 'denoising_strength', 0.0),
|
||||
"device": getattr(cycles, 'device', 'CPU'),
|
||||
"use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False),
|
||||
"adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01,
|
||||
"use_fast_gi": getattr(cycles, 'use_fast_gi', False),
|
||||
"light_tree": getattr(cycles, 'use_light_tree', False),
|
||||
"use_light_linking": getattr(cycles, 'use_light_linking', False),
|
||||
"caustics_reflective": getattr(cycles, 'caustics_reflective', False),
|
||||
"caustics_refractive": getattr(cycles, 'caustics_refractive', False),
|
||||
"blur_glossy": getattr(cycles, 'blur_glossy', 0.0),
|
||||
"max_bounces": getattr(cycles, 'max_bounces', 12),
|
||||
"diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
|
||||
"glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
|
||||
"transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
|
||||
"volume_bounces": getattr(cycles, 'volume_bounces', 0),
|
||||
"transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
|
||||
"film_transparent": getattr(cycles, 'film_transparent', False),
|
||||
"use_layer_samples": getattr(cycles, 'use_layer_samples', False),
|
||||
}
|
||||
elif engine == 'EEVEE' or engine == 'EEVEE_NEXT':
|
||||
eevee = scene.eevee
|
||||
engine_settings = {
|
||||
"taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
|
||||
"use_bloom": getattr(eevee, 'use_bloom', False),
|
||||
"bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
|
||||
"bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
|
||||
"bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
|
||||
"use_ssr": getattr(eevee, 'use_ssr', True),
|
||||
"use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
|
||||
"ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'),
|
||||
"use_ssao": getattr(eevee, 'use_ssao', True),
|
||||
"ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'),
|
||||
"ssao_distance": getattr(eevee, 'ssao_distance', 0.2),
|
||||
"ssao_factor": getattr(eevee, 'ssao_factor', 1.0),
|
||||
"use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
|
||||
"use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True),
|
||||
"use_volumetric": getattr(eevee, 'use_volumetric', False),
|
||||
"volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
|
||||
"volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
|
||||
"volumetric_start": getattr(eevee, 'volumetric_start', 0.0),
|
||||
"volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
|
||||
"use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
|
||||
"use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True),
|
||||
"use_gtao": getattr(eevee, 'use_gtao', False),
|
||||
"gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'),
|
||||
"use_overscan": getattr(eevee, 'use_overscan', False),
|
||||
}
|
||||
else:
|
||||
# For other engines, extract basic samples if available
|
||||
engine_settings = {
|
||||
"samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128
|
||||
}
|
||||
|
||||
# Extract scene info
|
||||
camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA'])
|
||||
object_count = len(scene.objects)
|
||||
material_count = len(bpy.data.materials)
|
||||
|
||||
# Build metadata dictionary
|
||||
metadata = {
|
||||
"frame_start": frame_start,
|
||||
"frame_end": frame_end,
|
||||
"render_settings": {
|
||||
"resolution_x": resolution_x,
|
||||
"resolution_y": resolution_y,
|
||||
"output_format": output_format,
|
||||
"engine": engine.lower(),
|
||||
"engine_settings": engine_settings
|
||||
},
|
||||
"scene_info": {
|
||||
"camera_count": camera_count,
|
||||
"object_count": object_count,
|
||||
"material_count": material_count
|
||||
},
|
||||
"missing_files_info": missing_files_info
|
||||
}
|
||||
|
||||
# Output as JSON
|
||||
print(json.dumps(metadata))
|
||||
sys.stdout.flush()
|
||||
`
|
||||
|
||||
if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
|
||||
if err := os.WriteFile(scriptPath, []byte(scripts.ExtractMetadata), 0644); err != nil {
|
||||
return nil, fmt.Errorf("failed to create extraction script: %w", err)
|
||||
}
|
||||
|
||||
// Make blend file path relative to tmpDir to avoid path resolution issues
|
||||
blendFileRel, err := filepath.Rel(tmpDir, blendFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get relative path for blend file: %w", err)
|
||||
}
|
||||
|
||||
// Execute Blender with Python script
|
||||
cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath)
|
||||
cmd := exec.Command("blender", "-b", blendFileRel, "--python", "extract_metadata.py")
|
||||
cmd.Dir = tmpDir
|
||||
|
||||
// Capture stdout and stderr
|
||||
@@ -443,14 +280,16 @@ sys.stdout.flush()
|
||||
}
|
||||
}()
|
||||
|
||||
// Stream stderr (discard for now, but could log if needed)
|
||||
// Capture stderr for error reporting
|
||||
var stderrBuffer bytes.Buffer
|
||||
stderrDone := make(chan bool)
|
||||
go func() {
|
||||
defer close(stderrDone)
|
||||
scanner := bufio.NewScanner(stderrPipe)
|
||||
for scanner.Scan() {
|
||||
// Could log stderr if needed
|
||||
_ = scanner.Text()
|
||||
line := scanner.Text()
|
||||
stderrBuffer.WriteString(line)
|
||||
stderrBuffer.WriteString("\n")
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -462,6 +301,18 @@ sys.stdout.flush()
|
||||
<-stderrDone
|
||||
|
||||
if err != nil {
|
||||
stderrOutput := strings.TrimSpace(stderrBuffer.String())
|
||||
stdoutOutput := strings.TrimSpace(stdoutBuffer.String())
|
||||
log.Printf("Blender metadata extraction failed for job %d:", jobID)
|
||||
if stderrOutput != "" {
|
||||
log.Printf("Blender stderr: %s", stderrOutput)
|
||||
}
|
||||
if stdoutOutput != "" {
|
||||
log.Printf("Blender stdout (last 500 chars): %s", truncateString(stdoutOutput, 500))
|
||||
}
|
||||
if stderrOutput != "" {
|
||||
return nil, fmt.Errorf("blender metadata extraction failed: %w (stderr: %s)", err, truncateString(stderrOutput, 200))
|
||||
}
|
||||
return nil, fmt.Errorf("blender metadata extraction failed: %w", err)
|
||||
}
|
||||
|
||||
@@ -484,21 +335,25 @@ sys.stdout.flush()
|
||||
return &metadata, nil
|
||||
}
|
||||
|
||||
// extractTarGz extracts a tar.gz archive to a destination directory
|
||||
func (s *Server) extractTarGz(tarGzPath, destDir string) error {
|
||||
file, err := os.Open(tarGzPath)
|
||||
// extractTar extracts a tar archive to a destination directory
|
||||
func (s *Server) extractTar(tarPath, destDir string) error {
|
||||
log.Printf("Extracting tar archive: %s -> %s", tarPath, destDir)
|
||||
|
||||
// Ensure destination directory exists
|
||||
if err := os.MkdirAll(destDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create destination directory: %w", err)
|
||||
}
|
||||
|
||||
file, err := os.Open(tarPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open archive: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create gzip reader: %w", err)
|
||||
}
|
||||
defer gzr.Close()
|
||||
tr := tar.NewReader(file)
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
fileCount := 0
|
||||
dirCount := 0
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
@@ -511,9 +366,13 @@ func (s *Server) extractTarGz(tarGzPath, destDir string) error {
|
||||
|
||||
// Sanitize path to prevent directory traversal
|
||||
target := filepath.Join(destDir, header.Name)
|
||||
|
||||
// Ensure target is within destDir
|
||||
if !strings.HasPrefix(filepath.Clean(target), filepath.Clean(destDir)+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("invalid file path in archive: %s", header.Name)
|
||||
cleanTarget := filepath.Clean(target)
|
||||
cleanDestDir := filepath.Clean(destDir)
|
||||
if !strings.HasPrefix(cleanTarget, cleanDestDir+string(os.PathSeparator)) && cleanTarget != cleanDestDir {
|
||||
log.Printf("ERROR: Invalid file path in TAR - target: %s, destDir: %s", cleanTarget, cleanDestDir)
|
||||
return fmt.Errorf("invalid file path in archive: %s (target: %s, destDir: %s)", header.Name, cleanTarget, cleanDestDir)
|
||||
}
|
||||
|
||||
// Create parent directories
|
||||
@@ -527,14 +386,18 @@ func (s *Server) extractTarGz(tarGzPath, destDir string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file: %w", err)
|
||||
}
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
_, err = io.Copy(outFile, tr)
|
||||
if err != nil {
|
||||
outFile.Close()
|
||||
return fmt.Errorf("failed to write file: %w", err)
|
||||
}
|
||||
outFile.Close()
|
||||
fileCount++
|
||||
} else if header.Typeflag == tar.TypeDir {
|
||||
dirCount++
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Extraction complete: %d files, %d directories extracted to %s", fileCount, dirCount, destDir)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
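The extraction helper above has a matching creation side in handleCreateJob, which falls in the portion of the jobs.go diff that is not reproduced here. Purely as a hedged sketch of what building the uncompressed context.tar with archive/tar can look like (the helper name and its placement are assumptions, not code from this commit):

// Assumed helper, for illustration only (imports: archive/tar, io, os, path/filepath).
func createTar(srcDir, tarPath string) error {
	out, err := os.Create(tarPath)
	if err != nil {
		return err
	}
	defer out.Close()

	tw := tar.NewWriter(out)
	defer tw.Close()

	// Walk the context directory and add every entry with a path relative to srcDir.
	return filepath.Walk(srcDir, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		rel, err := filepath.Rel(srcDir, path)
		if err != nil || rel == "." {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.ToSlash(rel)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			return nil
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

Runners then extract the archive with extractTar as shown above, with no gzip layer in between.
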
@@ -9,6 +9,7 @@ import (
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
|
||||
"jiggablend/pkg/types"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
@@ -287,13 +289,27 @@ func (s *Server) handleUpdateTaskStep(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Get job ID for broadcasting
|
||||
var jobID int64
|
||||
err = s.db.QueryRow("SELECT job_id FROM tasks WHERE id = ?", taskID).Scan(&jobID)
|
||||
if err == nil {
|
||||
// Broadcast step update to frontend
|
||||
s.broadcastTaskUpdate(jobID, taskID, "step_update", map[string]interface{}{
|
||||
"step_id": stepID,
|
||||
"step_name": req.StepName,
|
||||
"status": req.Status,
|
||||
"duration_ms": req.DurationMs,
|
||||
"error_message": req.ErrorMessage,
|
||||
})
|
||||
}
|
||||
|
||||
s.respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"step_id": stepID,
|
||||
"message": "Step updated successfully",
|
||||
})
|
||||
}
|
||||
|
||||
// handleDownloadJobContext allows runners to download the job context tar.gz
|
||||
// handleDownloadJobContext allows runners to download the job context tar
|
||||
func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request) {
|
||||
jobID, err := parseID(r, "jobId")
|
||||
if err != nil {
|
||||
@@ -302,7 +318,7 @@ func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
|
||||
// Construct the context file path
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz")
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar")
|
||||
|
||||
// Check if context file exists
|
||||
if !s.storage.FileExists(contextPath) {
|
||||
@@ -319,9 +335,9 @@ func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Set appropriate headers for tar.gz file
|
||||
w.Header().Set("Content-Type", "application/gzip")
|
||||
w.Header().Set("Content-Disposition", "attachment; filename=context.tar.gz")
|
||||
// Set appropriate headers for tar file
|
||||
w.Header().Set("Content-Type", "application/x-tar")
|
||||
w.Header().Set("Content-Disposition", "attachment; filename=context.tar")
|
||||
|
||||
// Stream the file to the response
|
||||
io.Copy(w, file)
|
||||
@@ -356,16 +372,26 @@ func (s *Server) handleUploadFileFromRunner(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
|
||||
// Record in database
|
||||
_, err = s.db.Exec(
|
||||
var fileID int64
|
||||
err = s.db.QueryRow(
|
||||
`INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size)
|
||||
VALUES (?, ?, ?, ?, ?)`,
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
RETURNING id`,
|
||||
jobID, types.JobFileTypeOutput, filePath, header.Filename, header.Size,
|
||||
)
|
||||
).Scan(&fileID)
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record file: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcast file addition
|
||||
s.broadcastJobUpdate(jobID, "file_added", map[string]interface{}{
|
||||
"file_id": fileID,
|
||||
"file_type": types.JobFileTypeOutput,
|
||||
"file_name": header.Filename,
|
||||
"file_size": header.Size,
|
||||
})
|
||||
|
||||
s.respondJSON(w, http.StatusCreated, map[string]interface{}{
|
||||
"file_path": filePath,
|
||||
"file_name": header.Filename,
|
||||
@@ -510,6 +536,79 @@ func (s *Server) handleGetJobMetadataForRunner(w http.ResponseWriter, r *http.Re
|
||||
s.respondJSON(w, http.StatusOK, metadata)
|
||||
}
|
||||
|
||||
// handleDownloadFileForRunner allows runners to download a file by fileName
|
||||
func (s *Server) handleDownloadFileForRunner(w http.ResponseWriter, r *http.Request) {
|
||||
jobID, err := parseID(r, "jobId")
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Get fileName from URL path (may need URL decoding)
|
||||
fileName := chi.URLParam(r, "fileName")
|
||||
if fileName == "" {
|
||||
s.respondError(w, http.StatusBadRequest, "fileName is required")
|
||||
return
|
||||
}
|
||||
|
||||
// URL decode the fileName in case it contains encoded characters
|
||||
decodedFileName, err := url.QueryUnescape(fileName)
|
||||
if err != nil {
|
||||
// If decoding fails, use original fileName
|
||||
decodedFileName = fileName
|
||||
}
|
||||
|
||||
// Get file info from database
|
||||
var filePath string
|
||||
err = s.db.QueryRow(
|
||||
`SELECT file_path FROM job_files WHERE job_id = ? AND file_name = ?`,
|
||||
jobID, decodedFileName,
|
||||
).Scan(&filePath)
|
||||
if err == sql.ErrNoRows {
|
||||
s.respondError(w, http.StatusNotFound, "File not found")
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query file: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Open file
|
||||
file, err := s.storage.GetFile(filePath)
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusNotFound, "File not found on disk")
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Determine content type based on file extension
|
||||
contentType := "application/octet-stream"
|
||||
fileNameLower := strings.ToLower(decodedFileName)
|
||||
switch {
|
||||
case strings.HasSuffix(fileNameLower, ".png"):
|
||||
contentType = "image/png"
|
||||
case strings.HasSuffix(fileNameLower, ".jpg") || strings.HasSuffix(fileNameLower, ".jpeg"):
|
||||
contentType = "image/jpeg"
|
||||
case strings.HasSuffix(fileNameLower, ".gif"):
|
||||
contentType = "image/gif"
|
||||
case strings.HasSuffix(fileNameLower, ".webp"):
|
||||
contentType = "image/webp"
|
||||
case strings.HasSuffix(fileNameLower, ".exr") || strings.HasSuffix(fileNameLower, ".EXR"):
|
||||
contentType = "image/x-exr"
|
||||
case strings.HasSuffix(fileNameLower, ".mp4"):
|
||||
contentType = "video/mp4"
|
||||
case strings.HasSuffix(fileNameLower, ".webm"):
|
||||
contentType = "video/webm"
|
||||
}
|
||||
|
||||
// Set headers
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", decodedFileName))
|
||||
|
||||
// Stream file
|
||||
io.Copy(w, file)
|
||||
}
|
||||
|
||||
// WebSocket message types
|
||||
type WSMessage struct {
|
||||
Type string `json:"type"`
|
||||
@@ -785,6 +884,13 @@ func (s *Server) handleWebSocketTaskComplete(runnerID int64, taskUpdate WSTaskUp
|
||||
taskUpdate.TaskID,
|
||||
).Scan(&jobID)
|
||||
if err == nil {
|
||||
// Broadcast task update
|
||||
s.broadcastTaskUpdate(jobID, taskUpdate.TaskID, "task_update", map[string]interface{}{
|
||||
"status": status,
|
||||
"output_path": taskUpdate.OutputPath,
|
||||
"completed_at": now,
|
||||
"error": taskUpdate.Error,
|
||||
})
|
||||
s.updateJobStatusFromTasks(jobID)
|
||||
}
|
||||
}
|
||||
@@ -840,6 +946,7 @@ func (s *Server) getCurrentFrameFromLogs(jobID int64) (int, bool) {
|
||||
for rows.Next() {
|
||||
var taskID int64
|
||||
if err := rows.Scan(&taskID); err != nil {
|
||||
log.Printf("Failed to scan task ID in getCurrentFrameFromLogs: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -895,6 +1002,14 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
allowParallelRunners.Valid && !allowParallelRunners.Bool &&
|
||||
frameStart.Valid && frameEnd.Valid
|
||||
|
||||
// Get current job status to detect changes
|
||||
var currentStatus string
|
||||
err = s.db.QueryRow(`SELECT status FROM jobs WHERE id = ?`, jobID).Scan(¤tStatus)
|
||||
if err != nil {
|
||||
log.Printf("Failed to get current job status for job %d: %v", jobID, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Count total tasks and completed tasks
|
||||
var totalTasks, completedTasks int
|
||||
err = s.db.QueryRow(
|
||||
@@ -914,8 +1029,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("updateJobStatusFromTasks: job %d - total: %d, completed: %d", jobID, totalTasks, completedTasks)
|
||||
|
||||
// Calculate progress
|
||||
var progress float64
|
||||
if totalTasks == 0 {
|
||||
@@ -985,9 +1098,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
} else {
|
||||
progress = renderProgress
|
||||
}
|
||||
|
||||
log.Printf("updateJobStatusFromTasks: job %d - frame-based progress: current_frame=%d, render_progress=%.1f%%, non_render_progress=%.1f%%, total_progress=%.1f%%",
|
||||
jobID, currentFrame, renderProgress, nonRenderProgress, progress)
|
||||
} else {
|
||||
// Standard task-based progress
|
||||
progress = float64(completedTasks) / float64(totalTasks) * 100.0
|
||||
@@ -1013,8 +1123,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("updateJobStatusFromTasks: job %d - pending/running: %d", jobID, pendingOrRunningTasks)
|
||||
|
||||
if pendingOrRunningTasks == 0 && totalTasks > 0 {
|
||||
// All tasks are either completed or failed/cancelled
|
||||
// Check if any tasks failed
|
||||
@@ -1039,7 +1147,16 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
if err != nil {
|
||||
log.Printf("Failed to update job %d status to %s: %v", jobID, jobStatus, err)
|
||||
} else {
|
||||
log.Printf("Updated job %d status to %s (progress: %.1f%%, completed tasks: %d/%d)", jobID, jobStatus, progress, completedTasks, totalTasks)
|
||||
// Only log if status actually changed
|
||||
if currentStatus != jobStatus {
|
||||
log.Printf("Updated job %d status from %s to %s (progress: %.1f%%, completed tasks: %d/%d)", jobID, currentStatus, jobStatus, progress, completedTasks, totalTasks)
|
||||
}
|
||||
// Broadcast job update via WebSocket
|
||||
s.broadcastJobUpdate(jobID, "job_update", map[string]interface{}{
|
||||
"status": jobStatus,
|
||||
"progress": progress,
|
||||
"completed_at": now,
|
||||
})
|
||||
}
|
||||
|
||||
if outputFormatStr == "EXR_264_MP4" || outputFormatStr == "EXR_AV1_MP4" {
|
||||
@@ -1054,14 +1171,22 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
// Create a video generation task instead of calling generateMP4Video directly
|
||||
// This prevents race conditions when multiple runners complete frames simultaneously
|
||||
videoTaskTimeout := 86400 // 24 hours for video generation
|
||||
_, err := s.db.Exec(
|
||||
var videoTaskID int64
|
||||
err := s.db.QueryRow(
|
||||
`INSERT INTO tasks (job_id, frame_start, frame_end, task_type, status, timeout_seconds, max_retries)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)`,
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||
RETURNING id`,
|
||||
jobID, 0, 0, types.TaskTypeVideoGeneration, types.TaskStatusPending, videoTaskTimeout, 1,
|
||||
)
|
||||
).Scan(&videoTaskID)
|
||||
if err != nil {
|
||||
log.Printf("Failed to create video generation task for job %d: %v", jobID, err)
|
||||
} else {
|
||||
// Broadcast that a new task was added
|
||||
log.Printf("Broadcasting task_added for job %d: video generation task %d", jobID, videoTaskID)
|
||||
s.broadcastTaskUpdate(jobID, videoTaskID, "task_added", map[string]interface{}{
|
||||
"task_id": videoTaskID,
|
||||
"task_type": types.TaskTypeVideoGeneration,
|
||||
})
|
||||
// Update job status to ensure it's marked as running (has pending video task)
|
||||
s.updateJobStatusFromTasks(jobID)
|
||||
// Try to distribute the task immediately
|
||||
@@ -1099,7 +1224,10 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
if err != nil {
|
||||
log.Printf("Failed to update job %d status to %s: %v", jobID, jobStatus, err)
|
||||
} else {
|
||||
log.Printf("Updated job %d status to %s (progress: %.1f%%, completed: %d/%d, pending: %d, running: %d)", jobID, jobStatus, progress, completedTasks, totalTasks, pendingOrRunningTasks-runningTasks, runningTasks)
|
||||
// Only log if status actually changed
|
||||
if currentStatus != jobStatus {
|
||||
log.Printf("Updated job %d status from %s to %s (progress: %.1f%%, completed: %d/%d, pending: %d, running: %d)", jobID, currentStatus, jobStatus, progress, completedTasks, totalTasks, pendingOrRunningTasks-runningTasks, runningTasks)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1224,7 +1352,6 @@ func (s *Server) distributeTasksToRunners() {
|
||||
t.AllowParallelRunners = true
|
||||
}
|
||||
pendingTasks = append(pendingTasks, t)
|
||||
log.Printf("Found pending task %d (type: %s, job: %d '%s', status: %s)", t.TaskID, t.TaskType, t.JobID, t.JobName, t.JobStatus)
|
||||
}
|
||||
|
||||
if len(pendingTasks) == 0 {
|
||||
@@ -1308,11 +1435,6 @@ func (s *Server) distributeTasksToRunners() {
|
||||
}
|
||||
log.Printf("Distributing %d pending tasks (%v) to %d connected runners: %v", len(pendingTasks), taskTypes, len(connectedRunners), connectedRunners)
|
||||
|
||||
// Log each pending task for debugging
|
||||
for _, task := range pendingTasks {
|
||||
log.Printf(" - Task %d (type: %s, job: %d '%s', status: %s)", task.TaskID, task.TaskType, task.JobID, task.JobName, task.JobStatus)
|
||||
}
|
||||
|
||||
// Distribute tasks to runners
|
||||
// Sort tasks to prioritize metadata tasks
|
||||
sort.Slice(pendingTasks, func(i, j int) bool {
|
||||
@@ -1572,6 +1694,13 @@ func (s *Server) distributeTasksToRunners() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Broadcast task assignment
|
||||
s.broadcastTaskUpdate(task.JobID, task.TaskID, "task_update", map[string]interface{}{
|
||||
"status": types.TaskStatusRunning,
|
||||
"runner_id": selectedRunnerID,
|
||||
"started_at": now,
|
||||
})
|
||||
|
||||
// Task was successfully assigned, send via WebSocket
|
||||
log.Printf("Assigned task %d (type: %s, job: %d) to runner %d", task.TaskID, task.TaskType, task.JobID, selectedRunnerID)
|
||||
|
||||
@@ -1642,6 +1771,8 @@ func (s *Server) assignTaskToRunner(runnerID int64, taskID int64) error {
|
||||
var filePath string
|
||||
if err := rows.Scan(&filePath); err == nil {
|
||||
task.InputFiles = append(task.InputFiles, filePath)
|
||||
} else {
|
||||
log.Printf("Failed to scan input file path for task %d: %v", taskID, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -38,6 +43,15 @@ type Server struct {
|
||||
// Mutexes for each frontend connection to serialize writes
|
||||
frontendConnsWriteMu map[string]*sync.Mutex // key: "jobId:taskId"
|
||||
frontendConnsWriteMuMu sync.RWMutex
|
||||
// Job list WebSocket connections (key: userID)
|
||||
jobListConns map[int64]*websocket.Conn
|
||||
jobListConnsMu sync.RWMutex
|
||||
// Single job WebSocket connections (key: "userId:jobId")
|
||||
jobConns map[string]*websocket.Conn
|
||||
jobConnsMu sync.RWMutex
|
||||
// Mutexes for job WebSocket connections
|
||||
jobConnsWriteMu map[string]*sync.Mutex
|
||||
jobConnsWriteMuMu sync.RWMutex
|
||||
// Throttling for progress updates (per job)
|
||||
progressUpdateTimes map[int64]time.Time // key: jobID
|
||||
progressUpdateTimesMu sync.RWMutex
|
||||
@@ -66,6 +80,9 @@ func NewServer(db *database.DB, auth *authpkg.Auth, storage *storage.Storage) (*
|
||||
runnerConns: make(map[int64]*websocket.Conn),
|
||||
frontendConns: make(map[string]*websocket.Conn),
|
||||
frontendConnsWriteMu: make(map[string]*sync.Mutex),
|
||||
jobListConns: make(map[int64]*websocket.Conn),
|
||||
jobConns: make(map[string]*websocket.Conn),
|
||||
jobConnsWriteMu: make(map[string]*sync.Mutex),
|
||||
progressUpdateTimes: make(map[int64]time.Time),
|
||||
}
|
||||
|
||||
@@ -83,16 +100,62 @@ func (s *Server) setupMiddleware() {
|
||||
// Note: Timeout middleware is NOT applied globally to avoid conflicts with WebSocket connections
|
||||
// WebSocket connections are long-lived and should not have HTTP timeouts
|
||||
|
||||
// Add gzip compression for JSON responses
|
||||
s.router.Use(gzipMiddleware)
|
||||
|
||||
s.router.Use(cors.Handler(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "Range"},
|
||||
ExposedHeaders: []string{"Link", "Content-Range", "Accept-Ranges", "Content-Length"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "Range", "If-None-Match"},
|
||||
ExposedHeaders: []string{"Link", "Content-Range", "Accept-Ranges", "Content-Length", "ETag"},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 300,
|
||||
}))
|
||||
}
|
||||
|
||||
// gzipMiddleware compresses responses with gzip if client supports it
|
||||
func gzipMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Skip compression for WebSocket upgrades
|
||||
if strings.ToLower(r.Header.Get("Upgrade")) == "websocket" {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if client accepts gzip
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Create gzip writer
|
||||
gz := gzip.NewWriter(w)
|
||||
defer gz.Close()
|
||||
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
w.Header().Set("Vary", "Accept-Encoding")
|
||||
|
||||
// Wrap response writer
|
||||
gzw := &gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
||||
next.ServeHTTP(gzw, r)
|
||||
})
|
||||
}
|
||||
|
||||
// gzipResponseWriter wraps http.ResponseWriter to add gzip compression
|
||||
type gzipResponseWriter struct {
|
||||
io.Writer
|
||||
http.ResponseWriter
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
return w.Writer.Write(b)
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) WriteHeader(statusCode int) {
|
||||
// Don't set Content-Length when using gzip - it will be set automatically
|
||||
w.ResponseWriter.WriteHeader(statusCode)
|
||||
}
|
||||
|
||||
// setupRoutes configures routes
|
||||
func (s *Server) setupRoutes() {
|
||||
// Public routes
|
||||
@@ -118,16 +181,21 @@ func (s *Server) setupRoutes() {
|
||||
r.Post("/", s.handleCreateJob)
|
||||
r.Post("/upload", s.handleUploadFileForJobCreation) // Upload before job creation
|
||||
r.Get("/", s.handleListJobs)
|
||||
r.Get("/summary", s.handleListJobsSummary)
|
||||
r.Post("/batch", s.handleBatchGetJobs)
|
||||
r.Get("/{id}", s.handleGetJob)
|
||||
r.Delete("/{id}", s.handleCancelJob)
|
||||
r.Post("/{id}/delete", s.handleDeleteJob)
|
||||
r.Post("/{id}/upload", s.handleUploadJobFile)
|
||||
r.Get("/{id}/files", s.handleListJobFiles)
|
||||
r.Get("/{id}/files/count", s.handleGetJobFilesCount)
|
||||
r.Get("/{id}/context", s.handleListContextArchive)
|
||||
r.Get("/{id}/files/{fileId}/download", s.handleDownloadJobFile)
|
||||
r.Get("/{id}/video", s.handleStreamVideo)
|
||||
r.Get("/{id}/metadata", s.handleGetJobMetadata)
|
||||
r.Get("/{id}/tasks", s.handleListJobTasks)
|
||||
r.Get("/{id}/tasks/summary", s.handleListJobTasksSummary)
|
||||
r.Post("/{id}/tasks/batch", s.handleBatchGetTasks)
|
||||
r.Get("/{id}/tasks/{taskId}/logs", s.handleGetTaskLogs)
|
||||
// WebSocket route - no timeout middleware (long-lived connection)
|
||||
r.With(func(next http.Handler) http.Handler {
|
||||
@@ -138,6 +206,19 @@ func (s *Server) setupRoutes() {
|
||||
}).Get("/{id}/tasks/{taskId}/logs/ws", s.handleStreamTaskLogsWebSocket)
|
||||
r.Get("/{id}/tasks/{taskId}/steps", s.handleGetTaskSteps)
|
||||
r.Post("/{id}/tasks/{taskId}/retry", s.handleRetryTask)
|
||||
// WebSocket routes for real-time updates
|
||||
r.With(func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Remove timeout middleware for WebSocket
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}).Get("/ws", s.handleJobsWebSocket)
|
||||
r.With(func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Remove timeout middleware for WebSocket
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}).Get("/{id}/ws", s.handleJobWebSocket)
|
||||
})
|
||||
|
||||
// Admin routes
|
||||
@@ -181,7 +262,8 @@ func (s *Server) setupRoutes() {
|
||||
})
|
||||
r.Post("/tasks/{id}/progress", s.handleUpdateTaskProgress)
|
||||
r.Post("/tasks/{id}/steps", s.handleUpdateTaskStep)
|
||||
r.Get("/jobs/{jobId}/context.tar.gz", s.handleDownloadJobContext)
|
||||
r.Get("/jobs/{jobId}/context.tar", s.handleDownloadJobContext)
|
||||
r.Get("/files/{jobId}/{fileName}", s.handleDownloadFileForRunner)
|
||||
r.Post("/files/{jobId}/upload", s.handleUploadFileFromRunner)
|
||||
r.Get("/jobs/{jobId}/status", s.handleGetJobStatusForRunner)
|
||||
r.Get("/jobs/{jobId}/files", s.handleGetJobFilesForRunner)
|
||||
@@ -311,12 +393,14 @@ func (s *Server) handleLogout(w http.ResponseWriter, r *http.Request) {
|
||||
func (s *Server) handleGetMe(w http.ResponseWriter, r *http.Request) {
|
||||
cookie, err := r.Cookie("session_id")
|
||||
if err != nil {
|
||||
log.Printf("Authentication failed: missing session cookie in /auth/me")
|
||||
s.respondError(w, http.StatusUnauthorized, "Not authenticated")
|
||||
return
|
||||
}
|
||||
|
||||
session, ok := s.auth.GetSession(cookie.Value)
|
||||
if !ok {
|
||||
log.Printf("Authentication failed: invalid session cookie in /auth/me")
|
||||
s.respondError(w, http.StatusUnauthorized, "Invalid session")
|
||||
return
|
||||
}
|
||||
@@ -410,6 +494,7 @@ func (s *Server) handleLocalLogin(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
session, err := s.auth.LocalLogin(req.Username, req.Password)
|
||||
if err != nil {
|
||||
log.Printf("Authentication failed: invalid credentials for username '%s'", req.Username)
|
||||
s.respondError(w, http.StatusUnauthorized, "Invalid credentials")
|
||||
return
|
||||
}
|
||||
@@ -512,6 +597,7 @@ func parseID(r *http.Request, param string) (int64, error) {
|
||||
func (s *Server) StartBackgroundTasks() {
|
||||
go s.recoverStuckTasks()
|
||||
go s.cleanupOldRenderJobs()
|
||||
go s.cleanupOldTempDirectories()
|
||||
}
|
||||
|
||||
// recoverStuckTasks periodically checks for dead runners and stuck tasks
|
||||
@@ -621,6 +707,7 @@ func (s *Server) recoverTaskTimeouts() {
|
||||
|
||||
err := rows.Scan(&taskID, &runnerID, &retryCount, &maxRetries, &timeoutSeconds, &startedAt)
|
||||
if err != nil {
|
||||
log.Printf("Failed to scan task row in recoverTaskTimeouts: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -659,3 +746,72 @@ func (s *Server) recoverTaskTimeouts() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupOldTempDirectories periodically cleans up old temporary directories
|
||||
func (s *Server) cleanupOldTempDirectories() {
|
||||
// Run cleanup every hour
|
||||
ticker := time.NewTicker(1 * time.Hour)
|
||||
defer ticker.Stop()
|
||||
|
||||
// Run once immediately on startup
|
||||
s.cleanupOldTempDirectoriesOnce()
|
||||
|
||||
for range ticker.C {
|
||||
s.cleanupOldTempDirectoriesOnce()
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupOldTempDirectoriesOnce removes temp directories older than 1 hour
|
||||
func (s *Server) cleanupOldTempDirectoriesOnce() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("Panic in cleanupOldTempDirectories: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
tempPath := filepath.Join(s.storage.BasePath(), "temp")
|
||||
|
||||
// Check if temp directory exists
|
||||
if _, err := os.Stat(tempPath); os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Read all entries in temp directory
|
||||
entries, err := os.ReadDir(tempPath)
|
||||
if err != nil {
|
||||
log.Printf("Failed to read temp directory: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
cleanedCount := 0
|
||||
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
entryPath := filepath.Join(tempPath, entry.Name())
|
||||
|
||||
// Get directory info to check modification time
|
||||
info, err := entry.Info()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove directories older than 1 hour
|
||||
age := now.Sub(info.ModTime())
|
||||
if age > 1*time.Hour {
|
||||
if err := os.RemoveAll(entryPath); err != nil {
|
||||
log.Printf("Warning: Failed to clean up old temp directory %s: %v", entryPath, err)
|
||||
} else {
|
||||
cleanedCount++
|
||||
log.Printf("Cleaned up old temp directory: %s (age: %v)", entryPath, age)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cleanedCount > 0 {
|
||||
log.Printf("Cleaned up %d old temp directories", cleanedCount)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -410,6 +410,7 @@ func (a *Auth) Middleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
cookie, err := r.Cookie("session_id")
|
||||
if err != nil {
|
||||
log.Printf("Authentication failed: missing session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -418,6 +419,7 @@ func (a *Auth) Middleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
session, ok := a.GetSession(cookie.Value)
|
||||
if !ok {
|
||||
log.Printf("Authentication failed: invalid session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -451,6 +453,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
// First check authentication
|
||||
cookie, err := r.Cookie("session_id")
|
||||
if err != nil {
|
||||
log.Printf("Admin authentication failed: missing session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -459,6 +462,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
session, ok := a.GetSession(cookie.Value)
|
||||
if !ok {
|
||||
log.Printf("Admin authentication failed: invalid session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -467,6 +471,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
// Then check admin status
|
||||
if !session.IsAdmin {
|
||||
log.Printf("Admin access denied: user %d (email: %s) attempted to access admin endpoint %s %s", session.UserID, session.Email, r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Forbidden: Admin access required"})
|
||||
|
||||
@@ -165,14 +165,17 @@ func (db *DB) migrate() error {

	CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id);
	CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
	CREATE INDEX IF NOT EXISTS idx_jobs_user_status_created ON jobs(user_id, status, created_at DESC);
	CREATE INDEX IF NOT EXISTS idx_tasks_job_id ON tasks(job_id);
	CREATE INDEX IF NOT EXISTS idx_tasks_runner_id ON tasks(runner_id);
	CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status);
	CREATE INDEX IF NOT EXISTS idx_tasks_job_status ON tasks(job_id, status);
	CREATE INDEX IF NOT EXISTS idx_tasks_started_at ON tasks(started_at);
	CREATE INDEX IF NOT EXISTS idx_job_files_job_id ON job_files(job_id);
	CREATE INDEX IF NOT EXISTS idx_registration_tokens_token ON registration_tokens(token);
	CREATE INDEX IF NOT EXISTS idx_registration_tokens_expires_at ON registration_tokens(expires_at);
	CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_created_at ON task_logs(task_id, created_at);
	CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_id ON task_logs(task_id, id DESC);
	CREATE INDEX IF NOT EXISTS idx_task_logs_runner_id ON task_logs(runner_id);
	CREATE INDEX IF NOT EXISTS idx_task_steps_task_id ON task_steps(task_id);
	CREATE INDEX IF NOT EXISTS idx_runners_last_heartbeat ON runners(last_heartbeat);

@@ -213,6 +216,9 @@ func (db *DB) migrate() error {
	`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS retry_count INTEGER DEFAULT 0`,
	`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS max_retries INTEGER DEFAULT 3`,
	`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS timeout_seconds INTEGER`,
	// Add updated_at columns for ETag support
	`ALTER TABLE jobs ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`,
	`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`,
	// Migrate file_size from INTEGER to BIGINT to support large files (>2GB)
	// DuckDB doesn't support direct ALTER COLUMN TYPE, so we use a workaround:
	// 1. Add new column as BIGINT

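These updated_at columns back the ETag support named in the commit message; the actual generation lives in the suppressed internal/api/jobs.go diff. As a hedged sketch only (handler name and details are assumptions, not this commit's code), an ETag derived from updated_at and checked against If-None-Match could look like:

// Illustrative only (imports: fmt, net/http, time).
func (s *Server) writeJobETag(w http.ResponseWriter, r *http.Request, jobID int64) bool {
	var updatedAt time.Time
	if err := s.db.QueryRow(`SELECT updated_at FROM jobs WHERE id = ?`, jobID).Scan(&updatedAt); err != nil {
		return false // fall back to the normal handler path
	}
	// Weak ETag keyed on the job ID and its last modification time.
	etag := fmt.Sprintf(`W/"job-%d-%d"`, jobID, updatedAt.UnixNano())
	w.Header().Set("ETag", etag)
	if r.Header.Get("If-None-Match") == etag {
		w.WriteHeader(http.StatusNotModified)
		return true // caller can skip serializing the body
	}
	return false
}

The CORS changes elsewhere in this commit (allowing If-None-Match and exposing ETag) are what let a browser client take part in this exchange.
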
internal/logger/logger.go (new file, 127 lines)
@@ -0,0 +1,127 @@
package logger

import (
	"io"
	"log"
	"os"
	"path/filepath"
	"sync"

	"gopkg.in/natefinch/lumberjack.v2"
)

var (
	defaultLogger *Logger
	once          sync.Once
)

// Logger wraps the standard log.Logger with file and stdout output
type Logger struct {
	*log.Logger
	fileWriter io.WriteCloser
}

// Init initializes the default logger with both file and stdout output
func Init(logDir, logFileName string, maxSizeMB int, maxBackups int, maxAgeDays int) error {
	var err error
	once.Do(func() {
		defaultLogger, err = New(logDir, logFileName, maxSizeMB, maxBackups, maxAgeDays)
		if err != nil {
			return
		}
		// Replace standard log output with the multi-writer
		multiWriter := io.MultiWriter(os.Stdout, defaultLogger.fileWriter)
		log.SetOutput(multiWriter)
		log.SetFlags(log.LstdFlags | log.Lshortfile)
	})
	return err
}

// New creates a new logger that writes to both stdout and a log file
func New(logDir, logFileName string, maxSizeMB int, maxBackups int, maxAgeDays int) (*Logger, error) {
	// Ensure log directory exists
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return nil, err
	}

	logPath := filepath.Join(logDir, logFileName)

	// Create file writer with rotation
	fileWriter := &lumberjack.Logger{
		Filename:   logPath,
		MaxSize:    maxSizeMB,  // megabytes
		MaxBackups: maxBackups, // number of backup files
		MaxAge:     maxAgeDays, // days
		Compress:   true,       // compress old log files
	}

	// Create multi-writer that writes to both stdout and file
	multiWriter := io.MultiWriter(os.Stdout, fileWriter)

	// Create logger with standard flags
	logger := log.New(multiWriter, "", log.LstdFlags|log.Lshortfile)

	return &Logger{
		Logger:     logger,
		fileWriter: fileWriter,
	}, nil
}

// Close closes the file writer
func (l *Logger) Close() error {
	if l.fileWriter != nil {
		return l.fileWriter.Close()
	}
	return nil
}

// GetDefault returns the default logger instance
func GetDefault() *Logger {
	return defaultLogger
}

// Printf logs a formatted message
func Printf(format string, v ...interface{}) {
	if defaultLogger != nil {
		defaultLogger.Printf(format, v...)
	} else {
		log.Printf(format, v...)
	}
}

// Print logs a message
func Print(v ...interface{}) {
	if defaultLogger != nil {
		defaultLogger.Print(v...)
	} else {
		log.Print(v...)
	}
}

// Println logs a message with newline
func Println(v ...interface{}) {
	if defaultLogger != nil {
		defaultLogger.Println(v...)
	} else {
		log.Println(v...)
	}
}

// Fatal logs a message and exits
func Fatal(v ...interface{}) {
	if defaultLogger != nil {
		defaultLogger.Fatal(v...)
	} else {
		log.Fatal(v...)
	}
}

// Fatalf logs a formatted message and exits
func Fatalf(format string, v ...interface{}) {
	if defaultLogger != nil {
		defaultLogger.Fatalf(format, v...)
	} else {
		log.Fatalf(format, v...)
	}
}

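For reference, a minimal consumer of this package, mirroring the wiring shown in the manager and runner commands above (the directory, file name, and rotation values below are placeholders):

package main

import (
	"log"

	"jiggablend/internal/logger"
)

func main() {
	// Stdout plus ./logs/example.log, rotated at 100 MB, keeping 5 backups for 30 days.
	if err := logger.Init("./logs", "example.log", 100, 5, 30); err != nil {
		log.Fatalf("Failed to initialize logger: %v", err)
	}
	defer func() {
		if l := logger.GetDefault(); l != nil {
			l.Close()
		}
	}()

	// After Init, both the package helpers and the standard log package
	// write to stdout and the rotating file.
	logger.Printf("service %s ready", "example")
	log.Println("standard log output is redirected as well")
}
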
@@ -1,10 +1,10 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"jiggablend/pkg/scripts"
|
||||
"jiggablend/pkg/types"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
@@ -53,18 +54,20 @@ type Client struct {
|
||||
vaapiDevicesMu sync.RWMutex // Protects vaapiDevices
|
||||
allocatedDevices map[int64]string // map[taskID]device - tracks which device is allocated to which task
|
||||
allocatedDevicesMu sync.RWMutex // Protects allocatedDevices
|
||||
longRunningClient *http.Client // HTTP client for long-running operations (no timeout)
|
||||
}
|
||||
|
||||
// NewClient creates a new runner client
|
||||
func NewClient(managerURL, name, hostname, ipAddress string) *Client {
|
||||
return &Client{
|
||||
managerURL: managerURL,
|
||||
name: name,
|
||||
hostname: hostname,
|
||||
ipAddress: ipAddress,
|
||||
httpClient: &http.Client{Timeout: 30 * time.Second},
|
||||
stopChan: make(chan struct{}),
|
||||
stepStartTimes: make(map[string]time.Time),
|
||||
managerURL: managerURL,
|
||||
name: name,
|
||||
hostname: hostname,
|
||||
ipAddress: ipAddress,
|
||||
httpClient: &http.Client{Timeout: 30 * time.Second},
|
||||
longRunningClient: &http.Client{Timeout: 0}, // No timeout for long-running operations (context downloads, file uploads/downloads)
|
||||
stopChan: make(chan struct{}),
|
||||
stepStartTimes: make(map[string]time.Time),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -465,6 +468,17 @@ func (c *Client) Register(registrationToken string) (int64, string, string, erro
// doSignedRequest performs an authenticated HTTP request using shared secret
// queryParams is optional and will be appended to the URL
func (c *Client) doSignedRequest(method, path string, body []byte, queryParams ...string) (*http.Response, error) {
    return c.doSignedRequestWithClient(method, path, body, c.httpClient, queryParams...)
}

// doSignedRequestLong performs an authenticated HTTP request using the long-running client (no timeout)
// Use this for context downloads, file uploads/downloads, and other operations that may take a long time
func (c *Client) doSignedRequestLong(method, path string, body []byte, queryParams ...string) (*http.Response, error) {
    return c.doSignedRequestWithClient(method, path, body, c.longRunningClient, queryParams...)
}

// doSignedRequestWithClient performs an authenticated HTTP request using the specified client
func (c *Client) doSignedRequestWithClient(method, path string, body []byte, client *http.Client, queryParams ...string) (*http.Response, error) {
    if c.runnerSecret == "" {
        return nil, fmt.Errorf("runner not authenticated")
    }
@@ -483,7 +497,7 @@ func (c *Client) doSignedRequest(method, path string, body []byte, queryParams .
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("X-Runner-Secret", c.runnerSecret)

    return c.httpClient.Do(req)
    return client.Do(req)
}

// ConnectWebSocket establishes a WebSocket connection to the manager
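The split between httpClient and longRunningClient is the key design choice here: control-plane calls keep the 30-second timeout so a hung manager is detected quickly, while bulk transfers are only bounded by the server closing the connection. A minimal sketch of the same pattern in isolation (names here are illustrative, not the project's API):

package main

import (
    "fmt"
    "net/http"
    "time"
)

type apiClient struct {
    control *http.Client // small JSON requests: fail fast
    bulk    *http.Client // context/file transfers: no client-side deadline
}

func newAPIClient() *apiClient {
    return &apiClient{
        control: &http.Client{Timeout: 30 * time.Second},
        bulk:    &http.Client{Timeout: 0}, // rely on context cancellation or the server closing the stream
    }
}

func main() {
    c := newAPIClient()
    fmt.Println("control timeout:", c.control.Timeout, "bulk timeout:", c.bulk.Timeout)
}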
@@ -969,16 +983,16 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output
// Clean up expired cache entries periodically
c.cleanupExpiredContextCache()

// Download context tar.gz
contextPath := filepath.Join(workDir, "context.tar.gz")
// Download context tar
contextPath := filepath.Join(workDir, "context.tar")
if err := c.downloadJobContext(jobID, contextPath); err != nil {
    c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
    return fmt.Errorf("failed to download context: %w", err)
}

// Extract context tar.gz
// Extract context tar
c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download")
if err := c.extractTarGz(contextPath, workDir); err != nil {
if err := c.extractTar(contextPath, workDir); err != nil {
    c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
    return fmt.Errorf("failed to extract context: %w", err)
}
@@ -1077,662 +1091,24 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output
|
||||
// This script will override the blend file's settings based on job metadata
|
||||
formatFilePath := filepath.Join(workDir, "output_format.txt")
|
||||
renderSettingsFilePath := filepath.Join(workDir, "render_settings.json")
|
||||
scriptContent := fmt.Sprintf(`import bpy
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
|
||||
# Make all file paths relative to the blend file location FIRST
|
||||
# This must be done immediately after file load, before any other operations
|
||||
# to prevent Blender from trying to access external files with absolute paths
|
||||
try:
|
||||
bpy.ops.file.make_paths_relative()
|
||||
print("Made all file paths relative to blend file")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not make paths relative: {e}")
|
||||
// Check if unhide_objects is enabled
|
||||
unhideObjects := false
|
||||
if jobMetadata != nil && jobMetadata.UnhideObjects != nil && *jobMetadata.UnhideObjects {
|
||||
unhideObjects = true
|
||||
}
|
||||
|
||||
# Check for missing addons that the blend file requires
|
||||
# Blender marks missing addons with "_missing" suffix in preferences
|
||||
missing = []
|
||||
try:
|
||||
for mod in bpy.context.preferences.addons:
|
||||
if mod.module.endswith("_missing"):
|
||||
missing.append(mod.module.rsplit("_", 1)[0])
|
||||
// Build unhide code conditionally from embedded script
|
||||
unhideCode := ""
|
||||
if unhideObjects {
|
||||
unhideCode = scripts.UnhideObjects
|
||||
}
|
||||
|
||||
if missing:
|
||||
print("Missing add-ons required by this .blend:")
|
||||
for name in missing:
|
||||
print(" -", name)
|
||||
else:
|
||||
print("No missing add-ons detected – file is headless-safe")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check for missing addons: {e}")
|
||||
|
||||
# Fix objects and collections hidden from render
|
||||
vl = bpy.context.view_layer
|
||||
|
||||
# 1. Objects hidden in view layer
|
||||
print("Checking for objects hidden from render that need to be enabled...")
|
||||
try:
|
||||
for obj in bpy.data.objects:
|
||||
if obj.hide_get(view_layer=vl):
|
||||
if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]):
|
||||
obj.hide_set(False, view_layer=vl)
|
||||
print("Enabled object:", obj.name)
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check/fix hidden render objects: {e}")
|
||||
|
||||
# 2. Collections disabled in renders OR set to Holdout (the final killer)
|
||||
print("Checking for collections hidden from render that need to be enabled...")
|
||||
try:
|
||||
for col in bpy.data.collections:
|
||||
if col.hide_render or (vl.layer_collection.children.get(col.name) and not vl.layer_collection.children[col.name].exclude == False):
|
||||
if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision","dick"]):
|
||||
col.hide_render = False
|
||||
if col.name in vl.layer_collection.children:
|
||||
vl.layer_collection.children[col.name].exclude = False
|
||||
vl.layer_collection.children[col.name].holdout = False
|
||||
vl.layer_collection.children[col.name].indirect_only = False
|
||||
print("Enabled collection:", col.name)
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check/fix hidden render collections: {e}")
|
||||
|
||||
# Read output format from file (created by Go code)
|
||||
format_file_path = %q
|
||||
output_format_override = None
|
||||
if os.path.exists(format_file_path):
|
||||
try:
|
||||
with open(format_file_path, 'r') as f:
|
||||
output_format_override = f.read().strip().upper()
|
||||
print(f"Read output format from file: '{output_format_override}'")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not read output format file: {e}")
|
||||
else:
|
||||
print(f"Warning: Output format file does not exist: {format_file_path}")
|
||||
|
||||
# Read render settings from JSON file (created by Go code)
|
||||
render_settings_file = %q
|
||||
render_settings_override = None
|
||||
if os.path.exists(render_settings_file):
|
||||
try:
|
||||
with open(render_settings_file, 'r') as f:
|
||||
render_settings_override = json.load(f)
|
||||
print(f"Loaded render settings from job metadata")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not read render settings file: {e}")
|
||||
`, formatFilePath, renderSettingsFilePath) + `
|
||||
|
||||
# Get current scene settings (preserve blend file preferences)
|
||||
scene = bpy.context.scene
|
||||
current_engine = scene.render.engine
|
||||
current_device = scene.cycles.device if hasattr(scene, 'cycles') and scene.cycles else None
|
||||
current_output_format = scene.render.image_settings.file_format
|
||||
|
||||
print(f"Blend file render engine: {current_engine}")
|
||||
if current_device:
|
||||
print(f"Blend file device setting: {current_device}")
|
||||
print(f"Blend file output format: {current_output_format}")
|
||||
|
||||
# Override output format if specified
|
||||
# The format file always takes precedence (it's written specifically for this job)
|
||||
if output_format_override:
|
||||
print(f"Overriding output format from '{current_output_format}' to '{output_format_override}'")
|
||||
# Map common format names to Blender's format constants
|
||||
# For video formats (EXR_264_MP4, EXR_AV1_MP4), we render as EXR frames first
|
||||
format_to_use = output_format_override.upper()
|
||||
if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4']:
|
||||
format_to_use = 'EXR' # Render as EXR for video formats
|
||||
|
||||
format_map = {
|
||||
'PNG': 'PNG',
|
||||
'JPEG': 'JPEG',
|
||||
'JPG': 'JPEG',
|
||||
'EXR': 'OPEN_EXR',
|
||||
'OPEN_EXR': 'OPEN_EXR',
|
||||
'TARGA': 'TARGA',
|
||||
'TIFF': 'TIFF',
|
||||
'BMP': 'BMP',
|
||||
}
|
||||
blender_format = format_map.get(format_to_use, format_to_use)
|
||||
try:
|
||||
scene.render.image_settings.file_format = blender_format
|
||||
print(f"Successfully set output format to: {blender_format}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set output format to {blender_format}: {e}")
|
||||
print(f"Using blend file's format: {current_output_format}")
|
||||
else:
|
||||
print(f"Using blend file's output format: {current_output_format}")
|
||||
|
||||
# Apply render settings from job metadata if provided
|
||||
# Note: output_format is NOT applied from render_settings_override - it's already set from format file above
|
||||
if render_settings_override:
|
||||
engine_override = render_settings_override.get('engine', '').upper()
|
||||
engine_settings = render_settings_override.get('engine_settings', {})
|
||||
|
||||
# Switch engine if specified
|
||||
if engine_override and engine_override != current_engine.upper():
|
||||
print(f"Switching render engine from '{current_engine}' to '{engine_override}'")
|
||||
try:
|
||||
scene.render.engine = engine_override
|
||||
current_engine = engine_override
|
||||
print(f"Successfully switched to {engine_override} engine")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not switch engine to {engine_override}: {e}")
|
||||
print(f"Using blend file's engine: {current_engine}")
|
||||
|
||||
# Apply engine-specific settings
|
||||
if engine_settings:
|
||||
if current_engine.upper() == 'CYCLES':
|
||||
cycles = scene.cycles
|
||||
print("Applying Cycles render settings from job metadata...")
|
||||
for key, value in engine_settings.items():
|
||||
try:
|
||||
if hasattr(cycles, key):
|
||||
setattr(cycles, key, value)
|
||||
print(f" Set Cycles.{key} = {value}")
|
||||
else:
|
||||
print(f" Warning: Cycles has no attribute '{key}'")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not set Cycles.{key} = {value}: {e}")
|
||||
elif current_engine.upper() in ['EEVEE', 'EEVEE_NEXT']:
|
||||
eevee = scene.eevee
|
||||
print("Applying EEVEE render settings from job metadata...")
|
||||
for key, value in engine_settings.items():
|
||||
try:
|
||||
if hasattr(eevee, key):
|
||||
setattr(eevee, key, value)
|
||||
print(f" Set EEVEE.{key} = {value}")
|
||||
else:
|
||||
print(f" Warning: EEVEE has no attribute '{key}'")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not set EEVEE.{key} = {value}: {e}")
|
||||
|
||||
# Apply resolution if specified
|
||||
if 'resolution_x' in render_settings_override:
|
||||
try:
|
||||
scene.render.resolution_x = render_settings_override['resolution_x']
|
||||
print(f"Set resolution_x = {render_settings_override['resolution_x']}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set resolution_x: {e}")
|
||||
if 'resolution_y' in render_settings_override:
|
||||
try:
|
||||
scene.render.resolution_y = render_settings_override['resolution_y']
|
||||
print(f"Set resolution_y = {render_settings_override['resolution_y']}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set resolution_y: {e}")
|
||||
|
||||
# Only override device selection if using Cycles (other engines handle GPU differently)
|
||||
if current_engine == 'CYCLES':
|
||||
# Check if CPU rendering is forced
|
||||
force_cpu = False
|
||||
if render_settings_override and render_settings_override.get('force_cpu'):
|
||||
force_cpu = render_settings_override.get('force_cpu', False)
|
||||
print("Force CPU rendering is enabled - skipping GPU detection")
|
||||
|
||||
# Ensure Cycles addon is enabled
|
||||
try:
|
||||
if 'cycles' not in bpy.context.preferences.addons:
|
||||
bpy.ops.preferences.addon_enable(module='cycles')
|
||||
print("Enabled Cycles addon")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not enable Cycles addon: {e}")
|
||||
|
||||
# If CPU is forced, skip GPU detection and set CPU directly
|
||||
if force_cpu:
|
||||
scene.cycles.device = 'CPU'
|
||||
print("Forced CPU rendering (skipping GPU detection)")
|
||||
else:
|
||||
# Access Cycles preferences
|
||||
prefs = bpy.context.preferences
|
||||
try:
|
||||
cycles_prefs = prefs.addons['cycles'].preferences
|
||||
except (KeyError, AttributeError):
|
||||
try:
|
||||
cycles_addon = prefs.addons.get('cycles')
|
||||
if cycles_addon:
|
||||
cycles_prefs = cycles_addon.preferences
|
||||
else:
|
||||
raise Exception("Cycles addon not found")
|
||||
except Exception as e:
|
||||
print(f"ERROR: Could not access Cycles preferences: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
# Check all devices and choose the best GPU type
|
||||
# Device type preference order (most performant first)
|
||||
device_type_preference = ['OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL']
|
||||
gpu_available = False
|
||||
best_device_type = None
|
||||
best_gpu_devices = []
|
||||
devices_by_type = {} # {device_type: [devices]}
|
||||
seen_device_ids = set() # Track device IDs to avoid duplicates
|
||||
|
||||
print("Checking for GPU availability...")
|
||||
|
||||
# Try to get all devices - try each device type to see what's available
|
||||
for device_type in device_type_preference:
|
||||
try:
|
||||
cycles_prefs.compute_device_type = device_type
|
||||
cycles_prefs.refresh_devices()
|
||||
|
||||
# Get devices for this type
|
||||
devices = None
|
||||
if hasattr(cycles_prefs, 'devices'):
|
||||
try:
|
||||
devices_prop = cycles_prefs.devices
|
||||
if devices_prop:
|
||||
devices = list(devices_prop) if hasattr(devices_prop, '__iter__') else [devices_prop]
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
if not devices or len(devices) == 0:
|
||||
try:
|
||||
devices = cycles_prefs.get_devices()
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
if devices and len(devices) > 0:
|
||||
# Categorize devices by their type attribute, avoiding duplicates
|
||||
for device in devices:
|
||||
if hasattr(device, 'type'):
|
||||
device_type_str = str(device.type).upper()
|
||||
device_id = getattr(device, 'id', None)
|
||||
|
||||
# Use device ID to avoid duplicates (same device appears when checking different compute_device_types)
|
||||
if device_id and device_id in seen_device_ids:
|
||||
continue
|
||||
|
||||
if device_id:
|
||||
seen_device_ids.add(device_id)
|
||||
|
||||
if device_type_str not in devices_by_type:
|
||||
devices_by_type[device_type_str] = []
|
||||
devices_by_type[device_type_str].append(device)
|
||||
except (ValueError, AttributeError, KeyError, TypeError):
|
||||
# Device type not supported, continue
|
||||
continue
|
||||
except Exception as e:
|
||||
# Other errors - log but continue
|
||||
print(f" Error checking {device_type}: {e}")
|
||||
continue
|
||||
|
||||
# Print what we found
|
||||
print(f"Found devices by type: {list(devices_by_type.keys())}")
|
||||
for dev_type, dev_list in devices_by_type.items():
|
||||
print(f" {dev_type}: {len(dev_list)} device(s)")
|
||||
for device in dev_list:
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" - {device_name}")
|
||||
|
||||
# Choose the best GPU type based on preference
|
||||
for preferred_type in device_type_preference:
|
||||
if preferred_type in devices_by_type:
|
||||
gpu_devices = [d for d in devices_by_type[preferred_type] if preferred_type in ['CUDA', 'OPENCL', 'OPTIX', 'HIP', 'METAL', 'ONEAPI']]
|
||||
if gpu_devices:
|
||||
best_device_type = preferred_type
|
||||
best_gpu_devices = [(d, preferred_type) for d in gpu_devices]
|
||||
print(f"Selected {preferred_type} as best GPU type with {len(gpu_devices)} device(s)")
|
||||
break
|
||||
|
||||
# Second pass: Enable the best GPU we found
|
||||
if best_device_type and best_gpu_devices:
|
||||
print(f"\nEnabling GPU devices for {best_device_type}...")
|
||||
try:
|
||||
# Set the device type again
|
||||
cycles_prefs.compute_device_type = best_device_type
|
||||
cycles_prefs.refresh_devices()
|
||||
|
||||
# First, disable all CPU devices to ensure only GPU is used
|
||||
print(f" Disabling CPU devices...")
|
||||
all_devices = cycles_prefs.devices if hasattr(cycles_prefs, 'devices') else cycles_prefs.get_devices()
|
||||
if all_devices:
|
||||
for device in all_devices:
|
||||
if hasattr(device, 'type') and str(device.type).upper() == 'CPU':
|
||||
try:
|
||||
device.use = False
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" Disabled CPU: {device_name}")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not disable CPU device {getattr(device, 'name', 'Unknown')}: {e}")
|
||||
|
||||
# Enable all GPU devices
|
||||
enabled_count = 0
|
||||
for device, device_type in best_gpu_devices:
|
||||
try:
|
||||
device.use = True
|
||||
enabled_count += 1
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" Enabled: {device_name}")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not enable device {getattr(device, 'name', 'Unknown')}: {e}")
|
||||
|
||||
# Enable ray tracing acceleration for supported device types
|
||||
try:
|
||||
if best_device_type == 'HIP':
|
||||
# HIPRT (HIP Ray Tracing) for AMD GPUs
|
||||
if hasattr(cycles_prefs, 'use_hiprt'):
|
||||
cycles_prefs.use_hiprt = True
|
||||
print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering")
|
||||
elif hasattr(scene.cycles, 'use_hiprt'):
|
||||
scene.cycles.use_hiprt = True
|
||||
print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering")
|
||||
else:
|
||||
print(f" HIPRT not available (requires Blender 4.0+)")
|
||||
elif best_device_type == 'OPTIX':
|
||||
# OptiX is already enabled when using OPTIX device type
|
||||
# But we can check if there are any OptiX-specific settings
|
||||
if hasattr(scene.cycles, 'use_optix_denoising'):
|
||||
scene.cycles.use_optix_denoising = True
|
||||
print(f" Enabled OptiX denoising")
|
||||
print(f" OptiX ray tracing is active (using OPTIX device type)")
|
||||
elif best_device_type == 'CUDA':
|
||||
# CUDA can use OptiX if available, but it's usually automatic
|
||||
# Check if we can prefer OptiX over CUDA
|
||||
if hasattr(scene.cycles, 'use_optix_denoising'):
|
||||
scene.cycles.use_optix_denoising = True
|
||||
print(f" Enabled OptiX denoising (if OptiX available)")
|
||||
print(f" CUDA ray tracing active")
|
||||
elif best_device_type == 'METAL':
|
||||
# MetalRT for Apple Silicon (if available)
|
||||
if hasattr(scene.cycles, 'use_metalrt'):
|
||||
scene.cycles.use_metalrt = True
|
||||
print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering")
|
||||
elif hasattr(cycles_prefs, 'use_metalrt'):
|
||||
cycles_prefs.use_metalrt = True
|
||||
print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering")
|
||||
else:
|
||||
print(f" MetalRT not available")
|
||||
elif best_device_type == 'ONEAPI':
|
||||
# Intel oneAPI - Embree might be available
|
||||
if hasattr(scene.cycles, 'use_embree'):
|
||||
scene.cycles.use_embree = True
|
||||
print(f" Enabled Embree for faster CPU ray tracing")
|
||||
print(f" oneAPI ray tracing active")
|
||||
except Exception as e:
|
||||
print(f" Could not enable ray tracing acceleration: {e}")
|
||||
|
||||
print(f"SUCCESS: Enabled {enabled_count} GPU device(s) for {best_device_type}")
|
||||
gpu_available = True
|
||||
except Exception as e:
|
||||
print(f"ERROR: Failed to enable GPU devices: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Set device based on availability (prefer GPU, fallback to CPU)
|
||||
if gpu_available:
|
||||
scene.cycles.device = 'GPU'
|
||||
print(f"Using GPU for rendering (blend file had: {current_device})")
|
||||
else:
|
||||
scene.cycles.device = 'CPU'
|
||||
print(f"GPU not available, using CPU for rendering (blend file had: {current_device})")
|
||||
|
||||
# Verify device setting
|
||||
if current_engine == 'CYCLES':
|
||||
final_device = scene.cycles.device
|
||||
print(f"Final Cycles device: {final_device}")
|
||||
else:
|
||||
# For other engines (EEVEE, etc.), respect blend file settings
|
||||
print(f"Using {current_engine} engine - respecting blend file settings")
|
||||
|
||||
# Enable GPU acceleration for EEVEE viewport rendering (if using EEVEE)
|
||||
if current_engine == 'EEVEE' or current_engine == 'EEVEE_NEXT':
|
||||
try:
|
||||
if hasattr(bpy.context.preferences.system, 'gpu_backend'):
|
||||
bpy.context.preferences.system.gpu_backend = 'OPENGL'
|
||||
print("Enabled OpenGL GPU backend for EEVEE")
|
||||
except Exception as e:
|
||||
print(f"Could not set EEVEE GPU backend: {e}")
|
||||
|
||||
# Enable GPU acceleration for compositing (if compositing is enabled)
|
||||
try:
|
||||
if scene.use_nodes and hasattr(scene, 'node_tree') and scene.node_tree:
|
||||
if hasattr(scene.node_tree, 'use_gpu_compositing'):
|
||||
scene.node_tree.use_gpu_compositing = True
|
||||
print("Enabled GPU compositing")
|
||||
except Exception as e:
|
||||
print(f"Could not enable GPU compositing: {e}")
|
||||
|
||||
# CRITICAL: Initialize headless rendering to prevent black images
|
||||
# This ensures the render engine is properly initialized before rendering
|
||||
print("Initializing headless rendering context...")
|
||||
try:
|
||||
# Ensure world exists and has proper settings
|
||||
if not scene.world:
|
||||
# Create a default world if none exists
|
||||
world = bpy.data.worlds.new("World")
|
||||
scene.world = world
|
||||
print("Created default world")
|
||||
|
||||
# Ensure world has a background shader (not just black)
|
||||
if scene.world:
|
||||
# Enable nodes if not already enabled
|
||||
if not scene.world.use_nodes:
|
||||
scene.world.use_nodes = True
|
||||
print("Enabled world nodes")
|
||||
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
# Find or create background shader
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
|
||||
if not bg_shader:
|
||||
bg_shader = world_nodes.nodes.new(type='ShaderNodeBackground')
|
||||
# Connect to output
|
||||
output = world_nodes.nodes.get('World Output')
|
||||
if not output:
|
||||
output = world_nodes.nodes.new(type='ShaderNodeOutputWorld')
|
||||
output.name = 'World Output'
|
||||
if output and bg_shader:
|
||||
# Connect background to surface input
|
||||
if 'Surface' in output.inputs and 'Background' in bg_shader.outputs:
|
||||
world_nodes.links.new(bg_shader.outputs['Background'], output.inputs['Surface'])
|
||||
print("Created background shader for world")
|
||||
|
||||
# Ensure background has some color (not pure black)
|
||||
if bg_shader:
|
||||
# Only set if it's pure black (0,0,0)
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
# Set to a very dark gray instead of pure black
|
||||
bg_shader.inputs['Color'].default_value = (0.01, 0.01, 0.01, 1.0)
|
||||
print("Adjusted world background color to prevent black renders")
|
||||
else:
|
||||
# Fallback: use legacy world color if nodes aren't working
|
||||
if hasattr(scene.world, 'color'):
|
||||
color = scene.world.color
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
scene.world.color = (0.01, 0.01, 0.01)
|
||||
print("Adjusted legacy world color to prevent black renders")
|
||||
|
||||
# For EEVEE, force viewport update to initialize render engine
|
||||
if current_engine in ['EEVEE', 'EEVEE_NEXT']:
|
||||
# Force EEVEE to update its internal state
|
||||
try:
|
||||
# Update depsgraph to ensure everything is initialized
|
||||
depsgraph = bpy.context.evaluated_depsgraph_get()
|
||||
if depsgraph:
|
||||
# Force update
|
||||
depsgraph.update()
|
||||
print("Forced EEVEE depsgraph update for headless rendering")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not force EEVEE update: {e}")
|
||||
|
||||
# Ensure EEVEE settings are applied
|
||||
try:
|
||||
# Force a material update to ensure shaders are compiled
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'MESH' and obj.data.materials:
|
||||
for mat in obj.data.materials:
|
||||
if mat and mat.use_nodes:
|
||||
# Touch the material to force update
|
||||
mat.use_nodes = mat.use_nodes
|
||||
print("Forced material updates for EEVEE")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not update materials: {e}")
|
||||
|
||||
# For Cycles, ensure proper initialization
|
||||
if current_engine == 'CYCLES':
|
||||
# Ensure samples are set (even if 1 for preview)
|
||||
if not hasattr(scene.cycles, 'samples') or scene.cycles.samples < 1:
|
||||
scene.cycles.samples = 1
|
||||
print("Set minimum Cycles samples")
|
||||
|
||||
# Check for lights in the scene
|
||||
lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
|
||||
print(f"Found {len(lights)} light(s) in scene")
|
||||
if len(lights) == 0:
|
||||
print("WARNING: No lights found in scene - rendering may be black!")
|
||||
print(" Consider adding lights or ensuring world background emits light")
|
||||
|
||||
# Ensure world background emits light (critical for Cycles)
|
||||
if scene.world and scene.world.use_nodes:
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
|
||||
if bg_shader:
|
||||
# Check and set strength - Cycles needs this to emit light!
|
||||
if hasattr(bg_shader.inputs, 'Strength'):
|
||||
strength = bg_shader.inputs['Strength'].default_value
|
||||
if strength <= 0.0:
|
||||
bg_shader.inputs['Strength'].default_value = 1.0
|
||||
print("Set world background strength to 1.0 for Cycles lighting")
|
||||
else:
|
||||
print(f"World background strength: {strength}")
|
||||
# Also ensure color is not pure black
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
bg_shader.inputs['Color'].default_value = (1.0, 1.0, 1.0, 1.0)
|
||||
print("Set world background color to white for Cycles lighting")
|
||||
|
||||
# Check film_transparent setting - if enabled, background will be transparent/black
|
||||
if hasattr(scene.cycles, 'film_transparent') and scene.cycles.film_transparent:
|
||||
print("WARNING: film_transparent is enabled - background will be transparent")
|
||||
print(" If you see black renders, try disabling film_transparent")
|
||||
|
||||
# Force Cycles to update/compile materials and shaders
|
||||
try:
|
||||
# Update depsgraph to ensure everything is initialized
|
||||
depsgraph = bpy.context.evaluated_depsgraph_get()
|
||||
if depsgraph:
|
||||
depsgraph.update()
|
||||
print("Forced Cycles depsgraph update")
|
||||
|
||||
# Force material updates to ensure shaders are compiled
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'MESH' and obj.data.materials:
|
||||
for mat in obj.data.materials:
|
||||
if mat and mat.use_nodes:
|
||||
# Force material update
|
||||
mat.use_nodes = mat.use_nodes
|
||||
print("Forced Cycles material updates")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not force Cycles updates: {e}")
|
||||
|
||||
# Verify device is actually set correctly
|
||||
if hasattr(scene.cycles, 'device'):
|
||||
actual_device = scene.cycles.device
|
||||
print(f"Cycles device setting: {actual_device}")
|
||||
if actual_device == 'GPU':
|
||||
# Try to verify GPU is actually available
|
||||
try:
|
||||
prefs = bpy.context.preferences
|
||||
cycles_prefs = prefs.addons['cycles'].preferences
|
||||
devices = cycles_prefs.devices
|
||||
enabled_devices = [d for d in devices if d.use]
|
||||
if len(enabled_devices) == 0:
|
||||
print("WARNING: GPU device set but no GPU devices are enabled!")
|
||||
print(" Falling back to CPU may cause issues")
|
||||
except Exception as e:
|
||||
print(f"Could not verify GPU devices: {e}")
|
||||
|
||||
# Ensure camera exists and is active
|
||||
if scene.camera is None:
|
||||
# Find first camera in scene
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'CAMERA':
|
||||
scene.camera = obj
|
||||
print(f"Set active camera: {obj.name}")
|
||||
break
|
||||
|
||||
# Fix objects and collections hidden from render
|
||||
vl = bpy.context.view_layer
|
||||
|
||||
# 1. Objects hidden in view layer
|
||||
for obj in bpy.data.objects:
|
||||
if obj.hide_get(view_layer=vl):
|
||||
if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]):
|
||||
obj.hide_set(False, view_layer=vl)
|
||||
print("Enabled object:", obj.name)
|
||||
|
||||
# 2. Collections disabled in renders OR set to Holdout (the final killer)
|
||||
for col in bpy.data.collections:
|
||||
if col.hide_render or (vl.layer_collection.children.get(col.name) and not vl.layer_collection.children[col.name].exclude == False):
|
||||
if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision","dick"]):
|
||||
col.hide_render = False
|
||||
if col.name in vl.layer_collection.children:
|
||||
vl.layer_collection.children[col.name].exclude = False
|
||||
vl.layer_collection.children[col.name].holdout = False
|
||||
vl.layer_collection.children[col.name].indirect_only = False
|
||||
print("Enabled collection:", col.name)
|
||||
|
||||
print("Headless rendering initialization complete")
|
||||
except Exception as e:
|
||||
print(f"Warning: Headless rendering initialization had issues: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Final verification before rendering
|
||||
print("\n=== Pre-render verification ===")
|
||||
try:
|
||||
scene = bpy.context.scene
|
||||
print(f"Render engine: {scene.render.engine}")
|
||||
print(f"Active camera: {scene.camera.name if scene.camera else 'None'}")
|
||||
|
||||
if scene.render.engine == 'CYCLES':
|
||||
print(f"Cycles device: {scene.cycles.device}")
|
||||
print(f"Cycles samples: {scene.cycles.samples}")
|
||||
lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
|
||||
print(f"Lights in scene: {len(lights)}")
|
||||
if scene.world:
|
||||
if scene.world.use_nodes:
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
if bg_shader:
|
||||
if hasattr(bg_shader.inputs, 'Strength'):
|
||||
strength = bg_shader.inputs['Strength'].default_value
|
||||
print(f"World background strength: {strength}")
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
print(f"World background color: ({color[0]:.2f}, {color[1]:.2f}, {color[2]:.2f})")
|
||||
else:
|
||||
print("World exists but nodes are disabled")
|
||||
else:
|
||||
print("WARNING: No world in scene!")
|
||||
|
||||
print("=== Verification complete ===\n")
|
||||
except Exception as e:
|
||||
print(f"Warning: Verification failed: {e}")
|
||||
|
||||
print("Device configuration complete - blend file settings preserved, device optimized")
|
||||
sys.stdout.flush()
|
||||
`
// Load template and replace placeholders
scriptContent := scripts.RenderBlenderTemplate
scriptContent = strings.ReplaceAll(scriptContent, "{{UNHIDE_CODE}}", unhideCode)
scriptContent = strings.ReplaceAll(scriptContent, "{{FORMAT_FILE_PATH}}", fmt.Sprintf("%q", formatFilePath))
scriptContent = strings.ReplaceAll(scriptContent, "{{RENDER_SETTINGS_FILE}}", fmt.Sprintf("%q", renderSettingsFilePath))
scriptPath := filepath.Join(workDir, "enable_gpu.py")
if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
    errMsg := fmt.Sprintf("failed to create GPU enable script: %v", err)
@@ -1765,23 +1141,30 @@ sys.stdout.flush()
    }
}

// Check if execution should be enabled (defaults to false/off)
enableExecution := false
if jobMetadata != nil && jobMetadata.EnableExecution != nil && *jobMetadata.EnableExecution {
    enableExecution = true
}

// Run Blender with GPU enabled via Python script
// Use -s (start) and -e (end) for frame ranges, or -f for single frame
var cmd *exec.Cmd
args := []string{"-b", blendFile, "--python", scriptPath}
if enableExecution {
    args = append(args, "--enable-autoexec")
}
if frameStart == frameEnd {
    // Single frame
    cmd = exec.Command("blender", "-b", blendFile,
        "--python", scriptPath,
        "-o", absOutputPattern,
        "-f", fmt.Sprintf("%d", frameStart))
    args = append(args, "-o", absOutputPattern, "-f", fmt.Sprintf("%d", frameStart))
    cmd = exec.Command("blender", args...)
} else {
    // Frame range
    cmd = exec.Command("blender", "-b", blendFile,
        "--python", scriptPath,
        "-o", absOutputPattern,
    args = append(args, "-o", absOutputPattern,
        "-s", fmt.Sprintf("%d", frameStart),
        "-e", fmt.Sprintf("%d", frameEnd),
        "-a") // -a renders animation (all frames in range)
    cmd = exec.Command("blender", args...)
}
cmd.Dir = workDir
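For illustration, a standalone sketch of how the two branches above assemble the final Blender invocation; the blend file, output pattern, and frame numbers are made up, not taken from the repository:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Shared prefix: background mode, blend file, settings-override script, optional autoexec.
    base := []string{"-b", "scene.blend", "--python", "enable_gpu.py", "--enable-autoexec"}

    // Single frame -> blender -b scene.blend --python enable_gpu.py --enable-autoexec -o out/frame_#### -f 12
    single := append(append([]string{}, base...), "-o", "out/frame_####", "-f", "12")

    // Frame range -> ... -o out/frame_#### -s 1 -e 250 -a
    ranged := append(append([]string{}, base...), "-o", "out/frame_####", "-s", "1", "-e", "250", "-a")

    fmt.Println("blender " + strings.Join(single, " "))
    fmt.Println("blender " + strings.Join(ranged, " "))
}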
@@ -3261,8 +2644,11 @@ func (c *Client) getJobMetadata(jobID int64) (*types.BlendMetadata, error) {

// downloadFrameFile downloads a frame file for MP4 generation
func (c *Client) downloadFrameFile(jobID int64, fileName, destPath string) error {
    path := fmt.Sprintf("/api/runner/files/%d/%s", jobID, fileName)
    resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
    // URL encode the fileName to handle special characters in filenames
    encodedFileName := url.PathEscape(fileName)
    path := fmt.Sprintf("/api/runner/files/%d/%s", jobID, encodedFileName)
    // Use long-running client for file downloads (no timeout) - EXR files can be large
    resp, err := c.doSignedRequestLong("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
    if err != nil {
        return err
    }
@@ -3330,7 +2716,8 @@ func (c *Client) downloadFileToPath(filePath, destPath string) error {
    downloadPath += "/" + filepath.Base(filePath)
    }

    resp, err := c.doSignedRequest("GET", downloadPath, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
    // Use long-running client for file downloads (no timeout)
    resp, err := c.doSignedRequestLong("GET", downloadPath, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
    if err != nil {
        return fmt.Errorf("failed to download file: %w", err)
    }
@@ -3392,7 +2779,8 @@ func (c *Client) uploadFile(jobID int64, filePath string) (string, error) {
    req.Header.Set("Content-Type", formWriter.FormDataContentType())
    req.Header.Set("X-Runner-Secret", c.runnerSecret)

    resp, err := c.httpClient.Do(req)
    // Use long-running client for file uploads (no timeout)
    resp, err := c.longRunningClient.Do(req)
    if err != nil {
        return "", fmt.Errorf("failed to upload file: %w", err)
    }
@@ -3424,7 +2812,7 @@ func (c *Client) getContextCacheKey(jobID int64) string {
func (c *Client) getContextCachePath(cacheKey string) string {
    cacheDir := filepath.Join(c.getWorkspaceDir(), "cache", "contexts")
    os.MkdirAll(cacheDir, 0755)
    return filepath.Join(cacheDir, cacheKey+".tar.gz")
    return filepath.Join(cacheDir, cacheKey+".tar")
}

// isContextCacheValid checks if a cached context file exists and is not expired (1 hour TTL)
@@ -3437,7 +2825,7 @@ func (c *Client) isContextCacheValid(cachePath string) bool {
    return time.Since(info.ModTime()) < time.Hour
}

// downloadJobContext downloads the job context tar.gz, using cache if available
// downloadJobContext downloads the job context tar, using cache if available
func (c *Client) downloadJobContext(jobID int64, destPath string) error {
    cacheKey := c.getContextCacheKey(jobID)
    cachePath := c.getContextCachePath(cacheKey)
@@ -3464,9 +2852,9 @@ func (c *Client) downloadJobContext(jobID int64, destPath string) error {
        }
    }

    // Download from manager
    path := fmt.Sprintf("/api/runner/jobs/%d/context.tar.gz", jobID)
    resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
    // Download from manager - use long-running client (no timeout) for large context files
    path := fmt.Sprintf("/api/runner/jobs/%d/context.tar", jobID)
    resp, err := c.doSignedRequestLong("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
    if err != nil {
        return fmt.Errorf("failed to download context: %w", err)
    }
@@ -3517,24 +2905,17 @@ func (c *Client) downloadJobContext(jobID int64, destPath string) error {
    return nil
}
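The hunk above elides the cache-hit path. Under the design described here (one archive per job key, one-hour TTL on the cached file's mtime), the overall flow is roughly the sketch below; this is an assumption about the elided code, with hypothetical paths and a simple copy helper, not the repository's implementation:

package main

import (
    "io"
    "os"
    "time"
)

// copyFile is a hypothetical helper; the real runner has its own copy routine.
func copyFile(src, dst string) error {
    in, err := os.Open(src)
    if err != nil {
        return err
    }
    defer in.Close()
    out, err := os.Create(dst)
    if err != nil {
        return err
    }
    defer out.Close()
    _, err = io.Copy(out, in)
    return err
}

// cacheIsFresh mirrors the one-hour TTL check shown above.
func cacheIsFresh(path string) bool {
    info, err := os.Stat(path)
    return err == nil && time.Since(info.ModTime()) < time.Hour
}

func main() {
    cachePath := "cache/contexts/job-42.tar" // illustrative path
    destPath := "work/context.tar"
    if cacheIsFresh(cachePath) {
        _ = copyFile(cachePath, destPath) // cache hit: skip the download entirely
        return
    }
    // otherwise: download from the manager, then copy destPath back into cachePath
}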
// extractTarGz extracts a tar.gz file to the destination directory
func (c *Client) extractTarGz(tarGzPath, destDir string) error {
    // Open the tar.gz file
    file, err := os.Open(tarGzPath)
// extractTar extracts a tar file to the destination directory
func (c *Client) extractTar(tarPath, destDir string) error {
    // Open the tar file
    file, err := os.Open(tarPath)
    if err != nil {
        return fmt.Errorf("failed to open tar.gz file: %w", err)
        return fmt.Errorf("failed to open tar file: %w", err)
    }
    defer file.Close()

    // Create gzip reader
    gzReader, err := gzip.NewReader(file)
    if err != nil {
        return fmt.Errorf("failed to create gzip reader: %w", err)
    }
    defer gzReader.Close()

    // Create tar reader
    tarReader := tar.NewReader(gzReader)
    tarReader := tar.NewReader(file)

    // Extract files
    for {
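The extraction loop itself falls outside the hunk. A typical archive/tar read loop with a basic path-traversal guard looks like the sketch below, continuing the function shown above and assuming the file's existing io, os, strings, and path/filepath imports; this is an assumption about the elided body, not the project's exact code:

for {
    hdr, err := tarReader.Next()
    if err == io.EOF {
        break
    }
    if err != nil {
        return fmt.Errorf("failed to read tar entry: %w", err)
    }

    // Keep every entry inside destDir (same guard as the ZIP path below).
    target := filepath.Join(destDir, hdr.Name)
    if !strings.HasPrefix(filepath.Clean(target), filepath.Clean(destDir)+string(os.PathSeparator)) {
        return fmt.Errorf("invalid path in tar: %s", hdr.Name)
    }

    switch hdr.Typeflag {
    case tar.TypeDir:
        if err := os.MkdirAll(target, 0755); err != nil {
            return err
        }
    case tar.TypeReg:
        if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
            return err
        }
        out, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(hdr.Mode))
        if err != nil {
            return err
        }
        if _, err := io.Copy(out, tarReader); err != nil {
            out.Close()
            return err
        }
        out.Close()
    }
}
return nil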
@@ -3635,16 +3016,16 @@ func (c *Client) processMetadataTask(task map[string]interface{}, jobID int64, i
c.sendStepUpdate(taskID, "download", types.StepStatusRunning, "")
c.sendLog(taskID, types.LogLevelInfo, "Downloading job context...", "download")

// Download context tar.gz
contextPath := filepath.Join(workDir, "context.tar.gz")
// Download context tar
contextPath := filepath.Join(workDir, "context.tar")
if err := c.downloadJobContext(jobID, contextPath); err != nil {
    c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
    return fmt.Errorf("failed to download context: %w", err)
}

// Extract context tar.gz
// Extract context tar
c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download")
if err := c.extractTarGz(contextPath, workDir); err != nil {
if err := c.extractTar(contextPath, workDir); err != nil {
    c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
    return fmt.Errorf("failed to extract context: %w", err)
}
@@ -3881,6 +3262,7 @@ sys.stdout.flush()
}

// Execute Blender with Python script
// Note: disable_execution flag is not applied to metadata extraction for safety
cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath)
cmd.Dir = workDir

@@ -3,9 +3,9 @@ package storage
import (
    "archive/tar"
    "archive/zip"
    "compress/gzip"
    "fmt"
    "io"
    "log"
    "os"
    "path/filepath"
    "strings"
@@ -31,6 +31,7 @@ func (s *Storage) init() error {
    s.basePath,
    s.uploadsPath(),
    s.outputsPath(),
    s.tempPath(),
    }

    for _, dir := range dirs {
@@ -42,6 +43,28 @@ func (s *Storage) init() error {
    return nil
}

// tempPath returns the path for temporary files
func (s *Storage) tempPath() string {
    return filepath.Join(s.basePath, "temp")
}

// BasePath returns the storage base path (for cleanup tasks)
func (s *Storage) BasePath() string {
    return s.basePath
}

// TempDir creates a temporary directory under the storage base path
// Returns the path to the temporary directory
func (s *Storage) TempDir(pattern string) (string, error) {
    // Ensure temp directory exists
    if err := os.MkdirAll(s.tempPath(), 0755); err != nil {
        return "", fmt.Errorf("failed to create temp directory: %w", err)
    }

    // Create temp directory under storage base path
    return os.MkdirTemp(s.tempPath(), pattern)
}
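A short caller-side sketch for the new TempDir helper; store is assumed to be a *Storage value and the pattern is illustrative:

// Stage files for a job in storage-local temp space, then clean up when done.
tmpDir, err := store.TempDir("job-upload-*")
if err != nil {
    return err
}
defer os.RemoveAll(tmpDir) // temp dirs live under <storage>/temp, so cleanup stays on the same volume

// ... write uploaded files into tmpDir, then build the context archive from it ...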
// uploadsPath returns the path for uploads
func (s *Storage) uploadsPath() string {
    return filepath.Join(s.basePath, "uploads")
@@ -142,6 +165,13 @@ func (s *Storage) GetFileSize(filePath string) (int64, error) {
// ExtractZip extracts a ZIP file to the destination directory
// Returns a list of all extracted file paths
func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
    log.Printf("Extracting ZIP archive: %s -> %s", zipPath, destDir)

    // Ensure destination directory exists
    if err := os.MkdirAll(destDir, 0755); err != nil {
        return nil, fmt.Errorf("failed to create destination directory: %w", err)
    }

    r, err := zip.OpenReader(zipPath)
    if err != nil {
        return nil, fmt.Errorf("failed to open ZIP file: %w", err)
@@ -149,12 +179,20 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
    defer r.Close()

    var extractedFiles []string
    fileCount := 0
    dirCount := 0

    log.Printf("ZIP contains %d entries", len(r.File))

    for _, f := range r.File {
        // Sanitize file path to prevent directory traversal
        destPath := filepath.Join(destDir, f.Name)
        if !strings.HasPrefix(destPath, filepath.Clean(destDir)+string(os.PathSeparator)) {
            return nil, fmt.Errorf("invalid file path in ZIP: %s", f.Name)

        cleanDestPath := filepath.Clean(destPath)
        cleanDestDir := filepath.Clean(destDir)
        if !strings.HasPrefix(cleanDestPath, cleanDestDir+string(os.PathSeparator)) && cleanDestPath != cleanDestDir {
            log.Printf("ERROR: Invalid file path in ZIP - target: %s, destDir: %s", cleanDestPath, cleanDestDir)
            return nil, fmt.Errorf("invalid file path in ZIP: %s (target: %s, destDir: %s)", f.Name, cleanDestPath, cleanDestDir)
        }

        // Create directory structure
@@ -162,6 +200,7 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
        if err := os.MkdirAll(destPath, 0755); err != nil {
            return nil, fmt.Errorf("failed to create directory: %w", err)
        }
        dirCount++
        continue
        }

@@ -191,8 +230,10 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
        }

        extractedFiles = append(extractedFiles, destPath)
        fileCount++
    }

    log.Printf("ZIP extraction complete: %d files, %d directories extracted to %s", fileCount, dirCount, destDir)
    return extractedFiles, nil
}
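The tightened check above is the usual Zip Slip guard: both paths are cleaned before comparison and the destination directory itself is allowed. A small self-contained demonstration of that predicate (entry names and the destination path are hypothetical):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// safeInDir reports whether joining name under destDir stays inside destDir.
func safeInDir(destDir, name string) bool {
    dest := filepath.Clean(filepath.Join(destDir, name))
    dir := filepath.Clean(destDir)
    return dest == dir || strings.HasPrefix(dest, dir+string(os.PathSeparator))
}

func main() {
    dir := "/storage/jobs/42/input" // illustrative destination
    fmt.Println(safeInDir(dir, "scenes/shot01.blend"))  // true
    fmt.Println(safeInDir(dir, "../../../etc/crontab")) // false: Zip Slip attempt
    fmt.Println(safeInDir(dir, "."))                    // true: the directory itself
}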
@@ -261,15 +302,15 @@ func isBlenderSaveFile(filename string) bool {
    return false
}

// CreateJobContext creates a tar.gz archive containing all job input files
// CreateJobContext creates a tar archive containing all job input files
// Filters out Blender save files (.blend1, .blend2, etc.)
// Uses temporary directories and streaming to handle large files efficiently
func (s *Storage) CreateJobContext(jobID int64) (string, error) {
    jobPath := s.JobPath(jobID)
    contextPath := filepath.Join(jobPath, "context.tar.gz")
    contextPath := filepath.Join(jobPath, "context.tar")

    // Create temporary directory for staging
    tmpDir, err := os.MkdirTemp("", "fuego-context-*")
    tmpDir, err := os.MkdirTemp("", "jiggablend-context-*")
    if err != nil {
        return "", fmt.Errorf("failed to create temporary directory: %w", err)
    }
@@ -320,17 +361,14 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
    return "", fmt.Errorf("no files found to include in context")
    }

    // Create the tar.gz file using streaming
    // Create the tar file using streaming
    contextFile, err := os.Create(contextPath)
    if err != nil {
        return "", fmt.Errorf("failed to create context file: %w", err)
    }
    defer contextFile.Close()

    gzWriter := gzip.NewWriter(contextFile)
    defer gzWriter.Close()

    tarWriter := tar.NewWriter(gzWriter)
    tarWriter := tar.NewWriter(contextFile)
    defer tarWriter.Close()

    // Add each file to the tar archive
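The per-file loop is elided by the hunk boundary. With a plain (uncompressed) tar writer it typically looks like the sketch below, built on tar.FileInfoHeader, WriteHeader, and io.Copy; the function name and example paths are hypothetical, not the repository's code:

package main

import (
    "archive/tar"
    "io"
    "os"
)

// addFileToTar appends one regular file to the archive under an archive-relative name.
func addFileToTar(tw *tar.Writer, srcPath, nameInArchive string) error {
    info, err := os.Stat(srcPath)
    if err != nil {
        return err
    }
    hdr, err := tar.FileInfoHeader(info, "")
    if err != nil {
        return err
    }
    hdr.Name = nameInArchive // e.g. "scene.blend" or "textures/wood.png"
    if err := tw.WriteHeader(hdr); err != nil {
        return err
    }
    f, err := os.Open(srcPath)
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = io.Copy(tw, f)
    return err
}

func main() {
    out, _ := os.Create("context.tar") // illustrative output path
    tw := tar.NewWriter(out)
    _ = addFileToTar(tw, "scene.blend", "scene.blend") // illustrative input
    tw.Close()
    out.Close()
}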
@@ -383,9 +421,6 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
    if err := tarWriter.Close(); err != nil {
        return "", fmt.Errorf("failed to close tar writer: %w", err)
    }
    if err := gzWriter.Close(); err != nil {
        return "", fmt.Errorf("failed to close gzip writer: %w", err)
    }
    if err := contextFile.Close(); err != nil {
        return "", fmt.Errorf("failed to close context file: %w", err)
    }
@@ -393,12 +428,12 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
    return contextPath, nil
}

// CreateJobContextFromDir creates a context archive (tar.gz) from files in a source directory
// CreateJobContextFromDir creates a context archive (tar) from files in a source directory
// This is used during upload to immediately create the context archive as the primary artifact
// excludeFiles is a set of relative paths (from sourceDir) to exclude from the context
func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, excludeFiles ...string) (string, error) {
    jobPath := s.JobPath(jobID)
    contextPath := filepath.Join(jobPath, "context.tar.gz")
    contextPath := filepath.Join(jobPath, "context.tar")

    // Ensure job directory exists
    if err := os.MkdirAll(jobPath, 0755); err != nil {
@@ -498,17 +533,14 @@ func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, exclude
    return "", fmt.Errorf("multiple .blend files found at root level in context archive (found %d, expected 1)", blendFilesAtRoot)
    }

    // Create the tar.gz file using streaming
    // Create the tar file using streaming
    contextFile, err := os.Create(contextPath)
    if err != nil {
        return "", fmt.Errorf("failed to create context file: %w", err)
    }
    defer contextFile.Close()

    gzWriter := gzip.NewWriter(contextFile)
    defer gzWriter.Close()

    tarWriter := tar.NewWriter(gzWriter)
    tarWriter := tar.NewWriter(contextFile)
    defer tarWriter.Close()

    // Add each file to the tar archive
@@ -560,9 +592,6 @@ func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, exclude
    if err := tarWriter.Close(); err != nil {
        return "", fmt.Errorf("failed to close tar writer: %w", err)
    }
    if err := gzWriter.Close(); err != nil {
        return "", fmt.Errorf("failed to close gzip writer: %w", err)
    }
    if err := contextFile.Close(); err != nil {
        return "", fmt.Errorf("failed to close context file: %w", err)
    }
13
pkg/scripts/scripts.go
Normal file
@@ -0,0 +1,13 @@
package scripts

import _ "embed"

//go:embed scripts/extract_metadata.py
var ExtractMetadata string

//go:embed scripts/unhide_objects.py
var UnhideObjects string

//go:embed scripts/render_blender.py.template
var RenderBlenderTemplate string
173
pkg/scripts/scripts/extract_metadata.py
Normal file
@@ -0,0 +1,173 @@
|
||||
import bpy
|
||||
import json
|
||||
import sys
|
||||
|
||||
# Make all file paths relative to the blend file location FIRST
|
||||
# This must be done immediately after file load, before any other operations
|
||||
# to prevent Blender from trying to access external files with absolute paths
|
||||
try:
|
||||
bpy.ops.file.make_paths_relative()
|
||||
print("Made all file paths relative to blend file")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not make paths relative: {e}")
|
||||
|
||||
# Check for missing addons that the blend file requires
|
||||
# Blender marks missing addons with "_missing" suffix in preferences
|
||||
missing_files_info = {
|
||||
"checked": False,
|
||||
"has_missing": False,
|
||||
"missing_files": [],
|
||||
"missing_addons": []
|
||||
}
|
||||
|
||||
try:
|
||||
missing = []
|
||||
for mod in bpy.context.preferences.addons:
|
||||
if mod.module.endswith("_missing"):
|
||||
missing.append(mod.module.rsplit("_", 1)[0])
|
||||
|
||||
missing_files_info["checked"] = True
|
||||
if missing:
|
||||
missing_files_info["has_missing"] = True
|
||||
missing_files_info["missing_addons"] = missing
|
||||
print("Missing add-ons required by this .blend:")
|
||||
for name in missing:
|
||||
print(" -", name)
|
||||
else:
|
||||
print("No missing add-ons detected – file is headless-safe")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check for missing addons: {e}")
|
||||
missing_files_info["error"] = str(e)
|
||||
|
||||
# Get scene
|
||||
scene = bpy.context.scene
|
||||
|
||||
# Extract frame range from scene settings
|
||||
frame_start = scene.frame_start
|
||||
frame_end = scene.frame_end
|
||||
|
||||
# Also check for actual animation range (keyframes)
|
||||
# Find the earliest and latest keyframes across all objects
|
||||
animation_start = None
|
||||
animation_end = None
|
||||
|
||||
for obj in scene.objects:
|
||||
if obj.animation_data and obj.animation_data.action:
|
||||
action = obj.animation_data.action
|
||||
if action.fcurves:
|
||||
for fcurve in action.fcurves:
|
||||
if fcurve.keyframe_points:
|
||||
for keyframe in fcurve.keyframe_points:
|
||||
frame = int(keyframe.co[0])
|
||||
if animation_start is None or frame < animation_start:
|
||||
animation_start = frame
|
||||
if animation_end is None or frame > animation_end:
|
||||
animation_end = frame
|
||||
|
||||
# Use animation range if available, otherwise use scene frame range
|
||||
# If scene range seems wrong (start == end), prefer animation range
|
||||
if animation_start is not None and animation_end is not None:
|
||||
if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end):
|
||||
# Use animation range if scene range is invalid or animation extends beyond it
|
||||
frame_start = animation_start
|
||||
frame_end = animation_end
|
||||
|
||||
# Extract render settings
|
||||
render = scene.render
|
||||
resolution_x = render.resolution_x
|
||||
resolution_y = render.resolution_y
|
||||
engine = scene.render.engine.upper()
|
||||
|
||||
# Determine output format from file format
|
||||
output_format = render.image_settings.file_format
|
||||
|
||||
# Extract engine-specific settings
|
||||
engine_settings = {}
|
||||
|
||||
if engine == 'CYCLES':
|
||||
cycles = scene.cycles
|
||||
engine_settings = {
|
||||
"samples": getattr(cycles, 'samples', 128),
|
||||
"use_denoising": getattr(cycles, 'use_denoising', False),
|
||||
"denoising_radius": getattr(cycles, 'denoising_radius', 0),
|
||||
"denoising_strength": getattr(cycles, 'denoising_strength', 0.0),
|
||||
"device": getattr(cycles, 'device', 'CPU'),
|
||||
"use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False),
|
||||
"adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01,
|
||||
"use_fast_gi": getattr(cycles, 'use_fast_gi', False),
|
||||
"light_tree": getattr(cycles, 'use_light_tree', False),
|
||||
"use_light_linking": getattr(cycles, 'use_light_linking', False),
|
||||
"caustics_reflective": getattr(cycles, 'caustics_reflective', False),
|
||||
"caustics_refractive": getattr(cycles, 'caustics_refractive', False),
|
||||
"blur_glossy": getattr(cycles, 'blur_glossy', 0.0),
|
||||
"max_bounces": getattr(cycles, 'max_bounces', 12),
|
||||
"diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
|
||||
"glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
|
||||
"transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
|
||||
"volume_bounces": getattr(cycles, 'volume_bounces', 0),
|
||||
"transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
|
||||
"film_transparent": getattr(cycles, 'film_transparent', False),
|
||||
"use_layer_samples": getattr(cycles, 'use_layer_samples', False),
|
||||
}
|
||||
elif engine == 'EEVEE' or engine == 'EEVEE_NEXT':
|
||||
eevee = scene.eevee
|
||||
engine_settings = {
|
||||
"taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
|
||||
"use_bloom": getattr(eevee, 'use_bloom', False),
|
||||
"bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
|
||||
"bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
|
||||
"bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
|
||||
"use_ssr": getattr(eevee, 'use_ssr', True),
|
||||
"use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
|
||||
"ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'),
|
||||
"use_ssao": getattr(eevee, 'use_ssao', True),
|
||||
"ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'),
|
||||
"ssao_distance": getattr(eevee, 'ssao_distance', 0.2),
|
||||
"ssao_factor": getattr(eevee, 'ssao_factor', 1.0),
|
||||
"use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
|
||||
"use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True),
|
||||
"use_volumetric": getattr(eevee, 'use_volumetric', False),
|
||||
"volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
|
||||
"volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
|
||||
"volumetric_start": getattr(eevee, 'volumetric_start', 0.0),
|
||||
"volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
|
||||
"use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
|
||||
"use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True),
|
||||
"use_gtao": getattr(eevee, 'use_gtao', False),
|
||||
"gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'),
|
||||
"use_overscan": getattr(eevee, 'use_overscan', False),
|
||||
}
|
||||
else:
|
||||
# For other engines, extract basic samples if available
|
||||
engine_settings = {
|
||||
"samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128
|
||||
}
|
||||
|
||||
# Extract scene info
|
||||
camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA'])
|
||||
object_count = len(scene.objects)
|
||||
material_count = len(bpy.data.materials)
|
||||
|
||||
# Build metadata dictionary
|
||||
metadata = {
|
||||
"frame_start": frame_start,
|
||||
"frame_end": frame_end,
|
||||
"render_settings": {
|
||||
"resolution_x": resolution_x,
|
||||
"resolution_y": resolution_y,
|
||||
"output_format": output_format,
|
||||
"engine": engine.lower(),
|
||||
"engine_settings": engine_settings
|
||||
},
|
||||
"scene_info": {
|
||||
"camera_count": camera_count,
|
||||
"object_count": object_count,
|
||||
"material_count": material_count
|
||||
},
|
||||
"missing_files_info": missing_files_info
|
||||
}
|
||||
|
||||
# Output as JSON
|
||||
print(json.dumps(metadata))
|
||||
sys.stdout.flush()
|
||||
|
||||
589
pkg/scripts/scripts/render_blender.py.template
Normal file
@@ -0,0 +1,589 @@
|
||||
import bpy
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
|
||||
# Make all file paths relative to the blend file location FIRST
|
||||
# This must be done immediately after file load, before any other operations
|
||||
# to prevent Blender from trying to access external files with absolute paths
|
||||
try:
|
||||
bpy.ops.file.make_paths_relative()
|
||||
print("Made all file paths relative to blend file")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not make paths relative: {e}")
|
||||
|
||||
{{UNHIDE_CODE}}
|
||||
# Read output format from file (created by Go code)
|
||||
format_file_path = {{FORMAT_FILE_PATH}}
|
||||
output_format_override = None
|
||||
if os.path.exists(format_file_path):
|
||||
try:
|
||||
with open(format_file_path, 'r') as f:
|
||||
output_format_override = f.read().strip().upper()
|
||||
print(f"Read output format from file: '{output_format_override}'")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not read output format file: {e}")
|
||||
else:
|
||||
print(f"Warning: Output format file does not exist: {format_file_path}")
|
||||
|
||||
# Read render settings from JSON file (created by Go code)
|
||||
render_settings_file = {{RENDER_SETTINGS_FILE}}
|
||||
render_settings_override = None
|
||||
if os.path.exists(render_settings_file):
|
||||
try:
|
||||
with open(render_settings_file, 'r') as f:
|
||||
render_settings_override = json.load(f)
|
||||
print(f"Loaded render settings from job metadata")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not read render settings file: {e}")
|
||||
|
||||
# Get current scene settings (preserve blend file preferences)
|
||||
scene = bpy.context.scene
|
||||
current_engine = scene.render.engine
|
||||
current_device = scene.cycles.device if hasattr(scene, 'cycles') and scene.cycles else None
|
||||
current_output_format = scene.render.image_settings.file_format
|
||||
|
||||
print(f"Blend file render engine: {current_engine}")
|
||||
if current_device:
|
||||
print(f"Blend file device setting: {current_device}")
|
||||
print(f"Blend file output format: {current_output_format}")
|
||||
|
||||
# Override output format if specified
|
||||
# The format file always takes precedence (it's written specifically for this job)
|
||||
if output_format_override:
|
||||
print(f"Overriding output format from '{current_output_format}' to '{output_format_override}'")
|
||||
# Map common format names to Blender's format constants
|
||||
# For video formats (EXR_264_MP4, EXR_AV1_MP4), we render as EXR frames first
|
||||
format_to_use = output_format_override.upper()
|
||||
if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4']:
|
||||
format_to_use = 'EXR' # Render as EXR for video formats
|
||||
|
||||
format_map = {
|
||||
'PNG': 'PNG',
|
||||
'JPEG': 'JPEG',
|
||||
'JPG': 'JPEG',
|
||||
'EXR': 'OPEN_EXR',
|
||||
'OPEN_EXR': 'OPEN_EXR',
|
||||
'TARGA': 'TARGA',
|
||||
'TIFF': 'TIFF',
|
||||
'BMP': 'BMP',
|
||||
}
|
||||
blender_format = format_map.get(format_to_use, format_to_use)
|
||||
try:
|
||||
scene.render.image_settings.file_format = blender_format
|
||||
print(f"Successfully set output format to: {blender_format}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set output format to {blender_format}: {e}")
|
||||
print(f"Using blend file's format: {current_output_format}")
|
||||
else:
|
||||
print(f"Using blend file's output format: {current_output_format}")
|
||||
|
||||
# Apply render settings from job metadata if provided
|
||||
# Note: output_format is NOT applied from render_settings_override - it's already set from format file above
|
||||
if render_settings_override:
|
||||
engine_override = render_settings_override.get('engine', '').upper()
|
||||
engine_settings = render_settings_override.get('engine_settings', {})
|
||||
|
||||
# Switch engine if specified
|
||||
if engine_override and engine_override != current_engine.upper():
|
||||
print(f"Switching render engine from '{current_engine}' to '{engine_override}'")
|
||||
try:
|
||||
scene.render.engine = engine_override
|
||||
current_engine = engine_override
|
||||
print(f"Successfully switched to {engine_override} engine")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not switch engine to {engine_override}: {e}")
|
||||
print(f"Using blend file's engine: {current_engine}")
|
||||
|
||||
# Apply engine-specific settings
|
||||
if engine_settings:
|
||||
if current_engine.upper() == 'CYCLES':
|
||||
cycles = scene.cycles
|
||||
print("Applying Cycles render settings from job metadata...")
|
||||
for key, value in engine_settings.items():
|
||||
try:
|
||||
if hasattr(cycles, key):
|
||||
setattr(cycles, key, value)
|
||||
print(f" Set Cycles.{key} = {value}")
|
||||
else:
|
||||
print(f" Warning: Cycles has no attribute '{key}'")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not set Cycles.{key} = {value}: {e}")
|
||||
elif current_engine.upper() in ['EEVEE', 'EEVEE_NEXT']:
|
||||
eevee = scene.eevee
|
||||
print("Applying EEVEE render settings from job metadata...")
|
||||
for key, value in engine_settings.items():
|
||||
try:
|
||||
if hasattr(eevee, key):
|
||||
setattr(eevee, key, value)
|
||||
print(f" Set EEVEE.{key} = {value}")
|
||||
else:
|
||||
print(f" Warning: EEVEE has no attribute '{key}'")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not set EEVEE.{key} = {value}: {e}")
|
||||
|
||||
# Apply resolution if specified
|
||||
if 'resolution_x' in render_settings_override:
|
||||
try:
|
||||
scene.render.resolution_x = render_settings_override['resolution_x']
|
||||
print(f"Set resolution_x = {render_settings_override['resolution_x']}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set resolution_x: {e}")
|
||||
if 'resolution_y' in render_settings_override:
|
||||
try:
|
||||
scene.render.resolution_y = render_settings_override['resolution_y']
|
||||
print(f"Set resolution_y = {render_settings_override['resolution_y']}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set resolution_y: {e}")
|
||||
|
||||
# Only override device selection if using Cycles (other engines handle GPU differently)
|
||||
if current_engine == 'CYCLES':
|
||||
# Check if CPU rendering is forced
|
||||
force_cpu = False
|
||||
if render_settings_override and render_settings_override.get('force_cpu'):
|
||||
force_cpu = render_settings_override.get('force_cpu', False)
|
||||
print("Force CPU rendering is enabled - skipping GPU detection")
|
||||
|
||||
# Ensure Cycles addon is enabled
|
||||
try:
|
||||
if 'cycles' not in bpy.context.preferences.addons:
|
||||
bpy.ops.preferences.addon_enable(module='cycles')
|
||||
print("Enabled Cycles addon")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not enable Cycles addon: {e}")
|
||||
|
||||
# If CPU is forced, skip GPU detection and set CPU directly
|
||||
if force_cpu:
|
||||
scene.cycles.device = 'CPU'
|
||||
print("Forced CPU rendering (skipping GPU detection)")
|
||||
else:
|
||||
# Access Cycles preferences
|
||||
prefs = bpy.context.preferences
|
||||
try:
|
||||
cycles_prefs = prefs.addons['cycles'].preferences
|
||||
except (KeyError, AttributeError):
|
||||
try:
|
||||
cycles_addon = prefs.addons.get('cycles')
|
||||
if cycles_addon:
|
||||
cycles_prefs = cycles_addon.preferences
|
||||
else:
|
||||
raise Exception("Cycles addon not found")
|
||||
except Exception as e:
|
||||
print(f"ERROR: Could not access Cycles preferences: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
# Check all devices and choose the best GPU type
|
||||
# Device type preference order (most performant first)
|
||||
device_type_preference = ['OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL']
|
||||
gpu_available = False
|
||||
best_device_type = None
|
||||
best_gpu_devices = []
|
||||
devices_by_type = {} # {device_type: [devices]}
|
||||
seen_device_ids = set() # Track device IDs to avoid duplicates
|
||||
|
||||
print("Checking for GPU availability...")
|
||||
|
||||
# Try to get all devices - try each device type to see what's available
|
||||
for device_type in device_type_preference:
|
||||
try:
|
||||
cycles_prefs.compute_device_type = device_type
|
||||
cycles_prefs.refresh_devices()
|
||||
|
||||
# Get devices for this type
|
||||
devices = None
|
||||
if hasattr(cycles_prefs, 'devices'):
|
||||
try:
|
||||
devices_prop = cycles_prefs.devices
|
||||
if devices_prop:
|
||||
devices = list(devices_prop) if hasattr(devices_prop, '__iter__') else [devices_prop]
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
if not devices or len(devices) == 0:
|
||||
try:
|
||||
devices = cycles_prefs.get_devices()
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
if devices and len(devices) > 0:
|
||||
# Categorize devices by their type attribute, avoiding duplicates
|
||||
for device in devices:
|
||||
if hasattr(device, 'type'):
|
||||
device_type_str = str(device.type).upper()
|
||||
device_id = getattr(device, 'id', None)
|
||||
|
||||
# Use device ID to avoid duplicates (same device appears when checking different compute_device_types)
|
||||
if device_id and device_id in seen_device_ids:
|
||||
continue
|
||||
|
||||
if device_id:
|
||||
seen_device_ids.add(device_id)
|
||||
|
||||
if device_type_str not in devices_by_type:
|
||||
devices_by_type[device_type_str] = []
|
||||
devices_by_type[device_type_str].append(device)
|
||||
except (ValueError, AttributeError, KeyError, TypeError):
|
||||
# Device type not supported, continue
|
||||
continue
|
||||
except Exception as e:
|
||||
# Other errors - log but continue
|
||||
print(f" Error checking {device_type}: {e}")
|
||||
continue
|
||||
|
||||
# Print what we found
|
||||
print(f"Found devices by type: {list(devices_by_type.keys())}")
|
||||
for dev_type, dev_list in devices_by_type.items():
|
||||
print(f" {dev_type}: {len(dev_list)} device(s)")
|
||||
for device in dev_list:
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" - {device_name}")
|
||||
|
||||
# Choose the best GPU type based on preference
|
||||
for preferred_type in device_type_preference:
|
||||
if preferred_type in devices_by_type:
|
||||
gpu_devices = list(devices_by_type[preferred_type])  # every type in the preference list is a GPU backend, so no per-device filter is needed
|
||||
if gpu_devices:
|
||||
best_device_type = preferred_type
|
||||
best_gpu_devices = [(d, preferred_type) for d in gpu_devices]
|
||||
print(f"Selected {preferred_type} as best GPU type with {len(gpu_devices)} device(s)")
|
||||
break
|
||||
|
||||
# Second pass: Enable the best GPU we found
|
||||
if best_device_type and best_gpu_devices:
|
||||
print(f"\nEnabling GPU devices for {best_device_type}...")
|
||||
try:
|
||||
# Set the device type again
|
||||
cycles_prefs.compute_device_type = best_device_type
|
||||
cycles_prefs.refresh_devices()
|
||||
|
||||
# First, disable all CPU devices to ensure only GPU is used
|
||||
print(f" Disabling CPU devices...")
|
||||
all_devices = cycles_prefs.devices if hasattr(cycles_prefs, 'devices') else cycles_prefs.get_devices()
|
||||
if all_devices:
|
||||
for device in all_devices:
|
||||
if hasattr(device, 'type') and str(device.type).upper() == 'CPU':
|
||||
try:
|
||||
device.use = False
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" Disabled CPU: {device_name}")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not disable CPU device {getattr(device, 'name', 'Unknown')}: {e}")
|
||||
|
||||
# Enable all GPU devices
|
||||
enabled_count = 0
|
||||
for device, device_type in best_gpu_devices:
|
||||
try:
|
||||
device.use = True
|
||||
enabled_count += 1
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" Enabled: {device_name}")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not enable device {getattr(device, 'name', 'Unknown')}: {e}")
|
||||
|
||||
# Enable ray tracing acceleration for supported device types
|
||||
try:
|
||||
if best_device_type == 'HIP':
|
||||
# HIPRT (HIP Ray Tracing) for AMD GPUs
|
||||
if hasattr(cycles_prefs, 'use_hiprt'):
|
||||
cycles_prefs.use_hiprt = True
|
||||
print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering")
|
||||
elif hasattr(scene.cycles, 'use_hiprt'):
|
||||
scene.cycles.use_hiprt = True
|
||||
print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering")
|
||||
else:
|
||||
print(f" HIPRT not available (requires Blender 4.0+)")
|
||||
elif best_device_type == 'OPTIX':
|
||||
# OptiX is already enabled when using OPTIX device type
|
||||
# But we can check if there are any OptiX-specific settings
|
||||
if hasattr(scene.cycles, 'use_optix_denoising'):
|
||||
scene.cycles.use_optix_denoising = True
|
||||
print(f" Enabled OptiX denoising")
|
||||
print(f" OptiX ray tracing is active (using OPTIX device type)")
|
||||
elif best_device_type == 'CUDA':
|
||||
# CUDA can use OptiX if available, but it's usually automatic
|
||||
# Check if we can prefer OptiX over CUDA
|
||||
if hasattr(scene.cycles, 'use_optix_denoising'):
|
||||
scene.cycles.use_optix_denoising = True
|
||||
print(f" Enabled OptiX denoising (if OptiX available)")
|
||||
print(f" CUDA ray tracing active")
|
||||
elif best_device_type == 'METAL':
|
||||
# MetalRT for Apple Silicon (if available)
|
||||
if hasattr(scene.cycles, 'use_metalrt'):
|
||||
scene.cycles.use_metalrt = True
|
||||
print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering")
|
||||
elif hasattr(cycles_prefs, 'use_metalrt'):
|
||||
cycles_prefs.use_metalrt = True
|
||||
print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering")
|
||||
else:
|
||||
print(f" MetalRT not available")
|
||||
elif best_device_type == 'ONEAPI':
|
||||
# Intel oneAPI - Embree might be available
|
||||
if hasattr(scene.cycles, 'use_embree'):
|
||||
scene.cycles.use_embree = True
|
||||
print(f" Enabled Embree for faster CPU ray tracing")
|
||||
print(f" oneAPI ray tracing active")
|
||||
except Exception as e:
|
||||
print(f" Could not enable ray tracing acceleration: {e}")
|
||||
|
||||
print(f"SUCCESS: Enabled {enabled_count} GPU device(s) for {best_device_type}")
|
||||
gpu_available = True
|
||||
except Exception as e:
|
||||
print(f"ERROR: Failed to enable GPU devices: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Set device based on availability (prefer GPU, fallback to CPU)
|
||||
if gpu_available:
|
||||
scene.cycles.device = 'GPU'
|
||||
print(f"Using GPU for rendering (blend file had: {current_device})")
|
||||
else:
|
||||
scene.cycles.device = 'CPU'
|
||||
print(f"GPU not available, using CPU for rendering (blend file had: {current_device})")
|
||||
|
||||
# Verify device setting
|
||||
if current_engine == 'CYCLES':
|
||||
final_device = scene.cycles.device
|
||||
print(f"Final Cycles device: {final_device}")
|
||||
else:
|
||||
# For other engines (EEVEE, etc.), respect blend file settings
|
||||
print(f"Using {current_engine} engine - respecting blend file settings")
|
||||
|
||||
# Enable GPU acceleration for EEVEE viewport rendering (if using EEVEE)
|
||||
if current_engine == 'EEVEE' or current_engine == 'EEVEE_NEXT':
|
||||
try:
|
||||
if hasattr(bpy.context.preferences.system, 'gpu_backend'):
|
||||
bpy.context.preferences.system.gpu_backend = 'OPENGL'
|
||||
print("Enabled OpenGL GPU backend for EEVEE")
|
||||
except Exception as e:
|
||||
print(f"Could not set EEVEE GPU backend: {e}")
|
||||
|
||||
# Enable GPU acceleration for compositing (if compositing is enabled)
|
||||
try:
|
||||
if scene.use_nodes and hasattr(scene, 'node_tree') and scene.node_tree:
|
||||
if hasattr(scene.node_tree, 'use_gpu_compositing'):
|
||||
scene.node_tree.use_gpu_compositing = True
|
||||
print("Enabled GPU compositing")
|
||||
except Exception as e:
|
||||
print(f"Could not enable GPU compositing: {e}")
|
||||
|
||||
# CRITICAL: Initialize headless rendering to prevent black images
|
||||
# This ensures the render engine is properly initialized before rendering
|
||||
print("Initializing headless rendering context...")
|
||||
try:
|
||||
# Ensure world exists and has proper settings
|
||||
if not scene.world:
|
||||
# Create a default world if none exists
|
||||
world = bpy.data.worlds.new("World")
|
||||
scene.world = world
|
||||
print("Created default world")
|
||||
|
||||
# Ensure world has a background shader (not just black)
|
||||
if scene.world:
|
||||
# Enable nodes if not already enabled
|
||||
if not scene.world.use_nodes:
|
||||
scene.world.use_nodes = True
|
||||
print("Enabled world nodes")
|
||||
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
# Find or create background shader
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
|
||||
if not bg_shader:
|
||||
bg_shader = world_nodes.nodes.new(type='ShaderNodeBackground')
|
||||
# Connect to output
|
||||
output = world_nodes.nodes.get('World Output')
|
||||
if not output:
|
||||
output = world_nodes.nodes.new(type='ShaderNodeOutputWorld')
|
||||
output.name = 'World Output'
|
||||
if output and bg_shader:
|
||||
# Connect background to surface input
|
||||
if 'Surface' in output.inputs and 'Background' in bg_shader.outputs:
|
||||
world_nodes.links.new(bg_shader.outputs['Background'], output.inputs['Surface'])
|
||||
print("Created background shader for world")
|
||||
|
||||
# Ensure background has some color (not pure black)
|
||||
if bg_shader:
|
||||
# Only set if it's pure black (0,0,0)
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
# Set to a very dark gray instead of pure black
|
||||
bg_shader.inputs['Color'].default_value = (0.01, 0.01, 0.01, 1.0)
|
||||
print("Adjusted world background color to prevent black renders")
|
||||
else:
|
||||
# Fallback: use legacy world color if nodes aren't working
|
||||
if hasattr(scene.world, 'color'):
|
||||
color = scene.world.color
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
scene.world.color = (0.01, 0.01, 0.01)
|
||||
print("Adjusted legacy world color to prevent black renders")
|
||||
|
||||
# For EEVEE, force viewport update to initialize render engine
|
||||
if current_engine in ['EEVEE', 'EEVEE_NEXT']:
|
||||
# Force EEVEE to update its internal state
|
||||
try:
|
||||
# Update depsgraph to ensure everything is initialized
|
||||
depsgraph = bpy.context.evaluated_depsgraph_get()
|
||||
if depsgraph:
|
||||
# Force update
|
||||
depsgraph.update()
|
||||
print("Forced EEVEE depsgraph update for headless rendering")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not force EEVEE update: {e}")
|
||||
|
||||
# Ensure EEVEE settings are applied
|
||||
try:
|
||||
# Force a material update to ensure shaders are compiled
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'MESH' and obj.data.materials:
|
||||
for mat in obj.data.materials:
|
||||
if mat and mat.use_nodes:
|
||||
# Touch the material to force update
|
||||
mat.use_nodes = mat.use_nodes
|
||||
print("Forced material updates for EEVEE")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not update materials: {e}")
|
||||
|
||||
# For Cycles, ensure proper initialization
|
||||
if current_engine == 'CYCLES':
|
||||
# Ensure samples are set (even if 1 for preview)
|
||||
if not hasattr(scene.cycles, 'samples') or scene.cycles.samples < 1:
|
||||
scene.cycles.samples = 1
|
||||
print("Set minimum Cycles samples")
|
||||
|
||||
# Check for lights in the scene
|
||||
lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
|
||||
print(f"Found {len(lights)} light(s) in scene")
|
||||
if len(lights) == 0:
|
||||
print("WARNING: No lights found in scene - rendering may be black!")
|
||||
print(" Consider adding lights or ensuring world background emits light")
|
||||
|
||||
# Ensure world background emits light (critical for Cycles)
|
||||
if scene.world and scene.world.use_nodes:
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
|
||||
if bg_shader:
|
||||
# Check and set strength - Cycles needs this to emit light!
|
||||
if hasattr(bg_shader.inputs, 'Strength'):
|
||||
strength = bg_shader.inputs['Strength'].default_value
|
||||
if strength <= 0.0:
|
||||
bg_shader.inputs['Strength'].default_value = 1.0
|
||||
print("Set world background strength to 1.0 for Cycles lighting")
|
||||
else:
|
||||
print(f"World background strength: {strength}")
|
||||
# Also ensure color is not pure black
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
bg_shader.inputs['Color'].default_value = (1.0, 1.0, 1.0, 1.0)
|
||||
print("Set world background color to white for Cycles lighting")
|
||||
|
||||
# Check film_transparent setting - if enabled, background will be transparent/black
|
||||
if hasattr(scene.cycles, 'film_transparent') and scene.cycles.film_transparent:
|
||||
print("WARNING: film_transparent is enabled - background will be transparent")
|
||||
print(" If you see black renders, try disabling film_transparent")
|
||||
|
||||
# Force Cycles to update/compile materials and shaders
|
||||
try:
|
||||
# Update depsgraph to ensure everything is initialized
|
||||
depsgraph = bpy.context.evaluated_depsgraph_get()
|
||||
if depsgraph:
|
||||
depsgraph.update()
|
||||
print("Forced Cycles depsgraph update")
|
||||
|
||||
# Force material updates to ensure shaders are compiled
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'MESH' and obj.data.materials:
|
||||
for mat in obj.data.materials:
|
||||
if mat and mat.use_nodes:
|
||||
# Force material update
|
||||
mat.use_nodes = mat.use_nodes
|
||||
print("Forced Cycles material updates")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not force Cycles updates: {e}")
|
||||
|
||||
# Verify device is actually set correctly
|
||||
if hasattr(scene.cycles, 'device'):
|
||||
actual_device = scene.cycles.device
|
||||
print(f"Cycles device setting: {actual_device}")
|
||||
if actual_device == 'GPU':
|
||||
# Try to verify GPU is actually available
|
||||
try:
|
||||
prefs = bpy.context.preferences
|
||||
cycles_prefs = prefs.addons['cycles'].preferences
|
||||
devices = cycles_prefs.devices
|
||||
enabled_devices = [d for d in devices if d.use]
|
||||
if len(enabled_devices) == 0:
|
||||
print("WARNING: GPU device set but no GPU devices are enabled!")
|
||||
print(" Falling back to CPU may cause issues")
|
||||
except Exception as e:
|
||||
print(f"Could not verify GPU devices: {e}")
|
||||
|
||||
# Ensure camera exists and is active
|
||||
if scene.camera is None:
|
||||
# Find first camera in scene
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'CAMERA':
|
||||
scene.camera = obj
|
||||
print(f"Set active camera: {obj.name}")
|
||||
break
|
||||
|
||||
print("Headless rendering initialization complete")
|
||||
except Exception as e:
|
||||
print(f"Warning: Headless rendering initialization had issues: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Final verification before rendering
|
||||
print("\n=== Pre-render verification ===")
|
||||
try:
|
||||
scene = bpy.context.scene
|
||||
print(f"Render engine: {scene.render.engine}")
|
||||
print(f"Active camera: {scene.camera.name if scene.camera else 'None'}")
|
||||
|
||||
if scene.render.engine == 'CYCLES':
|
||||
print(f"Cycles device: {scene.cycles.device}")
|
||||
print(f"Cycles samples: {scene.cycles.samples}")
|
||||
lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
|
||||
print(f"Lights in scene: {len(lights)}")
|
||||
if scene.world:
|
||||
if scene.world.use_nodes:
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
if bg_shader:
|
||||
if hasattr(bg_shader.inputs, 'Strength'):
|
||||
strength = bg_shader.inputs['Strength'].default_value
|
||||
print(f"World background strength: {strength}")
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
print(f"World background color: ({color[0]:.2f}, {color[1]:.2f}, {color[2]:.2f})")
|
||||
else:
|
||||
print("World exists but nodes are disabled")
|
||||
else:
|
||||
print("WARNING: No world in scene!")
|
||||
|
||||
print("=== Verification complete ===\n")
|
||||
except Exception as e:
|
||||
print(f"Warning: Verification failed: {e}")
|
||||
|
||||
print("Device configuration complete - blend file settings preserved, device optimized")
|
||||
sys.stdout.flush()
|
||||
|
||||
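The device-selection pass in this template walks OPTIX, CUDA, HIP, ONEAPI and METAL in order of preference, enables the first GPU family it finds, and disables CPU devices. A small standalone sketch for inspecting what Cycles actually reports on a given runner (run inside Blender, e.g. with --background and --python-expr; it uses the same preferences API as the template and changes nothing beyond the backend being queried):

import bpy

prefs = bpy.context.preferences.addons['cycles'].preferences
for backend in ('OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL'):
    try:
        prefs.compute_device_type = backend  # raises TypeError if this build lacks the backend
    except TypeError:
        continue
    prefs.refresh_devices()
    for dev in prefs.devices:
        state = "enabled" if dev.use else "disabled"
        print(f"{backend}: {dev.type} {dev.name} ({state})")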
pkg/scripts/scripts/unhide_objects.py (new file, 29 lines)
@@ -0,0 +1,29 @@
# Fix objects and collections hidden from render
|
||||
vl = bpy.context.view_layer
|
||||
|
||||
# 1. Objects hidden in view layer
|
||||
print("Checking for objects hidden from render that need to be enabled...")
|
||||
try:
|
||||
for obj in bpy.data.objects:
|
||||
if obj.hide_get(view_layer=vl):
|
||||
if any(k in obj.name.lower() for k in ["scrotum","cage","genital","penis","dick","collision","body.001","couch"]):
|
||||
obj.hide_set(False, view_layer=vl)
|
||||
print("Enabled object:", obj.name)
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check/fix hidden render objects: {e}")
|
||||
|
||||
# 2. Collections disabled in renders OR set to Holdout (the final killer)
|
||||
print("Checking for collections hidden from render that need to be enabled...")
|
||||
try:
|
||||
for col in bpy.data.collections:
|
||||
if col.hide_render or (vl.layer_collection.children.get(col.name) and vl.layer_collection.children[col.name].exclude):
if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision"]):
|
||||
col.hide_render = False
|
||||
if col.name in vl.layer_collection.children:
|
||||
vl.layer_collection.children[col.name].exclude = False
|
||||
vl.layer_collection.children[col.name].holdout = False
|
||||
vl.layer_collection.children[col.name].indirect_only = False
|
||||
print("Enabled collection:", col.name)
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check/fix hidden render collections: {e}")
|
||||
|
||||
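The snippet above force-enables objects and collections whose names match a hard-coded keyword list. A dry-run sketch for previewing what it would touch, using the same visibility APIs but changing nothing (the keyword tuple here is an illustrative subset, not the list shipped above):

import bpy

KEYWORDS = ("cage", "collision", "hidden", "private")  # illustrative subset

vl = bpy.context.view_layer
for obj in bpy.data.objects:
    if obj.hide_get(view_layer=vl) and any(k in obj.name.lower() for k in KEYWORDS):
        print("would unhide object:", obj.name)
for col in bpy.data.collections:
    child = vl.layer_collection.children.get(col.name)
    if (col.hide_render or (child and child.exclude)) and any(k in col.name.lower() for k in KEYWORDS):
        print("would unhide collection:", col.name)

As in the snippet itself, only top-level collections of the view layer are reachable through layer_collection.children; nested collections would need a recursive walk.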
@@ -140,6 +140,8 @@ type CreateJobRequest struct {
|
||||
AllowParallelRunners *bool `json:"allow_parallel_runners,omitempty"` // Optional for render jobs, defaults to true
|
||||
RenderSettings *RenderSettings `json:"render_settings,omitempty"` // Optional: Override blend file render settings
|
||||
UploadSessionID *string `json:"upload_session_id,omitempty"` // Optional: Session ID from file upload
|
||||
UnhideObjects *bool `json:"unhide_objects,omitempty"` // Optional: Enable unhide tweaks for objects/collections
|
||||
EnableExecution *bool `json:"enable_execution,omitempty"` // Optional: Enable auto-execution in Blender (adds --enable-autoexec flag, defaults to false)
|
||||
}
|
||||
|
||||
// UpdateJobProgressRequest represents a request to update job progress
|
||||
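For reference, a sketch of a job-submission body exercising the new CreateJobRequest fields (keys follow the json tags above; the URL, port, and the exact shape of render_settings are assumptions about the deployment, not part of this change):

import json
import urllib.request

payload = {
    "upload_session_id": "session-123",   # hypothetical session from a prior upload
    "allow_parallel_runners": True,
    "unhide_objects": True,
    "enable_execution": False,
    "render_settings": {                  # assumed to mirror the metadata's render_settings shape
        "engine": "cycles",
        "engine_settings": {"samples": 128},
    },
}
req = urllib.request.Request(
    "http://localhost:8080/api/jobs",     # assumed endpoint path
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read().decode())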
@@ -227,9 +229,11 @@ type TaskLogEntry struct {
|
||||
type BlendMetadata struct {
|
||||
FrameStart int `json:"frame_start"`
|
||||
FrameEnd int `json:"frame_end"`
|
||||
RenderSettings RenderSettings `json:"render_settings"`
|
||||
SceneInfo SceneInfo `json:"scene_info"`
|
||||
MissingFilesInfo *MissingFilesInfo `json:"missing_files_info,omitempty"`
|
||||
RenderSettings RenderSettings `json:"render_settings"`
|
||||
SceneInfo SceneInfo `json:"scene_info"`
|
||||
MissingFilesInfo *MissingFilesInfo `json:"missing_files_info,omitempty"`
|
||||
UnhideObjects *bool `json:"unhide_objects,omitempty"` // Enable unhide tweaks for objects/collections
|
||||
EnableExecution *bool `json:"enable_execution,omitempty"` // Enable auto-execution in Blender (adds --enable-autoexec flag, defaults to false)
|
||||
}
|
||||
|
||||
// MissingFilesInfo represents information about missing files/addons
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { useState } from 'react';
|
||||
|
||||
export default function FileExplorer({ files, onDownload, onPreview, isImageFile }) {
|
||||
const [expandedPaths, setExpandedPaths] = useState(new Set());
|
||||
const [expandedPaths, setExpandedPaths] = useState(new Set()); // Root folder collapsed by default
|
||||
|
||||
// Build directory tree from file paths
|
||||
const buildTree = (files) => {
|
||||
@@ -70,7 +70,7 @@ export default function FileExplorer({ files, onDownload, onPreview, isImageFile
|
||||
const file = item.file;
|
||||
const isImage = isImageFile && isImageFile(file.file_name);
|
||||
const sizeMB = (file.file_size / 1024 / 1024).toFixed(2);
|
||||
const isArchive = file.file_name.endsWith('.tar.gz') || file.file_name.endsWith('.zip');
|
||||
const isArchive = file.file_name.endsWith('.tar') || file.file_name.endsWith('.zip');
|
||||
|
||||
return (
|
||||
<div key={fullPath} className="flex items-center justify-between py-1.5 hover:bg-gray-800/50 rounded px-2" style={{ paddingLeft: `${indent + 8}px` }}>
|
||||
@@ -108,10 +108,13 @@ export default function FileExplorer({ files, onDownload, onPreview, isImageFile
|
||||
return (
|
||||
<div key={fullPath}>
|
||||
<div
|
||||
className="flex items-center gap-2 py-1 hover:bg-gray-800/50 rounded px-2 cursor-pointer"
|
||||
className="flex items-center gap-2 py-1.5 hover:bg-gray-800/50 rounded px-2 cursor-pointer select-none"
|
||||
style={{ paddingLeft: `${indent + 8}px` }}
|
||||
onClick={() => hasChildren && togglePath(fullPath)}
|
||||
>
|
||||
<span className="text-gray-400 text-xs w-4 flex items-center justify-center">
|
||||
{hasChildren ? (isExpanded ? '▼' : '▶') : '•'}
|
||||
</span>
|
||||
<span className="text-gray-500 text-sm">
|
||||
{hasChildren ? (isExpanded ? '📂' : '📁') : '📁'}
|
||||
</span>
|
||||
@@ -123,7 +126,7 @@ export default function FileExplorer({ files, onDownload, onPreview, isImageFile
|
||||
)}
|
||||
</div>
|
||||
{hasChildren && isExpanded && (
|
||||
<div>
|
||||
<div className="ml-2">
|
||||
{renderTree(item.children, level + 1, fullPath)}
|
||||
</div>
|
||||
)}
|
||||
@@ -143,10 +146,34 @@ export default function FileExplorer({ files, onDownload, onPreview, isImageFile
|
||||
);
|
||||
}
|
||||
|
||||
// Wrap tree in a root folder
|
||||
const rootExpanded = expandedPaths.has('');
|
||||
|
||||
return (
|
||||
<div className="bg-gray-900 rounded-lg border border-gray-700 p-3">
|
||||
<div className="space-y-1">
|
||||
{renderTree(tree)}
|
||||
<div>
|
||||
<div
|
||||
className="flex items-center gap-2 py-1.5 hover:bg-gray-800/50 rounded px-2 cursor-pointer select-none"
|
||||
onClick={() => togglePath('')}
|
||||
>
|
||||
<span className="text-gray-400 text-xs w-4 flex items-center justify-center">
|
||||
{rootExpanded ? '▼' : '▶'}
|
||||
</span>
|
||||
<span className="text-gray-500 text-sm">
|
||||
{rootExpanded ? '📂' : '📁'}
|
||||
</span>
|
||||
<span className="text-gray-300 text-sm font-medium">Files</span>
|
||||
<span className="text-gray-500 text-xs ml-2">
|
||||
({Object.keys(tree).length})
|
||||
</span>
|
||||
</div>
|
||||
{rootExpanded && (
|
||||
<div className="ml-2">
|
||||
{renderTree(tree)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { useState, useEffect, useRef } from 'react';
|
||||
import { jobs } from '../utils/api';
|
||||
import { jobs, REQUEST_SUPERSEDED } from '../utils/api';
|
||||
import VideoPlayer from './VideoPlayer';
|
||||
import FileExplorer from './FileExplorer';
|
||||
|
||||
@@ -18,19 +18,53 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
const [streaming, setStreaming] = useState(false);
|
||||
const [previewImage, setPreviewImage] = useState(null); // { url, fileName } or null
|
||||
const wsRef = useRef(null);
|
||||
const jobWsRef = useRef(null); // Separate ref for job WebSocket
|
||||
const logContainerRefs = useRef({}); // Refs for each step's log container
|
||||
const shouldAutoScrollRefs = useRef({}); // Auto-scroll state per step
|
||||
|
||||
useEffect(() => {
|
||||
loadDetails();
|
||||
const interval = setInterval(loadDetails, 2000);
|
||||
return () => {
|
||||
clearInterval(interval);
|
||||
if (wsRef.current) {
|
||||
wsRef.current.close();
|
||||
// Use WebSocket for real-time updates instead of polling
|
||||
if (jobDetails.status === 'running' || jobDetails.status === 'pending' || !jobDetails.status) {
|
||||
connectJobWebSocket();
|
||||
return () => {
|
||||
if (jobWsRef.current) {
|
||||
try {
|
||||
jobWsRef.current.close();
|
||||
} catch (e) {
|
||||
// Ignore errors when closing
|
||||
}
|
||||
jobWsRef.current = null;
|
||||
}
|
||||
if (wsRef.current) {
|
||||
try {
|
||||
wsRef.current.close();
|
||||
} catch (e) {
|
||||
// Ignore errors when closing
|
||||
}
|
||||
wsRef.current = null;
|
||||
}
|
||||
};
|
||||
} else {
|
||||
// Job is completed/failed/cancelled - close WebSocket
|
||||
if (jobWsRef.current) {
|
||||
try {
|
||||
jobWsRef.current.close();
|
||||
} catch (e) {
|
||||
// Ignore errors when closing
|
||||
}
|
||||
jobWsRef.current = null;
|
||||
}
|
||||
};
|
||||
}, [job.id]);
|
||||
if (wsRef.current) {
|
||||
try {
|
||||
wsRef.current.close();
|
||||
} catch (e) {
|
||||
// Ignore errors when closing
|
||||
}
|
||||
wsRef.current = null;
|
||||
}
|
||||
}
|
||||
}, [job.id, jobDetails.status]);
|
||||
|
||||
useEffect(() => {
|
||||
// Load logs and steps for all running tasks
|
||||
@@ -83,14 +117,45 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
|
||||
const loadDetails = async () => {
|
||||
try {
|
||||
const [details, fileList, taskList] = await Promise.all([
|
||||
setLoading(true);
|
||||
// Use summary endpoint for tasks initially - much faster
|
||||
const [details, fileList, taskListResult] = await Promise.all([
|
||||
jobs.get(job.id),
|
||||
jobs.getFiles(job.id),
|
||||
jobs.getTasks(job.id),
|
||||
jobs.getFiles(job.id, { limit: 50 }), // Only load first page of files
|
||||
jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame_start:asc' }), // Use summary endpoint
|
||||
]);
|
||||
setJobDetails(details);
|
||||
setFiles(fileList);
|
||||
setTasks(taskList);
|
||||
|
||||
// Handle paginated file response - check for superseded sentinel
|
||||
if (fileList === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
const fileData = fileList?.data || fileList;
|
||||
setFiles(Array.isArray(fileData) ? fileData : []);
|
||||
|
||||
// Handle paginated task summary response - check for superseded sentinel
|
||||
if (taskListResult === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
const taskSummaryData = taskListResult?.data || taskListResult; // renamed to avoid shadowing the taskData state used below
const taskSummaries = Array.isArray(taskSummaryData) ? taskSummaryData : [];
|
||||
|
||||
// Convert summaries to task-like objects for display
|
||||
const tasksForDisplay = taskSummaries.map(summary => ({
|
||||
id: summary.id,
|
||||
job_id: job.id,
|
||||
frame_start: summary.frame_start,
|
||||
frame_end: summary.frame_end,
|
||||
status: summary.status,
|
||||
task_type: summary.task_type,
|
||||
runner_id: summary.runner_id,
|
||||
// These will be loaded on expand
|
||||
current_step: null,
|
||||
retry_count: 0,
|
||||
max_retries: 3,
|
||||
created_at: new Date().toISOString(),
|
||||
}));
|
||||
setTasks(Array.isArray(tasksForDisplay) ? tasksForDisplay : []);
|
||||
|
||||
// Fetch context archive contents separately (may not exist for old jobs)
|
||||
try {
|
||||
@@ -101,26 +166,27 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
setContextFiles([]);
|
||||
}
|
||||
|
||||
// Only load task data (logs/steps) for tasks that don't have data yet
|
||||
// This prevents overwriting logs that are being streamed via WebSocket
|
||||
// Once we have logs for a task, we rely on WebSocket for new logs
|
||||
// Only load task data (logs/steps) for expanded tasks
|
||||
// Don't auto-load for all tasks - wait for user to expand
|
||||
if (details.status === 'running') {
|
||||
taskList.forEach(task => {
|
||||
const existingData = taskData[task.id];
|
||||
// Only fetch logs via HTTP if we don't have any logs yet
|
||||
// Once we have logs, WebSocket will handle new ones
|
||||
if (!existingData || !existingData.logs || existingData.logs.length === 0) {
|
||||
loadTaskData(task.id);
|
||||
} else if (!existingData.steps || existingData.steps.length === 0) {
|
||||
// If we have logs but no steps, fetch steps only
|
||||
loadTaskStepsOnly(task.id);
|
||||
// Only load data for tasks that are expanded
|
||||
tasksForDisplay.forEach(task => {
|
||||
if (expandedTasks.has(task.id)) {
|
||||
const existingData = taskData[task.id];
|
||||
// Only fetch logs via HTTP if we don't have any logs yet
|
||||
if (!existingData || !existingData.logs || existingData.logs.length === 0) {
|
||||
loadTaskData(task.id);
|
||||
} else if (!existingData.steps || existingData.steps.length === 0) {
|
||||
loadTaskStepsOnly(task.id);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Check if there's an MP4 output file
|
||||
const mp4File = fileList.find(
|
||||
(f) => f.file_type === 'output' && f.file_name.endsWith('.mp4')
|
||||
const fileArray = Array.isArray(fileData) ? fileData : [];
|
||||
const mp4File = fileArray.find(
|
||||
(f) => f.file_type === 'output' && f.file_name && f.file_name.endsWith('.mp4')
|
||||
);
|
||||
if (mp4File) {
|
||||
setVideoUrl(jobs.getVideoUrl(job.id));
|
||||
@@ -138,23 +204,39 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
|
||||
const loadTaskData = async (taskId) => {
|
||||
try {
|
||||
const [logs, steps] = await Promise.all([
|
||||
jobs.getTaskLogs(job.id, taskId),
|
||||
console.log(`Loading task data for task ${taskId}...`);
|
||||
const [logsResult, steps] = await Promise.all([
|
||||
jobs.getTaskLogs(job.id, taskId, { limit: 1000 }), // Increased limit for completed tasks
|
||||
jobs.getTaskSteps(job.id, taskId),
|
||||
]);
|
||||
|
||||
// Check for superseded sentinel
|
||||
if (logsResult === REQUEST_SUPERSEDED || steps === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
|
||||
console.log(`Task ${taskId} logs result:`, logsResult);
|
||||
|
||||
// Handle new format with logs, last_id, limit
|
||||
const logs = logsResult.logs || logsResult;
|
||||
const lastId = logsResult.last_id;
|
||||
|
||||
console.log(`Task ${taskId} - loaded ${Array.isArray(logs) ? logs.length : 0} logs, ${Array.isArray(steps) ? steps.length : 0} steps`);
|
||||
|
||||
setTaskData(prev => {
|
||||
const current = prev[taskId] || { steps: [], logs: [] };
|
||||
const current = prev[taskId] || { steps: [], logs: [], lastId: 0 };
|
||||
// Merge logs instead of replacing - this preserves WebSocket-streamed logs
|
||||
// Deduplicate by log ID
|
||||
const existingLogIds = new Set((current.logs || []).map(l => l.id));
|
||||
const newLogs = (logs || []).filter(l => !existingLogIds.has(l.id));
|
||||
const newLogs = (Array.isArray(logs) ? logs : []).filter(l => !existingLogIds.has(l.id));
|
||||
const mergedLogs = [...(current.logs || []), ...newLogs].sort((a, b) => a.id - b.id);
|
||||
|
||||
return {
|
||||
...prev,
|
||||
[taskId]: {
|
||||
steps: steps || current.steps, // Steps can be replaced (they don't change often)
|
||||
logs: mergedLogs
|
||||
steps: steps || current.steps,
|
||||
logs: mergedLogs,
|
||||
lastId: lastId || current.lastId
|
||||
}
|
||||
};
|
||||
});
|
||||
@@ -166,6 +248,10 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
const loadTaskStepsOnly = async (taskId) => {
|
||||
try {
|
||||
const steps = await jobs.getTaskSteps(job.id, taskId);
|
||||
// Check for superseded sentinel
|
||||
if (steps === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
setTaskData(prev => {
|
||||
const current = prev[taskId] || { steps: [], logs: [] };
|
||||
return {
|
||||
@@ -181,9 +267,276 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
}
|
||||
};
|
||||
|
||||
const connectJobWebSocket = () => {
|
||||
try {
|
||||
// Close existing connection if any
|
||||
if (jobWsRef.current) {
|
||||
try {
|
||||
jobWsRef.current.close();
|
||||
} catch (e) {
|
||||
// Ignore errors when closing
|
||||
}
|
||||
jobWsRef.current = null;
|
||||
}
|
||||
|
||||
const ws = jobs.streamJobWebSocket(job.id);
|
||||
jobWsRef.current = ws; // Store reference
|
||||
|
||||
ws.onopen = () => {
|
||||
console.log('Job WebSocket connected for job', job.id);
|
||||
};
|
||||
|
||||
ws.onmessage = (event) => {
|
||||
try {
|
||||
const data = JSON.parse(event.data);
|
||||
console.log('Job WebSocket message received:', data.type, data);
|
||||
|
||||
if (data.type === 'job_update' && data.data) {
|
||||
// Update job details
|
||||
setJobDetails(prev => ({ ...prev, ...data.data }));
|
||||
} else if (data.type === 'task_update' && data.data) {
|
||||
// Update task in list
|
||||
setTasks(prev => {
|
||||
// Ensure prev is always an array
|
||||
const prevArray = Array.isArray(prev) ? prev : [];
|
||||
if (!data.task_id) {
|
||||
console.warn('task_update message missing task_id:', data);
|
||||
return prevArray;
|
||||
}
|
||||
const index = prevArray.findIndex(t => t.id === data.task_id);
|
||||
if (index >= 0) {
|
||||
const updated = [...prevArray];
|
||||
updated[index] = { ...updated[index], ...data.data };
|
||||
return updated;
|
||||
}
|
||||
// If task not found, it might be a new task - reload to be safe
|
||||
if (data.data && (data.data.status === 'running' || data.data.status === 'pending')) {
|
||||
setTimeout(() => {
|
||||
const reloadTasks = async () => {
|
||||
try {
|
||||
const taskListResult = await jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame_start:asc' });
|
||||
// Check for superseded sentinel
|
||||
if (taskListResult === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
const taskData = taskListResult.data || taskListResult;
|
||||
const taskSummaries = Array.isArray(taskData) ? taskData : [];
|
||||
const tasksForDisplay = taskSummaries.map(summary => ({
|
||||
id: summary.id,
|
||||
job_id: job.id,
|
||||
frame_start: summary.frame_start,
|
||||
frame_end: summary.frame_end,
|
||||
status: summary.status,
|
||||
task_type: summary.task_type,
|
||||
runner_id: summary.runner_id,
|
||||
current_step: null,
|
||||
retry_count: 0,
|
||||
max_retries: 3,
|
||||
created_at: new Date().toISOString(),
|
||||
}));
|
||||
setTasks(Array.isArray(tasksForDisplay) ? tasksForDisplay : []);
|
||||
} catch (error) {
|
||||
console.error('Failed to reload tasks:', error);
|
||||
}
|
||||
};
|
||||
reloadTasks();
|
||||
}, 100);
|
||||
}
|
||||
return prevArray;
|
||||
});
|
||||
} else if (data.type === 'task_added' && data.data) {
|
||||
// New task was added - reload task summaries to get the new task
|
||||
console.log('task_added message received, reloading tasks...', data);
|
||||
const reloadTasks = async () => {
|
||||
try {
|
||||
const taskListResult = await jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame_start:asc' });
|
||||
// Check for superseded sentinel
|
||||
if (taskListResult === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
const taskData = taskListResult.data || taskListResult;
|
||||
const taskSummaries = Array.isArray(taskData) ? taskData : [];
|
||||
const tasksForDisplay = taskSummaries.map(summary => ({
|
||||
id: summary.id,
|
||||
job_id: job.id,
|
||||
frame_start: summary.frame_start,
|
||||
frame_end: summary.frame_end,
|
||||
status: summary.status,
|
||||
task_type: summary.task_type,
|
||||
runner_id: summary.runner_id,
|
||||
current_step: null,
|
||||
retry_count: 0,
|
||||
max_retries: 3,
|
||||
created_at: new Date().toISOString(),
|
||||
}));
|
||||
setTasks(Array.isArray(tasksForDisplay) ? tasksForDisplay : []);
|
||||
} catch (error) {
|
||||
console.error('Failed to reload tasks:', error);
|
||||
// Fallback to full reload
|
||||
loadDetails();
|
||||
}
|
||||
};
|
||||
reloadTasks();
|
||||
} else if (data.type === 'tasks_added' && data.data) {
|
||||
// Multiple new tasks were added - reload task summaries
|
||||
console.log('tasks_added message received, reloading tasks...', data);
|
||||
const reloadTasks = async () => {
|
||||
try {
|
||||
const taskListResult = await jobs.getTasksSummary(job.id, { limit: 100, sort: 'frame_start:asc' });
|
||||
// Check for superseded sentinel
|
||||
if (taskListResult === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
const taskData = taskListResult.data || taskListResult;
|
||||
const taskSummaries = Array.isArray(taskData) ? taskData : [];
|
||||
const tasksForDisplay = taskSummaries.map(summary => ({
|
||||
id: summary.id,
|
||||
job_id: job.id,
|
||||
frame_start: summary.frame_start,
|
||||
frame_end: summary.frame_end,
|
||||
status: summary.status,
|
||||
task_type: summary.task_type,
|
||||
runner_id: summary.runner_id,
|
||||
current_step: null,
|
||||
retry_count: 0,
|
||||
max_retries: 3,
|
||||
created_at: new Date().toISOString(),
|
||||
}));
|
||||
setTasks(Array.isArray(tasksForDisplay) ? tasksForDisplay : []);
|
||||
} catch (error) {
|
||||
console.error('Failed to reload tasks:', error);
|
||||
// Fallback to full reload
|
||||
loadDetails();
|
||||
}
|
||||
};
|
||||
reloadTasks();
|
||||
} else if (data.type === 'file_added' && data.data) {
|
||||
// New file was added - reload file list
|
||||
const reloadFiles = async () => {
|
||||
try {
|
||||
const fileList = await jobs.getFiles(job.id, { limit: 50 });
|
||||
// Check for superseded sentinel
|
||||
if (fileList === REQUEST_SUPERSEDED) {
|
||||
return; // Request was superseded, skip this update
|
||||
}
|
||||
const fileData = fileList.data || fileList;
|
||||
setFiles(Array.isArray(fileData) ? fileData : []);
|
||||
} catch (error) {
|
||||
console.error('Failed to reload files:', error);
|
||||
}
|
||||
};
|
||||
reloadFiles();
|
||||
} else if (data.type === 'step_update' && data.data && data.task_id) {
|
||||
// Step was created or updated - update task data
|
||||
console.log('step_update message received:', data);
|
||||
setTaskData(prev => {
|
||||
const taskId = data.task_id;
|
||||
const current = prev[taskId] || { steps: [], logs: [] };
|
||||
const stepData = data.data;
|
||||
|
||||
// Find if step already exists
|
||||
const existingSteps = current.steps || [];
|
||||
const stepIndex = existingSteps.findIndex(s => s.step_name === stepData.step_name);
|
||||
|
||||
let updatedSteps;
|
||||
if (stepIndex >= 0) {
|
||||
// Update existing step
|
||||
updatedSteps = [...existingSteps];
|
||||
updatedSteps[stepIndex] = {
|
||||
...updatedSteps[stepIndex],
|
||||
...stepData,
|
||||
id: stepData.step_id || updatedSteps[stepIndex].id,
|
||||
};
|
||||
} else {
|
||||
// Add new step
|
||||
updatedSteps = [...existingSteps, {
|
||||
id: stepData.step_id,
|
||||
step_name: stepData.step_name,
|
||||
status: stepData.status,
|
||||
duration_ms: stepData.duration_ms,
|
||||
error_message: stepData.error_message,
|
||||
}];
|
||||
}
|
||||
|
||||
return {
|
||||
...prev,
|
||||
[taskId]: {
|
||||
...current,
|
||||
steps: updatedSteps,
|
||||
}
|
||||
};
|
||||
});
|
||||
} else if (data.type === 'connected') {
|
||||
// Connection established
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Failed to parse WebSocket message:', error);
|
||||
}
|
||||
};
|
||||
|
||||
ws.onerror = (error) => {
|
||||
console.error('Job WebSocket error:', {
|
||||
error,
|
||||
readyState: ws.readyState,
|
||||
url: ws.url,
|
||||
jobId: job.id,
|
||||
status: jobDetails.status
|
||||
});
|
||||
// WebSocket errors don't provide much detail, but we can check readyState
|
||||
if (ws.readyState === WebSocket.CLOSED || ws.readyState === WebSocket.CLOSING) {
|
||||
console.warn('Job WebSocket is closed or closing, will attempt reconnect');
|
||||
}
|
||||
};
|
||||
|
||||
ws.onclose = (event) => {
|
||||
console.log('Job WebSocket closed:', {
|
||||
code: event.code,
|
||||
reason: event.reason,
|
||||
wasClean: event.wasClean,
|
||||
jobId: job.id,
|
||||
status: jobDetails.status
|
||||
});
|
||||
jobWsRef.current = null;
|
||||
|
||||
// Code 1006 = Abnormal Closure (connection lost without close frame)
|
||||
// Code 1000 = Normal Closure
|
||||
// Code 1001 = Going Away (server restart, etc.)
|
||||
// We should reconnect for abnormal closures (1006) or unexpected closes
|
||||
const shouldReconnect = !event.wasClean || event.code === 1006 || event.code === 1001;
|
||||
|
||||
// Get current status from state to avoid stale closure
|
||||
const currentStatus = jobDetails.status;
|
||||
const isActiveJob = currentStatus === 'running' || currentStatus === 'pending';
|
||||
|
||||
if (shouldReconnect && isActiveJob) {
|
||||
console.log(`Attempting to reconnect job WebSocket in 2 seconds... (code: ${event.code})`);
|
||||
setTimeout(() => {
|
||||
// Check status again before reconnecting (might have changed)
|
||||
// Use a ref or check the current state directly
|
||||
if ((!jobWsRef.current || jobWsRef.current.readyState === WebSocket.CLOSED)) {
|
||||
// Re-check if job is still active by reading current state
|
||||
// We'll check this in connectJobWebSocket if needed
|
||||
connectJobWebSocket();
|
||||
}
|
||||
}, 2000);
|
||||
} else if (!isActiveJob) {
|
||||
console.log('Job is no longer active, not reconnecting WebSocket');
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Failed to connect job WebSocket:', error);
|
||||
}
|
||||
};
|
||||
|
||||
const startLogStream = (taskIds) => {
|
||||
if (taskIds.length === 0 || streaming) return;
|
||||
|
||||
// Don't start streaming if job is no longer running
|
||||
if (jobDetails.status !== 'running' && jobDetails.status !== 'pending') {
|
||||
console.log('Job is not running, skipping log stream');
|
||||
return;
|
||||
}
|
||||
|
||||
setStreaming(true);
|
||||
// For now, stream the first task's logs (WebSocket supports one task at a time)
|
||||
// In the future, we could have multiple WebSocket connections
|
||||
@@ -219,17 +572,44 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
}
|
||||
};
|
||||
|
||||
ws.onopen = () => {
|
||||
console.log('Log WebSocket connected for task', primaryTaskId);
|
||||
};
|
||||
|
||||
ws.onerror = (error) => {
|
||||
console.error('WebSocket error:', error);
|
||||
console.error('Log WebSocket error:', {
|
||||
error,
|
||||
readyState: ws.readyState,
|
||||
url: ws.url,
|
||||
taskId: primaryTaskId,
|
||||
jobId: job.id
|
||||
});
|
||||
setStreaming(false);
|
||||
};
|
||||
|
||||
ws.onclose = () => {
|
||||
ws.onclose = (event) => {
|
||||
console.log('Log WebSocket closed:', {
|
||||
code: event.code,
|
||||
reason: event.reason,
|
||||
wasClean: event.wasClean,
|
||||
taskId: primaryTaskId,
|
||||
jobId: job.id
|
||||
});
|
||||
setStreaming(false);
|
||||
// Auto-reconnect if job is still running
|
||||
if (jobDetails.status === 'running' && taskIds.length > 0) {
|
||||
wsRef.current = null;
|
||||
|
||||
// Code 1006 = Abnormal Closure (connection lost without close frame)
|
||||
// Code 1000 = Normal Closure
|
||||
// Code 1001 = Going Away (server restart, etc.)
|
||||
const shouldReconnect = !event.wasClean || event.code === 1006 || event.code === 1001;
|
||||
|
||||
// Auto-reconnect if job is still running and close was unexpected
|
||||
if (shouldReconnect && jobDetails.status === 'running' && taskIds.length > 0) {
|
||||
console.log(`Attempting to reconnect log WebSocket in 2 seconds... (code: ${event.code})`);
|
||||
setTimeout(() => {
|
||||
if (jobDetails.status === 'running') {
|
||||
// Check status again before reconnecting (might have changed)
|
||||
// The startLogStream function will check if job is still running
|
||||
if (jobDetails.status === 'running' && taskIds.length > 0) {
|
||||
startLogStream(taskIds);
|
||||
}
|
||||
}, 2000);
|
||||
@@ -243,9 +623,39 @@ export default function JobDetails({ job, onClose, onUpdate }) {
|
||||
newExpanded.delete(taskId);
|
||||
} else {
|
||||
newExpanded.add(taskId);
|
||||
// Load data if not already loaded
|
||||
if (!taskData[taskId]) {
|
||||
// Load full task details if we only have summary
|
||||
const tasksArray = Array.isArray(tasks) ? tasks : [];
|
||||
const currentTask = tasksArray.find(t => t.id === taskId);
|
||||
if (currentTask && !currentTask.created_at) {
|
||||
// This is a summary - fetch full task details
|
||||
try {
|
||||
const fullTasks = await jobs.getTasks(job.id, {
|
||||
limit: 1,
|
||||
// We can't filter by task ID, so we'll get all and find the one we need
|
||||
});
|
||||
const taskData = fullTasks.data || fullTasks;
|
||||
const fullTask = Array.isArray(taskData) ? taskData.find(t => t.id === taskId) : null;
|
||||
if (fullTask) {
|
||||
setTasks(prev => {
|
||||
const prevArray = Array.isArray(prev) ? prev : [];
|
||||
return prevArray.map(t => t.id === taskId ? fullTask : t);
|
||||
});
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Failed to load full task details:', err);
|
||||
}
|
||||
}
|
||||
// Always load logs/steps when expanding a task to ensure we have the latest data
|
||||
// This is especially important for completed tasks that weren't loaded before
|
||||
const existingData = taskData[taskId];
|
||||
const hasLogs = existingData && existingData.logs && existingData.logs.length > 0;
|
||||
const hasSteps = existingData && existingData.steps && existingData.steps.length > 0;
|
||||
|
||||
if (!hasLogs || !hasSteps) {
|
||||
console.log(`Loading task data for task ${taskId} (logs: ${hasLogs}, steps: ${hasSteps})`);
|
||||
await loadTaskData(taskId);
|
||||
} else {
|
||||
console.log(`Task ${taskId} already has ${existingData.logs.length} logs and ${existingData.steps.length} steps, skipping load`);
|
||||
}
|
||||
}
|
||||
setExpandedTasks(newExpanded);
|
||||
|
@@ -1,4 +1,4 @@
import { useState, useEffect } from 'react';
import { useState, useEffect, useRef } from 'react';
import { jobs } from '../utils/api';
import JobDetails from './JobDetails';

@@ -6,17 +6,165 @@ export default function JobList() {
const [jobList, setJobList] = useState([]);
const [loading, setLoading] = useState(true);
const [selectedJob, setSelectedJob] = useState(null);
const [pagination, setPagination] = useState({ total: 0, limit: 50, offset: 0 });
const [hasMore, setHasMore] = useState(true);
const pollingIntervalRef = useRef(null);
const wsRef = useRef(null);

useEffect(() => {
loadJobs();
const interval = setInterval(loadJobs, 5000);
return () => clearInterval(interval);
// Use WebSocket for real-time updates instead of polling
connectWebSocket();
return () => {
if (pollingIntervalRef.current) {
clearInterval(pollingIntervalRef.current);
}
if (wsRef.current) {
try {
wsRef.current.close();
} catch (e) {
// Ignore errors when closing
}
wsRef.current = null;
}
};
}, []);

const loadJobs = async () => {
const connectWebSocket = () => {
try {
const data = await jobs.list();
setJobList(data);
// Close existing connection if any
if (wsRef.current) {
try {
wsRef.current.close();
} catch (e) {
// Ignore errors when closing
}
wsRef.current = null;
}

const ws = jobs.streamJobsWebSocket();
wsRef.current = ws;

ws.onopen = () => {
console.log('Job list WebSocket connected');
};

ws.onmessage = (event) => {
try {
const data = JSON.parse(event.data);
if (data.type === 'job_update' && data.data) {
// Update job in list
setJobList(prev => {
const index = prev.findIndex(j => j.id === data.job_id);
if (index >= 0) {
const updated = [...prev];
updated[index] = { ...updated[index], ...data.data };
return updated;
}
// If job not in current page, reload to get updated list
if (data.data.status === 'completed' || data.data.status === 'failed') {
loadJobs();
}
return prev;
});
} else if (data.type === 'connected') {
// Connection established
}
} catch (error) {
console.error('Failed to parse WebSocket message:', error);
}
};

ws.onerror = (error) => {
console.error('Job list WebSocket error:', {
error,
readyState: ws.readyState,
url: ws.url
});
// WebSocket errors don't provide much detail, but we can check readyState
if (ws.readyState === WebSocket.CLOSED || ws.readyState === WebSocket.CLOSING) {
console.warn('Job list WebSocket is closed or closing, will fallback to polling');
// Fallback to polling on error
startAdaptivePolling();
}
};

ws.onclose = (event) => {
console.log('Job list WebSocket closed:', {
code: event.code,
reason: event.reason,
wasClean: event.wasClean
});
wsRef.current = null;

// Code 1006 = Abnormal Closure (connection lost without close frame)
// Code 1000 = Normal Closure
// Code 1001 = Going Away (server restart, etc.)
// We should reconnect for abnormal closures (1006) or unexpected closes
const shouldReconnect = !event.wasClean || event.code === 1006 || event.code === 1001;

if (shouldReconnect) {
console.log(`Attempting to reconnect job list WebSocket in 2 seconds... (code: ${event.code})`);
setTimeout(() => {
if (wsRef.current === null || (wsRef.current && wsRef.current.readyState === WebSocket.CLOSED)) {
connectWebSocket();
}
}, 2000);
} else {
// Clean close (code 1000) - fallback to polling
console.log('WebSocket closed cleanly, falling back to polling');
startAdaptivePolling();
}
};
} catch (error) {
console.error('Failed to connect WebSocket:', error);
// Fallback to polling
startAdaptivePolling();
}
};

const startAdaptivePolling = () => {
const checkAndPoll = () => {
const hasRunningJobs = jobList.some(job => job.status === 'running' || job.status === 'pending');
const interval = hasRunningJobs ? 5000 : 10000; // 5s for running, 10s for completed

if (pollingIntervalRef.current) {
clearInterval(pollingIntervalRef.current);
}

pollingIntervalRef.current = setInterval(() => {
loadJobs();
}, interval);
};

checkAndPoll();
// Re-check interval when job list changes
const checkInterval = setInterval(checkAndPoll, 5000);
return () => clearInterval(checkInterval);
};

const loadJobs = async (append = false) => {
try {
const offset = append ? pagination.offset + pagination.limit : 0;
const result = await jobs.listSummary({
limit: pagination.limit,
offset,
sort: 'created_at:desc'
});

// Handle both old format (array) and new format (object with data, total, etc.)
const jobsData = result.data || result;
const total = result.total !== undefined ? result.total : jobsData.length;

if (append) {
setJobList(prev => [...prev, ...jobsData]);
setPagination(prev => ({ ...prev, offset, total }));
} else {
setJobList(jobsData);
setPagination({ total, limit: result.limit || pagination.limit, offset: result.offset || 0 });
}

setHasMore(offset + jobsData.length < total);
} catch (error) {
console.error('Failed to load jobs:', error);
} finally {
@@ -24,8 +172,13 @@ export default function JobList() {
}
};

const loadMore = () => {
if (!loading && hasMore) {
loadJobs(true);
}
};

// Keep selectedJob in sync with the job list when it refreshes
// This prevents the selected job from becoming stale when format selection or other actions trigger updates
useEffect(() => {
if (selectedJob && jobList.length > 0) {
const freshJob = jobList.find(j => j.id === selectedJob.id);
@@ -74,7 +227,7 @@ export default function JobList() {
return colors[status] || colors.pending;
};

if (loading) {
if (loading && jobList.length === 0) {
return (
<div className="flex justify-center items-center h-64">
<div className="animate-spin rounded-full h-12 w-12 border-b-2 border-orange-500"></div>
@@ -106,8 +259,10 @@ export default function JobList() {
</div>

<div className="space-y-2 text-sm text-gray-400 mb-4">
<p>Frames: {job.frame_start} - {job.frame_end}</p>
<p>Format: {job.output_format}</p>
{job.frame_start !== undefined && job.frame_end !== undefined && (
<p>Frames: {job.frame_start} - {job.frame_end}</p>
)}
{job.output_format && <p>Format: {job.output_format}</p>}
<p>Created: {new Date(job.created_at).toLocaleString()}</p>
</div>

@@ -126,7 +281,15 @@ export default function JobList() {

<div className="flex gap-2">
<button
onClick={() => setSelectedJob(job)}
onClick={() => {
// Fetch full job details when viewing
jobs.get(job.id).then(fullJob => {
setSelectedJob(fullJob);
}).catch(err => {
console.error('Failed to load job details:', err);
setSelectedJob(job); // Fallback to summary
});
}}
className="flex-1 px-4 py-2 bg-orange-600 text-white rounded-lg hover:bg-orange-500 transition-colors font-medium"
>
View Details
@@ -153,6 +316,18 @@ export default function JobList() {
))}
</div>

{hasMore && (
<div className="flex justify-center mt-6">
<button
onClick={loadMore}
disabled={loading}
className="px-6 py-2 bg-gray-700 text-gray-200 rounded-lg hover:bg-gray-600 transition-colors font-medium disabled:opacity-50"
>
{loading ? 'Loading...' : 'Load More'}
</button>
</div>
)}

{selectedJob && (
<JobDetails
job={selectedJob}
@@ -163,4 +338,3 @@ export default function JobList() {
</>
);
}

@@ -3,7 +3,7 @@ import { jobs } from '../utils/api';
import JobDetails from './JobDetails';

export default function JobSubmission({ onSuccess }) {
const [step, setStep] = useState(1); // 1 = upload & extract metadata, 2 = configure & submit
const [step, setStep] = useState(1); // 1 = upload & extract metadata, 2 = missing addons (if any), 3 = configure & submit
const [formData, setFormData] = useState({
name: '',
frame_start: 1,
@@ -11,6 +11,8 @@ export default function JobSubmission({ onSuccess }) {
output_format: 'PNG',
allow_parallel_runners: true,
render_settings: null, // Will contain engine settings
unhide_objects: false, // Unhide objects/collections tweak
enable_execution: false, // Enable auto-execution in Blender
});
const [showAdvancedSettings, setShowAdvancedSettings] = useState(false);
const [file, setFile] = useState(null);
@@ -25,6 +27,7 @@ export default function JobSubmission({ onSuccess }) {
const [isUploading, setIsUploading] = useState(false);
const [blendFiles, setBlendFiles] = useState([]); // For ZIP files with multiple blend files
const [selectedMainBlend, setSelectedMainBlend] = useState('');
const [confirmedMissingFiles, setConfirmedMissingFiles] = useState(false); // Confirmation for missing files

// Use refs to track cancellation state across re-renders
const isCancelledRef = useRef(false);
@@ -71,10 +74,15 @@ export default function JobSubmission({ onSuccess }) {
// Upload file to new endpoint (no job required)
const result = await jobs.uploadFileForJobCreation(selectedFile, (progress) => {
setUploadProgress(progress);
// After upload completes, show processing state
if (progress >= 100) {
setMetadataStatus('processing');
}
}, selectedMainBlend || undefined);

// Keep showing processing state until we have the result
setMetadataStatus('processing');
setUploadProgress(100);
setIsUploading(false);

// Store session ID for later use when creating the job
if (result.session_id) {
@@ -88,6 +96,9 @@ export default function JobSubmission({ onSuccess }) {
return;
}

// Upload and processing complete
setIsUploading(false);

// If metadata was extracted, use it
if (result.metadata_extracted && result.metadata) {
setMetadata(result.metadata);
@@ -141,25 +152,33 @@ export default function JobSubmission({ onSuccess }) {
return;
}

try {
setIsUploading(true);
setUploadProgress(0);
setMetadataStatus('extracting');
try {
setIsUploading(true);
setUploadProgress(0);
setMetadataStatus('extracting');

// Re-upload with selected main blend file
const result = await jobs.uploadFileForJobCreation(file, (progress) => {
setUploadProgress(progress);
}, selectedMainBlend);
// Re-upload with selected main blend file
const result = await jobs.uploadFileForJobCreation(file, (progress) => {
setUploadProgress(progress);
// After upload completes, show processing state
if (progress >= 100) {
setMetadataStatus('processing');
}
}, selectedMainBlend);

setUploadProgress(100);
setIsUploading(false);
setBlendFiles([]);
// Keep showing processing state until we have the result
setMetadataStatus('processing');
setUploadProgress(100);
setBlendFiles([]);

// Store session ID
if (result.session_id) {
setUploadSessionId(result.session_id);
}

// Upload and processing complete
setIsUploading(false);

// If metadata was extracted, use it
if (result.metadata_extracted && result.metadata) {
setMetadata(result.metadata);
@@ -202,17 +221,43 @@ export default function JobSubmission({ onSuccess }) {

const handleContinueToStep2 = () => {
if (metadataStatus === 'completed' || metadataStatus === 'error') {
setStep(2);
// Check if there are missing addons - if so, go to addon step, otherwise skip to config
const hasMissingAddons = metadata?.missing_files_info?.missing_addons &&
metadata.missing_files_info.missing_addons.length > 0;
if (hasMissingAddons) {
setStep(2); // Step 2 = missing addons
} else {
setStep(3); // Step 3 = configure & submit
}
}
};

const handleContinueToStep3 = () => {
setStep(3); // Continue from addons step to config step
};

const handleBackToStep1 = () => {
setStep(1);
};

const handleBackToStep2 = () => {
setStep(2);
};

const handleSubmit = async (e) => {
e.preventDefault();
setError('');

// Check if there are missing files/addons and require confirmation
const hasMissingFiles = metadata?.missing_files_info?.has_missing &&
metadata.missing_files_info.missing_addons &&
metadata.missing_files_info.missing_addons.length > 0;

if (hasMissingFiles && !confirmedMissingFiles) {
setError('Please confirm that you want to proceed with missing addons');
return;
}

setSubmitting(true);

try {
@@ -246,6 +291,8 @@ export default function JobSubmission({ onSuccess }) {
allow_parallel_runners: formData.allow_parallel_runners,
render_settings: renderSettings,
upload_session_id: uploadSessionId || undefined, // Pass session ID to move context archive
unhide_objects: formData.unhide_objects || undefined, // Pass unhide toggle
enable_execution: formData.enable_execution || undefined, // Pass enable execution toggle
});

// Fetch the full job details
@@ -269,6 +316,8 @@ export default function JobSubmission({ onSuccess }) {
output_format: 'PNG',
allow_parallel_runners: true,
render_settings: null,
unhide_objects: false,
enable_execution: false,
});
setShowAdvancedSettings(false);
setFile(null);
@@ -304,10 +353,21 @@ export default function JobSubmission({ onSuccess }) {
</div>
<span>Upload & Extract Metadata</span>
</div>
{metadata?.missing_files_info?.missing_addons && metadata.missing_files_info.missing_addons.length > 0 && (
<>
<div className="w-8 h-0.5 bg-gray-700"></div>
<div className={`flex items-center gap-2 ${step >= 2 ? 'text-orange-500 font-medium' : 'text-gray-500'}`}>
<div className={`w-6 h-6 rounded-full flex items-center justify-center ${step >= 2 ? 'bg-orange-600 text-white' : 'bg-gray-700'}`}>
{step > 2 ? '✓' : '2'}
</div>
<span>Missing Addons</span>
</div>
</>
)}
<div className="w-8 h-0.5 bg-gray-700"></div>
<div className={`flex items-center gap-2 ${step >= 2 ? 'text-orange-500 font-medium' : 'text-gray-500'}`}>
<div className={`w-6 h-6 rounded-full flex items-center justify-center ${step >= 2 ? 'bg-orange-600 text-white' : 'bg-gray-700'}`}>
2
<div className={`flex items-center gap-2 ${step >= 3 ? 'text-orange-500 font-medium' : 'text-gray-500'}`}>
<div className={`w-6 h-6 rounded-full flex items-center justify-center ${step >= 3 ? 'bg-orange-600 text-white' : 'bg-gray-700'}`}>
{step > 3 ? '✓' : (metadata?.missing_files_info?.missing_addons && metadata.missing_files_info.missing_addons.length > 0 ? '3' : '2')}
</div>
<span>Configure & Submit</span>
</div>
@@ -370,9 +430,9 @@ export default function JobSubmission({ onSuccess }) {
</button>
</div>
)}
{(isUploading || metadataStatus === 'extracting') && (
{(isUploading || metadataStatus === 'extracting' || metadataStatus === 'processing') && (
<div className="mt-2 p-3 bg-orange-400/20 border border-orange-400/50 rounded-lg text-orange-400 text-sm">
{isUploading ? (
{isUploading && uploadProgress < 100 ? (
<div className="space-y-2">
<div className="flex items-center justify-between text-xs">
<span>Uploading file...</span>
@@ -385,6 +445,22 @@ export default function JobSubmission({ onSuccess }) {
></div>
</div>
</div>
) : metadataStatus === 'processing' ? (
<div className="space-y-2">
<div className="flex items-center justify-between text-xs">
<span>Processing file and extracting metadata...</span>
<span>{Math.round(uploadProgress)}%</span>
</div>
<div className="w-full bg-gray-700 rounded-full h-2">
<div
className="bg-orange-500 h-2 rounded-full transition-all duration-300"
style={{ width: `${uploadProgress}%` }}
></div>
</div>
<div className="text-xs text-orange-400/80 mt-1">
This may take a moment for large files...
</div>
</div>
) : (
<div className="flex items-center gap-2">
<div className="animate-spin rounded-full h-4 w-4 border-b-2 border-orange-500"></div>
@@ -430,9 +506,9 @@ export default function JobSubmission({ onSuccess }) {
)}
</div>
</div>
) : (
// Step 2: Configure and submit
<form onSubmit={handleSubmit} className="space-y-6">
) : step === 2 ? (
// Step 2: Missing Addons (only shown if there are missing addons)
<div className="space-y-6">
<button
type="button"
onClick={handleBackToStep1}
@@ -440,6 +516,39 @@ export default function JobSubmission({ onSuccess }) {
>
← Back to Upload
</button>
<div className="p-4 bg-yellow-400/20 border border-yellow-400/50 rounded-lg">
<div className="text-yellow-400 font-semibold mb-3">Missing Addons Detected</div>
<div className="text-yellow-400/80 text-sm mb-4">
<p className="mb-2">The following addons are required by this blend file but are not available on the render servers:</p>
<ul className="list-disc list-inside space-y-1 ml-2">
{metadata?.missing_files_info?.missing_addons?.map((addon, idx) => (
<li key={idx}>{addon}</li>
))}
</ul>
<p className="mt-3 text-xs">
<strong>Note:</strong> The render may fail or produce unexpected results if these addons are required for rendering.
You can still proceed, but be aware that the output may not match your expectations.
</p>
</div>
<button
type="button"
onClick={handleContinueToStep3}
className="w-full px-4 py-2 bg-yellow-600 text-white rounded-lg hover:bg-yellow-500 transition-colors font-medium"
>
Continue to Configuration →
</button>
</div>
</div>
) : (
// Step 3: Configure and submit
<form onSubmit={handleSubmit} className="space-y-6">
<button
type="button"
onClick={metadata?.missing_files_info?.missing_addons && metadata.missing_files_info.missing_addons.length > 0 ? handleBackToStep2 : handleBackToStep1}
className="text-orange-500 hover:text-orange-400 font-medium text-sm flex items-center gap-1"
>
← Back
</button>
<div>
<label className="block text-sm font-medium text-gray-300 mb-2">
Job Name
@@ -516,21 +625,89 @@ export default function JobSubmission({ onSuccess }) {
</label>
</div>

<div className="p-4 bg-blue-400/20 border border-blue-400/50 rounded-lg">
<div className="flex items-center">
<input
type="checkbox"
id="unhide_objects"
checked={formData.unhide_objects}
onChange={(e) => setFormData({ ...formData, unhide_objects: e.target.checked })}
className="h-4 w-4 text-orange-600 focus:ring-orange-500 border-gray-600 bg-gray-900 rounded"
/>
<label htmlFor="unhide_objects" className="ml-2 block text-sm text-gray-300">
<span className="font-medium">Enable unhide tweaks</span>
<span className="text-xs text-gray-400 block mt-1">
Automatically unhide objects and collections that are hidden from render (useful for certain blend files)
</span>
</label>
</div>
</div>

<div className="p-4 bg-blue-400/20 border border-blue-400/50 rounded-lg">
<div className="flex items-center">
<input
type="checkbox"
id="enable_execution"
checked={formData.enable_execution}
onChange={(e) => setFormData({ ...formData, enable_execution: e.target.checked })}
className="h-4 w-4 text-orange-600 focus:ring-orange-500 border-gray-600 bg-gray-900 rounded"
/>
<label htmlFor="enable_execution" className="ml-2 block text-sm text-gray-300">
<span className="font-medium">Enable auto-execution</span>
<span className="text-xs text-gray-400 block mt-1">
Allow Blender to auto-execute startup scripts (disabled by default for security)
</span>
</label>
</div>
</div>

{metadata && metadataStatus === 'completed' && (
<div className="p-4 bg-green-400/20 border border-green-400/50 rounded-lg text-sm mb-4">
<div className="text-green-400 font-semibold mb-2">Metadata from blend file:</div>
<div className="text-green-400/80 text-xs space-y-1">
<div>Frames: {metadata.frame_start} - {metadata.frame_end}</div>
<div>Resolution: {metadata.render_settings?.resolution_x} x {metadata.render_settings?.resolution_y}</div>
<div>Engine: {metadata.render_settings?.engine}</div>
{metadata.render_settings?.engine_settings?.samples && (
<div>Samples: {metadata.render_settings.engine_settings.samples}</div>
)}
{metadata.render_settings?.engine_settings?.taa_render_samples && (
<div>EEVEE Samples: {metadata.render_settings.engine_settings.taa_render_samples}</div>
)}
<>
<div className="p-4 bg-green-400/20 border border-green-400/50 rounded-lg text-sm mb-4">
<div className="text-green-400 font-semibold mb-2">Metadata from blend file:</div>
<div className="text-green-400/80 text-xs space-y-1">
<div>Frames: {metadata.frame_start} - {metadata.frame_end}</div>
<div>Resolution: {metadata.render_settings?.resolution_x} x {metadata.render_settings?.resolution_y}</div>
<div>Engine: {metadata.render_settings?.engine}</div>
{metadata.render_settings?.engine_settings?.samples && (
<div>Samples: {metadata.render_settings.engine_settings.samples}</div>
)}
{metadata.render_settings?.engine_settings?.taa_render_samples && (
<div>EEVEE Samples: {metadata.render_settings.engine_settings.taa_render_samples}</div>
)}
</div>
</div>
</div>

{/* Missing Files/Addons Warning */}
{metadata.missing_files_info?.has_missing &&
metadata.missing_files_info.missing_addons &&
metadata.missing_files_info.missing_addons.length > 0 && (
<div className="p-4 bg-yellow-400/20 border border-yellow-400/50 rounded-lg text-sm mb-4">
<div className="text-yellow-400 font-semibold mb-2">⚠️ Missing Addons Detected</div>
<div className="text-yellow-400/80 text-xs mb-3">
<p className="mb-2">The following addons are required by this blend file but are not available:</p>
<ul className="list-disc list-inside space-y-1">
{metadata.missing_files_info.missing_addons.map((addon, idx) => (
<li key={idx}>{addon}</li>
))}
</ul>
<p className="mt-2 font-medium">Rendering may fail or produce incorrect results without these addons.</p>
</div>
<div className="flex items-center">
<input
type="checkbox"
id="confirm_missing_files"
checked={confirmedMissingFiles}
onChange={(e) => setConfirmedMissingFiles(e.target.checked)}
className="h-4 w-4 text-orange-600 focus:ring-orange-500 border-gray-600 bg-gray-900 rounded"
/>
<label htmlFor="confirm_missing_files" className="ml-2 block text-sm text-yellow-400">
I understand the risks and want to proceed anyway
</label>
</div>
</div>
)}
</>
)}

{/* Advanced Render Settings */}
@@ -886,11 +1063,16 @@ export default function JobSubmission({ onSuccess }) {
)}
<button
type="submit"
disabled={submitting || !file || isUploading}
disabled={submitting || !file || isUploading || (metadata?.missing_files_info?.has_missing && !confirmedMissingFiles)}
className="w-full px-6 py-3 bg-orange-600 text-white rounded-lg hover:bg-orange-500 transition-colors font-semibold disabled:opacity-50 disabled:cursor-not-allowed"
>
{submitting ? 'Creating Job...' : 'Create Job'}
</button>
{metadata?.missing_files_info?.has_missing && !confirmedMissingFiles && (
<p className="text-xs text-yellow-400 mt-2 text-center">
Please confirm that you want to proceed with missing addons
</p>
)}
</div>
</form>
)}

@@ -3,6 +3,73 @@ const API_BASE = '/api';
// Global auth error handler - will be set by useAuth hook
let onAuthError = null;

// Request debouncing and deduplication
const pendingRequests = new Map(); // key: endpoint+params, value: Promise
const requestQueue = new Map(); // key: endpoint+params, value: { resolve, reject, timestamp }
const DEBOUNCE_DELAY = 100; // 100ms debounce delay
const DEDUPE_WINDOW = 5000; // 5 seconds - same request within this window uses cached promise

// Generate cache key from endpoint and params
function getCacheKey(endpoint, options = {}) {
const params = new URLSearchParams();
Object.keys(options).sort().forEach(key => {
if (options[key] !== undefined && options[key] !== null) {
params.append(key, String(options[key]));
}
});
const query = params.toString();
return `${endpoint}${query ? '?' + query : ''}`;
}

// Sentinel value to indicate a request was superseded (instead of rejecting)
// Export it so components can check for it
export const REQUEST_SUPERSEDED = Symbol('REQUEST_SUPERSEDED');

// Debounced request wrapper
function debounceRequest(key, requestFn, delay = DEBOUNCE_DELAY) {
return new Promise((resolve, reject) => {
// Check if there's a pending request for this key
if (pendingRequests.has(key)) {
const pending = pendingRequests.get(key);
// If request is very recent (within dedupe window), reuse it
const now = Date.now();
if (pending.timestamp && (now - pending.timestamp) < DEDUPE_WINDOW) {
pending.promise.then(resolve).catch(reject);
return;
}
}

// Clear any existing timeout for this key
if (requestQueue.has(key)) {
const queued = requestQueue.get(key);
clearTimeout(queued.timeout);
// Resolve with sentinel value instead of rejecting - this prevents errors from propagating
// The new request will handle the actual response
queued.resolve(REQUEST_SUPERSEDED);
}

// Queue new request
const timeout = setTimeout(() => {
requestQueue.delete(key);
const promise = requestFn();
const timestamp = Date.now();
pendingRequests.set(key, { promise, timestamp });

promise
.then(result => {
pendingRequests.delete(key);
resolve(result);
})
.catch(error => {
pendingRequests.delete(key);
reject(error);
});
}, delay);

requestQueue.set(key, { resolve, reject, timeout });
});
}

export const setAuthErrorHandler = (handler) => {
onAuthError = handler;
};
@@ -174,12 +241,53 @@ export const auth = {
};

export const jobs = {
async list() {
return api.get('/jobs');
async list(options = {}) {
const key = getCacheKey('/jobs', options);
return debounceRequest(key, () => {
const params = new URLSearchParams();
if (options.limit) params.append('limit', options.limit.toString());
if (options.offset) params.append('offset', options.offset.toString());
if (options.status) params.append('status', options.status);
if (options.sort) params.append('sort', options.sort);
const query = params.toString();
return api.get(`/jobs${query ? '?' + query : ''}`);
});
},

async get(id) {
return api.get(`/jobs/${id}`);
async listSummary(options = {}) {
const key = getCacheKey('/jobs/summary', options);
return debounceRequest(key, () => {
const params = new URLSearchParams();
if (options.limit) params.append('limit', options.limit.toString());
if (options.offset) params.append('offset', options.offset.toString());
if (options.status) params.append('status', options.status);
if (options.sort) params.append('sort', options.sort);
const query = params.toString();
return api.get(`/jobs/summary${query ? '?' + query : ''}`);
});
},

async get(id, options = {}) {
const key = getCacheKey(`/jobs/${id}`, options);
return debounceRequest(key, async () => {
if (options.etag) {
// Include ETag in request headers for conditional requests
const headers = { 'If-None-Match': options.etag };
const response = await fetch(`${API_BASE}/jobs/${id}`, {
credentials: 'include',
headers,
});
if (response.status === 304) {
return null; // Not modified
}
if (!response.ok) {
const errorData = await response.json().catch(() => null);
throw new Error(errorData?.error || response.statusText);
}
return response.json();
}
return api.get(`/jobs/${id}`);
});
},

async create(jobData) {
@@ -202,8 +310,27 @@ export const jobs = {
return api.uploadFile(`/jobs/upload`, file, onProgress, mainBlendFile);
},

async getFiles(jobId) {
return api.get(`/jobs/${jobId}/files`);
async getFiles(jobId, options = {}) {
const key = getCacheKey(`/jobs/${jobId}/files`, options);
return debounceRequest(key, () => {
const params = new URLSearchParams();
if (options.limit) params.append('limit', options.limit.toString());
if (options.offset) params.append('offset', options.offset.toString());
if (options.file_type) params.append('file_type', options.file_type);
if (options.extension) params.append('extension', options.extension);
const query = params.toString();
return api.get(`/jobs/${jobId}/files${query ? '?' + query : ''}`);
});
},

async getFilesCount(jobId, options = {}) {
const key = getCacheKey(`/jobs/${jobId}/files/count`, options);
return debounceRequest(key, () => {
const params = new URLSearchParams();
if (options.file_type) params.append('file_type', options.file_type);
const query = params.toString();
return api.get(`/jobs/${jobId}/files/count${query ? '?' + query : ''}`);
});
},

async getContextArchive(jobId) {
@@ -219,12 +346,21 @@ export const jobs = {
},

async getTaskLogs(jobId, taskId, options = {}) {
const params = new URLSearchParams();
if (options.stepName) params.append('step_name', options.stepName);
if (options.logLevel) params.append('log_level', options.logLevel);
if (options.limit) params.append('limit', options.limit.toString());
const query = params.toString();
return api.get(`/jobs/${jobId}/tasks/${taskId}/logs${query ? '?' + query : ''}`);
const key = getCacheKey(`/jobs/${jobId}/tasks/${taskId}/logs`, options);
return debounceRequest(key, async () => {
const params = new URLSearchParams();
if (options.stepName) params.append('step_name', options.stepName);
if (options.logLevel) params.append('log_level', options.logLevel);
if (options.limit) params.append('limit', options.limit.toString());
if (options.sinceId) params.append('since_id', options.sinceId.toString());
const query = params.toString();
const result = await api.get(`/jobs/${jobId}/tasks/${taskId}/logs${query ? '?' + query : ''}`);
// Handle both old format (array) and new format (object with logs, last_id, limit)
if (Array.isArray(result)) {
return { logs: result, last_id: result.length > 0 ? result[result.length - 1].id : 0, limit: options.limit || 100 };
}
return result;
});
},

async getTaskSteps(jobId, taskId) {
@@ -239,6 +375,20 @@ export const jobs = {
return new WebSocket(url);
},

streamJobsWebSocket() {
const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
const wsHost = window.location.host;
const url = `${wsProtocol}//${wsHost}${API_BASE}/jobs/ws`;
return new WebSocket(url);
},

streamJobWebSocket(jobId) {
const wsProtocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
const wsHost = window.location.host;
const url = `${wsProtocol}//${wsHost}${API_BASE}/jobs/${jobId}/ws`;
return new WebSocket(url);
},

async retryTask(jobId, taskId) {
return api.post(`/jobs/${jobId}/tasks/${taskId}/retry`);
},
@@ -247,8 +397,50 @@ export const jobs = {
return api.get(`/jobs/${jobId}/metadata`);
},

async getTasks(jobId) {
return api.get(`/jobs/${jobId}/tasks`);
async getTasks(jobId, options = {}) {
const key = getCacheKey(`/jobs/${jobId}/tasks`, options);
return debounceRequest(key, () => {
const params = new URLSearchParams();
if (options.limit) params.append('limit', options.limit.toString());
if (options.offset) params.append('offset', options.offset.toString());
if (options.status) params.append('status', options.status);
if (options.frameStart) params.append('frame_start', options.frameStart.toString());
if (options.frameEnd) params.append('frame_end', options.frameEnd.toString());
if (options.sort) params.append('sort', options.sort);
const query = params.toString();
return api.get(`/jobs/${jobId}/tasks${query ? '?' + query : ''}`);
});
},

async getTasksSummary(jobId, options = {}) {
const key = getCacheKey(`/jobs/${jobId}/tasks/summary`, options);
return debounceRequest(key, () => {
const params = new URLSearchParams();
if (options.limit) params.append('limit', options.limit.toString());
if (options.offset) params.append('offset', options.offset.toString());
if (options.status) params.append('status', options.status);
if (options.sort) params.append('sort', options.sort);
const query = params.toString();
return api.get(`/jobs/${jobId}/tasks/summary${query ? '?' + query : ''}`);
});
},

async batchGetJobs(jobIds) {
// Sort jobIds for consistent cache key
const sortedIds = [...jobIds].sort((a, b) => a - b);
const key = getCacheKey('/jobs/batch', { job_ids: sortedIds.join(',') });
return debounceRequest(key, () => {
return api.post('/jobs/batch', { job_ids: jobIds });
});
},

async batchGetTasks(jobId, taskIds) {
// Sort taskIds for consistent cache key
const sortedIds = [...taskIds].sort((a, b) => a - b);
const key = getCacheKey(`/jobs/${jobId}/tasks/batch`, { task_ids: sortedIds.join(',') });
return debounceRequest(key, () => {
return api.post(`/jobs/${jobId}/tasks/batch`, { task_ids: taskIds });
});
},
};