diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 27f3cb2..60e54f3 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -6,6 +6,7 @@ import ( "log" "net/http" "os" + "os/exec" "jiggablend/internal/api" "jiggablend/internal/auth" @@ -40,6 +41,14 @@ func main() { log.Fatalf("Failed to initialize storage: %v", err) } + // Check if Blender is available (required for metadata extraction) + if err := checkBlenderAvailable(); err != nil { + log.Fatalf("Blender is not available: %v\n"+ + "The manager requires Blender to be installed and in PATH for metadata extraction.\n"+ + "Please install Blender and ensure it's accessible via the 'blender' command.", err) + } + log.Printf("Blender is available") + // Create API server server, err := api.NewServer(db, authHandler, storageHandler) if err != nil { @@ -76,3 +85,14 @@ func getEnv(key, defaultValue string) string { } return defaultValue } + +// checkBlenderAvailable checks if Blender is available by running `blender --version` +func checkBlenderAvailable() error { + cmd := exec.Command("blender", "--version") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to run 'blender --version': %w (output: %s)", err, string(output)) + } + // If we got here, Blender is available + return nil +} diff --git a/internal/api/jobs.go b/internal/api/jobs.go index 3f460da..dc4a9c7 100644 --- a/internal/api/jobs.go +++ b/internal/api/jobs.go @@ -1,13 +1,19 @@ package api import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" "database/sql" "encoding/json" + "errors" "fmt" "io" "log" "net/http" "os" + "os/exec" "path/filepath" "strconv" "strings" @@ -39,9 +45,9 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { return } - // Validate job type - if req.JobType != types.JobTypeMetadata && req.JobType != types.JobTypeRender { - s.respondError(w, http.StatusBadRequest, "Invalid job_type: must be 'metadata' or 'render'") + // Validate job type - only render jobs are supported now + if req.JobType != types.JobTypeRender { + s.respondError(w, http.StatusBadRequest, "Invalid job_type: only 'render' jobs are supported") return } @@ -85,59 +91,140 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { // Set job timeout to 24 hours (86400 seconds) jobTimeout := 86400 - // Build SQL query based on job type + // Store render settings in blend_metadata if provided + var blendMetadataJSON *string + if req.RenderSettings != nil { + metadata := types.BlendMetadata{ + FrameStart: *req.FrameStart, + FrameEnd: *req.FrameEnd, + RenderSettings: *req.RenderSettings, + } + metadataBytes, err := json.Marshal(metadata) + if err == nil { + metadataStr := string(metadataBytes) + blendMetadataJSON = &metadataStr + } + } + + log.Printf("Creating render job with output_format: '%s' (from user selection)", *req.OutputFormat) var jobID int64 - if req.JobType == types.JobTypeMetadata { - // Metadata jobs don't need frame range or output format - err = s.db.QueryRow( - `INSERT INTO jobs (user_id, job_type, name, status, progress, timeout_seconds) - VALUES (?, ?, ?, ?, ?, ?) - RETURNING id`, - userID, req.JobType, req.Name, types.JobStatusPending, 0.0, jobTimeout, - ).Scan(&jobID) - } else { - // Render jobs need all fields - err = s.db.QueryRow( - `INSERT INTO jobs (user_id, job_type, name, status, progress, frame_start, frame_end, output_format, allow_parallel_runners, timeout_seconds) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
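
Note: `checkBlenderAvailable` above only confirms that the binary launches. If logging the detected build is useful, a version probe is a small extension. A minimal sketch, assuming recent Blender releases print `Blender <version>` as the first line of `blender --version` output (the helper name is invented):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// blenderVersion is a hypothetical variant of checkBlenderAvailable that also
// returns the first line of `blender --version` output (e.g. "Blender 4.2.0")
// so the manager can log which build it found at startup.
func blenderVersion() (string, error) {
	out, err := exec.Command("blender", "--version").CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("failed to run 'blender --version': %w (output: %s)", err, out)
	}
	// SplitN always returns at least one element, so indexing [0] is safe.
	first := strings.SplitN(string(out), "\n", 2)[0]
	return strings.TrimSpace(first), nil
}

func main() {
	v, err := blenderVersion()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("Blender is available: %s\n", v)
}
```
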
- RETURNING id`, - userID, req.JobType, req.Name, types.JobStatusPending, 0.0, *req.FrameStart, *req.FrameEnd, *req.OutputFormat, allowParallelRunners, jobTimeout, - ).Scan(&jobID) + err = s.db.QueryRow( + `INSERT INTO jobs (user_id, job_type, name, status, progress, frame_start, frame_end, output_format, allow_parallel_runners, timeout_seconds, blend_metadata) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + RETURNING id`, + userID, req.JobType, req.Name, types.JobStatusPending, 0.0, *req.FrameStart, *req.FrameEnd, *req.OutputFormat, allowParallelRunners, jobTimeout, blendMetadataJSON, + ).Scan(&jobID) + if err == nil { + log.Printf("Created render job %d with output_format: '%s'", jobID, *req.OutputFormat) } if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create job: %v", err)) return } - // For render jobs, copy input files from metadata job if specified - if req.JobType == types.JobTypeRender && req.MetadataJobID != nil { - // Verify metadata job exists and belongs to the same user - var metadataJobUserID int64 - err = s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ? AND job_type = ?", *req.MetadataJobID, types.JobTypeMetadata).Scan(&metadataJobUserID) - if err == nil && metadataJobUserID == userID { - // Copy input files from metadata job to render job - _, err = s.db.Exec( - `INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size) - SELECT ?, file_type, file_path, file_name, file_size - FROM job_files - WHERE job_id = ? AND file_type = ?`, - jobID, *req.MetadataJobID, types.JobFileTypeInput, - ) + // If upload session ID is provided, move the context archive from temp to job directory + if req.UploadSessionID != nil && *req.UploadSessionID != "" { + log.Printf("Processing upload session for job %d: %s", jobID, *req.UploadSessionID) + // Session ID is the full temp directory path + tempDir := *req.UploadSessionID + tempContextPath := filepath.Join(tempDir, "context.tar.gz") + + if _, err := os.Stat(tempContextPath); err == nil { + log.Printf("Found context archive at %s, moving to job %d directory", tempContextPath, jobID) + // Move context to job directory + jobPath := s.storage.JobPath(jobID) + if err := os.MkdirAll(jobPath, 0755); err != nil { + log.Printf("ERROR: Failed to create job directory for job %d: %v", jobID, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create job directory: %v", err)) + return + } + + jobContextPath := filepath.Join(jobPath, "context.tar.gz") + + // Copy file instead of rename (works across filesystems) + srcFile, err := os.Open(tempContextPath) if err != nil { - log.Printf("Warning: Failed to copy input files from metadata job %d to render job %d: %v", *req.MetadataJobID, jobID, err) - } else { - log.Printf("Copied input files from metadata job %d to render job %d", *req.MetadataJobID, jobID) + log.Printf("ERROR: Failed to open source context archive %s: %v", tempContextPath, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to open context archive: %v", err)) + return + } + defer srcFile.Close() + + dstFile, err := os.Create(jobContextPath) + if err != nil { + log.Printf("ERROR: Failed to create destination context archive %s: %v", jobContextPath, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create context archive: %v", err)) + return + } + defer dstFile.Close() + + _, err = io.Copy(dstFile, srcFile) + if err != nil { + dstFile.Close() + os.Remove(jobContextPath) // Clean up partial file + 
log.Printf("ERROR: Failed to copy context archive from %s to %s: %v", tempContextPath, jobContextPath, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to copy context archive: %v", err)) + return + } + + // Close files before deleting source + srcFile.Close() + if err := dstFile.Close(); err != nil { + log.Printf("ERROR: Failed to close destination file: %v", err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to finalize context archive: %v", err)) + return + } + + // Delete source file after successful copy + if err := os.Remove(tempContextPath); err != nil { + log.Printf("Warning: Failed to remove source context archive %s: %v", tempContextPath, err) + // Don't fail the operation if cleanup fails + } + + log.Printf("Successfully copied context archive to %s", jobContextPath) + + // Record context archive in database + contextInfo, err := os.Stat(jobContextPath) + if err != nil { + log.Printf("ERROR: Failed to stat context archive after move: %v", err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify context archive: %v", err)) + return + } + + var fileID int64 + err = s.db.QueryRow( + `INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size) + VALUES (?, ?, ?, ?, ?) + RETURNING id`, + jobID, types.JobFileTypeInput, jobContextPath, filepath.Base(jobContextPath), contextInfo.Size(), + ).Scan(&fileID) + if err != nil { + log.Printf("ERROR: Failed to record context archive in database for job %d: %v", jobID, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record context archive: %v", err)) + return + } + + log.Printf("Successfully recorded context archive in database for job %d (file ID: %d, size: %d bytes)", jobID, fileID, contextInfo.Size()) + + // Clean up temp directory + if err := os.RemoveAll(tempDir); err != nil { + log.Printf("Warning: Failed to clean up temp directory %s: %v", tempDir, err) } } else { - log.Printf("Warning: Metadata job %d not found or doesn't belong to user %d, skipping file copy", *req.MetadataJobID, userID) + log.Printf("ERROR: Context archive not found at %s for session %s: %v", tempContextPath, *req.UploadSessionID, err) + s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Context archive not found for upload session. 
Please upload the file again.")) + return } + } else { + log.Printf("Warning: No upload session ID provided for job %d - job created without input files", jobID) } + // Only create render tasks for render jobs if req.JobType == types.JobTypeRender { // Determine task timeout based on output format taskTimeout := 300 // Default: 5 minutes for frame rendering - if *req.OutputFormat == "MP4" { + if *req.OutputFormat == "EXR_264_MP4" || *req.OutputFormat == "EXR_AV1_MP4" { // For MP4, we'll create frame tasks with 5 min timeout // Video generation tasks will be created later with 24h timeout taskTimeout = 300 @@ -175,8 +262,6 @@ func (s *Server) handleCreateJob(w http.ResponseWriter, r *http.Request) { } // Update job status (should be pending since tasks are pending) s.updateJobStatusFromTasks(jobID) - } else { - log.Printf("Created metadata extraction job %d (no render tasks)", jobID) } // Build response job object @@ -211,25 +296,12 @@ func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) { return } - // Filter out metadata jobs for non-admin users - isAdmin := isAdminUser(r) - var query string - if isAdmin { - query = `SELECT id, user_id, job_type, name, status, progress, frame_start, frame_end, output_format, - allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message - FROM jobs WHERE user_id = ? ORDER BY created_at DESC` - } else { - query = `SELECT id, user_id, job_type, name, status, progress, frame_start, frame_end, output_format, - allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message - FROM jobs WHERE user_id = ? AND job_type != ? ORDER BY created_at DESC` - } + // Query all jobs for the user + query := `SELECT id, user_id, job_type, name, status, progress, frame_start, frame_end, output_format, + allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message + FROM jobs WHERE user_id = ? ORDER BY created_at DESC` - var rows *sql.Rows - if isAdmin { - rows, err = s.db.Query(query, userID) - } else { - rows, err = s.db.Query(query, userID, types.JobTypeMetadata) - } + rows, err := s.db.Query(query, userID) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query jobs: %v", err)) return @@ -317,7 +389,7 @@ func (s *Server) handleGetJob(w http.ResponseWriter, r *http.Request) { var outputFormat sql.NullString var allowParallelRunners sql.NullBool - // Allow admins to view any job, regular users can only view their own (and not metadata jobs) + // Allow admins to view any job, regular users can only view their own isAdmin := isAdminUser(r) var err2 error if isAdmin { @@ -335,8 +407,8 @@ func (s *Server) handleGetJob(w http.ResponseWriter, r *http.Request) { err2 = s.db.QueryRow( `SELECT id, user_id, job_type, name, status, progress, frame_start, frame_end, output_format, allow_parallel_runners, timeout_seconds, blend_metadata, created_at, started_at, completed_at, error_message - FROM jobs WHERE id = ? AND user_id = ? AND job_type != ?`, - jobID, userID, types.JobTypeMetadata, + FROM jobs WHERE id = ? 
AND user_id = ?`, + jobID, userID, ).Scan( &job.ID, &job.UserID, &jobType, &job.Name, &job.Status, &job.Progress, &frameStart, &frameEnd, &outputFormat, &allowParallelRunners, &job.TimeoutSeconds, @@ -420,8 +492,6 @@ func (s *Server) handleCancelJob(w http.ResponseWriter, r *http.Request) { return } - isMetadataExtractionJob := types.JobType(jobType) == types.JobTypeMetadata - result, err := s.db.Exec( `UPDATE jobs SET status = ? WHERE id = ? AND user_id = ?`, types.JobStatusCancelled, jobID, userID, @@ -437,43 +507,13 @@ func (s *Server) handleCancelJob(w http.ResponseWriter, r *http.Request) { return } - log.Printf("Cancelling job %d (type: %s, isMetadataExtraction: %v)", jobID, jobType, isMetadataExtractionJob) + log.Printf("Cancelling job %d (type: %s)", jobID, jobType) - // For metadata extraction jobs, be more careful - only cancel if no metadata task is running - if isMetadataExtractionJob { - // Check if there's a running metadata task - var runningMetadataTask int - s.db.QueryRow( - `SELECT COUNT(*) FROM tasks WHERE job_id = ? AND task_type = ? AND status = ?`, - jobID, types.TaskTypeMetadata, types.TaskStatusRunning, - ).Scan(&runningMetadataTask) - - if runningMetadataTask > 0 { - log.Printf("Job %d has running metadata task, preserving it", jobID) - // Don't cancel running metadata tasks - let them complete - // Only cancel pending tasks that aren't metadata - _, err = s.db.Exec( - `UPDATE tasks SET status = ? - WHERE job_id = ? AND status = ? AND task_type != ?`, - types.TaskStatusFailed, jobID, types.TaskStatusPending, types.TaskTypeMetadata, - ) - } else { - // No running metadata task, safe to cancel pending metadata tasks - _, err = s.db.Exec( - `UPDATE tasks SET status = ? - WHERE job_id = ? AND status = ?`, - types.TaskStatusFailed, jobID, types.TaskStatusPending, - ) - } - } else { - // For regular jobs, cancel pending tasks (but preserve running metadata tasks) - _, err = s.db.Exec( - `UPDATE tasks SET status = ? - WHERE job_id = ? AND status = ? - AND NOT (task_type = ? AND runner_id IS NOT NULL)`, - types.TaskStatusFailed, jobID, types.TaskStatusPending, types.TaskTypeMetadata, - ) - } + // Cancel all pending tasks + _, err = s.db.Exec( + `UPDATE tasks SET status = ? WHERE job_id = ? AND status = ?`, + types.TaskStatusFailed, jobID, types.TaskStatusPending, + ) if err != nil { s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to cancel tasks: %v", err)) @@ -504,8 +544,8 @@ func (s *Server) handleDeleteJob(w http.ResponseWriter, r *http.Request) { if isAdmin { err = s.db.QueryRow("SELECT user_id, status FROM jobs WHERE id = ?", jobID).Scan(&jobUserID, &jobStatus) } else { - // Non-admin users can only delete their own jobs, and not metadata jobs - err = s.db.QueryRow("SELECT user_id, status FROM jobs WHERE id = ? AND user_id = ? AND job_type != ?", jobID, userID, types.JobTypeMetadata).Scan(&jobUserID, &jobStatus) + // Non-admin users can only delete their own jobs + err = s.db.QueryRow("SELECT user_id, status FROM jobs WHERE id = ? 
AND user_id = ?", jobID, userID).Scan(&jobUserID, &jobStatus) } if err == sql.ErrNoRows { s.respondError(w, http.StatusNotFound, "Job not found") @@ -590,124 +630,22 @@ func (s *Server) handleDeleteJob(w http.ResponseWriter, r *http.Request) { s.respondJSON(w, http.StatusOK, map[string]string{"message": "Job deleted"}) } -// cleanupOldMetadataJobs periodically deletes metadata jobs older than 1 day -func (s *Server) cleanupOldMetadataJobs() { +// cleanupOldRenderJobs periodically deletes render jobs older than 1 month +func (s *Server) cleanupOldRenderJobs() { // Run cleanup every hour ticker := time.NewTicker(1 * time.Hour) defer ticker.Stop() // Run once immediately on startup - s.cleanupMetadataJobs() - s.cleanupOldRenderJobs() + s.cleanupOldRenderJobsOnce() for range ticker.C { - s.cleanupMetadataJobs() - s.cleanupOldRenderJobs() + s.cleanupOldRenderJobsOnce() } } -// cleanupMetadataJobs finds and deletes metadata jobs older than 1 day -func (s *Server) cleanupMetadataJobs() { - defer func() { - if r := recover(); r != nil { - log.Printf("Panic in cleanupMetadataJobs: %v", r) - } - }() - - // Find metadata jobs older than 1 day - rows, err := s.db.Query( - `SELECT id FROM jobs - WHERE job_type = ? - AND created_at < CURRENT_TIMESTAMP - INTERVAL '1 day'`, - types.JobTypeMetadata, - ) - if err != nil { - log.Printf("Failed to query old metadata jobs: %v", err) - return - } - defer rows.Close() - - var jobIDs []int64 - for rows.Next() { - var jobID int64 - if err := rows.Scan(&jobID); err == nil { - jobIDs = append(jobIDs, jobID) - } - } - rows.Close() - - if len(jobIDs) == 0 { - return - } - - log.Printf("Cleaning up %d old metadata jobs", len(jobIDs)) - - // Delete each job - for _, jobID := range jobIDs { - // Delete in transaction to ensure consistency - tx, err := s.db.Begin() - if err != nil { - log.Printf("Failed to start transaction for job %d: %v", jobID, err) - continue - } - - // Delete task logs - _, err = tx.Exec(`DELETE FROM task_logs WHERE task_id IN (SELECT id FROM tasks WHERE job_id = ?)`, jobID) - if err != nil { - tx.Rollback() - log.Printf("Failed to delete task logs for job %d: %v", jobID, err) - continue - } - - // Delete task steps - _, err = tx.Exec(`DELETE FROM task_steps WHERE task_id IN (SELECT id FROM tasks WHERE job_id = ?)`, jobID) - if err != nil { - tx.Rollback() - log.Printf("Failed to delete task steps for job %d: %v", jobID, err) - continue - } - - // Delete tasks - _, err = tx.Exec("DELETE FROM tasks WHERE job_id = ?", jobID) - if err != nil { - tx.Rollback() - log.Printf("Failed to delete tasks for job %d: %v", jobID, err) - continue - } - - // Delete job files - _, err = tx.Exec("DELETE FROM job_files WHERE job_id = ?", jobID) - if err != nil { - tx.Rollback() - log.Printf("Failed to delete job files for job %d: %v", jobID, err) - continue - } - - // Delete the job - _, err = tx.Exec("DELETE FROM jobs WHERE id = ?", jobID) - if err != nil { - tx.Rollback() - log.Printf("Failed to delete job %d: %v", jobID, err) - continue - } - - // Commit transaction - if err = tx.Commit(); err != nil { - log.Printf("Failed to commit transaction for job %d: %v", jobID, err) - continue - } - - // Delete physical files (best effort, don't fail if this errors) - if err := s.storage.DeleteJobFiles(jobID); err != nil { - log.Printf("Warning: Failed to delete files for metadata job %d: %v", jobID, err) - } - } - - log.Printf("Cleaned up %d old metadata jobs", len(jobIDs)) -} - -// cleanupOldRenderJobs finds and deletes render jobs older than 1 month that are completed, 
failed, or cancelled -func (s *Server) cleanupOldRenderJobs() { +// cleanupOldRenderJobsOnce finds and deletes render jobs older than 1 month that are completed, failed, or cancelled +func (s *Server) cleanupOldRenderJobsOnce() { defer func() { if r := recover(); r != nil { log.Printf("Panic in cleanupOldRenderJobs: %v", r) @@ -866,6 +804,14 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { return } + // Create temporary directory for processing upload + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-upload-%d-*", jobID)) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create temporary directory: %v", err)) + return + } + defer os.RemoveAll(tmpDir) + var fileID int64 var mainBlendFile string var extractedFiles []string @@ -873,8 +819,8 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { // Check if this is a ZIP file if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { log.Printf("Processing ZIP file '%s' for job %d", header.Filename, jobID) - // Extract ZIP file - zipPath := filepath.Join(jobPath, header.Filename) + // Save ZIP to temporary directory + zipPath := filepath.Join(tmpDir, header.Filename) log.Printf("Creating ZIP file at: %s", zipPath) zipFile, err := os.Create(zipPath) if err != nil { @@ -892,30 +838,9 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { } log.Printf("Successfully copied %d bytes to ZIP file for job %d", copied, jobID) - // Record ZIP file in database - zipInfo, err := os.Stat(zipPath) - if err != nil { - log.Printf("ERROR: Failed to stat ZIP file for job %d: %v", jobID, err) - s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to stat ZIP file: %v", err)) - return - } - log.Printf("Recording ZIP file in database for job %d (size: %d bytes)", jobID, zipInfo.Size()) - err = s.db.QueryRow( - `INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size) - VALUES (?, ?, ?, ?, ?) 
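
Aside: both the removed block above and the new upload paths use the same `INSERT ... RETURNING id` plus `QueryRow(...).Scan(&id)` idiom to fetch the generated key in one round trip. A standalone sketch of the pattern, assuming a `database/sql` driver whose dialect accepts `RETURNING` with `?` placeholders (SQLite 3.35+ behaves this way); the helper name is invented, the table and columns are taken from this patch:

```go
package jobs // illustrative package name

import "database/sql"

// insertJobFile inserts a row into job_files and returns the generated id,
// mirroring the QueryRow(...RETURNING id).Scan(&id) pattern used throughout
// this patch. Sketch only; the caller handles the error.
func insertJobFile(db *sql.DB, jobID int64, fileType, path, name string, size int64) (int64, error) {
	var id int64
	err := db.QueryRow(
		`INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size)
		 VALUES (?, ?, ?, ?, ?)
		 RETURNING id`,
		jobID, fileType, path, name, size,
	).Scan(&id)
	return id, err
}
```

The advantage over `Exec` plus `LastInsertId` is that `RETURNING` works identically on drivers (such as PostgreSQL's) that do not implement `LastInsertId` at all.
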
- RETURNING id`, - jobID, types.JobFileTypeInput, zipPath, header.Filename, zipInfo.Size(), - ).Scan(&fileID) - if err != nil { - log.Printf("ERROR: Failed to record ZIP file in database for job %d: %v", jobID, err) - s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record ZIP file: %v", err)) - return - } - log.Printf("ZIP file recorded in database with ID %d for job %d", fileID, jobID) - - // Extract ZIP file + // Extract ZIP file to temporary directory log.Printf("Extracting ZIP file for job %d...", jobID) - extractedFiles, err = s.storage.ExtractZip(zipPath, jobPath) + extractedFiles, err = s.storage.ExtractZip(zipPath, tmpDir) if err != nil { log.Printf("ERROR: Failed to extract ZIP file for job %d: %v", jobID, err) s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to extract ZIP file: %v", err)) @@ -927,7 +852,7 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { mainBlendParam := r.FormValue("main_blend_file") if mainBlendParam != "" { // User specified main blend file - mainBlendFile = filepath.Join(jobPath, mainBlendParam) + mainBlendFile = filepath.Join(tmpDir, mainBlendParam) if _, err := os.Stat(mainBlendFile); err != nil { s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Specified main blend file not found: %s", mainBlendParam)) return @@ -935,12 +860,12 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { } else { // Auto-detect: find blend files in root directory blendFiles := []string{} - err := filepath.Walk(jobPath, func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } // Only check files in root directory (not subdirectories) - relPath, _ := filepath.Rel(jobPath, path) + relPath, _ := filepath.Rel(tmpDir, path) if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".blend") { // Check if it's in root (no path separators) if !strings.Contains(relPath, string(filepath.Separator)) { @@ -957,7 +882,7 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { // Return list of blend files for user to choose blendFileNames := []string{} for _, f := range blendFiles { - rel, _ := filepath.Rel(jobPath, f) + rel, _ := filepath.Rel(tmpDir, f) blendFileNames = append(blendFileNames, rel) } s.respondJSON(w, http.StatusOK, map[string]interface{}{ @@ -968,126 +893,786 @@ func (s *Server) handleUploadJobFile(w http.ResponseWriter, r *http.Request) { return } } - - // Record extracted files in database - for _, extractedFile := range extractedFiles { - relPath, _ := filepath.Rel(jobPath, extractedFile) - info, err := os.Stat(extractedFile) - if err == nil && !info.IsDir() { - _, _ = s.db.Exec( - `INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size) - VALUES (?, ?, ?, ?, ?)`, - jobID, types.JobFileTypeInput, extractedFile, relPath, info.Size(), - ) - } - } } else { - // Regular file upload (not ZIP) - filePath, err := s.storage.SaveUpload(jobID, header.Filename, file) + // Regular file upload (not ZIP) - save to temporary directory + filePath := filepath.Join(tmpDir, header.Filename) + outFile, err := os.Create(filePath) if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create file: %v", err)) + return + } + + // Get a fresh file reader (FormFile returns a new reader each time) + fileReader, _, err := r.FormFile("file") + if err != nil { + outFile.Close() + 
s.respondError(w, http.StatusBadRequest, fmt.Sprintf("No file provided: %v", err)) + return + } + + if _, err := io.Copy(outFile, fileReader); err != nil { + fileReader.Close() + outFile.Close() s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to save file: %v", err)) return } + fileReader.Close() + outFile.Close() - // Record in database - err = s.db.QueryRow( - `INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size) - VALUES (?, ?, ?, ?, ?) - RETURNING id`, - jobID, types.JobFileTypeInput, filePath, header.Filename, header.Size, - ).Scan(&fileID) - if err != nil { - s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record file: %v", err)) - return + if strings.HasSuffix(strings.ToLower(header.Filename), ".blend") { + mainBlendFile = filePath } - mainBlendFile = filePath } - // If we have a main blend file (from ZIP extraction or direct upload), create metadata extraction task - // But ONLY for metadata extraction jobs (temporary jobs created during the initial two-step submission flow) - // Never create metadata tasks for regular render jobs, even if they receive blend files later - blendFileToCheck := mainBlendFile - if blendFileToCheck == "" && strings.HasSuffix(strings.ToLower(header.Filename), ".blend") { - // Direct blend file upload (not from ZIP) - blendFileToCheck = s.storage.JobPath(jobID) - blendFileToCheck = filepath.Join(blendFileToCheck, header.Filename) + // Create context archive from temporary directory - this is the primary artifact + // Exclude the original uploaded ZIP file (but keep blend files as they're needed for rendering) + log.Printf("Creating context archive for job %d...", jobID) + var excludeFiles []string + if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { + excludeFiles = append(excludeFiles, header.Filename) } + contextPath, err := s.storage.CreateJobContextFromDir(tmpDir, jobID, excludeFiles...) + if err != nil { + log.Printf("ERROR: Failed to create context archive for job %d: %v", jobID, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create context archive: %v", err)) + return + } + log.Printf("Successfully created context archive for job %d at %s", jobID, contextPath) - if blendFileToCheck != "" && strings.HasSuffix(strings.ToLower(blendFileToCheck), ".blend") { - // Check if this is a metadata extraction job - var jobType string - var hasRenderTasks int - err = s.db.QueryRow( - `SELECT j.job_type, COUNT(t.id) - FROM jobs j - LEFT JOIN tasks t ON j.id = t.job_id AND t.task_type = 'render' - WHERE j.id = ? - GROUP BY j.id, j.job_type`, - jobID, - ).Scan(&jobType, &hasRenderTasks) + // Record context archive in database + contextInfo, err := os.Stat(contextPath) + if err != nil { + log.Printf("ERROR: Failed to stat context archive for job %d: %v", jobID, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to stat context archive: %v", err)) + return + } + err = s.db.QueryRow( + `INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size) + VALUES (?, ?, ?, ?, ?) 
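
The context archive recorded here is expected to contain exactly one `.blend` file at its root; `createContextFromDir` later in this patch enforces that at build time. The same invariant can be re-checked on an existing archive by streaming it, using the tar/gzip reading approach `handleListContextArchive` uses below. A sketch, with an invented helper name:

```go
package context // hypothetical package name

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
	"strings"
)

// countRootBlendFiles streams a context.tar.gz and counts .blend entries at
// the archive root (tar paths use "/", so root entries contain none). A
// valid context archive should yield exactly 1.
func countRootBlendFiles(archivePath string) (int, error) {
	f, err := os.Open(archivePath)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		return 0, err
	}
	defer gz.Close()

	count := 0
	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return 0, err
		}
		if hdr.Typeflag == tar.TypeReg &&
			strings.HasSuffix(strings.ToLower(hdr.Name), ".blend") &&
			!strings.Contains(hdr.Name, "/") {
			count++
		}
	}
	return count, nil
}
```
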
+ RETURNING id`, + jobID, types.JobFileTypeInput, contextPath, filepath.Base(contextPath), contextInfo.Size(), + ).Scan(&fileID) + if err != nil { + log.Printf("ERROR: Failed to record context archive in database for job %d: %v", jobID, err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record context archive: %v", err)) + return + } + log.Printf("Context archive recorded in database with ID %d for job %d", fileID, jobID) - // Only create metadata extraction task if: - // 1. Job type is "metadata" (temporary job from initial submission) - // 2. Job has no render tasks (not a regular render job) - if err == nil && types.JobType(jobType) == types.JobTypeMetadata && hasRenderTasks == 0 { - // Check if metadata task already exists to avoid duplicates - var existingMetadataTask int - s.db.QueryRow( - `SELECT COUNT(*) FROM tasks WHERE job_id = ? AND task_type = 'metadata'`, - jobID, - ).Scan(&existingMetadataTask) - - if existingMetadataTask == 0 { - // Create metadata extraction task - metadataTaskTimeout := 300 // 5 minutes for metadata extraction - var metadataTaskID int64 - err = s.db.QueryRow( - `INSERT INTO tasks (job_id, frame_start, frame_end, task_type, status, timeout_seconds, max_retries) - VALUES (?, ?, ?, ?, ?, ?, ?) - RETURNING id`, - jobID, 0, 0, types.TaskTypeMetadata, types.TaskStatusPending, metadataTaskTimeout, 1, - ).Scan(&metadataTaskID) - if err != nil { - log.Printf("Failed to create metadata extraction task: %v", err) - } else { - log.Printf("Created metadata extraction task %d for job %d (initial submission)", metadataTaskID, jobID) - // Log task creation to task logs - s.logTaskEvent(metadataTaskID, nil, types.LogLevelInfo, "Metadata extraction task created", "") - // Try to distribute the task immediately (with small delay to ensure transaction is committed) - go func() { - time.Sleep(100 * time.Millisecond) // Small delay to ensure transaction is committed - s.distributeTasksToRunners() - }() - } + // Extract metadata directly from the context archive + log.Printf("Extracting metadata for job %d...", jobID) + metadata, err := s.extractMetadataFromContext(jobID) + if err != nil { + log.Printf("Warning: Failed to extract metadata for job %d: %v", jobID, err) + // Don't fail the upload if metadata extraction fails - job can still proceed + } else { + // Update job with metadata + metadataJSON, err := json.Marshal(metadata) + if err == nil { + _, err = s.db.Exec( + `UPDATE jobs SET blend_metadata = ? 
WHERE id = ?`, + string(metadataJSON), jobID, + ) + if err != nil { + log.Printf("Warning: Failed to update job metadata in database: %v", err) } else { - log.Printf("Skipping metadata extraction task creation for job %d (metadata task already exists)", jobID) + log.Printf("Successfully extracted and stored metadata for job %d", jobID) } } else { - log.Printf("Skipping metadata extraction task creation for job %d (not an initial metadata extraction job)", jobID) + log.Printf("Warning: Failed to marshal metadata: %v", err) } } + response := map[string]interface{}{ - "id": fileID, - "file_name": header.Filename, - "file_size": header.Size, + "id": fileID, + "file_name": header.Filename, + "file_size": header.Size, + "context_archive": filepath.Base(contextPath), } if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { response["zip_extracted"] = true response["extracted_files_count"] = len(extractedFiles) if mainBlendFile != "" { - relPath, _ := filepath.Rel(s.storage.JobPath(jobID), mainBlendFile) + // Get relative path from temp dir + relPath, _ := filepath.Rel(tmpDir, mainBlendFile) response["main_blend_file"] = relPath } - } else { - response["file_path"] = s.storage.JobPath(jobID) - response["file_path"] = filepath.Join(response["file_path"].(string), header.Filename) + } else if mainBlendFile != "" { + relPath, _ := filepath.Rel(tmpDir, mainBlendFile) + response["main_blend_file"] = relPath } s.respondJSON(w, http.StatusCreated, response) } +// handleUploadFileForJobCreation handles file upload before job creation +// Creates context archive and extracts metadata, returns metadata and upload session ID +func (s *Server) handleUploadFileForJobCreation(w http.ResponseWriter, r *http.Request) { + userID, err := getUserID(r) + if err != nil { + s.respondError(w, http.StatusUnauthorized, err.Error()) + return + } + + // Parse multipart form with large limit for big files + err = r.ParseMultipartForm(20 << 30) // 20 GB + if err != nil { + log.Printf("Error parsing multipart form: %v", err) + s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Failed to parse form: %v", err)) + return + } + + file, header, err := r.FormFile("file") + if err != nil { + log.Printf("Error getting file from form: %v", err) + s.respondError(w, http.StatusBadRequest, fmt.Sprintf("No file provided: %v", err)) + return + } + defer file.Close() + + log.Printf("Uploading file '%s' (size: %d bytes) for user %d (pre-job creation)", header.Filename, header.Size, userID) + + // Create temporary directory for processing upload (user-specific) + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-upload-user-%d-*", userID)) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create temporary directory: %v", err)) + return + } + // Note: We'll clean this up after job creation or after timeout + + var mainBlendFile string + var extractedFiles []string + + // Check if this is a ZIP file + if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { + log.Printf("Processing ZIP file '%s'", header.Filename) + // Save ZIP to temporary directory + zipPath := filepath.Join(tmpDir, header.Filename) + zipFile, err := os.Create(zipPath) + if err != nil { + os.RemoveAll(tmpDir) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create ZIP file: %v", err)) + return + } + copied, err := io.Copy(zipFile, file) + zipFile.Close() + if err != nil { + os.RemoveAll(tmpDir) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to save ZIP file: %v", 
err)) + return + } + log.Printf("Successfully copied %d bytes to ZIP file", copied) + + // Extract ZIP file to temporary directory + extractedFiles, err = s.storage.ExtractZip(zipPath, tmpDir) + if err != nil { + os.RemoveAll(tmpDir) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to extract ZIP file: %v", err)) + return + } + log.Printf("Successfully extracted %d files from ZIP", len(extractedFiles)) + + // Find main blend file + mainBlendParam := r.FormValue("main_blend_file") + if mainBlendParam != "" { + mainBlendFile = filepath.Join(tmpDir, mainBlendParam) + if _, err := os.Stat(mainBlendFile); err != nil { + os.RemoveAll(tmpDir) + s.respondError(w, http.StatusBadRequest, fmt.Sprintf("Specified main blend file not found: %s", mainBlendParam)) + return + } + } else { + // Auto-detect: find blend files in root directory + blendFiles := []string{} + err := filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + relPath, _ := filepath.Rel(tmpDir, path) + if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".blend") { + if !strings.Contains(relPath, string(filepath.Separator)) { + blendFiles = append(blendFiles, path) + } + } + return nil + }) + if err == nil && len(blendFiles) == 1 { + mainBlendFile = blendFiles[0] + } else if len(blendFiles) > 1 { + // Multiple blend files - return list for user to choose + blendFileNames := []string{} + for _, f := range blendFiles { + rel, _ := filepath.Rel(tmpDir, f) + blendFileNames = append(blendFileNames, rel) + } + os.RemoveAll(tmpDir) + s.respondJSON(w, http.StatusOK, map[string]interface{}{ + "zip_extracted": true, + "blend_files": blendFileNames, + "message": "Multiple blend files found. Please specify the main blend file.", + }) + return + } + } + } else { + // Regular file upload (not ZIP) - save to temporary directory + filePath := filepath.Join(tmpDir, header.Filename) + outFile, err := os.Create(filePath) + if err != nil { + os.RemoveAll(tmpDir) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create file: %v", err)) + return + } + + fileReader, _, err := r.FormFile("file") + if err != nil { + outFile.Close() + os.RemoveAll(tmpDir) + s.respondError(w, http.StatusBadRequest, fmt.Sprintf("No file provided: %v", err)) + return + } + + if _, err := io.Copy(outFile, fileReader); err != nil { + fileReader.Close() + outFile.Close() + os.RemoveAll(tmpDir) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to save file: %v", err)) + return + } + fileReader.Close() + outFile.Close() + + if strings.HasSuffix(strings.ToLower(header.Filename), ".blend") { + mainBlendFile = filePath + } + } + + // Create context archive from temporary directory + log.Printf("Creating context archive from temporary directory...") + var excludeFiles []string + if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { + excludeFiles = append(excludeFiles, header.Filename) + } + + // Create context in temp directory (we'll move it to job directory later) + contextPath := filepath.Join(tmpDir, "context.tar.gz") + contextPath, err = s.createContextFromDir(tmpDir, contextPath, excludeFiles...) 
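
`extractTarGz`, called by both metadata helpers in this patch, is referenced but not included in the diff. For orientation while reading its callers, a hedged reconstruction of what such a helper typically does, including the path-traversal ("zip slip") guard any tar extractor needs; the real implementation may differ:

```go
package context // hypothetical package name

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// extractTarGz unpacks archivePath into destDir. Not part of this diff; a
// conventional sketch for reference only.
func extractTarGz(archivePath, destDir string) error {
	f, err := os.Open(archivePath)
	if err != nil {
		return err
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		return err
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// Reject entries that would escape destDir ("zip slip").
		target := filepath.Join(destDir, hdr.Name)
		if !strings.HasPrefix(target, filepath.Clean(destDir)+string(os.PathSeparator)) {
			return fmt.Errorf("archive entry escapes destination: %s", hdr.Name)
		}
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, 0755); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
				return err
			}
			out, err := os.Create(target)
			if err != nil {
				return err
			}
			if _, err := io.Copy(out, tr); err != nil {
				out.Close()
				return err
			}
			out.Close()
		}
	}
}
```
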
+ if err != nil { + os.RemoveAll(tmpDir) + log.Printf("ERROR: Failed to create context archive: %v", err) + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to create context archive: %v", err)) + return + } + log.Printf("Successfully created context archive at %s", contextPath) + + // Extract metadata from context archive + log.Printf("Extracting metadata from context archive...") + metadata, err := s.extractMetadataFromTempContext(contextPath) + if err != nil { + log.Printf("Warning: Failed to extract metadata: %v", err) + // Continue anyway - user can fill in manually + metadata = nil + } + + // Generate a session ID to track this upload + // Store the full temp directory path as session ID for easy lookup + sessionID := tmpDir + + response := map[string]interface{}{ + "session_id": sessionID, // Full temp directory path + "file_name": header.Filename, + "file_size": header.Size, + "context_archive": filepath.Base(contextPath), + } + + if strings.HasSuffix(strings.ToLower(header.Filename), ".zip") { + response["zip_extracted"] = true + response["extracted_files_count"] = len(extractedFiles) + if mainBlendFile != "" { + relPath, _ := filepath.Rel(tmpDir, mainBlendFile) + response["main_blend_file"] = relPath + } + } else if mainBlendFile != "" { + relPath, _ := filepath.Rel(tmpDir, mainBlendFile) + response["main_blend_file"] = relPath + } + + if metadata != nil { + response["metadata"] = metadata + response["metadata_extracted"] = true + } else { + response["metadata_extracted"] = false + } + + s.respondJSON(w, http.StatusOK, response) +} + +// extractMetadataFromTempContext extracts metadata from a context archive in a temporary location +func (s *Server) extractMetadataFromTempContext(contextPath string) (*types.BlendMetadata, error) { + // Create temporary directory for extraction + tmpDir, err := os.MkdirTemp("", "fuego-metadata-temp-*") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + + // Extract context archive + if err := s.extractTarGz(contextPath, tmpDir); err != nil { + return nil, fmt.Errorf("failed to extract context: %w", err) + } + + // Find .blend file + blendFile := "" + err = filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".blend") { + lower := strings.ToLower(info.Name()) + idx := strings.LastIndex(lower, ".blend") + if idx != -1 { + suffix := lower[idx+len(".blend"):] + isSaveFile := false + if len(suffix) > 0 { + isSaveFile = true + for _, r := range suffix { + if r < '0' || r > '9' { + isSaveFile = false + break + } + } + } + if !isSaveFile { + blendFile = path + return filepath.SkipAll + } + } + } + return nil + }) + + if err != nil || blendFile == "" { + return nil, fmt.Errorf("no .blend file found in context") + } + + // Use the same extraction script and process as extractMetadataFromContext + // (Copy the logic from extractMetadataFromContext but use tmpDir and blendFile) + return s.runBlenderMetadataExtraction(blendFile, tmpDir) +} + +// runBlenderMetadataExtraction runs Blender to extract metadata from a blend file +func (s *Server) runBlenderMetadataExtraction(blendFile, workDir string) (*types.BlendMetadata, error) { + // Create Python script (same as in extractMetadataFromContext) + scriptPath := filepath.Join(workDir, "extract_metadata.py") + scriptContent := `import bpy +import json +import sys + +try: + 
bpy.ops.file.make_paths_relative() + print("Made all file paths relative to blend file") +except Exception as e: + print(f"Warning: Could not make paths relative: {e}") + +missing_files_info = { + "checked": False, + "has_missing": False, + "missing_files": [], + "missing_addons": [] +} + +try: + missing = [] + for mod in bpy.context.preferences.addons: + if mod.module.endswith("_missing"): + missing.append(mod.module.rsplit("_", 1)[0]) + + missing_files_info["checked"] = True + if missing: + missing_files_info["has_missing"] = True + missing_files_info["missing_addons"] = missing + print("Missing add-ons required by this .blend:") + for name in missing: + print(" -", name) + else: + print("No missing add-ons detected – file is headless-safe") +except Exception as e: + print(f"Warning: Could not check for missing addons: {e}") + missing_files_info["error"] = str(e) + +scene = bpy.context.scene +frame_start = scene.frame_start +frame_end = scene.frame_end + +animation_start = None +animation_end = None + +for obj in scene.objects: + if obj.animation_data and obj.animation_data.action: + action = obj.animation_data.action + if action.fcurves: + for fcurve in action.fcurves: + if fcurve.keyframe_points: + for keyframe in fcurve.keyframe_points: + frame = int(keyframe.co[0]) + if animation_start is None or frame < animation_start: + animation_start = frame + if animation_end is None or frame > animation_end: + animation_end = frame + +if animation_start is not None and animation_end is not None: + if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end): + frame_start = animation_start + frame_end = animation_end + +render = scene.render +resolution_x = render.resolution_x +resolution_y = render.resolution_y +engine = scene.render.engine.upper() +output_format = render.image_settings.file_format + +engine_settings = {} + +if engine == 'CYCLES': + cycles = scene.cycles + engine_settings = { + "samples": getattr(cycles, 'samples', 128), + "use_denoising": getattr(cycles, 'use_denoising', False), + "denoising_radius": getattr(cycles, 'denoising_radius', 0), + "denoising_strength": getattr(cycles, 'denoising_strength', 0.0), + "device": getattr(cycles, 'device', 'CPU'), + "use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False), + "adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01, + "use_fast_gi": getattr(cycles, 'use_fast_gi', False), + "light_tree": getattr(cycles, 'use_light_tree', False), + "use_light_linking": getattr(cycles, 'use_light_linking', False), + "caustics_reflective": getattr(cycles, 'caustics_reflective', False), + "caustics_refractive": getattr(cycles, 'caustics_refractive', False), + "blur_glossy": getattr(cycles, 'blur_glossy', 0.0), + "max_bounces": getattr(cycles, 'max_bounces', 12), + "diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4), + "glossy_bounces": getattr(cycles, 'glossy_bounces', 4), + "transmission_bounces": getattr(cycles, 'transmission_bounces', 12), + "volume_bounces": getattr(cycles, 'volume_bounces', 0), + "transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8), + "film_transparent": getattr(cycles, 'film_transparent', False), + "use_layer_samples": getattr(cycles, 'use_layer_samples', False), + } +elif engine == 'EEVEE' or engine == 'EEVEE_NEXT': + eevee = scene.eevee + engine_settings = { + "taa_render_samples": getattr(eevee, 'taa_render_samples', 64), + "use_bloom": getattr(eevee, 'use_bloom', False), + 
"bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8), + "bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05), + "bloom_radius": getattr(eevee, 'bloom_radius', 6.5), + "use_ssr": getattr(eevee, 'use_ssr', True), + "use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False), + "ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'), + "use_ssao": getattr(eevee, 'use_ssao', True), + "ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'), + "ssao_distance": getattr(eevee, 'ssao_distance', 0.2), + "ssao_factor": getattr(eevee, 'ssao_factor', 1.0), + "use_soft_shadows": getattr(eevee, 'use_soft_shadows', True), + "use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True), + "use_volumetric": getattr(eevee, 'use_volumetric', False), + "volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'), + "volumetric_samples": getattr(eevee, 'volumetric_samples', 64), + "volumetric_start": getattr(eevee, 'volumetric_start', 0.0), + "volumetric_end": getattr(eevee, 'volumetric_end', 100.0), + "use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True), + "use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True), + "use_gtao": getattr(eevee, 'use_gtao', False), + "gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'), + "use_overscan": getattr(eevee, 'use_overscan', False), + } +else: + engine_settings = { + "samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128 + } + +camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA']) +object_count = len(scene.objects) +material_count = len(bpy.data.materials) + +metadata = { + "frame_start": frame_start, + "frame_end": frame_end, + "render_settings": { + "resolution_x": resolution_x, + "resolution_y": resolution_y, + "output_format": output_format, + "engine": engine.lower(), + "engine_settings": engine_settings + }, + "scene_info": { + "camera_count": camera_count, + "object_count": object_count, + "material_count": material_count + }, + "missing_files_info": missing_files_info +} + +print(json.dumps(metadata)) +sys.stdout.flush() +` + + if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil { + return nil, fmt.Errorf("failed to create extraction script: %w", err) + } + + // Execute Blender + cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath) + cmd.Dir = workDir + + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stdout pipe: %w", err) + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stderr pipe: %w", err) + } + + var stdoutBuffer bytes.Buffer + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start blender: %w", err) + } + + stdoutDone := make(chan bool) + go func() { + defer close(stdoutDone) + scanner := bufio.NewScanner(stdoutPipe) + for scanner.Scan() { + line := scanner.Text() + stdoutBuffer.WriteString(line) + stdoutBuffer.WriteString("\n") + } + }() + + stderrDone := make(chan bool) + go func() { + defer close(stderrDone) + scanner := bufio.NewScanner(stderrPipe) + for scanner.Scan() { + _ = scanner.Text() + } + }() + + err = cmd.Wait() + <-stdoutDone + <-stderrDone + + if err != nil { + return nil, fmt.Errorf("blender metadata extraction failed: %w", err) + } + + metadataJSON := strings.TrimSpace(stdoutBuffer.String()) + jsonStart := strings.Index(metadataJSON, "{") + jsonEnd := strings.LastIndex(metadataJSON, "}") + if jsonStart == -1 || jsonEnd == -1 || jsonEnd <= 
jsonStart { + return nil, errors.New("failed to extract JSON from Blender output") + } + metadataJSON = metadataJSON[jsonStart : jsonEnd+1] + + var metadata types.BlendMetadata + if err := json.Unmarshal([]byte(metadataJSON), &metadata); err != nil { + return nil, fmt.Errorf("failed to parse metadata JSON: %w", err) + } + + log.Printf("Metadata extracted: frame_start=%d, frame_end=%d", metadata.FrameStart, metadata.FrameEnd) + return &metadata, nil +} + +// createContextFromDir creates a context archive from a source directory to a specific destination path +func (s *Server) createContextFromDir(sourceDir, destPath string, excludeFiles ...string) (string, error) { + // Build set of files to exclude + excludeSet := make(map[string]bool) + for _, excludeFile := range excludeFiles { + excludePath := filepath.Clean(excludeFile) + excludeSet[excludePath] = true + excludeSet[filepath.ToSlash(excludePath)] = true + } + + // Collect all files from source directory + var filesToInclude []string + err := filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + // Skip Blender save files + lower := strings.ToLower(info.Name()) + idx := strings.LastIndex(lower, ".blend") + if idx != -1 { + suffix := lower[idx+len(".blend"):] + if len(suffix) > 0 { + isSaveFile := true + for _, r := range suffix { + if r < '0' || r > '9' { + isSaveFile = false + break + } + } + if isSaveFile { + return nil + } + } + } + + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + cleanRelPath := filepath.Clean(relPath) + if strings.HasPrefix(cleanRelPath, "..") { + return fmt.Errorf("invalid file path: %s", relPath) + } + + if excludeSet[cleanRelPath] || excludeSet[filepath.ToSlash(cleanRelPath)] { + return nil + } + + filesToInclude = append(filesToInclude, path) + return nil + }) + if err != nil { + return "", fmt.Errorf("failed to walk source directory: %w", err) + } + + if len(filesToInclude) == 0 { + return "", fmt.Errorf("no files found to include in context archive") + } + + // Collect relative paths to find common prefix + relPaths := make([]string, 0, len(filesToInclude)) + for _, filePath := range filesToInclude { + relPath, err := filepath.Rel(sourceDir, filePath) + if err != nil { + return "", fmt.Errorf("failed to get relative path: %w", err) + } + relPaths = append(relPaths, relPath) + } + + // Find and strip common leading directory + commonPrefix := "" + if len(relPaths) > 0 { + firstComponents := make([]string, 0, len(relPaths)) + for _, path := range relPaths { + parts := strings.Split(filepath.ToSlash(path), "/") + if len(parts) > 0 && parts[0] != "" { + firstComponents = append(firstComponents, parts[0]) + } else { + firstComponents = nil + break + } + } + if len(firstComponents) > 0 { + commonFirst := firstComponents[0] + allSame := true + for _, comp := range firstComponents { + if comp != commonFirst { + allSame = false + break + } + } + if allSame { + commonPrefix = commonFirst + "/" + } + } + } + + // Validate single .blend file at root + blendFilesAtRoot := 0 + for _, relPath := range relPaths { + tarPath := filepath.ToSlash(relPath) + if commonPrefix != "" && strings.HasPrefix(tarPath, commonPrefix) { + tarPath = strings.TrimPrefix(tarPath, commonPrefix) + } + if strings.HasSuffix(strings.ToLower(tarPath), ".blend") && !strings.Contains(tarPath, "/") { + blendFilesAtRoot++ + } + } + + if blendFilesAtRoot == 0 { + return "", fmt.Errorf("no .blend file found at root level 
in context archive") + } + if blendFilesAtRoot > 1 { + return "", fmt.Errorf("multiple .blend files found at root level in context archive (found %d, expected 1)", blendFilesAtRoot) + } + + // Create the tar.gz file + contextFile, err := os.Create(destPath) + if err != nil { + return "", fmt.Errorf("failed to create context file: %w", err) + } + defer contextFile.Close() + + gzWriter := gzip.NewWriter(contextFile) + defer gzWriter.Close() + + tarWriter := tar.NewWriter(gzWriter) + defer tarWriter.Close() + + // Add each file to the tar archive + for i, filePath := range filesToInclude { + file, err := os.Open(filePath) + if err != nil { + return "", fmt.Errorf("failed to open file: %w", err) + } + + info, err := file.Stat() + if err != nil { + file.Close() + return "", fmt.Errorf("failed to stat file: %w", err) + } + + relPath := relPaths[i] + tarPath := filepath.ToSlash(relPath) + if commonPrefix != "" && strings.HasPrefix(tarPath, commonPrefix) { + tarPath = strings.TrimPrefix(tarPath, commonPrefix) + } + + header, err := tar.FileInfoHeader(info, "") + if err != nil { + file.Close() + return "", fmt.Errorf("failed to create tar header: %w", err) + } + header.Name = tarPath + + if err := tarWriter.WriteHeader(header); err != nil { + file.Close() + return "", fmt.Errorf("failed to write tar header: %w", err) + } + + if _, err := io.Copy(tarWriter, file); err != nil { + file.Close() + return "", fmt.Errorf("failed to write file to tar: %w", err) + } + + file.Close() + } + + if err := tarWriter.Close(); err != nil { + return "", fmt.Errorf("failed to close tar writer: %w", err) + } + if err := gzWriter.Close(); err != nil { + return "", fmt.Errorf("failed to close gzip writer: %w", err) + } + if err := contextFile.Close(); err != nil { + return "", fmt.Errorf("failed to close context file: %w", err) + } + + return destPath, nil +} + // handleListJobFiles lists files for a job func (s *Server) handleListJobFiles(w http.ResponseWriter, r *http.Request) { userID, err := getUserID(r) @@ -1157,6 +1742,93 @@ func (s *Server) handleListJobFiles(w http.ResponseWriter, r *http.Request) { s.respondJSON(w, http.StatusOK, files) } +// handleListContextArchive lists files inside the context archive +func (s *Server) handleListContextArchive(w http.ResponseWriter, r *http.Request) { + userID, err := getUserID(r) + if err != nil { + s.respondError(w, http.StatusUnauthorized, err.Error()) + return + } + + jobID, err := parseID(r, "id") + if err != nil { + s.respondError(w, http.StatusBadRequest, err.Error()) + return + } + + // Verify job belongs to user (unless admin) + isAdmin := isAdminUser(r) + if !isAdmin { + var jobUserID int64 + err = s.db.QueryRow("SELECT user_id FROM jobs WHERE id = ?", jobID).Scan(&jobUserID) + if err == sql.ErrNoRows { + s.respondError(w, http.StatusNotFound, "Job not found") + return + } + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to verify job: %v", err)) + return + } + if jobUserID != userID { + s.respondError(w, http.StatusForbidden, "Access denied") + return + } + } + + // Get context archive path + contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz") + if !s.storage.FileExists(contextPath) { + s.respondError(w, http.StatusNotFound, "Context archive not found") + return + } + + // Read the tar.gz and list its contents + file, err := s.storage.GetFile(contextPath) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to open context archive: %v", err)) + return + } + defer 
file.Close() + + gzReader, err := gzip.NewReader(file) + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read context archive: %v", err)) + return + } + defer gzReader.Close() + + tarReader := tar.NewReader(gzReader) + + type ArchiveFile struct { + Name string `json:"name"` + Size int64 `json:"size"` + Path string `json:"path"` + } + + var archiveFiles []ArchiveFile + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read archive: %v", err)) + return + } + + // Only include regular files (not directories) + if header.Typeflag == tar.TypeReg { + archiveFiles = append(archiveFiles, ArchiveFile{ + Name: filepath.Base(header.Name), + Size: header.Size, + Path: header.Name, + }) + } + } + + s.respondJSON(w, http.StatusOK, archiveFiles) +} + // handleDownloadJobFile downloads a job file func (s *Server) handleDownloadJobFile(w http.ResponseWriter, r *http.Request) { userID, err := getUserID(r) diff --git a/internal/api/metadata.go b/internal/api/metadata.go index ce910c1..ab3d0ba 100644 --- a/internal/api/metadata.go +++ b/internal/api/metadata.go @@ -1,11 +1,21 @@ package api import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" "database/sql" "encoding/json" + "errors" "fmt" + "io" "log" "net/http" + "os" + "os/exec" + "path/filepath" + "strings" "jiggablend/pkg/types" ) @@ -156,3 +166,375 @@ func (s *Server) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) { s.respondJSON(w, http.StatusOK, metadata) } +// extractMetadataFromContext extracts metadata from the blend file in a context archive +// Returns the extracted metadata or an error +func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata, error) { + contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz") + + // Check if context exists + if _, err := os.Stat(contextPath); err != nil { + return nil, fmt.Errorf("context archive not found: %w", err) + } + + // Create temporary directory for extraction + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-metadata-%d-*", jobID)) + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + + // Extract context archive + if err := s.extractTarGz(contextPath, tmpDir); err != nil { + return nil, fmt.Errorf("failed to extract context: %w", err) + } + + // Find .blend file in extracted contents + blendFile := "" + err = filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".blend") { + // Check it's not a Blender save file (.blend1, .blend2, etc.) 
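
The digit-suffix test used in the next few lines also appears in `extractMetadataFromTempContext` and `createContextFromDir`; it could be factored into a shared helper. A sketch, with the name chosen for illustration:

```go
package context // hypothetical package name

import "strings"

// isBlendSaveFile reports whether name looks like a Blender backup save
// (.blend followed by one or more digits, e.g. scene.blend1) rather than a
// primary .blend file. Mirrors the inline checks repeated in this patch.
func isBlendSaveFile(name string) bool {
	lower := strings.ToLower(name)
	idx := strings.LastIndex(lower, ".blend")
	if idx == -1 {
		return false
	}
	suffix := lower[idx+len(".blend"):]
	if suffix == "" {
		return false // plain .blend, not a save file
	}
	for _, r := range suffix {
		if r < '0' || r > '9' {
			return false
		}
	}
	return true
}
```
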
+ lower := strings.ToLower(info.Name()) + idx := strings.LastIndex(lower, ".blend") + if idx != -1 { + suffix := lower[idx+len(".blend"):] + // If there are digits after .blend, it's a save file + isSaveFile := false + if len(suffix) > 0 { + isSaveFile = true + for _, r := range suffix { + if r < '0' || r > '9' { + isSaveFile = false + break + } + } + } + if !isSaveFile { + blendFile = path + return filepath.SkipAll // Stop walking once we find a blend file + } + } + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to find blend file: %w", err) + } + + if blendFile == "" { + return nil, fmt.Errorf("no .blend file found in context") + } + + // Create Python script to extract metadata + scriptPath := filepath.Join(tmpDir, "extract_metadata.py") + scriptContent := `import bpy +import json +import sys + +# Make all file paths relative to the blend file location FIRST +# This must be done immediately after file load, before any other operations +# to prevent Blender from trying to access external files with absolute paths +try: + bpy.ops.file.make_paths_relative() + print("Made all file paths relative to blend file") +except Exception as e: + print(f"Warning: Could not make paths relative: {e}") + +# Check for missing addons that the blend file requires +# Blender marks missing addons with "_missing" suffix in preferences +missing_files_info = { + "checked": False, + "has_missing": False, + "missing_files": [], + "missing_addons": [] +} + +try: + missing = [] + for mod in bpy.context.preferences.addons: + if mod.module.endswith("_missing"): + missing.append(mod.module.rsplit("_", 1)[0]) + + missing_files_info["checked"] = True + if missing: + missing_files_info["has_missing"] = True + missing_files_info["missing_addons"] = missing + print("Missing add-ons required by this .blend:") + for name in missing: + print(" -", name) + else: + print("No missing add-ons detected – file is headless-safe") +except Exception as e: + print(f"Warning: Could not check for missing addons: {e}") + missing_files_info["error"] = str(e) + +# Get scene +scene = bpy.context.scene + +# Extract frame range from scene settings +frame_start = scene.frame_start +frame_end = scene.frame_end + +# Also check for actual animation range (keyframes) +# Find the earliest and latest keyframes across all objects +animation_start = None +animation_end = None + +for obj in scene.objects: + if obj.animation_data and obj.animation_data.action: + action = obj.animation_data.action + if action.fcurves: + for fcurve in action.fcurves: + if fcurve.keyframe_points: + for keyframe in fcurve.keyframe_points: + frame = int(keyframe.co[0]) + if animation_start is None or frame < animation_start: + animation_start = frame + if animation_end is None or frame > animation_end: + animation_end = frame + +# Use animation range if available, otherwise use scene frame range +# If scene range seems wrong (start == end), prefer animation range +if animation_start is not None and animation_end is not None: + if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end): + # Use animation range if scene range is invalid or animation extends beyond it + frame_start = animation_start + frame_end = animation_end + +# Extract render settings +render = scene.render +resolution_x = render.resolution_x +resolution_y = render.resolution_y +engine = scene.render.engine.upper() + +# Determine output format from file format +output_format = render.image_settings.file_format + +# Extract engine-specific settings 
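+# NOTE: scene.render.engine reports identifiers such as 'CYCLES',
+# 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT' or 'BLENDER_WORKBENCH', so the
+# EEVEE branch below accepts both the short and 'BLENDER_'-prefixed names.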
+engine_settings = {}
+
+if engine == 'CYCLES':
+    cycles = scene.cycles
+    engine_settings = {
+        "samples": getattr(cycles, 'samples', 128),
+        "use_denoising": getattr(cycles, 'use_denoising', False),
+        "denoising_radius": getattr(cycles, 'denoising_radius', 0),
+        "denoising_strength": getattr(cycles, 'denoising_strength', 0.0),
+        "device": getattr(cycles, 'device', 'CPU'),
+        "use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False),
+        "adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01,
+        "use_fast_gi": getattr(cycles, 'use_fast_gi', False),
+        "light_tree": getattr(cycles, 'use_light_tree', False),
+        "use_light_linking": getattr(cycles, 'use_light_linking', False),
+        "caustics_reflective": getattr(cycles, 'caustics_reflective', False),
+        "caustics_refractive": getattr(cycles, 'caustics_refractive', False),
+        "blur_glossy": getattr(cycles, 'blur_glossy', 0.0),
+        "max_bounces": getattr(cycles, 'max_bounces', 12),
+        "diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
+        "glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
+        "transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
+        "volume_bounces": getattr(cycles, 'volume_bounces', 0),
+        "transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
+        "film_transparent": getattr(cycles, 'film_transparent', False),
+        "use_layer_samples": getattr(cycles, 'use_layer_samples', False),
+    }
+elif engine in ('EEVEE', 'EEVEE_NEXT', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'):
+    eevee = scene.eevee
+    engine_settings = {
+        "taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
+        "use_bloom": getattr(eevee, 'use_bloom', False),
+        "bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
+        "bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
+        "bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
+        "use_ssr": getattr(eevee, 'use_ssr', True),
+        "use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
+        "ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'),
+        "use_ssao": getattr(eevee, 'use_ssao', True),
+        "ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'),
+        "ssao_distance": getattr(eevee, 'ssao_distance', 0.2),
+        "ssao_factor": getattr(eevee, 'ssao_factor', 1.0),
+        "use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
+        "use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True),
+        "use_volumetric": getattr(eevee, 'use_volumetric', False),
+        "volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
+        "volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
+        "volumetric_start": getattr(eevee, 'volumetric_start', 0.0),
+        "volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
+        "use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
+        "use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True),
+        "use_gtao": getattr(eevee, 'use_gtao', False),
+        "gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'),
+        "use_overscan": getattr(eevee, 'use_overscan', False),
+    }
+else:
+    # For other engines, extract basic samples if available
+    engine_settings = {
+        "samples": getattr(scene, 'samples', 128)
+    }
+
+# Extract scene info
+camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA'])
+object_count = len(scene.objects)
+material_count = len(bpy.data.materials)
+
+# Build metadata dictionary
+metadata = {
+    "frame_start": frame_start,
+    "frame_end": frame_end,
+    "render_settings": {
+        "resolution_x": resolution_x,
+
"resolution_y": resolution_y, + "output_format": output_format, + "engine": engine.lower(), + "engine_settings": engine_settings + }, + "scene_info": { + "camera_count": camera_count, + "object_count": object_count, + "material_count": material_count + }, + "missing_files_info": missing_files_info +} + +# Output as JSON +print(json.dumps(metadata)) +sys.stdout.flush() +` + + if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil { + return nil, fmt.Errorf("failed to create extraction script: %w", err) + } + + // Execute Blender with Python script + cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath) + cmd.Dir = tmpDir + + // Capture stdout and stderr + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stdout pipe: %w", err) + } + + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stderr pipe: %w", err) + } + + // Buffer to collect stdout for JSON parsing + var stdoutBuffer bytes.Buffer + + // Start the command + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start blender: %w", err) + } + + // Stream stdout and collect for JSON parsing + stdoutDone := make(chan bool) + go func() { + defer close(stdoutDone) + scanner := bufio.NewScanner(stdoutPipe) + for scanner.Scan() { + line := scanner.Text() + stdoutBuffer.WriteString(line) + stdoutBuffer.WriteString("\n") + } + }() + + // Stream stderr (discard for now, but could log if needed) + stderrDone := make(chan bool) + go func() { + defer close(stderrDone) + scanner := bufio.NewScanner(stderrPipe) + for scanner.Scan() { + // Could log stderr if needed + _ = scanner.Text() + } + }() + + // Wait for command to complete + err = cmd.Wait() + + // Wait for streaming goroutines to finish + <-stdoutDone + <-stderrDone + + if err != nil { + return nil, fmt.Errorf("blender metadata extraction failed: %w", err) + } + + // Parse output (metadata is printed to stdout) + metadataJSON := strings.TrimSpace(stdoutBuffer.String()) + // Extract JSON from output (Blender may print other stuff) + jsonStart := strings.Index(metadataJSON, "{") + jsonEnd := strings.LastIndex(metadataJSON, "}") + if jsonStart == -1 || jsonEnd == -1 || jsonEnd <= jsonStart { + return nil, errors.New("failed to extract JSON from Blender output") + } + metadataJSON = metadataJSON[jsonStart : jsonEnd+1] + + var metadata types.BlendMetadata + if err := json.Unmarshal([]byte(metadataJSON), &metadata); err != nil { + return nil, fmt.Errorf("failed to parse metadata JSON: %w", err) + } + + log.Printf("Metadata extracted for job %d: frame_start=%d, frame_end=%d", jobID, metadata.FrameStart, metadata.FrameEnd) + return &metadata, nil +} + +// extractTarGz extracts a tar.gz archive to a destination directory +func (s *Server) extractTarGz(tarGzPath, destDir string) error { + file, err := os.Open(tarGzPath) + if err != nil { + return fmt.Errorf("failed to open archive: %w", err) + } + defer file.Close() + + gzr, err := gzip.NewReader(file) + if err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("failed to read tar header: %w", err) + } + + // Sanitize path to prevent directory traversal + target := filepath.Join(destDir, header.Name) + // Ensure target is within destDir + if !strings.HasPrefix(filepath.Clean(target), 
filepath.Clean(destDir)+string(os.PathSeparator)) { + return fmt.Errorf("invalid file path in archive: %s", header.Name) + } + + // Create parent directories + if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + + // Write file + if header.Typeflag == tar.TypeReg { + outFile, err := os.Create(target) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + if _, err := io.Copy(outFile, tr); err != nil { + outFile.Close() + return fmt.Errorf("failed to write file: %w", err) + } + outFile.Close() + } + } + + return nil +} + diff --git a/internal/api/runners.go b/internal/api/runners.go index 68caab3..e5015c1 100644 --- a/internal/api/runners.go +++ b/internal/api/runners.go @@ -17,7 +17,6 @@ import ( "jiggablend/pkg/types" - "github.com/go-chi/chi/v5" "github.com/gorilla/websocket" ) @@ -294,51 +293,37 @@ func (s *Server) handleUpdateTaskStep(w http.ResponseWriter, r *http.Request) { }) } -// handleDownloadFileForRunner allows runners to download job files -func (s *Server) handleDownloadFileForRunner(w http.ResponseWriter, r *http.Request) { +// handleDownloadJobContext allows runners to download the job context tar.gz +func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request) { jobID, err := parseID(r, "jobId") if err != nil { s.respondError(w, http.StatusBadRequest, err.Error()) return } - // Get the file path from the wildcard parameter (supports subdirectories) - filePathParam := chi.URLParam(r, "*") - if filePathParam == "" { - s.respondError(w, http.StatusBadRequest, "File path not specified") - return - } - // Remove leading slash if present - filePathParam = strings.TrimPrefix(filePathParam, "/") + // Construct the context file path + contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz") - // Find the file in the database by matching file_name (which stores relative path) - var filePath string - var storedFileName string - err = s.db.QueryRow( - `SELECT file_path, file_name FROM job_files WHERE job_id = ? AND file_name = ?`, - jobID, filePathParam, - ).Scan(&filePath, &storedFileName) - if err == sql.ErrNoRows { - s.respondError(w, http.StatusNotFound, "File not found") - return - } - if err != nil { - s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query file: %v", err)) + // Check if context file exists + if !s.storage.FileExists(contextPath) { + log.Printf("Context archive not found for job %d", jobID) + s.respondError(w, http.StatusNotFound, "Context archive not found. 
The file may not have been uploaded successfully.") return } // Open and serve file - file, err := s.storage.GetFile(filePath) + file, err := s.storage.GetFile(contextPath) if err != nil { - s.respondError(w, http.StatusNotFound, "File not found on disk") + s.respondError(w, http.StatusNotFound, "Context file not found on disk") return } defer file.Close() - // Use the stored file name for the download (preserves original filename) - downloadFileName := filepath.Base(storedFileName) - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", downloadFileName)) + // Set appropriate headers for tar.gz file + w.Header().Set("Content-Type", "application/gzip") + w.Header().Set("Content-Disposition", "attachment; filename=context.tar.gz") + + // Stream the file to the response io.Copy(w, file) } @@ -488,6 +473,43 @@ func (s *Server) handleGetJobFilesForRunner(w http.ResponseWriter, r *http.Reque s.respondJSON(w, http.StatusOK, files) } +// handleGetJobMetadataForRunner allows runners to get job metadata +func (s *Server) handleGetJobMetadataForRunner(w http.ResponseWriter, r *http.Request) { + jobID, err := parseID(r, "jobId") + if err != nil { + s.respondError(w, http.StatusBadRequest, err.Error()) + return + } + + var blendMetadataJSON sql.NullString + err = s.db.QueryRow( + `SELECT blend_metadata FROM jobs WHERE id = ?`, + jobID, + ).Scan(&blendMetadataJSON) + + if err == sql.ErrNoRows { + s.respondError(w, http.StatusNotFound, "Job not found") + return + } + if err != nil { + s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query job: %v", err)) + return + } + + if !blendMetadataJSON.Valid || blendMetadataJSON.String == "" { + s.respondJSON(w, http.StatusOK, nil) + return + } + + var metadata types.BlendMetadata + if err := json.Unmarshal([]byte(blendMetadataJSON.String), &metadata); err != nil { + s.respondError(w, http.StatusInternalServerError, "Failed to parse metadata") + return + } + + s.respondJSON(w, http.StatusOK, metadata) +} + // WebSocket message types type WSMessage struct { Type string `json:"type"` @@ -1020,7 +1042,7 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) { log.Printf("Updated job %d status to %s (progress: %.1f%%, completed tasks: %d/%d)", jobID, jobStatus, progress, completedTasks, totalTasks) } - if outputFormatStr == "MP4" { + if outputFormatStr == "EXR_264_MP4" || outputFormatStr == "EXR_AV1_MP4" { // Check if a video generation task already exists for this job (any status) var existingVideoTask int s.db.QueryRow( @@ -1603,6 +1625,9 @@ func (s *Server) assignTaskToRunner(runnerID int64, taskID int64) error { task.JobName = jobName if outputFormat.Valid { task.OutputFormat = outputFormat.String + log.Printf("Task %d assigned with output_format: '%s' (from job %d)", taskID, outputFormat.String, task.JobID) + } else { + log.Printf("Task %d assigned with no output_format (job %d)", taskID, task.JobID) } task.TaskType = taskType diff --git a/internal/api/server.go b/internal/api/server.go index ff27c75..ff0dafe 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -116,12 +116,14 @@ func (s *Server) setupRoutes() { return http.HandlerFunc(s.auth.Middleware(next.ServeHTTP)) }) r.Post("/", s.handleCreateJob) + r.Post("/upload", s.handleUploadFileForJobCreation) // Upload before job creation r.Get("/", s.handleListJobs) r.Get("/{id}", s.handleGetJob) r.Delete("/{id}", s.handleCancelJob) r.Post("/{id}/delete", s.handleDeleteJob) 
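+			// The /{id}/context route registered below returns a JSON array of
+			// {name, size, path} entries read from the job's context.tar.gz
+			// (see handleListContextArchive). Hypothetical usage sketch - the
+			// base path, port, and token are assumptions, not part of this diff:
+			//   curl -H "Authorization: Bearer $TOKEN" \
+			//        http://localhost:8080/api/jobs/42/context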
r.Post("/{id}/upload", s.handleUploadJobFile) r.Get("/{id}/files", s.handleListJobFiles) + r.Get("/{id}/context", s.handleListContextArchive) r.Get("/{id}/files/{fileId}/download", s.handleDownloadJobFile) r.Get("/{id}/video", s.handleStreamVideo) r.Get("/{id}/metadata", s.handleGetJobMetadata) @@ -179,10 +181,11 @@ func (s *Server) setupRoutes() { }) r.Post("/tasks/{id}/progress", s.handleUpdateTaskProgress) r.Post("/tasks/{id}/steps", s.handleUpdateTaskStep) - r.Get("/files/{jobId}/*", s.handleDownloadFileForRunner) + r.Get("/jobs/{jobId}/context.tar.gz", s.handleDownloadJobContext) r.Post("/files/{jobId}/upload", s.handleUploadFileFromRunner) r.Get("/jobs/{jobId}/status", s.handleGetJobStatusForRunner) r.Get("/jobs/{jobId}/files", s.handleGetJobFilesForRunner) + r.Get("/jobs/{jobId}/metadata", s.handleGetJobMetadataForRunner) r.Post("/jobs/{jobId}/metadata", s.handleSubmitMetadata) }) }) @@ -508,7 +511,7 @@ func parseID(r *http.Request, param string) (int64, error) { // StartBackgroundTasks starts background goroutines for error recovery func (s *Server) StartBackgroundTasks() { go s.recoverStuckTasks() - go s.cleanupOldMetadataJobs() + go s.cleanupOldRenderJobs() } // recoverStuckTasks periodically checks for dead runners and stuck tasks diff --git a/internal/runner/client.go b/internal/runner/client.go index f2a8d91..bfc3fee 100644 --- a/internal/runner/client.go +++ b/internal/runner/client.go @@ -1,8 +1,10 @@ package runner import ( + "archive/tar" "bufio" "bytes" + "compress/gzip" "encoding/json" "errors" "fmt" @@ -951,7 +953,7 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output frameEnd := int(task["frame_end"].(float64)) c.sendLog(taskID, types.LogLevelInfo, fmt.Sprintf("Starting task: job %d, frames %d-%d, format: %s", jobID, frameStart, frameEnd, outputFormat), "") - log.Printf("Processing task %d: job %d, frames %d-%d, format: %s", taskID, jobID, frameStart, frameEnd, outputFormat) + log.Printf("Processing task %d: job %d, frames %d-%d, format: %s (from task assignment)", taskID, jobID, frameStart, frameEnd, outputFormat) // Create temporary job workspace within runner workspace workDir := filepath.Join(c.getWorkspaceDir(), fmt.Sprintf("job-%d-task-%d", jobID, taskID)) @@ -962,49 +964,80 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output // Step: download c.sendStepUpdate(taskID, "download", types.StepStatusRunning, "") - c.sendLog(taskID, types.LogLevelInfo, "Downloading input files...", "download") + c.sendLog(taskID, types.LogLevelInfo, "Downloading job context...", "download") + + // Clean up expired cache entries periodically + c.cleanupExpiredContextCache() + + // Download context tar.gz + contextPath := filepath.Join(workDir, "context.tar.gz") + if err := c.downloadJobContext(jobID, contextPath); err != nil { + c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) + return fmt.Errorf("failed to download context: %w", err) + } + + // Extract context tar.gz + c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download") + if err := c.extractTarGz(contextPath, workDir); err != nil { + c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) + return fmt.Errorf("failed to extract context: %w", err) + } + + // Find .blend file in extracted contents blendFile := "" - for _, filePath := range inputFiles { - filePathStr := filePath.(string) - // Preserve directory structure when downloading (for ZIP-extracted files) - // Extract relative path from storage path 
(format: storage/jobs/{jobID}/...) - relPath := filePathStr - if strings.Contains(filePathStr, "/jobs/") { - parts := strings.Split(filePathStr, "/jobs/") - if len(parts) > 1 { - // Get path after /jobs/{jobID}/ - jobPathParts := strings.SplitN(parts[1], "/", 2) - if len(jobPathParts) > 1 { - relPath = jobPathParts[1] - } else { - relPath = jobPathParts[0] + err := filepath.Walk(workDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".blend") { + // Check it's not a Blender save file (.blend1, .blend2, etc.) + lower := strings.ToLower(info.Name()) + idx := strings.LastIndex(lower, ".blend") + if idx != -1 { + suffix := lower[idx+len(".blend"):] + // If there are digits after .blend, it's a save file + isSaveFile := false + if len(suffix) > 0 { + isSaveFile = true + for _, r := range suffix { + if r < '0' || r > '9' { + isSaveFile = false + break + } + } + } + if !isSaveFile { + blendFile = path + return filepath.SkipAll // Stop walking once we find a blend file } } } + return nil + }) - destPath := filepath.Join(workDir, relPath) - destDir := filepath.Dir(destPath) - if err := os.MkdirAll(destDir, 0755); err != nil { - c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) - return fmt.Errorf("failed to create directory for file %s: %w", filePathStr, err) - } - - if err := c.downloadFileToPath(filePathStr, destPath); err != nil { - c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) - return fmt.Errorf("failed to download file %s: %w", filePathStr, err) - } - if filepath.Ext(filePathStr) == ".blend" { - blendFile = destPath - } + if err != nil { + c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) + return fmt.Errorf("failed to find blend file: %w", err) } if blendFile == "" { - err := fmt.Errorf("no .blend file found in input files") + err := fmt.Errorf("no .blend file found in context") c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) return err } + c.sendStepUpdate(taskID, "download", types.StepStatusCompleted, "") - c.sendLog(taskID, types.LogLevelInfo, "Input files downloaded successfully", "download") + c.sendLog(taskID, types.LogLevelInfo, "Context downloaded and extracted successfully", "download") + + // Fetch job metadata to get render settings + var jobMetadata *types.BlendMetadata + metadata, err := c.getJobMetadata(jobID) + if err == nil && metadata != nil { + jobMetadata = metadata + c.sendLog(taskID, types.LogLevelInfo, "Loaded render settings from job metadata", "render_blender") + } else { + c.sendLog(taskID, types.LogLevelInfo, "No render settings found in job metadata, using blend file defaults", "render_blender") + } // Render frames outputDir := filepath.Join(workDir, "output") @@ -1012,10 +1045,10 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output return fmt.Errorf("failed to create output directory: %w", err) } - // For MP4, render as PNG first, then combine into video + // For EXR_264_MP4 and EXR_AV1_MP4, render as EXR (OpenEXR) first for highest fidelity, then combine into video renderFormat := outputFormat - if outputFormat == "MP4" { - renderFormat = "PNG" + if outputFormat == "EXR_264_MP4" || outputFormat == "EXR_AV1_MP4" { + renderFormat = "EXR" // Use EXR for maximum quality (32-bit float, HDR) } // Blender uses # characters for frame number placeholders (not %04d) @@ -1039,23 +1072,203 @@ func (c *Client) processTask(task 
map[string]interface{}, jobName string, output return errors.New(errMsg) } - // Respect blend file settings but prefer GPU if available, fallback to CPU - // This preserves the blend file's render settings (engine, samples, etc.) but optimizes device selection - scriptContent := ` -import bpy + // Override output format and render settings from job submission + // For MP4, we render as EXR (handled above) for highest fidelity, so renderFormat is already EXR + // This script will override the blend file's settings based on job metadata + formatFilePath := filepath.Join(workDir, "output_format.txt") + renderSettingsFilePath := filepath.Join(workDir, "render_settings.json") + scriptContent := fmt.Sprintf(`import bpy import sys +import os +import json + +# Make all file paths relative to the blend file location FIRST +# This must be done immediately after file load, before any other operations +# to prevent Blender from trying to access external files with absolute paths +try: + bpy.ops.file.make_paths_relative() + print("Made all file paths relative to blend file") +except Exception as e: + print(f"Warning: Could not make paths relative: {e}") + +# Check for missing addons that the blend file requires +# Blender marks missing addons with "_missing" suffix in preferences +missing = [] +try: + for mod in bpy.context.preferences.addons: + if mod.module.endswith("_missing"): + missing.append(mod.module.rsplit("_", 1)[0]) + + if missing: + print("Missing add-ons required by this .blend:") + for name in missing: + print(" -", name) + else: + print("No missing add-ons detected – file is headless-safe") +except Exception as e: + print(f"Warning: Could not check for missing addons: {e}") + +# Fix objects and collections hidden from render +vl = bpy.context.view_layer + +# 1. Objects hidden in view layer +print("Checking for objects hidden from render that need to be enabled...") +try: + for obj in bpy.data.objects: + if obj.hide_get(view_layer=vl): + if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]): + obj.hide_set(False, view_layer=vl) + print("Enabled object:", obj.name) +except Exception as e: + print(f"Warning: Could not check/fix hidden render objects: {e}") + +# 2. 
Collections disabled in renders OR set to Holdout (the final killer)
+print("Checking for collections hidden from render that need to be enabled...")
+try:
+    for col in bpy.data.collections:
+        if col.hide_render or (vl.layer_collection.children.get(col.name) and vl.layer_collection.children[col.name].exclude):
+            if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision"]):
+                col.hide_render = False
+                if col.name in vl.layer_collection.children:
+                    vl.layer_collection.children[col.name].exclude = False
+                    vl.layer_collection.children[col.name].holdout = False
+                    vl.layer_collection.children[col.name].indirect_only = False
+                print("Enabled collection:", col.name)
+except Exception as e:
+    print(f"Warning: Could not check/fix hidden render collections: {e}")
+
+# Read output format from file (created by Go code)
+format_file_path = %q
+output_format_override = None
+if os.path.exists(format_file_path):
+    try:
+        with open(format_file_path, 'r') as f:
+            output_format_override = f.read().strip().upper()
+        print(f"Read output format from file: '{output_format_override}'")
+    except Exception as e:
+        print(f"Warning: Could not read output format file: {e}")
+else:
+    print(f"Warning: Output format file does not exist: {format_file_path}")
+
+# Read render settings from JSON file (created by Go code)
+render_settings_file = %q
+render_settings_override = None
+if os.path.exists(render_settings_file):
+    try:
+        with open(render_settings_file, 'r') as f:
+            render_settings_override = json.load(f)
+        print(f"Loaded render settings from job metadata")
+    except Exception as e:
+        print(f"Warning: Could not read render settings file: {e}")
+`, formatFilePath, renderSettingsFilePath) + `
# Get current scene settings (preserve blend file preferences)
scene = bpy.context.scene
current_engine = scene.render.engine
current_device = scene.cycles.device if hasattr(scene, 'cycles') and scene.cycles else None
+current_output_format = scene.render.image_settings.file_format

print(f"Blend file render engine: {current_engine}")
if current_device:
    print(f"Blend file device setting: {current_device}")
+print(f"Blend file output format: {current_output_format}")
+
+# Override output format if specified
+# The format file always takes precedence (it's written specifically for this job)
+if output_format_override:
+    print(f"Overriding output format from '{current_output_format}' to '{output_format_override}'")
+    # Map common format names to Blender's format constants
+    # For video formats (EXR_264_MP4, EXR_AV1_MP4), we render as EXR frames first
+    format_to_use = output_format_override.upper()
+    if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4']:
+        format_to_use = 'EXR'  # Render as EXR for video formats
+
+    format_map = {
+        'PNG': 'PNG',
+        'JPEG': 'JPEG',
+        'JPG': 'JPEG',
+        'EXR': 'OPEN_EXR',
+        'OPEN_EXR': 'OPEN_EXR',
+        'TARGA': 'TARGA',
+        'TIFF': 'TIFF',
+        'BMP': 'BMP',
+    }
+    blender_format = format_map.get(format_to_use, format_to_use)
+    try:
+        scene.render.image_settings.file_format = blender_format
+        print(f"Successfully set output format to: {blender_format}")
+    except Exception as e:
+        print(f"Warning: Could not set output format to {blender_format}: {e}")
+        print(f"Using blend file's format: {current_output_format}")
+else:
+    print(f"Using blend file's output format: {current_output_format}")
+
+# Apply render settings from job metadata if provided
+# Note: output_format is NOT applied from render_settings_override - it's already set from format file
above +if render_settings_override: + engine_override = render_settings_override.get('engine', '').upper() + engine_settings = render_settings_override.get('engine_settings', {}) + + # Switch engine if specified + if engine_override and engine_override != current_engine.upper(): + print(f"Switching render engine from '{current_engine}' to '{engine_override}'") + try: + scene.render.engine = engine_override + current_engine = engine_override + print(f"Successfully switched to {engine_override} engine") + except Exception as e: + print(f"Warning: Could not switch engine to {engine_override}: {e}") + print(f"Using blend file's engine: {current_engine}") + + # Apply engine-specific settings + if engine_settings: + if current_engine.upper() == 'CYCLES': + cycles = scene.cycles + print("Applying Cycles render settings from job metadata...") + for key, value in engine_settings.items(): + try: + if hasattr(cycles, key): + setattr(cycles, key, value) + print(f" Set Cycles.{key} = {value}") + else: + print(f" Warning: Cycles has no attribute '{key}'") + except Exception as e: + print(f" Warning: Could not set Cycles.{key} = {value}: {e}") + elif current_engine.upper() in ['EEVEE', 'EEVEE_NEXT']: + eevee = scene.eevee + print("Applying EEVEE render settings from job metadata...") + for key, value in engine_settings.items(): + try: + if hasattr(eevee, key): + setattr(eevee, key, value) + print(f" Set EEVEE.{key} = {value}") + else: + print(f" Warning: EEVEE has no attribute '{key}'") + except Exception as e: + print(f" Warning: Could not set EEVEE.{key} = {value}: {e}") + + # Apply resolution if specified + if 'resolution_x' in render_settings_override: + try: + scene.render.resolution_x = render_settings_override['resolution_x'] + print(f"Set resolution_x = {render_settings_override['resolution_x']}") + except Exception as e: + print(f"Warning: Could not set resolution_x: {e}") + if 'resolution_y' in render_settings_override: + try: + scene.render.resolution_y = render_settings_override['resolution_y'] + print(f"Set resolution_y = {render_settings_override['resolution_y']}") + except Exception as e: + print(f"Warning: Could not set resolution_y: {e}") # Only override device selection if using Cycles (other engines handle GPU differently) if current_engine == 'CYCLES': + # Check if CPU rendering is forced + force_cpu = False + if render_settings_override and render_settings_override.get('force_cpu'): + force_cpu = render_settings_override.get('force_cpu', False) + print("Force CPU rendering is enabled - skipping GPU detection") + # Ensure Cycles addon is enabled try: if 'cycles' not in bpy.context.preferences.addons: @@ -1064,194 +1277,200 @@ if current_engine == 'CYCLES': except Exception as e: print(f"Warning: Could not enable Cycles addon: {e}") - # Access Cycles preferences - prefs = bpy.context.preferences - try: - cycles_prefs = prefs.addons['cycles'].preferences - except (KeyError, AttributeError): - try: - cycles_addon = prefs.addons.get('cycles') - if cycles_addon: - cycles_prefs = cycles_addon.preferences - else: - raise Exception("Cycles addon not found") - except Exception as e: - print(f"ERROR: Could not access Cycles preferences: {e}") - import traceback - traceback.print_exc() - sys.exit(1) - - # Check all devices and choose the best GPU type - # Device type preference order (most performant first) - device_type_preference = ['OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL'] - gpu_available = False - best_device_type = None - best_gpu_devices = [] - devices_by_type = {} # {device_type: 
[devices]} - seen_device_ids = set() # Track device IDs to avoid duplicates - - print("Checking for GPU availability...") - - # Try to get all devices - try each device type to see what's available - for device_type in device_type_preference: - try: - cycles_prefs.compute_device_type = device_type - cycles_prefs.refresh_devices() - - # Get devices for this type - devices = None - if hasattr(cycles_prefs, 'devices'): - try: - devices_prop = cycles_prefs.devices - if devices_prop: - devices = list(devices_prop) if hasattr(devices_prop, '__iter__') else [devices_prop] - except Exception as e: - pass - - if not devices or len(devices) == 0: - try: - devices = cycles_prefs.get_devices() - except Exception as e: - pass - - if devices and len(devices) > 0: - # Categorize devices by their type attribute, avoiding duplicates - for device in devices: - if hasattr(device, 'type'): - device_type_str = str(device.type).upper() - device_id = getattr(device, 'id', None) - - # Use device ID to avoid duplicates (same device appears when checking different compute_device_types) - if device_id and device_id in seen_device_ids: - continue - - if device_id: - seen_device_ids.add(device_id) - - if device_type_str not in devices_by_type: - devices_by_type[device_type_str] = [] - devices_by_type[device_type_str].append(device) - except (ValueError, AttributeError, KeyError, TypeError): - # Device type not supported, continue - continue - except Exception as e: - # Other errors - log but continue - print(f" Error checking {device_type}: {e}") - continue - - # Print what we found - print(f"Found devices by type: {list(devices_by_type.keys())}") - for dev_type, dev_list in devices_by_type.items(): - print(f" {dev_type}: {len(dev_list)} device(s)") - for device in dev_list: - device_name = getattr(device, 'name', 'Unknown') - print(f" - {device_name}") - - # Choose the best GPU type based on preference - for preferred_type in device_type_preference: - if preferred_type in devices_by_type: - gpu_devices = [d for d in devices_by_type[preferred_type] if preferred_type in ['CUDA', 'OPENCL', 'OPTIX', 'HIP', 'METAL', 'ONEAPI']] - if gpu_devices: - best_device_type = preferred_type - best_gpu_devices = [(d, preferred_type) for d in gpu_devices] - print(f"Selected {preferred_type} as best GPU type with {len(gpu_devices)} device(s)") - break - - # Second pass: Enable the best GPU we found - if best_device_type and best_gpu_devices: - print(f"\nEnabling GPU devices for {best_device_type}...") - try: - # Set the device type again - cycles_prefs.compute_device_type = best_device_type - cycles_prefs.refresh_devices() - - # First, disable all CPU devices to ensure only GPU is used - print(f" Disabling CPU devices...") - all_devices = cycles_prefs.devices if hasattr(cycles_prefs, 'devices') else cycles_prefs.get_devices() - if all_devices: - for device in all_devices: - if hasattr(device, 'type') and str(device.type).upper() == 'CPU': - try: - device.use = False - device_name = getattr(device, 'name', 'Unknown') - print(f" Disabled CPU: {device_name}") - except Exception as e: - print(f" Warning: Could not disable CPU device {getattr(device, 'name', 'Unknown')}: {e}") - - # Enable all GPU devices - enabled_count = 0 - for device, device_type in best_gpu_devices: - try: - device.use = True - enabled_count += 1 - device_name = getattr(device, 'name', 'Unknown') - print(f" Enabled: {device_name}") - except Exception as e: - print(f" Warning: Could not enable device {getattr(device, 'name', 'Unknown')}: {e}") - - # Enable ray tracing 
acceleration for supported device types - try: - if best_device_type == 'HIP': - # HIPRT (HIP Ray Tracing) for AMD GPUs - if hasattr(cycles_prefs, 'use_hiprt'): - cycles_prefs.use_hiprt = True - print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering") - elif hasattr(scene.cycles, 'use_hiprt'): - scene.cycles.use_hiprt = True - print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering") - else: - print(f" HIPRT not available (requires Blender 4.0+)") - elif best_device_type == 'OPTIX': - # OptiX is already enabled when using OPTIX device type - # But we can check if there are any OptiX-specific settings - if hasattr(scene.cycles, 'use_optix_denoising'): - scene.cycles.use_optix_denoising = True - print(f" Enabled OptiX denoising") - print(f" OptiX ray tracing is active (using OPTIX device type)") - elif best_device_type == 'CUDA': - # CUDA can use OptiX if available, but it's usually automatic - # Check if we can prefer OptiX over CUDA - if hasattr(scene.cycles, 'use_optix_denoising'): - scene.cycles.use_optix_denoising = True - print(f" Enabled OptiX denoising (if OptiX available)") - print(f" CUDA ray tracing active") - elif best_device_type == 'METAL': - # MetalRT for Apple Silicon (if available) - if hasattr(scene.cycles, 'use_metalrt'): - scene.cycles.use_metalrt = True - print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering") - elif hasattr(cycles_prefs, 'use_metalrt'): - cycles_prefs.use_metalrt = True - print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering") - else: - print(f" MetalRT not available") - elif best_device_type == 'ONEAPI': - # Intel oneAPI - Embree might be available - if hasattr(scene.cycles, 'use_embree'): - scene.cycles.use_embree = True - print(f" Enabled Embree for faster CPU ray tracing") - print(f" oneAPI ray tracing active") - except Exception as e: - print(f" Could not enable ray tracing acceleration: {e}") - - print(f"SUCCESS: Enabled {enabled_count} GPU device(s) for {best_device_type}") - gpu_available = True - except Exception as e: - print(f"ERROR: Failed to enable GPU devices: {e}") - import traceback - traceback.print_exc() - - # Set device based on availability (prefer GPU, fallback to CPU) - if gpu_available: - scene.cycles.device = 'GPU' - print(f"Using GPU for rendering (blend file had: {current_device})") - else: + # If CPU is forced, skip GPU detection and set CPU directly + if force_cpu: scene.cycles.device = 'CPU' - print(f"GPU not available, using CPU for rendering (blend file had: {current_device})") + print("Forced CPU rendering (skipping GPU detection)") + else: + # Access Cycles preferences + prefs = bpy.context.preferences + try: + cycles_prefs = prefs.addons['cycles'].preferences + except (KeyError, AttributeError): + try: + cycles_addon = prefs.addons.get('cycles') + if cycles_addon: + cycles_prefs = cycles_addon.preferences + else: + raise Exception("Cycles addon not found") + except Exception as e: + print(f"ERROR: Could not access Cycles preferences: {e}") + import traceback + traceback.print_exc() + sys.exit(1) + + # Check all devices and choose the best GPU type + # Device type preference order (most performant first) + device_type_preference = ['OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL'] + gpu_available = False + best_device_type = None + best_gpu_devices = [] + devices_by_type = {} # {device_type: [devices]} + seen_device_ids = set() # Track device IDs to avoid duplicates + + print("Checking for GPU availability...") + + # Try to get all devices - try each device type to see what's available + for 
device_type in device_type_preference: + try: + cycles_prefs.compute_device_type = device_type + cycles_prefs.refresh_devices() + + # Get devices for this type + devices = None + if hasattr(cycles_prefs, 'devices'): + try: + devices_prop = cycles_prefs.devices + if devices_prop: + devices = list(devices_prop) if hasattr(devices_prop, '__iter__') else [devices_prop] + except Exception as e: + pass + + if not devices or len(devices) == 0: + try: + devices = cycles_prefs.get_devices() + except Exception as e: + pass + + if devices and len(devices) > 0: + # Categorize devices by their type attribute, avoiding duplicates + for device in devices: + if hasattr(device, 'type'): + device_type_str = str(device.type).upper() + device_id = getattr(device, 'id', None) + + # Use device ID to avoid duplicates (same device appears when checking different compute_device_types) + if device_id and device_id in seen_device_ids: + continue + + if device_id: + seen_device_ids.add(device_id) + + if device_type_str not in devices_by_type: + devices_by_type[device_type_str] = [] + devices_by_type[device_type_str].append(device) + except (ValueError, AttributeError, KeyError, TypeError): + # Device type not supported, continue + continue + except Exception as e: + # Other errors - log but continue + print(f" Error checking {device_type}: {e}") + continue + + # Print what we found + print(f"Found devices by type: {list(devices_by_type.keys())}") + for dev_type, dev_list in devices_by_type.items(): + print(f" {dev_type}: {len(dev_list)} device(s)") + for device in dev_list: + device_name = getattr(device, 'name', 'Unknown') + print(f" - {device_name}") + + # Choose the best GPU type based on preference + for preferred_type in device_type_preference: + if preferred_type in devices_by_type: + gpu_devices = [d for d in devices_by_type[preferred_type] if preferred_type in ['CUDA', 'OPENCL', 'OPTIX', 'HIP', 'METAL', 'ONEAPI']] + if gpu_devices: + best_device_type = preferred_type + best_gpu_devices = [(d, preferred_type) for d in gpu_devices] + print(f"Selected {preferred_type} as best GPU type with {len(gpu_devices)} device(s)") + break + + # Second pass: Enable the best GPU we found + if best_device_type and best_gpu_devices: + print(f"\nEnabling GPU devices for {best_device_type}...") + try: + # Set the device type again + cycles_prefs.compute_device_type = best_device_type + cycles_prefs.refresh_devices() + + # First, disable all CPU devices to ensure only GPU is used + print(f" Disabling CPU devices...") + all_devices = cycles_prefs.devices if hasattr(cycles_prefs, 'devices') else cycles_prefs.get_devices() + if all_devices: + for device in all_devices: + if hasattr(device, 'type') and str(device.type).upper() == 'CPU': + try: + device.use = False + device_name = getattr(device, 'name', 'Unknown') + print(f" Disabled CPU: {device_name}") + except Exception as e: + print(f" Warning: Could not disable CPU device {getattr(device, 'name', 'Unknown')}: {e}") + + # Enable all GPU devices + enabled_count = 0 + for device, device_type in best_gpu_devices: + try: + device.use = True + enabled_count += 1 + device_name = getattr(device, 'name', 'Unknown') + print(f" Enabled: {device_name}") + except Exception as e: + print(f" Warning: Could not enable device {getattr(device, 'name', 'Unknown')}: {e}") + + # Enable ray tracing acceleration for supported device types + try: + if best_device_type == 'HIP': + # HIPRT (HIP Ray Tracing) for AMD GPUs + if hasattr(cycles_prefs, 'use_hiprt'): + cycles_prefs.use_hiprt = True + print(f" 
Enabled HIPRT (HIP Ray Tracing) for faster rendering") + elif hasattr(scene.cycles, 'use_hiprt'): + scene.cycles.use_hiprt = True + print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering") + else: + print(f" HIPRT not available (requires Blender 4.0+)") + elif best_device_type == 'OPTIX': + # OptiX is already enabled when using OPTIX device type + # But we can check if there are any OptiX-specific settings + if hasattr(scene.cycles, 'use_optix_denoising'): + scene.cycles.use_optix_denoising = True + print(f" Enabled OptiX denoising") + print(f" OptiX ray tracing is active (using OPTIX device type)") + elif best_device_type == 'CUDA': + # CUDA can use OptiX if available, but it's usually automatic + # Check if we can prefer OptiX over CUDA + if hasattr(scene.cycles, 'use_optix_denoising'): + scene.cycles.use_optix_denoising = True + print(f" Enabled OptiX denoising (if OptiX available)") + print(f" CUDA ray tracing active") + elif best_device_type == 'METAL': + # MetalRT for Apple Silicon (if available) + if hasattr(scene.cycles, 'use_metalrt'): + scene.cycles.use_metalrt = True + print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering") + elif hasattr(cycles_prefs, 'use_metalrt'): + cycles_prefs.use_metalrt = True + print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering") + else: + print(f" MetalRT not available") + elif best_device_type == 'ONEAPI': + # Intel oneAPI - Embree might be available + if hasattr(scene.cycles, 'use_embree'): + scene.cycles.use_embree = True + print(f" Enabled Embree for faster CPU ray tracing") + print(f" oneAPI ray tracing active") + except Exception as e: + print(f" Could not enable ray tracing acceleration: {e}") + + print(f"SUCCESS: Enabled {enabled_count} GPU device(s) for {best_device_type}") + gpu_available = True + except Exception as e: + print(f"ERROR: Failed to enable GPU devices: {e}") + import traceback + traceback.print_exc() + + # Set device based on availability (prefer GPU, fallback to CPU) + if gpu_available: + scene.cycles.device = 'GPU' + print(f"Using GPU for rendering (blend file had: {current_device})") + else: + scene.cycles.device = 'CPU' + print(f"GPU not available, using CPU for rendering (blend file had: {current_device})") # Verify device setting - final_device = scene.cycles.device - print(f"Final Cycles device: {final_device}") + if current_engine == 'CYCLES': + final_device = scene.cycles.device + print(f"Final Cycles device: {final_device}") else: # For other engines (EEVEE, etc.), respect blend file settings print(f"Using {current_engine} engine - respecting blend file settings") @@ -1274,6 +1493,243 @@ try: except Exception as e: print(f"Could not enable GPU compositing: {e}") +# CRITICAL: Initialize headless rendering to prevent black images +# This ensures the render engine is properly initialized before rendering +print("Initializing headless rendering context...") +try: + # Ensure world exists and has proper settings + if not scene.world: + # Create a default world if none exists + world = bpy.data.worlds.new("World") + scene.world = world + print("Created default world") + + # Ensure world has a background shader (not just black) + if scene.world: + # Enable nodes if not already enabled + if not scene.world.use_nodes: + scene.world.use_nodes = True + print("Enabled world nodes") + + world_nodes = scene.world.node_tree + if world_nodes: + # Find or create background shader + bg_shader = None + for node in world_nodes.nodes: + if node.type == 'BACKGROUND': + bg_shader = node + break + + if not 
bg_shader: + bg_shader = world_nodes.nodes.new(type='ShaderNodeBackground') + # Connect to output + output = world_nodes.nodes.get('World Output') + if not output: + output = world_nodes.nodes.new(type='ShaderNodeOutputWorld') + output.name = 'World Output' + if output and bg_shader: + # Connect background to surface input + if 'Surface' in output.inputs and 'Background' in bg_shader.outputs: + world_nodes.links.new(bg_shader.outputs['Background'], output.inputs['Surface']) + print("Created background shader for world") + + # Ensure background has some color (not pure black) + if bg_shader: + # Only set if it's pure black (0,0,0) + if hasattr(bg_shader.inputs, 'Color'): + color = bg_shader.inputs['Color'].default_value + if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0: + # Set to a very dark gray instead of pure black + bg_shader.inputs['Color'].default_value = (0.01, 0.01, 0.01, 1.0) + print("Adjusted world background color to prevent black renders") + else: + # Fallback: use legacy world color if nodes aren't working + if hasattr(scene.world, 'color'): + color = scene.world.color + if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0: + scene.world.color = (0.01, 0.01, 0.01) + print("Adjusted legacy world color to prevent black renders") + + # For EEVEE, force viewport update to initialize render engine + if current_engine in ['EEVEE', 'EEVEE_NEXT']: + # Force EEVEE to update its internal state + try: + # Update depsgraph to ensure everything is initialized + depsgraph = bpy.context.evaluated_depsgraph_get() + if depsgraph: + # Force update + depsgraph.update() + print("Forced EEVEE depsgraph update for headless rendering") + except Exception as e: + print(f"Warning: Could not force EEVEE update: {e}") + + # Ensure EEVEE settings are applied + try: + # Force a material update to ensure shaders are compiled + for obj in scene.objects: + if obj.type == 'MESH' and obj.data.materials: + for mat in obj.data.materials: + if mat and mat.use_nodes: + # Touch the material to force update + mat.use_nodes = mat.use_nodes + print("Forced material updates for EEVEE") + except Exception as e: + print(f"Warning: Could not update materials: {e}") + + # For Cycles, ensure proper initialization + if current_engine == 'CYCLES': + # Ensure samples are set (even if 1 for preview) + if not hasattr(scene.cycles, 'samples') or scene.cycles.samples < 1: + scene.cycles.samples = 1 + print("Set minimum Cycles samples") + + # Check for lights in the scene + lights = [obj for obj in scene.objects if obj.type == 'LIGHT'] + print(f"Found {len(lights)} light(s) in scene") + if len(lights) == 0: + print("WARNING: No lights found in scene - rendering may be black!") + print(" Consider adding lights or ensuring world background emits light") + + # Ensure world background emits light (critical for Cycles) + if scene.world and scene.world.use_nodes: + world_nodes = scene.world.node_tree + if world_nodes: + bg_shader = None + for node in world_nodes.nodes: + if node.type == 'BACKGROUND': + bg_shader = node + break + + if bg_shader: + # Check and set strength - Cycles needs this to emit light! 
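+                    # (A strength of 0.0 means the world emits no light; with no
+                    # lamp objects or emissive materials either, Cycles typically
+                    # renders black frames.)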
+ if hasattr(bg_shader.inputs, 'Strength'): + strength = bg_shader.inputs['Strength'].default_value + if strength <= 0.0: + bg_shader.inputs['Strength'].default_value = 1.0 + print("Set world background strength to 1.0 for Cycles lighting") + else: + print(f"World background strength: {strength}") + # Also ensure color is not pure black + if hasattr(bg_shader.inputs, 'Color'): + color = bg_shader.inputs['Color'].default_value + if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0: + bg_shader.inputs['Color'].default_value = (1.0, 1.0, 1.0, 1.0) + print("Set world background color to white for Cycles lighting") + + # Check film_transparent setting - if enabled, background will be transparent/black + if hasattr(scene.cycles, 'film_transparent') and scene.cycles.film_transparent: + print("WARNING: film_transparent is enabled - background will be transparent") + print(" If you see black renders, try disabling film_transparent") + + # Force Cycles to update/compile materials and shaders + try: + # Update depsgraph to ensure everything is initialized + depsgraph = bpy.context.evaluated_depsgraph_get() + if depsgraph: + depsgraph.update() + print("Forced Cycles depsgraph update") + + # Force material updates to ensure shaders are compiled + for obj in scene.objects: + if obj.type == 'MESH' and obj.data.materials: + for mat in obj.data.materials: + if mat and mat.use_nodes: + # Force material update + mat.use_nodes = mat.use_nodes + print("Forced Cycles material updates") + except Exception as e: + print(f"Warning: Could not force Cycles updates: {e}") + + # Verify device is actually set correctly + if hasattr(scene.cycles, 'device'): + actual_device = scene.cycles.device + print(f"Cycles device setting: {actual_device}") + if actual_device == 'GPU': + # Try to verify GPU is actually available + try: + prefs = bpy.context.preferences + cycles_prefs = prefs.addons['cycles'].preferences + devices = cycles_prefs.devices + enabled_devices = [d for d in devices if d.use] + if len(enabled_devices) == 0: + print("WARNING: GPU device set but no GPU devices are enabled!") + print(" Falling back to CPU may cause issues") + except Exception as e: + print(f"Could not verify GPU devices: {e}") + + # Ensure camera exists and is active + if scene.camera is None: + # Find first camera in scene + for obj in scene.objects: + if obj.type == 'CAMERA': + scene.camera = obj + print(f"Set active camera: {obj.name}") + break + + # Fix objects and collections hidden from render + vl = bpy.context.view_layer + + # 1. Objects hidden in view layer + for obj in bpy.data.objects: + if obj.hide_get(view_layer=vl): + if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]): + obj.hide_set(False, view_layer=vl) + print("Enabled object:", obj.name) + + # 2. 
Collections disabled in renders OR set to Holdout (the final killer) + for col in bpy.data.collections: + if col.hide_render or (vl.layer_collection.children.get(col.name) and not vl.layer_collection.children[col.name].exclude == False): + if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision","dick"]): + col.hide_render = False + if col.name in vl.layer_collection.children: + vl.layer_collection.children[col.name].exclude = False + vl.layer_collection.children[col.name].holdout = False + vl.layer_collection.children[col.name].indirect_only = False + print("Enabled collection:", col.name) + + print("Headless rendering initialization complete") +except Exception as e: + print(f"Warning: Headless rendering initialization had issues: {e}") + import traceback + traceback.print_exc() + +# Final verification before rendering +print("\n=== Pre-render verification ===") +try: + scene = bpy.context.scene + print(f"Render engine: {scene.render.engine}") + print(f"Active camera: {scene.camera.name if scene.camera else 'None'}") + + if scene.render.engine == 'CYCLES': + print(f"Cycles device: {scene.cycles.device}") + print(f"Cycles samples: {scene.cycles.samples}") + lights = [obj for obj in scene.objects if obj.type == 'LIGHT'] + print(f"Lights in scene: {len(lights)}") + if scene.world: + if scene.world.use_nodes: + world_nodes = scene.world.node_tree + if world_nodes: + bg_shader = None + for node in world_nodes.nodes: + if node.type == 'BACKGROUND': + bg_shader = node + break + if bg_shader: + if hasattr(bg_shader.inputs, 'Strength'): + strength = bg_shader.inputs['Strength'].default_value + print(f"World background strength: {strength}") + if hasattr(bg_shader.inputs, 'Color'): + color = bg_shader.inputs['Color'].default_value + print(f"World background color: ({color[0]:.2f}, {color[1]:.2f}, {color[2]:.2f})") + else: + print("World exists but nodes are disabled") + else: + print("WARNING: No world in scene!") + + print("=== Verification complete ===\n") +except Exception as e: + print(f"Warning: Verification failed: {e}") + print("Device configuration complete - blend file settings preserved, device optimized") sys.stdout.flush() ` @@ -1285,6 +1741,30 @@ sys.stdout.flush() return errors.New(errMsg) } + // Write output format to a temporary file for the script to read + // (Blender's argument parsing makes it tricky to pass custom args to Python scripts) + // IMPORTANT: Write the user's selected outputFormat, NOT renderFormat + // renderFormat might be "EXR" for video, but we want the user's actual selection (PNG, JPEG, etc.) 
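+	// A hypothetical alternative would be to pass values after Blender's "--"
+	// separator and read them from sys.argv inside the script, e.g.:
+	//   blender -b scene.blend --python render.py -- PNG render_settings.json
+	//   argv = sys.argv[sys.argv.index("--") + 1:]  # ["PNG", "render_settings.json"]
+	// The file-based handshake below avoids that argument parsing entirely.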
+	formatFile := filepath.Join(workDir, "output_format.txt")
+	c.sendLog(taskID, types.LogLevelInfo, fmt.Sprintf("Writing user-selected output format '%s' to format file (render format: '%s')", outputFormat, renderFormat), "render_blender")
+	if err := os.WriteFile(formatFile, []byte(outputFormat), 0644); err != nil {
+		errMsg := fmt.Sprintf("failed to create format file: %v", err)
+		c.sendLog(taskID, types.LogLevelError, errMsg, "render_blender")
+		c.sendStepUpdate(taskID, "render_blender", types.StepStatusFailed, errMsg)
+		return errors.New(errMsg)
+	}
+
+	// Write render settings to a JSON file if we have metadata with render settings
+	renderSettingsFile := filepath.Join(workDir, "render_settings.json")
+	if jobMetadata != nil && jobMetadata.RenderSettings.EngineSettings != nil {
+		settingsJSON, err := json.Marshal(jobMetadata.RenderSettings)
+		if err == nil {
+			if err := os.WriteFile(renderSettingsFile, settingsJSON, 0644); err != nil {
+				c.sendLog(taskID, types.LogLevelWarn, fmt.Sprintf("Failed to write render settings file: %v", err), "render_blender")
+			}
+		}
+	}
+
 	// Run Blender with GPU enabled via Python script
 	// Use -s (start) and -e (end) for frame ranges, or -f for single frame
 	var cmd *exec.Cmd
@@ -1305,6 +1785,12 @@ sys.stdout.flush()
 	}
 	cmd.Dir = workDir

+	// Pass the parent environment through unchanged so Blender can reach the GPU
+	// and set up an OpenGL context when rendering headless (EEVEE in particular);
+	// no extra variables are set here
+	cmd.Env = os.Environ()
+
 	// Capture stdout and stderr separately for line-by-line streaming
 	stdoutPipe, err := cmd.StdoutPipe()
 	if err != nil {
@@ -1463,7 +1949,7 @@ sys.stdout.flush()
 	// Step: upload or upload_frames
 	uploadStepName := "upload"
-	if outputFormat == "MP4" {
+	if outputFormat == "EXR_264_MP4" || outputFormat == "EXR_AV1_MP4" {
 		uploadStepName = "upload_frames"
 	}
 	c.sendStepUpdate(taskID, uploadStepName, types.StepStatusRunning, "")
@@ -1594,6 +2080,20 @@ func (c *Client) processVideoGenerationTask(task map[string]interface{}, jobID i
 	c.sendLog(taskID, types.LogLevelInfo, fmt.Sprintf("Starting video generation task: job %d", jobID), "")
 	log.Printf("Processing video generation task %d for job %d", taskID, jobID)

+	// Get job metadata to determine output format
+	jobMetadata, err := c.getJobMetadata(jobID)
+	var outputFormat string
+	if err == nil && jobMetadata != nil && jobMetadata.RenderSettings.OutputFormat != "" {
+		outputFormat = jobMetadata.RenderSettings.OutputFormat
+	} else {
+		// Fallback: try the task data, then default to EXR_264_MP4
+		if format, ok := task["output_format"].(string); ok {
+			outputFormat = format
+		} else {
+			outputFormat = "EXR_264_MP4" // Default
+		}
+	}
+
 	// Get all output files for this job
 	files, err := c.getJobFiles(jobID)
 	if err != nil {
@@ -1601,30 +2101,31 @@ func (c *Client) processVideoGenerationTask(task map[string]interface{}, jobID i
 		return fmt.Errorf("failed to get job files: %w", err)
 	}

-	// Find all PNG frame files
-	var pngFiles []map[string]interface{}
+	// Find all EXR frame files (MP4 jobs render to EXR for fidelity: 32-bit float HDR)
+	var exrFiles []map[string]interface{}
 	for _, file := range files {
 		fileType, _ := file["file_type"].(string)
 		fileName, _ := file["file_name"].(string)
-		if fileType == "output" && strings.HasSuffix(fileName, ".png") {
-			pngFiles = append(pngFiles, file)
+		// Case-insensitive extension check covers both .exr and .EXR
+		if fileType == "output" && strings.HasSuffix(strings.ToLower(fileName), ".exr") {
+			exrFiles = append(exrFiles, file)
 		}
 	}

-	if len(pngFiles) == 0 {
-		err := fmt.Errorf("no PNG frame files found for MP4 generation")
+	if len(exrFiles) == 0 {
+		err := fmt.Errorf("no EXR frame files found for MP4 generation")
 		c.sendStepUpdate(taskID, "get_files", types.StepStatusFailed, err.Error())
 		return err
 	}
 	c.sendStepUpdate(taskID, "get_files", types.StepStatusCompleted, "")
-	c.sendLog(taskID, types.LogLevelInfo, fmt.Sprintf("Found %d PNG frames for video generation", len(pngFiles)), "get_files")
+	c.sendLog(taskID, types.LogLevelInfo, fmt.Sprintf("Found %d EXR frames for video generation (32-bit float HDR)", len(exrFiles)), "get_files")

-	log.Printf("Generating MP4 for job %d from %d PNG frames", jobID, len(pngFiles))
+	log.Printf("Generating MP4 for job %d from %d EXR frames", jobID, len(exrFiles))

 	// Step: download_frames
 	c.sendStepUpdate(taskID, "download_frames", types.StepStatusRunning, "")
-	c.sendLog(taskID, types.LogLevelInfo, "Downloading PNG frames...", "download_frames")
+	c.sendLog(taskID, types.LogLevelInfo, "Downloading EXR frames...", "download_frames")

 	// Create temporary job workspace for video generation within runner workspace
 	workDir := filepath.Join(c.getWorkspaceDir(), fmt.Sprintf("job-%d-video", jobID))
@@ -1634,9 +2135,9 @@ func (c *Client) processVideoGenerationTask(task map[string]interface{}, jobID i
 	}
 	defer os.RemoveAll(workDir)

-	// Download all PNG frames
+	// Download all EXR frames
 	var frameFiles []string
-	for _, file := range pngFiles {
+	for _, file := range exrFiles {
 		fileName, _ := file["file_name"].(string)
 		framePath := filepath.Join(workDir, fileName)
 		if err := c.downloadFrameFile(jobID, fileName, framePath); err != nil {
@@ -1659,15 +2160,32 @@ func (c *Client) processVideoGenerationTask(task map[string]interface{}, jobID i
 	// Step: generate_video
 	c.sendStepUpdate(taskID, "generate_video", types.StepStatusRunning, "")
-	c.sendLog(taskID, types.LogLevelInfo, "Generating MP4 video with ffmpeg...", "generate_video")
+
+	// Determine codec and pixel format based on output format
+	var codec string
+	var pixFmt string
+	var useAlpha bool
+
+	if outputFormat == "EXR_AV1_MP4" {
+		codec = "libaom-av1"
+		pixFmt = "yuva420p" // request an alpha-capable format; encoder/container support varies
+		useAlpha = true
+		c.sendLog(taskID, types.LogLevelInfo, "Generating MP4 video with AV1 codec (with alpha channel)...", "generate_video")
+	} else {
+		// Default to H.264 for EXR_264_MP4
+		codec = "libx264"
+		pixFmt = "yuv420p" // H.264 without alpha
+		useAlpha = false
+		c.sendLog(taskID, types.LogLevelInfo, "Generating MP4 video with H.264 codec...", "generate_video")
+	}

 	// Generate MP4 using ffmpeg
 	outputMP4 := filepath.Join(workDir, fmt.Sprintf("output_%d.mp4", jobID))

-	// Use ffmpeg to combine frames into MP4
+	// Use ffmpeg to combine EXR frames into MP4
 	// Method 1: image sequence input (more reliable)
 	firstFrame := frameFiles[0]
-	// Extract frame number pattern (e.g., frame_2470.png -> frame_%04d.png)
+	// Extract frame number pattern (e.g., frame_2470.exr -> frame_%04d.exr)
 	baseName := filepath.Base(firstFrame)
 	// Find the numeric part and replace it with a %04d pattern
 	// Use a regex to find digits after the underscore and before the extension
@@ -1690,24 +2208,64 @@
 	// Allocate a VAAPI device for this task (if available)
 	allocatedDevice := c.allocateVAAPIDevice(taskID)
 	defer c.releaseVAAPIDevice(taskID) // Always
release the device when done - if allocatedDevice != "" { c.sendLog(taskID, types.LogLevelInfo, fmt.Sprintf("Using VAAPI device: %s", allocatedDevice), "generate_video") } else { - c.sendLog(taskID, types.LogLevelInfo, "No VAAPI device available, will use software encoding or first available device", "generate_video") + c.sendLog(taskID, types.LogLevelInfo, "No VAAPI device available, will use software encoding or other hardware", "generate_video") } - // Run ffmpeg to combine frames into MP4 at 24 fps with hardware acceleration + // Run ffmpeg to combine EXR frames into MP4 at 24 fps + // EXR is 32-bit float HDR format - FFmpeg will automatically tonemap to 8-bit/10-bit for video // Use -start_number to tell ffmpeg the starting frame number - cmd, err := c.buildFFmpegCommand(allocatedDevice, "-y", "-start_number", fmt.Sprintf("%d", startNumber), - "-framerate", "24", "-i", patternPath, - "-r", "24", outputMP4) - if err != nil { - c.sendLog(taskID, types.LogLevelWarn, fmt.Sprintf("Hardware acceleration detection failed, using software encoding: %v", err), "generate_video") - // Fallback to software encoding + var cmd *exec.Cmd + var useHardware bool + + if outputFormat == "EXR_AV1_MP4" { + // Try AV1 hardware acceleration + cmd, err = c.buildFFmpegCommandAV1(allocatedDevice, useAlpha, "-y", "-start_number", fmt.Sprintf("%d", startNumber), + "-framerate", "24", "-i", patternPath, + "-r", "24", outputMP4) + if err == nil { + useHardware = true + c.sendLog(taskID, types.LogLevelInfo, "Using AV1 hardware acceleration", "generate_video") + } else { + c.sendLog(taskID, types.LogLevelInfo, fmt.Sprintf("AV1 hardware acceleration not available, will use software: %v", err), "generate_video") + } + } else { + // Try H.264 hardware acceleration + if allocatedDevice != "" { + cmd, err = c.buildFFmpegCommand(allocatedDevice, "-y", "-start_number", fmt.Sprintf("%d", startNumber), + "-framerate", "24", "-i", patternPath, + "-r", "24", outputMP4) + if err == nil { + useHardware = true + } else { + allocatedDevice = "" // Fall back to software + } + } + } + + if !useHardware { + // Software encoding with HDR tonemapping + // Build video filter for HDR to SDR conversion + var vf string + if useAlpha { + // For AV1 with alpha: preserve alpha channel during tonemapping + vf = "zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=yuva420p" + } else { + // For H.264 without alpha: standard tonemapping + vf = "zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=yuv420p" + } + cmd = exec.Command("ffmpeg", "-y", "-start_number", fmt.Sprintf("%d", startNumber), "-framerate", "24", "-i", patternPath, - "-c:v", "libx264", "-pix_fmt", "yuv420p", "-r", "24", outputMP4) + "-vf", vf, + "-c:v", codec, "-pix_fmt", pixFmt, "-r", "24", outputMP4) + + if outputFormat == "EXR_AV1_MP4" { + // AV1 encoding options for quality + cmd.Args = append(cmd.Args, "-cpu-used", "4", "-crf", "30", "-b:v", "0") + } } cmd.Dir = workDir output, err := cmd.CombinedOutput() @@ -1723,7 +2281,7 @@ func (c *Client) processVideoGenerationTask(task map[string]interface{}, jobID i // Try alternative method with concat demuxer log.Printf("First ffmpeg attempt failed, trying concat method: %s", outputStr) - err = c.generateMP4WithConcat(frameFiles, outputMP4, workDir, allocatedDevice) + err = c.generateMP4WithConcat(frameFiles, outputMP4, workDir, allocatedDevice, outputFormat, codec, pixFmt, useAlpha, useHardware) if 
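The `checkEncoderAvailable` helper used by each branch of this function is defined elsewhere in the runner; for reference, one common way to implement such a probe is to scan `ffmpeg -encoders` output. A sketch only (listing an encoder does not guarantee a usable device, which is why the runner also test-encodes in `testGenericEncoder`):

```go
package runner

import (
	"os/exec"
	"strings"
)

// checkEncoderAvailable sketch: `ffmpeg -encoders` prints one encoder per
// line as "<flags> <name> <description>", so field 1 is the encoder name.
func checkEncoderAvailable(name string) bool {
	out, err := exec.Command("ffmpeg", "-hide_banner", "-encoders").Output()
	if err != nil {
		return false
	}
	for _, line := range strings.Split(string(out), "\n") {
		fields := strings.Fields(line)
		if len(fields) >= 2 && fields[1] == name {
			return true
		}
	}
	return false
}
```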
err != nil { // Check for size errors in concat method too if sizeErr := c.checkFFmpegSizeError(err.Error()); sizeErr != nil { @@ -1844,6 +2402,108 @@ func (c *Client) buildFFmpegCommand(device string, args ...string) (*exec.Cmd, e return nil, fmt.Errorf("no hardware encoder available") } +// buildFFmpegCommandAV1 builds an ffmpeg command with AV1 hardware acceleration if available +// If device is provided (non-empty), it will be used for VAAPI encoding +// useAlpha indicates if alpha channel should be preserved +// Returns the command and any error encountered during detection +func (c *Client) buildFFmpegCommandAV1(device string, useAlpha bool, args ...string) (*exec.Cmd, error) { + // Try AV1 hardware encoders in order of preference + // Priority: NVENC (NVIDIA) > QSV (Intel) > VAAPI (Intel/AMD Linux) > AMF (AMD Windows) > software fallback + // Note: Hardware AV1 encoders may not support alpha, so we may need to fall back to software + + // Build HDR tonemapping filter for EXR input + // Hardware encoders need the input to be tonemapped first + var tonemapFilter string + if useAlpha { + tonemapFilter = "zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=yuva420p" + } else { + tonemapFilter = "zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=yuv420p" + } + + // Check for NVIDIA NVENC AV1 (RTX 40 series and newer) + if c.checkEncoderAvailable("av1_nvenc") { + outputIdx := len(args) - 1 + // AV1 NVENC may support alpha, but let's use yuva420p only if useAlpha is true + pixFmt := "yuv420p" + if useAlpha { + // Check if av1_nvenc supports alpha (it should on newer drivers) + pixFmt = "yuva420p" + } + // Insert tonemapping filter and hardware encoding args before output file + hwArgs := []string{"-vf", tonemapFilter, "-c:v", "av1_nvenc", "-preset", "p4", "-b:v", "10M", "-maxrate", "12M", "-bufsize", "20M", "-pix_fmt", pixFmt} + newArgs := make([]string, 0, len(args)+len(hwArgs)) + newArgs = append(newArgs, args[:outputIdx]...) + newArgs = append(newArgs, hwArgs...) + newArgs = append(newArgs, args[outputIdx:]...) + return exec.Command("ffmpeg", newArgs...), nil + } + + // Check for Intel Quick Sync AV1 (Arc GPUs and newer) + if c.checkEncoderAvailable("av1_qsv") { + outputIdx := len(args) - 1 + pixFmt := "yuv420p" + if useAlpha { + // QSV AV1 may support alpha on newer hardware + pixFmt = "yuva420p" + } + // Insert tonemapping filter and hardware encoding args + hwArgs := []string{"-vf", tonemapFilter, "-c:v", "av1_qsv", "-preset", "medium", "-b:v", "10M", "-pix_fmt", pixFmt} + newArgs := make([]string, 0, len(args)+len(hwArgs)) + newArgs = append(newArgs, args[:outputIdx]...) + newArgs = append(newArgs, hwArgs...) + newArgs = append(newArgs, args[outputIdx:]...) 
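The `outputIdx` splice is repeated verbatim in every hardware-encoder branch of this function; a small helper could factor it out. A sketch of that refactor, not code in this diff:

```go
package runner

// spliceBeforeOutput inserts encoder-specific arguments just before the
// final output-path argument: the pattern each branch above inlines.
func spliceBeforeOutput(args, hwArgs []string) []string {
	outputIdx := len(args) - 1
	out := make([]string, 0, len(args)+len(hwArgs))
	out = append(out, args[:outputIdx]...)
	out = append(out, hwArgs...)
	out = append(out, args[outputIdx:]...)
	return out
}
```

Each branch would then reduce to `exec.Command("ffmpeg", spliceBeforeOutput(args, hwArgs)...)`.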
+ return exec.Command("ffmpeg", newArgs...), nil + } + + // Check for VAAPI AV1 (Intel/AMD on Linux, newer hardware) + if c.checkEncoderAvailable("av1_vaapi") { + // Use provided device if available, otherwise get the first available + vaapiDevice := device + if vaapiDevice == "" { + vaapiDevice = c.getVAAPIDevice() + } + + if vaapiDevice != "" { + outputIdx := len(args) - 1 + pixFmt := "yuv420p" + vaapiFilter := tonemapFilter + if useAlpha { + // VAAPI AV1 may support alpha on newer hardware + // Note: VAAPI may need format conversion before hwupload + pixFmt = "yuva420p" + } + // For VAAPI, we need to tonemap first, then convert format and upload to hardware + vaapiFilter = vaapiFilter + ",format=nv12,hwupload" + hwArgs := []string{"-vaapi_device", vaapiDevice, "-vf", vaapiFilter, "-c:v", "av1_vaapi", "-b:v", "10M", "-pix_fmt", pixFmt} + newArgs := make([]string, 0, len(args)+len(hwArgs)) + newArgs = append(newArgs, args[:outputIdx]...) + newArgs = append(newArgs, hwArgs...) + newArgs = append(newArgs, args[outputIdx:]...) + return exec.Command("ffmpeg", newArgs...), nil + } + } + + // Check for AMD AMF AV1 (newer AMD GPUs) + if c.checkEncoderAvailable("av1_amf") { + outputIdx := len(args) - 1 + pixFmt := "yuv420p" + if useAlpha { + // AMF AV1 may support alpha on newer hardware + pixFmt = "yuva420p" + } + // Insert tonemapping filter and hardware encoding args + hwArgs := []string{"-vf", tonemapFilter, "-c:v", "av1_amf", "-quality", "balanced", "-b:v", "10M", "-pix_fmt", pixFmt} + newArgs := make([]string, 0, len(args)+len(hwArgs)) + newArgs = append(newArgs, args[:outputIdx]...) + newArgs = append(newArgs, hwArgs...) + newArgs = append(newArgs, args[outputIdx:]...) + return exec.Command("ffmpeg", newArgs...), nil + } + + // No AV1 hardware acceleration available + return nil, fmt.Errorf("no AV1 hardware encoder available") +} + // probeAllHardwareAccelerators probes ffmpeg for all available hardware acceleration methods // Returns a map of hwaccel method -> true/false func (c *Client) probeAllHardwareAccelerators() map[string]bool { @@ -2420,7 +3080,7 @@ func (c *Client) testGenericEncoder(encoder string) bool { // generateMP4WithConcat uses ffmpeg concat demuxer as fallback // device parameter is optional - if provided, it will be used for VAAPI encoding -func (c *Client) generateMP4WithConcat(frameFiles []string, outputMP4, workDir string, device string) error { +func (c *Client) generateMP4WithConcat(frameFiles []string, outputMP4, workDir string, device string, outputFormat string, codec string, pixFmt string, useAlpha bool, useHardware bool) error { // Create file list for ffmpeg concat demuxer listFile := filepath.Join(workDir, "frames.txt") listFileHandle, err := os.Create(listFile) @@ -2434,13 +3094,50 @@ func (c *Client) generateMP4WithConcat(frameFiles []string, outputMP4, workDir s } listFileHandle.Close() - // Run ffmpeg with concat demuxer and hardware acceleration - cmd, err := c.buildFFmpegCommand(device, "-f", "concat", "-safe", "0", "-i", listFile, - "-r", "24", "-y", outputMP4) - if err != nil { - // Fallback to software encoding + // Build video filter for HDR to SDR conversion + var vf string + if useAlpha { + // For AV1 with alpha: preserve alpha channel during tonemapping + vf = "zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=yuva420p" + } else { + // For H.264 without alpha: standard tonemapping + vf = 
"zscale=t=linear:npl=100,format=gbrpf32le,zscale=p=bt709,tonemap=tonemap=hable:desat=0,zscale=t=bt709:m=bt709:r=tv,format=yuv420p" + } + + // Run ffmpeg with concat demuxer + // EXR frames are 32-bit float HDR - FFmpeg will tonemap automatically + var cmd *exec.Cmd + + if useHardware { + if outputFormat == "EXR_AV1_MP4" { + // Try AV1 hardware acceleration + cmd, err = c.buildFFmpegCommandAV1(device, useAlpha, "-f", "concat", "-safe", "0", "-i", listFile, + "-r", "24", "-y", outputMP4) + if err != nil { + useHardware = false // Fall back to software + } + } else { + // Try H.264 hardware acceleration + if device != "" { + cmd, err = c.buildFFmpegCommand(device, "-f", "concat", "-safe", "0", "-i", listFile, + "-r", "24", "-y", outputMP4) + if err != nil { + useHardware = false // Fall back to software + } + } + } + } + + if !useHardware { + // Software encoding with HDR tonemapping cmd = exec.Command("ffmpeg", "-f", "concat", "-safe", "0", "-i", listFile, - "-c:v", "libx264", "-pix_fmt", "yuv420p", "-r", "24", "-y", outputMP4) + "-vf", vf, + "-c:v", codec, "-pix_fmt", pixFmt, "-r", "24", "-y", outputMP4) + + if outputFormat == "EXR_AV1_MP4" { + // AV1 encoding options for quality + cmd.Args = append(cmd.Args, "-cpu-used", "4", "-crf", "30", "-b:v", "0") + } } output, err := cmd.CombinedOutput() if err != nil { @@ -2503,7 +3200,7 @@ func (c *Client) checkFFmpegSizeError(output string) error { return nil } -// extractFrameNumber extracts frame number from filename like "frame_0001.png" +// extractFrameNumber extracts frame number from filename like "frame_0001.exr" or "frame_0001.png" func extractFrameNumber(filename string) int { parts := strings.Split(filepath.Base(filename), "_") if len(parts) < 2 { @@ -2537,6 +3234,31 @@ func (c *Client) getJobFiles(jobID int64) ([]map[string]interface{}, error) { return files, nil } +// getJobMetadata gets job metadata from manager +func (c *Client) getJobMetadata(jobID int64) (*types.BlendMetadata, error) { + path := fmt.Sprintf("/api/runner/jobs/%d/metadata", jobID) + resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + return nil, nil // No metadata found, not an error + } + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("failed to get job metadata: %s", string(body)) + } + + var metadata types.BlendMetadata + if err := json.NewDecoder(resp.Body).Decode(&metadata); err != nil { + return nil, err + } + + return &metadata, nil +} + // downloadFrameFile downloads a frame file for MP4 generation func (c *Client) downloadFrameFile(jobID int64, fileName, destPath string) error { path := fmt.Sprintf("/api/runner/files/%d/%s", jobID, fileName) @@ -2692,6 +3414,209 @@ func (c *Client) uploadFile(jobID int64, filePath string) (string, error) { return result.FilePath, nil } +// getContextCacheKey generates a cache key for a job's context +func (c *Client) getContextCacheKey(jobID int64) string { + // Use job ID as the cache key (context is regenerated when job files change) + return fmt.Sprintf("job_%d", jobID) +} + +// getContextCachePath returns the path to a cached context file +func (c *Client) getContextCachePath(cacheKey string) string { + cacheDir := filepath.Join(c.getWorkspaceDir(), "cache", "contexts") + os.MkdirAll(cacheDir, 0755) + return filepath.Join(cacheDir, cacheKey+".tar.gz") +} + +// isContextCacheValid checks if a cached context file exists 
and is not expired (1 hour TTL) +func (c *Client) isContextCacheValid(cachePath string) bool { + info, err := os.Stat(cachePath) + if err != nil { + return false + } + // Check if file is less than 1 hour old + return time.Since(info.ModTime()) < time.Hour +} + +// downloadJobContext downloads the job context tar.gz, using cache if available +func (c *Client) downloadJobContext(jobID int64, destPath string) error { + cacheKey := c.getContextCacheKey(jobID) + cachePath := c.getContextCachePath(cacheKey) + + // Check cache first + if c.isContextCacheValid(cachePath) { + log.Printf("Using cached context for job %d", jobID) + // Copy from cache to destination + src, err := os.Open(cachePath) + if err != nil { + log.Printf("Failed to open cached context, will download: %v", err) + } else { + defer src.Close() + dst, err := os.Create(destPath) + if err != nil { + return fmt.Errorf("failed to create destination file: %w", err) + } + defer dst.Close() + _, err = io.Copy(dst, src) + if err == nil { + return nil + } + log.Printf("Failed to copy cached context, will download: %v", err) + } + } + + // Download from manager + path := fmt.Sprintf("/api/runner/jobs/%d/context.tar.gz", jobID) + resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID)) + if err != nil { + return fmt.Errorf("failed to download context: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("context download failed: %s", string(body)) + } + + // Create temporary file first + tmpPath := destPath + ".tmp" + tmpFile, err := os.Create(tmpPath) + if err != nil { + return fmt.Errorf("failed to create temporary file: %w", err) + } + defer tmpFile.Close() + defer os.Remove(tmpPath) + + // Stream download to temporary file + _, err = io.Copy(tmpFile, resp.Body) + if err != nil { + return fmt.Errorf("failed to download context: %w", err) + } + tmpFile.Close() + + // Move to final destination + if err := os.Rename(tmpPath, destPath); err != nil { + return fmt.Errorf("failed to move context to destination: %w", err) + } + + // Update cache + cacheDir := filepath.Dir(cachePath) + os.MkdirAll(cacheDir, 0755) + if err := os.Link(destPath, cachePath); err != nil { + // If link fails (e.g., cross-filesystem), copy instead + src, err := os.Open(destPath) + if err == nil { + defer src.Close() + dst, err := os.Create(cachePath) + if err == nil { + defer dst.Close() + io.Copy(dst, src) + } + } + } + + return nil +} + +// extractTarGz extracts a tar.gz file to the destination directory +func (c *Client) extractTarGz(tarGzPath, destDir string) error { + // Open the tar.gz file + file, err := os.Open(tarGzPath) + if err != nil { + return fmt.Errorf("failed to open tar.gz file: %w", err) + } + defer file.Close() + + // Create gzip reader + gzReader, err := gzip.NewReader(file) + if err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + defer gzReader.Close() + + // Create tar reader + tarReader := tar.NewReader(gzReader) + + // Extract files + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("failed to read tar header: %w", err) + } + + // Sanitize path to prevent directory traversal + targetPath := filepath.Join(destDir, header.Name) + if !strings.HasPrefix(filepath.Clean(targetPath), filepath.Clean(destDir)+string(os.PathSeparator)) { + return fmt.Errorf("invalid file path in tar: %s", header.Name) + } + + // Handle directories + if header.Typeflag 
== tar.TypeDir { + if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + continue + } + + // Handle regular files + if header.Typeflag == tar.TypeReg { + // Create parent directories + if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { + return fmt.Errorf("failed to create parent directory: %w", err) + } + + // Create file + outFile, err := os.Create(targetPath) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + + // Copy file contents + if _, err := io.Copy(outFile, tarReader); err != nil { + outFile.Close() + return fmt.Errorf("failed to extract file: %w", err) + } + + // Set file permissions + if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil { + outFile.Close() + return fmt.Errorf("failed to set file permissions: %w", err) + } + + outFile.Close() + } + } + + return nil +} + +// cleanupExpiredContextCache removes context cache files older than 1 hour +func (c *Client) cleanupExpiredContextCache() { + cacheDir := filepath.Join(c.getWorkspaceDir(), "cache", "contexts") + entries, err := os.ReadDir(cacheDir) + if err != nil { + return + } + + now := time.Now() + for _, entry := range entries { + if entry.IsDir() { + continue + } + info, err := entry.Info() + if err != nil { + continue + } + if now.Sub(info.ModTime()) > time.Hour { + cachePath := filepath.Join(cacheDir, entry.Name()) + os.Remove(cachePath) + log.Printf("Removed expired context cache: %s", entry.Name()) + } + } +} + // processMetadataTask processes a metadata extraction task func (c *Client) processMetadataTask(task map[string]interface{}, jobID int64, inputFiles []interface{}) error { taskID := int64(task["id"].(float64)) @@ -2708,26 +3633,67 @@ func (c *Client) processMetadataTask(task map[string]interface{}, jobID int64, i // Step: download c.sendStepUpdate(taskID, "download", types.StepStatusRunning, "") - c.sendLog(taskID, types.LogLevelInfo, "Downloading blend file...", "download") + c.sendLog(taskID, types.LogLevelInfo, "Downloading job context...", "download") + + // Download context tar.gz + contextPath := filepath.Join(workDir, "context.tar.gz") + if err := c.downloadJobContext(jobID, contextPath); err != nil { + c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) + return fmt.Errorf("failed to download context: %w", err) + } + + // Extract context tar.gz + c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download") + if err := c.extractTarGz(contextPath, workDir); err != nil { + c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) + return fmt.Errorf("failed to extract context: %w", err) + } + + // Find .blend file in extracted contents blendFile := "" - for _, filePath := range inputFiles { - filePathStr := filePath.(string) - if err := c.downloadFile(filePathStr, workDir); err != nil { - c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) - return fmt.Errorf("failed to download file %s: %w", filePathStr, err) + err := filepath.Walk(workDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err } - if filepath.Ext(filePathStr) == ".blend" { - blendFile = filepath.Join(workDir, filepath.Base(filePathStr)) + if !info.IsDir() && strings.HasSuffix(strings.ToLower(info.Name()), ".blend") { + // Check it's not a Blender save file (.blend1, .blend2, etc.) 
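The hand-rolled digit-suffix check that follows duplicates `isBlenderSaveFile` from internal/storage/storage.go. For comparison, a compact regexp equivalent (a sketch, assuming the helper were shared rather than re-implemented here):

```go
package runner

import "regexp"

// Matches Blender's numbered backups ("scene.blend1", "scene.blend2", ...)
// but not a plain "scene.blend"; equivalent to the manual digit scan below.
var blendBackupRe = regexp.MustCompile(`(?i)\.blend\d+$`)

func isBlendBackup(name string) bool {
	return blendBackupRe.MatchString(name)
}
```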
+ lower := strings.ToLower(info.Name()) + idx := strings.LastIndex(lower, ".blend") + if idx != -1 { + suffix := lower[idx+len(".blend"):] + // If there are digits after .blend, it's a save file + isSaveFile := false + if len(suffix) > 0 { + isSaveFile = true + for _, r := range suffix { + if r < '0' || r > '9' { + isSaveFile = false + break + } + } + } + if !isSaveFile { + blendFile = path + return filepath.SkipAll // Stop walking once we find a blend file + } + } } + return nil + }) + + if err != nil { + c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) + return fmt.Errorf("failed to find blend file: %w", err) } if blendFile == "" { - err := fmt.Errorf("no .blend file found in input files") + err := fmt.Errorf("no .blend file found in context") c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error()) return err } + c.sendStepUpdate(taskID, "download", types.StepStatusCompleted, "") - c.sendLog(taskID, types.LogLevelInfo, "Blend file downloaded successfully", "download") + c.sendLog(taskID, types.LogLevelInfo, "Context downloaded and extracted successfully", "download") // Step: extract_metadata c.sendStepUpdate(taskID, "extract_metadata", types.StepStatusRunning, "") @@ -2739,6 +3705,43 @@ func (c *Client) processMetadataTask(task map[string]interface{}, jobID int64, i import json import sys +# Make all file paths relative to the blend file location FIRST +# This must be done immediately after file load, before any other operations +# to prevent Blender from trying to access external files with absolute paths +try: + bpy.ops.file.make_paths_relative() + print("Made all file paths relative to blend file") +except Exception as e: + print(f"Warning: Could not make paths relative: {e}") + +# Check for missing addons that the blend file requires +# Blender marks missing addons with "_missing" suffix in preferences +missing_files_info = { + "checked": False, + "has_missing": False, + "missing_files": [], + "missing_addons": [] +} + +try: + missing = [] + for mod in bpy.context.preferences.addons: + if mod.module.endswith("_missing"): + missing.append(mod.module.rsplit("_", 1)[0]) + + missing_files_info["checked"] = True + if missing: + missing_files_info["has_missing"] = True + missing_files_info["missing_addons"] = missing + print("Missing add-ons required by this .blend:") + for name in missing: + print(" -", name) + else: + print("No missing add-ons detected – file is headless-safe") +except Exception as e: + print(f"Warning: Could not check for missing addons: {e}") + missing_files_info["error"] = str(e) + # Get scene scene = bpy.context.scene @@ -2776,12 +3779,73 @@ if animation_start is not None and animation_end is not None: render = scene.render resolution_x = render.resolution_x resolution_y = render.resolution_y -samples = scene.cycles.samples if scene.cycles else scene.eevee.taa_render_samples -engine = scene.render.engine.lower() +engine = scene.render.engine.upper() # Determine output format from file format output_format = render.image_settings.file_format +# Extract engine-specific settings +engine_settings = {} + +if engine == 'CYCLES': + cycles = scene.cycles + engine_settings = { + "samples": getattr(cycles, 'samples', 128), + "use_denoising": getattr(cycles, 'use_denoising', False), + "denoising_radius": getattr(cycles, 'denoising_radius', 0), + "denoising_strength": getattr(cycles, 'denoising_strength', 0.0), + "device": getattr(cycles, 'device', 'CPU'), + "use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False), + 
"adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01, + "use_fast_gi": getattr(cycles, 'use_fast_gi', False), + "light_tree": getattr(cycles, 'use_light_tree', False), + "use_light_linking": getattr(cycles, 'use_light_linking', False), + "caustics_reflective": getattr(cycles, 'caustics_reflective', False), + "caustics_refractive": getattr(cycles, 'caustics_refractive', False), + "blur_glossy": getattr(cycles, 'blur_glossy', 0.0), + "max_bounces": getattr(cycles, 'max_bounces', 12), + "diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4), + "glossy_bounces": getattr(cycles, 'glossy_bounces', 4), + "transmission_bounces": getattr(cycles, 'transmission_bounces', 12), + "volume_bounces": getattr(cycles, 'volume_bounces', 0), + "transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8), + "film_transparent": getattr(cycles, 'film_transparent', False), + "use_layer_samples": getattr(cycles, 'use_layer_samples', False), + } +elif engine == 'EEVEE' or engine == 'EEVEE_NEXT': + eevee = scene.eevee + engine_settings = { + "taa_render_samples": getattr(eevee, 'taa_render_samples', 64), + "use_bloom": getattr(eevee, 'use_bloom', False), + "bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8), + "bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05), + "bloom_radius": getattr(eevee, 'bloom_radius', 6.5), + "use_ssr": getattr(eevee, 'use_ssr', True), + "use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False), + "ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'), + "use_ssao": getattr(eevee, 'use_ssao', True), + "ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'), + "ssao_distance": getattr(eevee, 'ssao_distance', 0.2), + "ssao_factor": getattr(eevee, 'ssao_factor', 1.0), + "use_soft_shadows": getattr(eevee, 'use_soft_shadows', True), + "use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True), + "use_volumetric": getattr(eevee, 'use_volumetric', False), + "volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'), + "volumetric_samples": getattr(eevee, 'volumetric_samples', 64), + "volumetric_start": getattr(eevee, 'volumetric_start', 0.0), + "volumetric_end": getattr(eevee, 'volumetric_end', 100.0), + "use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True), + "use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True), + "use_gtao": getattr(eevee, 'use_gtao', False), + "gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'), + "use_overscan": getattr(eevee, 'use_overscan', False), + } +else: + # For other engines, extract basic samples if available + engine_settings = { + "samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128 + } + # Extract scene info camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA']) object_count = len(scene.objects) @@ -2794,15 +3858,16 @@ metadata = { "render_settings": { "resolution_x": resolution_x, "resolution_y": resolution_y, - "samples": samples, "output_format": output_format, - "engine": engine + "engine": engine.lower(), + "engine_settings": engine_settings }, "scene_info": { "camera_count": camera_count, "object_count": object_count, "material_count": material_count - } + }, + "missing_files_info": missing_files_info } # Output as JSON diff --git a/internal/storage/storage.go b/internal/storage/storage.go index d026e58..52ff098 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -1,7 +1,9 @@ package storage import ( + 
"archive/tar" "archive/zip" + "compress/gzip" "fmt" "io" "os" @@ -194,3 +196,377 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) { return extractedFiles, nil } +// findCommonPrefix finds the common leading directory prefix if all paths share the same first-level directory +// Returns the prefix to strip (with trailing slash) or empty string if no common prefix +func findCommonPrefix(relPaths []string) string { + if len(relPaths) == 0 { + return "" + } + + // Get the first path component of each path + firstComponents := make([]string, 0, len(relPaths)) + for _, path := range relPaths { + parts := strings.Split(filepath.ToSlash(path), "/") + if len(parts) > 0 && parts[0] != "" { + firstComponents = append(firstComponents, parts[0]) + } else { + // If any path is at root level, no common prefix + return "" + } + } + + // Check if all first components are the same + if len(firstComponents) == 0 { + return "" + } + + commonFirst := firstComponents[0] + for _, comp := range firstComponents { + if comp != commonFirst { + // Not all paths share the same first directory + return "" + } + } + + // All paths share the same first directory - return it with trailing slash + return commonFirst + "/" +} + +// isBlenderSaveFile checks if a filename is a Blender save file (.blend1, .blend2, etc.) +// Returns true for files like "file.blend1", "file.blend2", but false for "file.blend" +func isBlenderSaveFile(filename string) bool { + lower := strings.ToLower(filename) + // Check if it ends with .blend followed by one or more digits + // Pattern: *.blend[digits] + if !strings.HasSuffix(lower, ".blend") { + // Doesn't end with .blend, check if it ends with .blend + digits + idx := strings.LastIndex(lower, ".blend") + if idx == -1 { + return false + } + // Check if there are digits after .blend + suffix := lower[idx+len(".blend"):] + if len(suffix) == 0 { + return false + } + // All remaining characters must be digits + for _, r := range suffix { + if r < '0' || r > '9' { + return false + } + } + return true + } + // Ends with .blend exactly - this is a regular blend file, not a save file + return false +} + +// CreateJobContext creates a tar.gz archive containing all job input files +// Filters out Blender save files (.blend1, .blend2, etc.) 
+// Streams each file straight into the gzip/tar writers so large jobs are
+// handled without staging a second copy on disk
+func (s *Storage) CreateJobContext(jobID int64) (string, error) {
+	jobPath := s.JobPath(jobID)
+	contextPath := filepath.Join(jobPath, "context.tar.gz")
+
+	// Collect all files from the job directory, excluding the context file itself and Blender save files
+	var filesToInclude []string
+	err := filepath.Walk(jobPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Skip directories
+		if info.IsDir() {
+			return nil
+		}
+
+		// Skip the context file itself if it exists
+		if path == contextPath {
+			return nil
+		}
+
+		// Skip Blender save files
+		if isBlenderSaveFile(info.Name()) {
+			return nil
+		}
+
+		// Get relative path from job directory
+		relPath, err := filepath.Rel(jobPath, path)
+		if err != nil {
+			return err
+		}
+
+		// Sanitize path - ensure it doesn't escape the job directory
+		cleanRelPath := filepath.Clean(relPath)
+		if strings.HasPrefix(cleanRelPath, "..") {
+			return fmt.Errorf("invalid file path: %s", relPath)
+		}
+
+		filesToInclude = append(filesToInclude, path)
+		return nil
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to walk job directory: %w", err)
+	}
+
+	if len(filesToInclude) == 0 {
+		return "", fmt.Errorf("no files found to include in context")
+	}
+
+	// Create the tar.gz file using streaming
+	contextFile, err := os.Create(contextPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to create context file: %w", err)
+	}
+	defer contextFile.Close()
+
+	gzWriter := gzip.NewWriter(contextFile)
+	defer gzWriter.Close()
+
+	tarWriter := tar.NewWriter(gzWriter)
+	defer tarWriter.Close()
+
+	// Add each file to the tar archive
+	for _, filePath := range filesToInclude {
+		file, err := os.Open(filePath)
+		if err != nil {
+			return "", fmt.Errorf("failed to open file %s: %w", filePath, err)
+		}
+
+		info, err := file.Stat()
+		if err != nil {
+			file.Close()
+			return "", fmt.Errorf("failed to stat file %s: %w", filePath, err)
+		}
+
+		// Get relative path for the tar header
+		relPath, err := filepath.Rel(jobPath, filePath)
+		if err != nil {
+			file.Close()
+			return "", fmt.Errorf("failed to get relative path for %s: %w", filePath, err)
+		}
+
+		// Normalize path separators for tar (use forward slashes)
+		tarPath := filepath.ToSlash(relPath)
+
+		// Create tar header
+		header, err := tar.FileInfoHeader(info, "")
+		if err != nil {
+			file.Close()
+			return "", fmt.Errorf("failed to create tar header for %s: %w", filePath, err)
+		}
+		header.Name = tarPath
+
+		// Write header
+		if err := tarWriter.WriteHeader(header); err != nil {
+			file.Close()
+			return "", fmt.Errorf("failed to write tar header for %s: %w", filePath, err)
+		}
+
+		// Stream file contents into the archive
+		if _, err := io.Copy(tarWriter, file); err != nil {
+			file.Close()
+			return "", fmt.Errorf("failed to write file %s to tar: %w", filePath, err)
+		}
+
+		file.Close()
+	}
+
+	// Ensure all data is flushed before returning
+	if err := tarWriter.Close(); err != nil {
+		return "", fmt.Errorf("failed to close tar writer: %w", err)
+	}
+	if err := gzWriter.Close(); err != nil {
+		return "", fmt.Errorf("failed to close gzip writer: %w", err)
+	}
+	if err := contextFile.Close(); err != nil {
+		return "", fmt.Errorf("failed to close context file: %w", err)
+	}
+
+	return contextPath, nil
+}
+
+//
CreateJobContextFromDir creates a context archive (tar.gz) from files in a source directory +// This is used during upload to immediately create the context archive as the primary artifact +// excludeFiles is a set of relative paths (from sourceDir) to exclude from the context +func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, excludeFiles ...string) (string, error) { + jobPath := s.JobPath(jobID) + contextPath := filepath.Join(jobPath, "context.tar.gz") + + // Ensure job directory exists + if err := os.MkdirAll(jobPath, 0755); err != nil { + return "", fmt.Errorf("failed to create job directory: %w", err) + } + + // Build set of files to exclude (normalize paths) + excludeSet := make(map[string]bool) + for _, excludeFile := range excludeFiles { + // Normalize the exclude path + excludePath := filepath.Clean(excludeFile) + excludeSet[excludePath] = true + // Also add with forward slash for cross-platform compatibility + excludeSet[filepath.ToSlash(excludePath)] = true + } + + // Collect all files from source directory, excluding Blender save files and excluded files + var filesToInclude []string + err := filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories + if info.IsDir() { + return nil + } + + // Skip Blender save files + if isBlenderSaveFile(info.Name()) { + return nil + } + + // Get relative path from source directory + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // Sanitize path - ensure it doesn't escape the source directory + cleanRelPath := filepath.Clean(relPath) + if strings.HasPrefix(cleanRelPath, "..") { + return fmt.Errorf("invalid file path: %s", relPath) + } + + // Check if this file should be excluded + if excludeSet[cleanRelPath] || excludeSet[filepath.ToSlash(cleanRelPath)] { + return nil + } + + filesToInclude = append(filesToInclude, path) + return nil + }) + if err != nil { + return "", fmt.Errorf("failed to walk source directory: %w", err) + } + + if len(filesToInclude) == 0 { + return "", fmt.Errorf("no files found to include in context archive") + } + + // Collect relative paths to find common prefix + relPaths := make([]string, 0, len(filesToInclude)) + for _, filePath := range filesToInclude { + relPath, err := filepath.Rel(sourceDir, filePath) + if err != nil { + return "", fmt.Errorf("failed to get relative path for %s: %w", filePath, err) + } + relPaths = append(relPaths, relPath) + } + + // Find and strip common leading directory if all files share one + commonPrefix := findCommonPrefix(relPaths) + + // Validate that there's exactly one .blend file at the root level after prefix stripping + blendFilesAtRoot := 0 + for _, relPath := range relPaths { + tarPath := filepath.ToSlash(relPath) + // Strip common prefix if present + if commonPrefix != "" && strings.HasPrefix(tarPath, commonPrefix) { + tarPath = strings.TrimPrefix(tarPath, commonPrefix) + } + + // Check if it's a .blend file at root (no path separators after prefix stripping) + if strings.HasSuffix(strings.ToLower(tarPath), ".blend") { + // Check if it's at root level (no directory separators) + if !strings.Contains(tarPath, "/") { + blendFilesAtRoot++ + } + } + } + + if blendFilesAtRoot == 0 { + return "", fmt.Errorf("no .blend file found at root level in context archive") + } + if blendFilesAtRoot > 1 { + return "", fmt.Errorf("multiple .blend files found at root level in context archive (found %d, expected 1)", blendFilesAtRoot) + } + + // Create the 
tar.gz file using streaming + contextFile, err := os.Create(contextPath) + if err != nil { + return "", fmt.Errorf("failed to create context file: %w", err) + } + defer contextFile.Close() + + gzWriter := gzip.NewWriter(contextFile) + defer gzWriter.Close() + + tarWriter := tar.NewWriter(gzWriter) + defer tarWriter.Close() + + // Add each file to the tar archive + for i, filePath := range filesToInclude { + file, err := os.Open(filePath) + if err != nil { + return "", fmt.Errorf("failed to open file %s: %w", filePath, err) + } + + info, err := file.Stat() + if err != nil { + file.Close() + return "", fmt.Errorf("failed to stat file %s: %w", filePath, err) + } + + // Get relative path and strip common prefix if present + relPath := relPaths[i] + tarPath := filepath.ToSlash(relPath) + + // Strip common prefix if found + if commonPrefix != "" && strings.HasPrefix(tarPath, commonPrefix) { + tarPath = strings.TrimPrefix(tarPath, commonPrefix) + } + + // Create tar header + header, err := tar.FileInfoHeader(info, "") + if err != nil { + file.Close() + return "", fmt.Errorf("failed to create tar header for %s: %w", filePath, err) + } + header.Name = tarPath + + // Write header + if err := tarWriter.WriteHeader(header); err != nil { + file.Close() + return "", fmt.Errorf("failed to write tar header for %s: %w", filePath, err) + } + + // Copy file contents using streaming + if _, err := io.Copy(tarWriter, file); err != nil { + file.Close() + return "", fmt.Errorf("failed to write file %s to tar: %w", filePath, err) + } + + file.Close() + } + + // Ensure all data is flushed + if err := tarWriter.Close(); err != nil { + return "", fmt.Errorf("failed to close tar writer: %w", err) + } + if err := gzWriter.Close(); err != nil { + return "", fmt.Errorf("failed to close gzip writer: %w", err) + } + if err := contextFile.Close(); err != nil { + return "", fmt.Errorf("failed to close context file: %w", err) + } + + return contextPath, nil +} + diff --git a/pkg/types/types.go b/pkg/types/types.go index d36a872..c0794e0 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -27,15 +27,14 @@ const ( type JobType string const ( - JobTypeMetadata JobType = "metadata" // Metadata extraction job - only needs blend file - JobTypeRender JobType = "render" // Render job - needs frame range, format, etc. + JobTypeRender JobType = "render" // Render job - needs frame range, format, etc. 
) -// Job represents a job (metadata extraction or render) +// Job represents a render job type Job struct { ID int64 `json:"id"` UserID int64 `json:"user_id"` - JobType JobType `json:"job_type"` // "metadata" or "render" + JobType JobType `json:"job_type"` // "render" Name string `json:"name"` Status JobStatus `json:"status"` Progress float64 `json:"progress"` // 0.0 to 100.0 @@ -133,13 +132,14 @@ type JobFile struct { // CreateJobRequest represents a request to create a new job type CreateJobRequest struct { - JobType JobType `json:"job_type"` // "metadata" or "render" - Name string `json:"name"` - FrameStart *int `json:"frame_start,omitempty"` // Required for render jobs - FrameEnd *int `json:"frame_end,omitempty"` // Required for render jobs - OutputFormat *string `json:"output_format,omitempty"` // Required for render jobs - AllowParallelRunners *bool `json:"allow_parallel_runners,omitempty"` // Optional for render jobs, defaults to true - MetadataJobID *int64 `json:"metadata_job_id,omitempty"` // Optional: ID of metadata job to copy input files from + JobType JobType `json:"job_type"` // "render" + Name string `json:"name"` + FrameStart *int `json:"frame_start,omitempty"` // Required for render jobs + FrameEnd *int `json:"frame_end,omitempty"` // Required for render jobs + OutputFormat *string `json:"output_format,omitempty"` // Required for render jobs + AllowParallelRunners *bool `json:"allow_parallel_runners,omitempty"` // Optional for render jobs, defaults to true + RenderSettings *RenderSettings `json:"render_settings,omitempty"` // Optional: Override blend file render settings + UploadSessionID *string `json:"upload_session_id,omitempty"` // Optional: Session ID from file upload } // UpdateJobProgressRequest represents a request to update job progress @@ -225,19 +225,30 @@ type TaskLogEntry struct { // BlendMetadata represents extracted metadata from a blend file type BlendMetadata struct { - FrameStart int `json:"frame_start"` - FrameEnd int `json:"frame_end"` - RenderSettings RenderSettings `json:"render_settings"` - SceneInfo SceneInfo `json:"scene_info"` + FrameStart int `json:"frame_start"` + FrameEnd int `json:"frame_end"` + RenderSettings RenderSettings `json:"render_settings"` + SceneInfo SceneInfo `json:"scene_info"` + MissingFilesInfo *MissingFilesInfo `json:"missing_files_info,omitempty"` +} + +// MissingFilesInfo represents information about missing files/addons +type MissingFilesInfo struct { + Checked bool `json:"checked"` + HasMissing bool `json:"has_missing"` + MissingFiles []string `json:"missing_files,omitempty"` + MissingAddons []string `json:"missing_addons,omitempty"` + Error string `json:"error,omitempty"` } // RenderSettings represents render settings from a blend file type RenderSettings struct { - ResolutionX int `json:"resolution_x"` - ResolutionY int `json:"resolution_y"` - Samples int `json:"samples"` - OutputFormat string `json:"output_format"` - Engine string `json:"engine"` + ResolutionX int `json:"resolution_x"` + ResolutionY int `json:"resolution_y"` + Samples int `json:"samples,omitempty"` // Deprecated, use EngineSettings + OutputFormat string `json:"output_format"` + Engine string `json:"engine"` + EngineSettings map[string]interface{} `json:"engine_settings,omitempty"` } // SceneInfo represents scene information from a blend file diff --git a/web/src/components/FileExplorer.jsx b/web/src/components/FileExplorer.jsx new file mode 100644 index 0000000..77991dc --- /dev/null +++ b/web/src/components/FileExplorer.jsx @@ -0,0 +1,154 @@ +import { 
useState } from 'react';
+
+export default function FileExplorer({ files, onDownload, onPreview, isImageFile }) {
+  const [expandedPaths, setExpandedPaths] = useState(new Set());
+
+  // Build directory tree from file paths
+  const buildTree = (files) => {
+    const tree = {};
+
+    files.forEach(file => {
+      const path = file.file_name;
+      // Handle both paths with slashes and single filenames; a root-level
+      // filename simply yields a single path segment
+      const parts = path.includes('/') ? path.split('/').filter(p => p) : [path];
+
+      let current = tree;
+      parts.forEach((part, index) => {
+        if (!current[part]) {
+          current[part] = {
+            name: part,
+            isFile: index === parts.length - 1,
+            file: index === parts.length - 1 ? file : null,
+            children: {},
+            path: parts.slice(0, index + 1).join('/')
+          };
+        }
+        current = current[part].children;
+      });
+    });
+
+    return tree;
+  };
+
+  const togglePath = (path) => {
+    const newExpanded = new Set(expandedPaths);
+    if (newExpanded.has(path)) {
+      newExpanded.delete(path);
+    } else {
+      newExpanded.add(path);
+    }
+    setExpandedPaths(newExpanded);
+  };
+
+  const renderTree = (node, level = 0, parentPath = '') => {
+    const items = Object.values(node).sort((a, b) => {
+      // Directories first, then files
+      if (a.isFile !== b.isFile) {
+        return a.isFile ? 1 : -1;
+      }
+      return a.name.localeCompare(b.name);
+    });
+
+    return items.map((item) => {
+      const fullPath = parentPath ? `${parentPath}/${item.name}` : item.name;
+      const isExpanded = expandedPaths.has(fullPath);
+      const indent = level * 20;
+
+      if (item.isFile) {
+        const file = item.file;
+        const isImage = isImageFile && isImageFile(file.file_name);
+        const sizeMB = (file.file_size / 1024 / 1024).toFixed(2);
+        const isArchive = file.file_name.endsWith('.tar.gz') || file.file_name.endsWith('.zip');
+
+        return (
+          <div key={fullPath} style={{ paddingLeft: indent }}>
+            <span>{file.file_name}</span>
+            <span> - {sizeMB} MB</span>
+            {isImage && onPreview && (
+              <button onClick={() => onPreview(file)}>Preview</button>
+            )}
+            {onDownload && (
+              <button onClick={() => onDownload(file)}>
+                {isArchive ? 'Download archive' : 'Download'}
+              </button>
+            )}
+          </div>
+        );
+      }
+
+      return (
+        <div key={fullPath}>
+          <div style={{ paddingLeft: indent }} onClick={() => togglePath(fullPath)}>
+            {isExpanded ? '▾' : '▸'} {item.name}
+          </div>
+          {isExpanded && renderTree(item.children, level + 1, fullPath)}
+        </div>
+      );
+    });
+  };
+
+  return <div>{renderTree(buildTree(files))}</div>;
+}
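The storage helpers introduced in this diff ship without tests; a sketch of a table-driven test for `findCommonPrefix` and `isBlenderSaveFile` (file placement and case names are illustrative):

```go
package storage

import "testing"

func TestContextHelpers(t *testing.T) {
	// findCommonPrefix: a prefix is reported only when every path shares
	// the same first-level directory.
	prefixCases := []struct {
		paths []string
		want  string
	}{
		{[]string{"proj/a.blend", "proj/tex/b.png"}, "proj/"},
		{[]string{"a.blend", "proj/b.png"}, ""},
		{[]string{"proj/a.blend", "other/b.png"}, ""},
	}
	for _, c := range prefixCases {
		if got := findCommonPrefix(c.paths); got != c.want {
			t.Errorf("findCommonPrefix(%v) = %q, want %q", c.paths, got, c.want)
		}
	}

	// isBlenderSaveFile: numbered backups only, never the primary .blend.
	saveCases := map[string]bool{
		"scene.blend":   false,
		"scene.blend1":  true,
		"scene.blend12": true,
		"scene.blendx":  false,
	}
	for name, want := range saveCases {
		if got := isBlenderSaveFile(name); got != want {
			t.Errorf("isBlenderSaveFile(%q) = %v, want %v", name, got, want)
		}
	}
}
```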