Enhance logging and context handling in job management. Introduce a logger initialization with configurable parameters in the manager and runner commands. Update job context handling to use tar files instead of tar.gz, and implement ETag generation for improved caching. Refactor API endpoints to support new context file structure and enhance error handling in job submissions. Add support for unhide objects and auto-execution options in job creation requests.
This commit is contained in:
1755
internal/api/jobs.go
1755
internal/api/jobs.go
File diff suppressed because it is too large
Load Diff
@@ -4,8 +4,8 @@ import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"database/sql"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"jiggablend/pkg/scripts"
|
||||
"jiggablend/pkg/types"
|
||||
)
|
||||
|
||||
@@ -169,22 +170,26 @@ func (s *Server) handleGetJobMetadata(w http.ResponseWriter, r *http.Request) {
|
||||
// extractMetadataFromContext extracts metadata from the blend file in a context archive
|
||||
// Returns the extracted metadata or an error
|
||||
func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata, error) {
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz")
|
||||
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar")
|
||||
|
||||
// Check if context exists
|
||||
if _, err := os.Stat(contextPath); err != nil {
|
||||
return nil, fmt.Errorf("context archive not found: %w", err)
|
||||
}
|
||||
|
||||
// Create temporary directory for extraction
|
||||
tmpDir, err := os.MkdirTemp("", fmt.Sprintf("fuego-metadata-%d-*", jobID))
|
||||
// Create temporary directory for extraction under storage base path
|
||||
tmpDir, err := s.storage.TempDir(fmt.Sprintf("jiggablend-metadata-%d-*", jobID))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmpDir); err != nil {
|
||||
log.Printf("Warning: Failed to clean up temp directory %s: %v", tmpDir, err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Extract context archive
|
||||
if err := s.extractTarGz(contextPath, tmpDir); err != nil {
|
||||
if err := s.extractTar(contextPath, tmpDir); err != nil {
|
||||
return nil, fmt.Errorf("failed to extract context: %w", err)
|
||||
}
|
||||
|
||||
@@ -228,188 +233,20 @@ func (s *Server) extractMetadataFromContext(jobID int64) (*types.BlendMetadata,
|
||||
return nil, fmt.Errorf("no .blend file found in context")
|
||||
}
|
||||
|
||||
// Create Python script to extract metadata
|
||||
// Use embedded Python script
|
||||
scriptPath := filepath.Join(tmpDir, "extract_metadata.py")
|
||||
scriptContent := `import bpy
|
||||
import json
|
||||
import sys
|
||||
|
||||
# Make all file paths relative to the blend file location FIRST
|
||||
# This must be done immediately after file load, before any other operations
|
||||
# to prevent Blender from trying to access external files with absolute paths
|
||||
try:
|
||||
bpy.ops.file.make_paths_relative()
|
||||
print("Made all file paths relative to blend file")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not make paths relative: {e}")
|
||||
|
||||
# Check for missing addons that the blend file requires
|
||||
# Blender marks missing addons with "_missing" suffix in preferences
|
||||
missing_files_info = {
|
||||
"checked": False,
|
||||
"has_missing": False,
|
||||
"missing_files": [],
|
||||
"missing_addons": []
|
||||
}
|
||||
|
||||
try:
|
||||
missing = []
|
||||
for mod in bpy.context.preferences.addons:
|
||||
if mod.module.endswith("_missing"):
|
||||
missing.append(mod.module.rsplit("_", 1)[0])
|
||||
|
||||
missing_files_info["checked"] = True
|
||||
if missing:
|
||||
missing_files_info["has_missing"] = True
|
||||
missing_files_info["missing_addons"] = missing
|
||||
print("Missing add-ons required by this .blend:")
|
||||
for name in missing:
|
||||
print(" -", name)
|
||||
else:
|
||||
print("No missing add-ons detected – file is headless-safe")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check for missing addons: {e}")
|
||||
missing_files_info["error"] = str(e)
|
||||
|
||||
# Get scene
|
||||
scene = bpy.context.scene
|
||||
|
||||
# Extract frame range from scene settings
|
||||
frame_start = scene.frame_start
|
||||
frame_end = scene.frame_end
|
||||
|
||||
# Also check for actual animation range (keyframes)
|
||||
# Find the earliest and latest keyframes across all objects
|
||||
animation_start = None
|
||||
animation_end = None
|
||||
|
||||
for obj in scene.objects:
|
||||
if obj.animation_data and obj.animation_data.action:
|
||||
action = obj.animation_data.action
|
||||
if action.fcurves:
|
||||
for fcurve in action.fcurves:
|
||||
if fcurve.keyframe_points:
|
||||
for keyframe in fcurve.keyframe_points:
|
||||
frame = int(keyframe.co[0])
|
||||
if animation_start is None or frame < animation_start:
|
||||
animation_start = frame
|
||||
if animation_end is None or frame > animation_end:
|
||||
animation_end = frame
|
||||
|
||||
# Use animation range if available, otherwise use scene frame range
|
||||
# If scene range seems wrong (start == end), prefer animation range
|
||||
if animation_start is not None and animation_end is not None:
|
||||
if frame_start == frame_end or (animation_start < frame_start or animation_end > frame_end):
|
||||
# Use animation range if scene range is invalid or animation extends beyond it
|
||||
frame_start = animation_start
|
||||
frame_end = animation_end
|
||||
|
||||
# Extract render settings
|
||||
render = scene.render
|
||||
resolution_x = render.resolution_x
|
||||
resolution_y = render.resolution_y
|
||||
engine = scene.render.engine.upper()
|
||||
|
||||
# Determine output format from file format
|
||||
output_format = render.image_settings.file_format
|
||||
|
||||
# Extract engine-specific settings
|
||||
engine_settings = {}
|
||||
|
||||
if engine == 'CYCLES':
|
||||
cycles = scene.cycles
|
||||
engine_settings = {
|
||||
"samples": getattr(cycles, 'samples', 128),
|
||||
"use_denoising": getattr(cycles, 'use_denoising', False),
|
||||
"denoising_radius": getattr(cycles, 'denoising_radius', 0),
|
||||
"denoising_strength": getattr(cycles, 'denoising_strength', 0.0),
|
||||
"device": getattr(cycles, 'device', 'CPU'),
|
||||
"use_adaptive_sampling": getattr(cycles, 'use_adaptive_sampling', False),
|
||||
"adaptive_threshold": getattr(cycles, 'adaptive_threshold', 0.01) if getattr(cycles, 'use_adaptive_sampling', False) else 0.01,
|
||||
"use_fast_gi": getattr(cycles, 'use_fast_gi', False),
|
||||
"light_tree": getattr(cycles, 'use_light_tree', False),
|
||||
"use_light_linking": getattr(cycles, 'use_light_linking', False),
|
||||
"caustics_reflective": getattr(cycles, 'caustics_reflective', False),
|
||||
"caustics_refractive": getattr(cycles, 'caustics_refractive', False),
|
||||
"blur_glossy": getattr(cycles, 'blur_glossy', 0.0),
|
||||
"max_bounces": getattr(cycles, 'max_bounces', 12),
|
||||
"diffuse_bounces": getattr(cycles, 'diffuse_bounces', 4),
|
||||
"glossy_bounces": getattr(cycles, 'glossy_bounces', 4),
|
||||
"transmission_bounces": getattr(cycles, 'transmission_bounces', 12),
|
||||
"volume_bounces": getattr(cycles, 'volume_bounces', 0),
|
||||
"transparent_max_bounces": getattr(cycles, 'transparent_max_bounces', 8),
|
||||
"film_transparent": getattr(cycles, 'film_transparent', False),
|
||||
"use_layer_samples": getattr(cycles, 'use_layer_samples', False),
|
||||
}
|
||||
elif engine == 'EEVEE' or engine == 'EEVEE_NEXT':
|
||||
eevee = scene.eevee
|
||||
engine_settings = {
|
||||
"taa_render_samples": getattr(eevee, 'taa_render_samples', 64),
|
||||
"use_bloom": getattr(eevee, 'use_bloom', False),
|
||||
"bloom_threshold": getattr(eevee, 'bloom_threshold', 0.8),
|
||||
"bloom_intensity": getattr(eevee, 'bloom_intensity', 0.05),
|
||||
"bloom_radius": getattr(eevee, 'bloom_radius', 6.5),
|
||||
"use_ssr": getattr(eevee, 'use_ssr', True),
|
||||
"use_ssr_refraction": getattr(eevee, 'use_ssr_refraction', False),
|
||||
"ssr_quality": getattr(eevee, 'ssr_quality', 'MEDIUM'),
|
||||
"use_ssao": getattr(eevee, 'use_ssao', True),
|
||||
"ssao_quality": getattr(eevee, 'ssao_quality', 'MEDIUM'),
|
||||
"ssao_distance": getattr(eevee, 'ssao_distance', 0.2),
|
||||
"ssao_factor": getattr(eevee, 'ssao_factor', 1.0),
|
||||
"use_soft_shadows": getattr(eevee, 'use_soft_shadows', True),
|
||||
"use_shadow_high_bitdepth": getattr(eevee, 'use_shadow_high_bitdepth', True),
|
||||
"use_volumetric": getattr(eevee, 'use_volumetric', False),
|
||||
"volumetric_tile_size": getattr(eevee, 'volumetric_tile_size', '8'),
|
||||
"volumetric_samples": getattr(eevee, 'volumetric_samples', 64),
|
||||
"volumetric_start": getattr(eevee, 'volumetric_start', 0.0),
|
||||
"volumetric_end": getattr(eevee, 'volumetric_end', 100.0),
|
||||
"use_volumetric_lights": getattr(eevee, 'use_volumetric_lights', True),
|
||||
"use_volumetric_shadows": getattr(eevee, 'use_volumetric_shadows', True),
|
||||
"use_gtao": getattr(eevee, 'use_gtao', False),
|
||||
"gtao_quality": getattr(eevee, 'gtao_quality', 'MEDIUM'),
|
||||
"use_overscan": getattr(eevee, 'use_overscan', False),
|
||||
}
|
||||
else:
|
||||
# For other engines, extract basic samples if available
|
||||
engine_settings = {
|
||||
"samples": getattr(scene, 'samples', 128) if hasattr(scene, 'samples') else 128
|
||||
}
|
||||
|
||||
# Extract scene info
|
||||
camera_count = len([obj for obj in scene.objects if obj.type == 'CAMERA'])
|
||||
object_count = len(scene.objects)
|
||||
material_count = len(bpy.data.materials)
|
||||
|
||||
# Build metadata dictionary
|
||||
metadata = {
|
||||
"frame_start": frame_start,
|
||||
"frame_end": frame_end,
|
||||
"render_settings": {
|
||||
"resolution_x": resolution_x,
|
||||
"resolution_y": resolution_y,
|
||||
"output_format": output_format,
|
||||
"engine": engine.lower(),
|
||||
"engine_settings": engine_settings
|
||||
},
|
||||
"scene_info": {
|
||||
"camera_count": camera_count,
|
||||
"object_count": object_count,
|
||||
"material_count": material_count
|
||||
},
|
||||
"missing_files_info": missing_files_info
|
||||
}
|
||||
|
||||
# Output as JSON
|
||||
print(json.dumps(metadata))
|
||||
sys.stdout.flush()
|
||||
`
|
||||
|
||||
if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
|
||||
if err := os.WriteFile(scriptPath, []byte(scripts.ExtractMetadata), 0644); err != nil {
|
||||
return nil, fmt.Errorf("failed to create extraction script: %w", err)
|
||||
}
|
||||
|
||||
// Make blend file path relative to tmpDir to avoid path resolution issues
|
||||
blendFileRel, err := filepath.Rel(tmpDir, blendFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get relative path for blend file: %w", err)
|
||||
}
|
||||
|
||||
// Execute Blender with Python script
|
||||
cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath)
|
||||
cmd := exec.Command("blender", "-b", blendFileRel, "--python", "extract_metadata.py")
|
||||
cmd.Dir = tmpDir
|
||||
|
||||
// Capture stdout and stderr
|
||||
@@ -443,14 +280,16 @@ sys.stdout.flush()
|
||||
}
|
||||
}()
|
||||
|
||||
// Stream stderr (discard for now, but could log if needed)
|
||||
// Capture stderr for error reporting
|
||||
var stderrBuffer bytes.Buffer
|
||||
stderrDone := make(chan bool)
|
||||
go func() {
|
||||
defer close(stderrDone)
|
||||
scanner := bufio.NewScanner(stderrPipe)
|
||||
for scanner.Scan() {
|
||||
// Could log stderr if needed
|
||||
_ = scanner.Text()
|
||||
line := scanner.Text()
|
||||
stderrBuffer.WriteString(line)
|
||||
stderrBuffer.WriteString("\n")
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -462,6 +301,18 @@ sys.stdout.flush()
|
||||
<-stderrDone
|
||||
|
||||
if err != nil {
|
||||
stderrOutput := strings.TrimSpace(stderrBuffer.String())
|
||||
stdoutOutput := strings.TrimSpace(stdoutBuffer.String())
|
||||
log.Printf("Blender metadata extraction failed for job %d:", jobID)
|
||||
if stderrOutput != "" {
|
||||
log.Printf("Blender stderr: %s", stderrOutput)
|
||||
}
|
||||
if stdoutOutput != "" {
|
||||
log.Printf("Blender stdout (last 500 chars): %s", truncateString(stdoutOutput, 500))
|
||||
}
|
||||
if stderrOutput != "" {
|
||||
return nil, fmt.Errorf("blender metadata extraction failed: %w (stderr: %s)", err, truncateString(stderrOutput, 200))
|
||||
}
|
||||
return nil, fmt.Errorf("blender metadata extraction failed: %w", err)
|
||||
}
|
||||
|
||||
@@ -484,21 +335,25 @@ sys.stdout.flush()
|
||||
return &metadata, nil
|
||||
}
|
||||
|
||||
// extractTarGz extracts a tar.gz archive to a destination directory
|
||||
func (s *Server) extractTarGz(tarGzPath, destDir string) error {
|
||||
file, err := os.Open(tarGzPath)
|
||||
// extractTar extracts a tar archive to a destination directory
|
||||
func (s *Server) extractTar(tarPath, destDir string) error {
|
||||
log.Printf("Extracting tar archive: %s -> %s", tarPath, destDir)
|
||||
|
||||
// Ensure destination directory exists
|
||||
if err := os.MkdirAll(destDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create destination directory: %w", err)
|
||||
}
|
||||
|
||||
file, err := os.Open(tarPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open archive: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
gzr, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create gzip reader: %w", err)
|
||||
}
|
||||
defer gzr.Close()
|
||||
tr := tar.NewReader(file)
|
||||
|
||||
tr := tar.NewReader(gzr)
|
||||
fileCount := 0
|
||||
dirCount := 0
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
@@ -511,9 +366,13 @@ func (s *Server) extractTarGz(tarGzPath, destDir string) error {
|
||||
|
||||
// Sanitize path to prevent directory traversal
|
||||
target := filepath.Join(destDir, header.Name)
|
||||
|
||||
// Ensure target is within destDir
|
||||
if !strings.HasPrefix(filepath.Clean(target), filepath.Clean(destDir)+string(os.PathSeparator)) {
|
||||
return fmt.Errorf("invalid file path in archive: %s", header.Name)
|
||||
cleanTarget := filepath.Clean(target)
|
||||
cleanDestDir := filepath.Clean(destDir)
|
||||
if !strings.HasPrefix(cleanTarget, cleanDestDir+string(os.PathSeparator)) && cleanTarget != cleanDestDir {
|
||||
log.Printf("ERROR: Invalid file path in TAR - target: %s, destDir: %s", cleanTarget, cleanDestDir)
|
||||
return fmt.Errorf("invalid file path in archive: %s (target: %s, destDir: %s)", header.Name, cleanTarget, cleanDestDir)
|
||||
}
|
||||
|
||||
// Create parent directories
|
||||
@@ -527,14 +386,18 @@ func (s *Server) extractTarGz(tarGzPath, destDir string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create file: %w", err)
|
||||
}
|
||||
if _, err := io.Copy(outFile, tr); err != nil {
|
||||
_, err = io.Copy(outFile, tr)
|
||||
if err != nil {
|
||||
outFile.Close()
|
||||
return fmt.Errorf("failed to write file: %w", err)
|
||||
}
|
||||
outFile.Close()
|
||||
fileCount++
|
||||
} else if header.Typeflag == tar.TypeDir {
|
||||
dirCount++
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Extraction complete: %d files, %d directories extracted to %s", fileCount, dirCount, destDir)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
|
||||
"jiggablend/pkg/types"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
@@ -287,13 +289,27 @@ func (s *Server) handleUpdateTaskStep(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// Get job ID for broadcasting
|
||||
var jobID int64
|
||||
err = s.db.QueryRow("SELECT job_id FROM tasks WHERE id = ?", taskID).Scan(&jobID)
|
||||
if err == nil {
|
||||
// Broadcast step update to frontend
|
||||
s.broadcastTaskUpdate(jobID, taskID, "step_update", map[string]interface{}{
|
||||
"step_id": stepID,
|
||||
"step_name": req.StepName,
|
||||
"status": req.Status,
|
||||
"duration_ms": req.DurationMs,
|
||||
"error_message": req.ErrorMessage,
|
||||
})
|
||||
}
|
||||
|
||||
s.respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"step_id": stepID,
|
||||
"message": "Step updated successfully",
|
||||
})
|
||||
}
|
||||
|
||||
// handleDownloadJobContext allows runners to download the job context tar.gz
|
||||
// handleDownloadJobContext allows runners to download the job context tar
|
||||
func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request) {
|
||||
jobID, err := parseID(r, "jobId")
|
||||
if err != nil {
|
||||
@@ -302,7 +318,7 @@ func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
|
||||
// Construct the context file path
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar.gz")
|
||||
contextPath := filepath.Join(s.storage.JobPath(jobID), "context.tar")
|
||||
|
||||
// Check if context file exists
|
||||
if !s.storage.FileExists(contextPath) {
|
||||
@@ -319,9 +335,9 @@ func (s *Server) handleDownloadJobContext(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Set appropriate headers for tar.gz file
|
||||
w.Header().Set("Content-Type", "application/gzip")
|
||||
w.Header().Set("Content-Disposition", "attachment; filename=context.tar.gz")
|
||||
// Set appropriate headers for tar file
|
||||
w.Header().Set("Content-Type", "application/x-tar")
|
||||
w.Header().Set("Content-Disposition", "attachment; filename=context.tar")
|
||||
|
||||
// Stream the file to the response
|
||||
io.Copy(w, file)
|
||||
@@ -356,16 +372,26 @@ func (s *Server) handleUploadFileFromRunner(w http.ResponseWriter, r *http.Reque
|
||||
}
|
||||
|
||||
// Record in database
|
||||
_, err = s.db.Exec(
|
||||
var fileID int64
|
||||
err = s.db.QueryRow(
|
||||
`INSERT INTO job_files (job_id, file_type, file_path, file_name, file_size)
|
||||
VALUES (?, ?, ?, ?, ?)`,
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
RETURNING id`,
|
||||
jobID, types.JobFileTypeOutput, filePath, header.Filename, header.Size,
|
||||
)
|
||||
).Scan(&fileID)
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to record file: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcast file addition
|
||||
s.broadcastJobUpdate(jobID, "file_added", map[string]interface{}{
|
||||
"file_id": fileID,
|
||||
"file_type": types.JobFileTypeOutput,
|
||||
"file_name": header.Filename,
|
||||
"file_size": header.Size,
|
||||
})
|
||||
|
||||
s.respondJSON(w, http.StatusCreated, map[string]interface{}{
|
||||
"file_path": filePath,
|
||||
"file_name": header.Filename,
|
||||
@@ -510,6 +536,79 @@ func (s *Server) handleGetJobMetadataForRunner(w http.ResponseWriter, r *http.Re
|
||||
s.respondJSON(w, http.StatusOK, metadata)
|
||||
}
|
||||
|
||||
// handleDownloadFileForRunner allows runners to download a file by fileName
|
||||
func (s *Server) handleDownloadFileForRunner(w http.ResponseWriter, r *http.Request) {
|
||||
jobID, err := parseID(r, "jobId")
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusBadRequest, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Get fileName from URL path (may need URL decoding)
|
||||
fileName := chi.URLParam(r, "fileName")
|
||||
if fileName == "" {
|
||||
s.respondError(w, http.StatusBadRequest, "fileName is required")
|
||||
return
|
||||
}
|
||||
|
||||
// URL decode the fileName in case it contains encoded characters
|
||||
decodedFileName, err := url.QueryUnescape(fileName)
|
||||
if err != nil {
|
||||
// If decoding fails, use original fileName
|
||||
decodedFileName = fileName
|
||||
}
|
||||
|
||||
// Get file info from database
|
||||
var filePath string
|
||||
err = s.db.QueryRow(
|
||||
`SELECT file_path FROM job_files WHERE job_id = ? AND file_name = ?`,
|
||||
jobID, decodedFileName,
|
||||
).Scan(&filePath)
|
||||
if err == sql.ErrNoRows {
|
||||
s.respondError(w, http.StatusNotFound, "File not found")
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to query file: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Open file
|
||||
file, err := s.storage.GetFile(filePath)
|
||||
if err != nil {
|
||||
s.respondError(w, http.StatusNotFound, "File not found on disk")
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Determine content type based on file extension
|
||||
contentType := "application/octet-stream"
|
||||
fileNameLower := strings.ToLower(decodedFileName)
|
||||
switch {
|
||||
case strings.HasSuffix(fileNameLower, ".png"):
|
||||
contentType = "image/png"
|
||||
case strings.HasSuffix(fileNameLower, ".jpg") || strings.HasSuffix(fileNameLower, ".jpeg"):
|
||||
contentType = "image/jpeg"
|
||||
case strings.HasSuffix(fileNameLower, ".gif"):
|
||||
contentType = "image/gif"
|
||||
case strings.HasSuffix(fileNameLower, ".webp"):
|
||||
contentType = "image/webp"
|
||||
case strings.HasSuffix(fileNameLower, ".exr") || strings.HasSuffix(fileNameLower, ".EXR"):
|
||||
contentType = "image/x-exr"
|
||||
case strings.HasSuffix(fileNameLower, ".mp4"):
|
||||
contentType = "video/mp4"
|
||||
case strings.HasSuffix(fileNameLower, ".webm"):
|
||||
contentType = "video/webm"
|
||||
}
|
||||
|
||||
// Set headers
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", decodedFileName))
|
||||
|
||||
// Stream file
|
||||
io.Copy(w, file)
|
||||
}
|
||||
|
||||
// WebSocket message types
|
||||
type WSMessage struct {
|
||||
Type string `json:"type"`
|
||||
@@ -785,6 +884,13 @@ func (s *Server) handleWebSocketTaskComplete(runnerID int64, taskUpdate WSTaskUp
|
||||
taskUpdate.TaskID,
|
||||
).Scan(&jobID)
|
||||
if err == nil {
|
||||
// Broadcast task update
|
||||
s.broadcastTaskUpdate(jobID, taskUpdate.TaskID, "task_update", map[string]interface{}{
|
||||
"status": status,
|
||||
"output_path": taskUpdate.OutputPath,
|
||||
"completed_at": now,
|
||||
"error": taskUpdate.Error,
|
||||
})
|
||||
s.updateJobStatusFromTasks(jobID)
|
||||
}
|
||||
}
|
||||
@@ -840,6 +946,7 @@ func (s *Server) getCurrentFrameFromLogs(jobID int64) (int, bool) {
|
||||
for rows.Next() {
|
||||
var taskID int64
|
||||
if err := rows.Scan(&taskID); err != nil {
|
||||
log.Printf("Failed to scan task ID in getCurrentFrameFromLogs: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -895,6 +1002,14 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
allowParallelRunners.Valid && !allowParallelRunners.Bool &&
|
||||
frameStart.Valid && frameEnd.Valid
|
||||
|
||||
// Get current job status to detect changes
|
||||
var currentStatus string
|
||||
err = s.db.QueryRow(`SELECT status FROM jobs WHERE id = ?`, jobID).Scan(¤tStatus)
|
||||
if err != nil {
|
||||
log.Printf("Failed to get current job status for job %d: %v", jobID, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Count total tasks and completed tasks
|
||||
var totalTasks, completedTasks int
|
||||
err = s.db.QueryRow(
|
||||
@@ -914,8 +1029,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("updateJobStatusFromTasks: job %d - total: %d, completed: %d", jobID, totalTasks, completedTasks)
|
||||
|
||||
// Calculate progress
|
||||
var progress float64
|
||||
if totalTasks == 0 {
|
||||
@@ -985,9 +1098,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
} else {
|
||||
progress = renderProgress
|
||||
}
|
||||
|
||||
log.Printf("updateJobStatusFromTasks: job %d - frame-based progress: current_frame=%d, render_progress=%.1f%%, non_render_progress=%.1f%%, total_progress=%.1f%%",
|
||||
jobID, currentFrame, renderProgress, nonRenderProgress, progress)
|
||||
} else {
|
||||
// Standard task-based progress
|
||||
progress = float64(completedTasks) / float64(totalTasks) * 100.0
|
||||
@@ -1013,8 +1123,6 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("updateJobStatusFromTasks: job %d - pending/running: %d", jobID, pendingOrRunningTasks)
|
||||
|
||||
if pendingOrRunningTasks == 0 && totalTasks > 0 {
|
||||
// All tasks are either completed or failed/cancelled
|
||||
// Check if any tasks failed
|
||||
@@ -1039,7 +1147,16 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
if err != nil {
|
||||
log.Printf("Failed to update job %d status to %s: %v", jobID, jobStatus, err)
|
||||
} else {
|
||||
log.Printf("Updated job %d status to %s (progress: %.1f%%, completed tasks: %d/%d)", jobID, jobStatus, progress, completedTasks, totalTasks)
|
||||
// Only log if status actually changed
|
||||
if currentStatus != jobStatus {
|
||||
log.Printf("Updated job %d status from %s to %s (progress: %.1f%%, completed tasks: %d/%d)", jobID, currentStatus, jobStatus, progress, completedTasks, totalTasks)
|
||||
}
|
||||
// Broadcast job update via WebSocket
|
||||
s.broadcastJobUpdate(jobID, "job_update", map[string]interface{}{
|
||||
"status": jobStatus,
|
||||
"progress": progress,
|
||||
"completed_at": now,
|
||||
})
|
||||
}
|
||||
|
||||
if outputFormatStr == "EXR_264_MP4" || outputFormatStr == "EXR_AV1_MP4" {
|
||||
@@ -1054,14 +1171,22 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
// Create a video generation task instead of calling generateMP4Video directly
|
||||
// This prevents race conditions when multiple runners complete frames simultaneously
|
||||
videoTaskTimeout := 86400 // 24 hours for video generation
|
||||
_, err := s.db.Exec(
|
||||
var videoTaskID int64
|
||||
err := s.db.QueryRow(
|
||||
`INSERT INTO tasks (job_id, frame_start, frame_end, task_type, status, timeout_seconds, max_retries)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)`,
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||
RETURNING id`,
|
||||
jobID, 0, 0, types.TaskTypeVideoGeneration, types.TaskStatusPending, videoTaskTimeout, 1,
|
||||
)
|
||||
).Scan(&videoTaskID)
|
||||
if err != nil {
|
||||
log.Printf("Failed to create video generation task for job %d: %v", jobID, err)
|
||||
} else {
|
||||
// Broadcast that a new task was added
|
||||
log.Printf("Broadcasting task_added for job %d: video generation task %d", jobID, videoTaskID)
|
||||
s.broadcastTaskUpdate(jobID, videoTaskID, "task_added", map[string]interface{}{
|
||||
"task_id": videoTaskID,
|
||||
"task_type": types.TaskTypeVideoGeneration,
|
||||
})
|
||||
// Update job status to ensure it's marked as running (has pending video task)
|
||||
s.updateJobStatusFromTasks(jobID)
|
||||
// Try to distribute the task immediately
|
||||
@@ -1099,7 +1224,10 @@ func (s *Server) updateJobStatusFromTasks(jobID int64) {
|
||||
if err != nil {
|
||||
log.Printf("Failed to update job %d status to %s: %v", jobID, jobStatus, err)
|
||||
} else {
|
||||
log.Printf("Updated job %d status to %s (progress: %.1f%%, completed: %d/%d, pending: %d, running: %d)", jobID, jobStatus, progress, completedTasks, totalTasks, pendingOrRunningTasks-runningTasks, runningTasks)
|
||||
// Only log if status actually changed
|
||||
if currentStatus != jobStatus {
|
||||
log.Printf("Updated job %d status from %s to %s (progress: %.1f%%, completed: %d/%d, pending: %d, running: %d)", jobID, currentStatus, jobStatus, progress, completedTasks, totalTasks, pendingOrRunningTasks-runningTasks, runningTasks)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1224,7 +1352,6 @@ func (s *Server) distributeTasksToRunners() {
|
||||
t.AllowParallelRunners = true
|
||||
}
|
||||
pendingTasks = append(pendingTasks, t)
|
||||
log.Printf("Found pending task %d (type: %s, job: %d '%s', status: %s)", t.TaskID, t.TaskType, t.JobID, t.JobName, t.JobStatus)
|
||||
}
|
||||
|
||||
if len(pendingTasks) == 0 {
|
||||
@@ -1308,11 +1435,6 @@ func (s *Server) distributeTasksToRunners() {
|
||||
}
|
||||
log.Printf("Distributing %d pending tasks (%v) to %d connected runners: %v", len(pendingTasks), taskTypes, len(connectedRunners), connectedRunners)
|
||||
|
||||
// Log each pending task for debugging
|
||||
for _, task := range pendingTasks {
|
||||
log.Printf(" - Task %d (type: %s, job: %d '%s', status: %s)", task.TaskID, task.TaskType, task.JobID, task.JobName, task.JobStatus)
|
||||
}
|
||||
|
||||
// Distribute tasks to runners
|
||||
// Sort tasks to prioritize metadata tasks
|
||||
sort.Slice(pendingTasks, func(i, j int) bool {
|
||||
@@ -1572,6 +1694,13 @@ func (s *Server) distributeTasksToRunners() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Broadcast task assignment
|
||||
s.broadcastTaskUpdate(task.JobID, task.TaskID, "task_update", map[string]interface{}{
|
||||
"status": types.TaskStatusRunning,
|
||||
"runner_id": selectedRunnerID,
|
||||
"started_at": now,
|
||||
})
|
||||
|
||||
// Task was successfully assigned, send via WebSocket
|
||||
log.Printf("Assigned task %d (type: %s, job: %d) to runner %d", task.TaskID, task.TaskType, task.JobID, selectedRunnerID)
|
||||
|
||||
@@ -1642,6 +1771,8 @@ func (s *Server) assignTaskToRunner(runnerID int64, taskID int64) error {
|
||||
var filePath string
|
||||
if err := rows.Scan(&filePath); err == nil {
|
||||
task.InputFiles = append(task.InputFiles, filePath)
|
||||
} else {
|
||||
log.Printf("Failed to scan input file path for task %d: %v", taskID, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -38,6 +43,15 @@ type Server struct {
|
||||
// Mutexes for each frontend connection to serialize writes
|
||||
frontendConnsWriteMu map[string]*sync.Mutex // key: "jobId:taskId"
|
||||
frontendConnsWriteMuMu sync.RWMutex
|
||||
// Job list WebSocket connections (key: userID)
|
||||
jobListConns map[int64]*websocket.Conn
|
||||
jobListConnsMu sync.RWMutex
|
||||
// Single job WebSocket connections (key: "userId:jobId")
|
||||
jobConns map[string]*websocket.Conn
|
||||
jobConnsMu sync.RWMutex
|
||||
// Mutexes for job WebSocket connections
|
||||
jobConnsWriteMu map[string]*sync.Mutex
|
||||
jobConnsWriteMuMu sync.RWMutex
|
||||
// Throttling for progress updates (per job)
|
||||
progressUpdateTimes map[int64]time.Time // key: jobID
|
||||
progressUpdateTimesMu sync.RWMutex
|
||||
@@ -66,6 +80,9 @@ func NewServer(db *database.DB, auth *authpkg.Auth, storage *storage.Storage) (*
|
||||
runnerConns: make(map[int64]*websocket.Conn),
|
||||
frontendConns: make(map[string]*websocket.Conn),
|
||||
frontendConnsWriteMu: make(map[string]*sync.Mutex),
|
||||
jobListConns: make(map[int64]*websocket.Conn),
|
||||
jobConns: make(map[string]*websocket.Conn),
|
||||
jobConnsWriteMu: make(map[string]*sync.Mutex),
|
||||
progressUpdateTimes: make(map[int64]time.Time),
|
||||
}
|
||||
|
||||
@@ -83,16 +100,62 @@ func (s *Server) setupMiddleware() {
|
||||
// Note: Timeout middleware is NOT applied globally to avoid conflicts with WebSocket connections
|
||||
// WebSocket connections are long-lived and should not have HTTP timeouts
|
||||
|
||||
// Add gzip compression for JSON responses
|
||||
s.router.Use(gzipMiddleware)
|
||||
|
||||
s.router.Use(cors.Handler(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "Range"},
|
||||
ExposedHeaders: []string{"Link", "Content-Range", "Accept-Ranges", "Content-Length"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "Range", "If-None-Match"},
|
||||
ExposedHeaders: []string{"Link", "Content-Range", "Accept-Ranges", "Content-Length", "ETag"},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 300,
|
||||
}))
|
||||
}
|
||||
|
||||
// gzipMiddleware compresses responses with gzip if client supports it
|
||||
func gzipMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Skip compression for WebSocket upgrades
|
||||
if strings.ToLower(r.Header.Get("Upgrade")) == "websocket" {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if client accepts gzip
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Create gzip writer
|
||||
gz := gzip.NewWriter(w)
|
||||
defer gz.Close()
|
||||
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
w.Header().Set("Vary", "Accept-Encoding")
|
||||
|
||||
// Wrap response writer
|
||||
gzw := &gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
||||
next.ServeHTTP(gzw, r)
|
||||
})
|
||||
}
|
||||
|
||||
// gzipResponseWriter wraps http.ResponseWriter to add gzip compression
//
// The embedded io.Writer is the gzip writer layered over the response; Write
// is routed through it, while header access and status codes pass straight
// to the embedded ResponseWriter.
type gzipResponseWriter struct {
	io.Writer           // gzip-compressing writer over the response body
	http.ResponseWriter // underlying response for headers and status
}
|
||||
|
||||
// Write compresses b through the gzip writer rather than writing the raw
// bytes to the underlying response.
//
// NOTE(review): if the handler never set Content-Type, net/http sniffs it
// from the first (already compressed) bytes — confirm handlers set
// Content-Type explicitly before writing.
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
	return w.Writer.Write(b)
}
|
||||
|
||||
func (w *gzipResponseWriter) WriteHeader(statusCode int) {
|
||||
// Don't set Content-Length when using gzip - it will be set automatically
|
||||
w.ResponseWriter.WriteHeader(statusCode)
|
||||
}
|
||||
|
||||
// setupRoutes configures routes
|
||||
func (s *Server) setupRoutes() {
|
||||
// Public routes
|
||||
@@ -118,16 +181,21 @@ func (s *Server) setupRoutes() {
|
||||
r.Post("/", s.handleCreateJob)
|
||||
r.Post("/upload", s.handleUploadFileForJobCreation) // Upload before job creation
|
||||
r.Get("/", s.handleListJobs)
|
||||
r.Get("/summary", s.handleListJobsSummary)
|
||||
r.Post("/batch", s.handleBatchGetJobs)
|
||||
r.Get("/{id}", s.handleGetJob)
|
||||
r.Delete("/{id}", s.handleCancelJob)
|
||||
r.Post("/{id}/delete", s.handleDeleteJob)
|
||||
r.Post("/{id}/upload", s.handleUploadJobFile)
|
||||
r.Get("/{id}/files", s.handleListJobFiles)
|
||||
r.Get("/{id}/files/count", s.handleGetJobFilesCount)
|
||||
r.Get("/{id}/context", s.handleListContextArchive)
|
||||
r.Get("/{id}/files/{fileId}/download", s.handleDownloadJobFile)
|
||||
r.Get("/{id}/video", s.handleStreamVideo)
|
||||
r.Get("/{id}/metadata", s.handleGetJobMetadata)
|
||||
r.Get("/{id}/tasks", s.handleListJobTasks)
|
||||
r.Get("/{id}/tasks/summary", s.handleListJobTasksSummary)
|
||||
r.Post("/{id}/tasks/batch", s.handleBatchGetTasks)
|
||||
r.Get("/{id}/tasks/{taskId}/logs", s.handleGetTaskLogs)
|
||||
// WebSocket route - no timeout middleware (long-lived connection)
|
||||
r.With(func(next http.Handler) http.Handler {
|
||||
@@ -138,6 +206,19 @@ func (s *Server) setupRoutes() {
|
||||
}).Get("/{id}/tasks/{taskId}/logs/ws", s.handleStreamTaskLogsWebSocket)
|
||||
r.Get("/{id}/tasks/{taskId}/steps", s.handleGetTaskSteps)
|
||||
r.Post("/{id}/tasks/{taskId}/retry", s.handleRetryTask)
|
||||
// WebSocket routes for real-time updates
|
||||
r.With(func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Remove timeout middleware for WebSocket
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}).Get("/ws", s.handleJobsWebSocket)
|
||||
r.With(func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Remove timeout middleware for WebSocket
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}).Get("/{id}/ws", s.handleJobWebSocket)
|
||||
})
|
||||
|
||||
// Admin routes
|
||||
@@ -181,7 +262,8 @@ func (s *Server) setupRoutes() {
|
||||
})
|
||||
r.Post("/tasks/{id}/progress", s.handleUpdateTaskProgress)
|
||||
r.Post("/tasks/{id}/steps", s.handleUpdateTaskStep)
|
||||
r.Get("/jobs/{jobId}/context.tar.gz", s.handleDownloadJobContext)
|
||||
r.Get("/jobs/{jobId}/context.tar", s.handleDownloadJobContext)
|
||||
r.Get("/files/{jobId}/{fileName}", s.handleDownloadFileForRunner)
|
||||
r.Post("/files/{jobId}/upload", s.handleUploadFileFromRunner)
|
||||
r.Get("/jobs/{jobId}/status", s.handleGetJobStatusForRunner)
|
||||
r.Get("/jobs/{jobId}/files", s.handleGetJobFilesForRunner)
|
||||
@@ -311,12 +393,14 @@ func (s *Server) handleLogout(w http.ResponseWriter, r *http.Request) {
|
||||
func (s *Server) handleGetMe(w http.ResponseWriter, r *http.Request) {
|
||||
cookie, err := r.Cookie("session_id")
|
||||
if err != nil {
|
||||
log.Printf("Authentication failed: missing session cookie in /auth/me")
|
||||
s.respondError(w, http.StatusUnauthorized, "Not authenticated")
|
||||
return
|
||||
}
|
||||
|
||||
session, ok := s.auth.GetSession(cookie.Value)
|
||||
if !ok {
|
||||
log.Printf("Authentication failed: invalid session cookie in /auth/me")
|
||||
s.respondError(w, http.StatusUnauthorized, "Invalid session")
|
||||
return
|
||||
}
|
||||
@@ -410,6 +494,7 @@ func (s *Server) handleLocalLogin(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
session, err := s.auth.LocalLogin(req.Username, req.Password)
|
||||
if err != nil {
|
||||
log.Printf("Authentication failed: invalid credentials for username '%s'", req.Username)
|
||||
s.respondError(w, http.StatusUnauthorized, "Invalid credentials")
|
||||
return
|
||||
}
|
||||
@@ -512,6 +597,7 @@ func parseID(r *http.Request, param string) (int64, error) {
|
||||
func (s *Server) StartBackgroundTasks() {
|
||||
go s.recoverStuckTasks()
|
||||
go s.cleanupOldRenderJobs()
|
||||
go s.cleanupOldTempDirectories()
|
||||
}
|
||||
|
||||
// recoverStuckTasks periodically checks for dead runners and stuck tasks
|
||||
@@ -621,6 +707,7 @@ func (s *Server) recoverTaskTimeouts() {
|
||||
|
||||
err := rows.Scan(&taskID, &runnerID, &retryCount, &maxRetries, &timeoutSeconds, &startedAt)
|
||||
if err != nil {
|
||||
log.Printf("Failed to scan task row in recoverTaskTimeouts: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -659,3 +746,72 @@ func (s *Server) recoverTaskTimeouts() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupOldTempDirectories periodically cleans up old temporary directories
|
||||
func (s *Server) cleanupOldTempDirectories() {
|
||||
// Run cleanup every hour
|
||||
ticker := time.NewTicker(1 * time.Hour)
|
||||
defer ticker.Stop()
|
||||
|
||||
// Run once immediately on startup
|
||||
s.cleanupOldTempDirectoriesOnce()
|
||||
|
||||
for range ticker.C {
|
||||
s.cleanupOldTempDirectoriesOnce()
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupOldTempDirectoriesOnce removes temp directories older than 1 hour
|
||||
func (s *Server) cleanupOldTempDirectoriesOnce() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.Printf("Panic in cleanupOldTempDirectories: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
tempPath := filepath.Join(s.storage.BasePath(), "temp")
|
||||
|
||||
// Check if temp directory exists
|
||||
if _, err := os.Stat(tempPath); os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Read all entries in temp directory
|
||||
entries, err := os.ReadDir(tempPath)
|
||||
if err != nil {
|
||||
log.Printf("Failed to read temp directory: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
cleanedCount := 0
|
||||
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
entryPath := filepath.Join(tempPath, entry.Name())
|
||||
|
||||
// Get directory info to check modification time
|
||||
info, err := entry.Info()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove directories older than 1 hour
|
||||
age := now.Sub(info.ModTime())
|
||||
if age > 1*time.Hour {
|
||||
if err := os.RemoveAll(entryPath); err != nil {
|
||||
log.Printf("Warning: Failed to clean up old temp directory %s: %v", entryPath, err)
|
||||
} else {
|
||||
cleanedCount++
|
||||
log.Printf("Cleaned up old temp directory: %s (age: %v)", entryPath, age)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cleanedCount > 0 {
|
||||
log.Printf("Cleaned up %d old temp directories", cleanedCount)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -410,6 +410,7 @@ func (a *Auth) Middleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
cookie, err := r.Cookie("session_id")
|
||||
if err != nil {
|
||||
log.Printf("Authentication failed: missing session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -418,6 +419,7 @@ func (a *Auth) Middleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
session, ok := a.GetSession(cookie.Value)
|
||||
if !ok {
|
||||
log.Printf("Authentication failed: invalid session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -451,6 +453,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
// First check authentication
|
||||
cookie, err := r.Cookie("session_id")
|
||||
if err != nil {
|
||||
log.Printf("Admin authentication failed: missing session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -459,6 +462,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
session, ok := a.GetSession(cookie.Value)
|
||||
if !ok {
|
||||
log.Printf("Admin authentication failed: invalid session cookie for %s %s", r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Unauthorized"})
|
||||
@@ -467,6 +471,7 @@ func (a *Auth) AdminMiddleware(next http.HandlerFunc) http.HandlerFunc {
|
||||
|
||||
// Then check admin status
|
||||
if !session.IsAdmin {
|
||||
log.Printf("Admin access denied: user %d (email: %s) attempted to access admin endpoint %s %s", session.UserID, session.Email, r.Method, r.URL.Path)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
json.NewEncoder(w).Encode(map[string]string{"error": "Forbidden: Admin access required"})
|
||||
|
||||
@@ -165,14 +165,17 @@ func (db *DB) migrate() error {
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status);
|
||||
CREATE INDEX IF NOT EXISTS idx_jobs_user_status_created ON jobs(user_id, status, created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_job_id ON tasks(job_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_runner_id ON tasks(runner_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_job_status ON tasks(job_id, status);
|
||||
CREATE INDEX IF NOT EXISTS idx_tasks_started_at ON tasks(started_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_job_files_job_id ON job_files(job_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_registration_tokens_token ON registration_tokens(token);
|
||||
CREATE INDEX IF NOT EXISTS idx_registration_tokens_expires_at ON registration_tokens(expires_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_created_at ON task_logs(task_id, created_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_task_logs_task_id_id ON task_logs(task_id, id DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_task_logs_runner_id ON task_logs(runner_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_task_steps_task_id ON task_steps(task_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_runners_last_heartbeat ON runners(last_heartbeat);
|
||||
@@ -213,6 +216,9 @@ func (db *DB) migrate() error {
|
||||
`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS retry_count INTEGER DEFAULT 0`,
|
||||
`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS max_retries INTEGER DEFAULT 3`,
|
||||
`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS timeout_seconds INTEGER`,
|
||||
// Add updated_at columns for ETag support
|
||||
`ALTER TABLE jobs ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`,
|
||||
`ALTER TABLE tasks ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP`,
|
||||
// Migrate file_size from INTEGER to BIGINT to support large files (>2GB)
|
||||
// DuckDB doesn't support direct ALTER COLUMN TYPE, so we use a workaround:
|
||||
// 1. Add new column as BIGINT
|
||||
|
||||
127
internal/logger/logger.go
Normal file
127
internal/logger/logger.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package logger
|
||||
|
||||
import (
	"errors"
	"io"
	"log"
	"os"
	"path/filepath"
	"sync"

	"gopkg.in/natefinch/lumberjack.v2"
)
|
||||
|
||||
var (
	// defaultLogger is the process-wide logger installed by Init. It stays
	// nil until Init succeeds, so package-level helpers must nil-check it.
	defaultLogger *Logger
	// once guards the single initialization attempt made by Init.
	once sync.Once
)
|
||||
|
||||
// Logger wraps the standard log.Logger with file and stdout output
//
// The embedded log.Logger writes to a multi-writer (stdout + rotating file);
// fileWriter keeps the file sink so Close can release it.
type Logger struct {
	*log.Logger
	fileWriter io.WriteCloser // rotating file sink; closed by Close
}
|
||||
|
||||
// Init initializes the default logger with both file and stdout output
|
||||
func Init(logDir, logFileName string, maxSizeMB int, maxBackups int, maxAgeDays int) error {
|
||||
var err error
|
||||
once.Do(func() {
|
||||
defaultLogger, err = New(logDir, logFileName, maxSizeMB, maxBackups, maxAgeDays)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Replace standard log output with the multi-writer
|
||||
multiWriter := io.MultiWriter(os.Stdout, defaultLogger.fileWriter)
|
||||
log.SetOutput(multiWriter)
|
||||
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// New creates a new logger that writes to both stdout and a log file
|
||||
func New(logDir, logFileName string, maxSizeMB int, maxBackups int, maxAgeDays int) (*Logger, error) {
|
||||
// Ensure log directory exists
|
||||
if err := os.MkdirAll(logDir, 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logPath := filepath.Join(logDir, logFileName)
|
||||
|
||||
// Create file writer with rotation
|
||||
fileWriter := &lumberjack.Logger{
|
||||
Filename: logPath,
|
||||
MaxSize: maxSizeMB, // megabytes
|
||||
MaxBackups: maxBackups, // number of backup files
|
||||
MaxAge: maxAgeDays, // days
|
||||
Compress: true, // compress old log files
|
||||
}
|
||||
|
||||
// Create multi-writer that writes to both stdout and file
|
||||
multiWriter := io.MultiWriter(os.Stdout, fileWriter)
|
||||
|
||||
// Create logger with standard flags
|
||||
logger := log.New(multiWriter, "", log.LstdFlags|log.Lshortfile)
|
||||
|
||||
return &Logger{
|
||||
Logger: logger,
|
||||
fileWriter: fileWriter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the file writer
|
||||
func (l *Logger) Close() error {
|
||||
if l.fileWriter != nil {
|
||||
return l.fileWriter.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDefault returns the default logger instance
//
// It is nil until Init has succeeded (or forever, if Init's one attempt
// failed), so callers must nil-check the result before use.
func GetDefault() *Logger {
	return defaultLogger
}
|
||||
|
||||
// Printf logs a formatted message
|
||||
func Printf(format string, v ...interface{}) {
|
||||
if defaultLogger != nil {
|
||||
defaultLogger.Printf(format, v...)
|
||||
} else {
|
||||
log.Printf(format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Print logs a message
|
||||
func Print(v ...interface{}) {
|
||||
if defaultLogger != nil {
|
||||
defaultLogger.Print(v...)
|
||||
} else {
|
||||
log.Print(v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Println logs a message with newline
|
||||
func Println(v ...interface{}) {
|
||||
if defaultLogger != nil {
|
||||
defaultLogger.Println(v...)
|
||||
} else {
|
||||
log.Println(v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Fatal logs a message and exits
|
||||
func Fatal(v ...interface{}) {
|
||||
if defaultLogger != nil {
|
||||
defaultLogger.Fatal(v...)
|
||||
} else {
|
||||
log.Fatal(v...)
|
||||
}
|
||||
}
|
||||
|
||||
// Fatalf logs a formatted message and exits
|
||||
func Fatalf(format string, v ...interface{}) {
|
||||
if defaultLogger != nil {
|
||||
defaultLogger.Fatalf(format, v...)
|
||||
} else {
|
||||
log.Fatalf(format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"jiggablend/pkg/scripts"
|
||||
"jiggablend/pkg/types"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
@@ -53,18 +54,20 @@ type Client struct {
|
||||
vaapiDevicesMu sync.RWMutex // Protects vaapiDevices
|
||||
allocatedDevices map[int64]string // map[taskID]device - tracks which device is allocated to which task
|
||||
allocatedDevicesMu sync.RWMutex // Protects allocatedDevices
|
||||
longRunningClient *http.Client // HTTP client for long-running operations (no timeout)
|
||||
}
|
||||
|
||||
// NewClient creates a new runner client
|
||||
func NewClient(managerURL, name, hostname, ipAddress string) *Client {
|
||||
return &Client{
|
||||
managerURL: managerURL,
|
||||
name: name,
|
||||
hostname: hostname,
|
||||
ipAddress: ipAddress,
|
||||
httpClient: &http.Client{Timeout: 30 * time.Second},
|
||||
stopChan: make(chan struct{}),
|
||||
stepStartTimes: make(map[string]time.Time),
|
||||
managerURL: managerURL,
|
||||
name: name,
|
||||
hostname: hostname,
|
||||
ipAddress: ipAddress,
|
||||
httpClient: &http.Client{Timeout: 30 * time.Second},
|
||||
longRunningClient: &http.Client{Timeout: 0}, // No timeout for long-running operations (context downloads, file uploads/downloads)
|
||||
stopChan: make(chan struct{}),
|
||||
stepStartTimes: make(map[string]time.Time),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -465,6 +468,17 @@ func (c *Client) Register(registrationToken string) (int64, string, string, erro
|
||||
// doSignedRequest performs an authenticated HTTP request using shared secret
// queryParams is optional and will be appended to the URL
//
// Uses the default HTTP client (30-second timeout); for operations that may
// run longer — context downloads, large file transfers — use
// doSignedRequestLong instead.
func (c *Client) doSignedRequest(method, path string, body []byte, queryParams ...string) (*http.Response, error) {
	return c.doSignedRequestWithClient(method, path, body, c.httpClient, queryParams...)
}
|
||||
|
||||
// doSignedRequestLong performs an authenticated HTTP request using the long-running client (no timeout)
// Use this for context downloads, file uploads/downloads, and other operations that may take a long time
//
// The long-running client has Timeout: 0, so callers are responsible for
// bounding the operation themselves (e.g. by closing the response body).
func (c *Client) doSignedRequestLong(method, path string, body []byte, queryParams ...string) (*http.Response, error) {
	return c.doSignedRequestWithClient(method, path, body, c.longRunningClient, queryParams...)
}
|
||||
|
||||
// doSignedRequestWithClient performs an authenticated HTTP request using the specified client
|
||||
func (c *Client) doSignedRequestWithClient(method, path string, body []byte, client *http.Client, queryParams ...string) (*http.Response, error) {
|
||||
if c.runnerSecret == "" {
|
||||
return nil, fmt.Errorf("runner not authenticated")
|
||||
}
|
||||
@@ -483,7 +497,7 @@ func (c *Client) doSignedRequest(method, path string, body []byte, queryParams .
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("X-Runner-Secret", c.runnerSecret)
|
||||
|
||||
return c.httpClient.Do(req)
|
||||
return client.Do(req)
|
||||
}
|
||||
|
||||
// ConnectWebSocket establishes a WebSocket connection to the manager
|
||||
@@ -969,16 +983,16 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output
|
||||
// Clean up expired cache entries periodically
|
||||
c.cleanupExpiredContextCache()
|
||||
|
||||
// Download context tar.gz
|
||||
contextPath := filepath.Join(workDir, "context.tar.gz")
|
||||
// Download context tar
|
||||
contextPath := filepath.Join(workDir, "context.tar")
|
||||
if err := c.downloadJobContext(jobID, contextPath); err != nil {
|
||||
c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
|
||||
return fmt.Errorf("failed to download context: %w", err)
|
||||
}
|
||||
|
||||
// Extract context tar.gz
|
||||
// Extract context tar
|
||||
c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download")
|
||||
if err := c.extractTarGz(contextPath, workDir); err != nil {
|
||||
if err := c.extractTar(contextPath, workDir); err != nil {
|
||||
c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
|
||||
return fmt.Errorf("failed to extract context: %w", err)
|
||||
}
|
||||
@@ -1077,662 +1091,24 @@ func (c *Client) processTask(task map[string]interface{}, jobName string, output
|
||||
// This script will override the blend file's settings based on job metadata
|
||||
formatFilePath := filepath.Join(workDir, "output_format.txt")
|
||||
renderSettingsFilePath := filepath.Join(workDir, "render_settings.json")
|
||||
scriptContent := fmt.Sprintf(`import bpy
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
|
||||
# Make all file paths relative to the blend file location FIRST
|
||||
# This must be done immediately after file load, before any other operations
|
||||
# to prevent Blender from trying to access external files with absolute paths
|
||||
try:
|
||||
bpy.ops.file.make_paths_relative()
|
||||
print("Made all file paths relative to blend file")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not make paths relative: {e}")
|
||||
|
||||
# Check for missing addons that the blend file requires
|
||||
# Blender marks missing addons with "_missing" suffix in preferences
|
||||
missing = []
|
||||
try:
|
||||
for mod in bpy.context.preferences.addons:
|
||||
if mod.module.endswith("_missing"):
|
||||
missing.append(mod.module.rsplit("_", 1)[0])
|
||||
|
||||
if missing:
|
||||
print("Missing add-ons required by this .blend:")
|
||||
for name in missing:
|
||||
print(" -", name)
|
||||
else:
|
||||
print("No missing add-ons detected – file is headless-safe")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check for missing addons: {e}")
|
||||
|
||||
# Fix objects and collections hidden from render
|
||||
vl = bpy.context.view_layer
|
||||
|
||||
# 1. Objects hidden in view layer
|
||||
print("Checking for objects hidden from render that need to be enabled...")
|
||||
try:
|
||||
for obj in bpy.data.objects:
|
||||
if obj.hide_get(view_layer=vl):
|
||||
if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]):
|
||||
obj.hide_set(False, view_layer=vl)
|
||||
print("Enabled object:", obj.name)
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check/fix hidden render objects: {e}")
|
||||
|
||||
# 2. Collections disabled in renders OR set to Holdout (the final killer)
|
||||
print("Checking for collections hidden from render that need to be enabled...")
|
||||
try:
|
||||
for col in bpy.data.collections:
|
||||
if col.hide_render or (vl.layer_collection.children.get(col.name) and not vl.layer_collection.children[col.name].exclude == False):
|
||||
if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision","dick"]):
|
||||
col.hide_render = False
|
||||
if col.name in vl.layer_collection.children:
|
||||
vl.layer_collection.children[col.name].exclude = False
|
||||
vl.layer_collection.children[col.name].holdout = False
|
||||
vl.layer_collection.children[col.name].indirect_only = False
|
||||
print("Enabled collection:", col.name)
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not check/fix hidden render collections: {e}")
|
||||
|
||||
# Read output format from file (created by Go code)
|
||||
format_file_path = %q
|
||||
output_format_override = None
|
||||
if os.path.exists(format_file_path):
|
||||
try:
|
||||
with open(format_file_path, 'r') as f:
|
||||
output_format_override = f.read().strip().upper()
|
||||
print(f"Read output format from file: '{output_format_override}'")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not read output format file: {e}")
|
||||
else:
|
||||
print(f"Warning: Output format file does not exist: {format_file_path}")
|
||||
|
||||
# Read render settings from JSON file (created by Go code)
|
||||
render_settings_file = %q
|
||||
render_settings_override = None
|
||||
if os.path.exists(render_settings_file):
|
||||
try:
|
||||
with open(render_settings_file, 'r') as f:
|
||||
render_settings_override = json.load(f)
|
||||
print(f"Loaded render settings from job metadata")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not read render settings file: {e}")
|
||||
`, formatFilePath, renderSettingsFilePath) + `
|
||||
|
||||
# Get current scene settings (preserve blend file preferences)
|
||||
scene = bpy.context.scene
|
||||
current_engine = scene.render.engine
|
||||
current_device = scene.cycles.device if hasattr(scene, 'cycles') and scene.cycles else None
|
||||
current_output_format = scene.render.image_settings.file_format
|
||||
|
||||
print(f"Blend file render engine: {current_engine}")
|
||||
if current_device:
|
||||
print(f"Blend file device setting: {current_device}")
|
||||
print(f"Blend file output format: {current_output_format}")
|
||||
|
||||
# Override output format if specified
|
||||
# The format file always takes precedence (it's written specifically for this job)
|
||||
if output_format_override:
|
||||
print(f"Overriding output format from '{current_output_format}' to '{output_format_override}'")
|
||||
# Map common format names to Blender's format constants
|
||||
# For video formats (EXR_264_MP4, EXR_AV1_MP4), we render as EXR frames first
|
||||
format_to_use = output_format_override.upper()
|
||||
if format_to_use in ['EXR_264_MP4', 'EXR_AV1_MP4']:
|
||||
format_to_use = 'EXR' # Render as EXR for video formats
|
||||
|
||||
format_map = {
|
||||
'PNG': 'PNG',
|
||||
'JPEG': 'JPEG',
|
||||
'JPG': 'JPEG',
|
||||
'EXR': 'OPEN_EXR',
|
||||
'OPEN_EXR': 'OPEN_EXR',
|
||||
'TARGA': 'TARGA',
|
||||
'TIFF': 'TIFF',
|
||||
'BMP': 'BMP',
|
||||
}
|
||||
blender_format = format_map.get(format_to_use, format_to_use)
|
||||
try:
|
||||
scene.render.image_settings.file_format = blender_format
|
||||
print(f"Successfully set output format to: {blender_format}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set output format to {blender_format}: {e}")
|
||||
print(f"Using blend file's format: {current_output_format}")
|
||||
else:
|
||||
print(f"Using blend file's output format: {current_output_format}")
|
||||
|
||||
# Apply render settings from job metadata if provided
|
||||
# Note: output_format is NOT applied from render_settings_override - it's already set from format file above
|
||||
if render_settings_override:
|
||||
engine_override = render_settings_override.get('engine', '').upper()
|
||||
engine_settings = render_settings_override.get('engine_settings', {})
|
||||
|
||||
# Switch engine if specified
|
||||
if engine_override and engine_override != current_engine.upper():
|
||||
print(f"Switching render engine from '{current_engine}' to '{engine_override}'")
|
||||
try:
|
||||
scene.render.engine = engine_override
|
||||
current_engine = engine_override
|
||||
print(f"Successfully switched to {engine_override} engine")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not switch engine to {engine_override}: {e}")
|
||||
print(f"Using blend file's engine: {current_engine}")
|
||||
|
||||
# Apply engine-specific settings
|
||||
if engine_settings:
|
||||
if current_engine.upper() == 'CYCLES':
|
||||
cycles = scene.cycles
|
||||
print("Applying Cycles render settings from job metadata...")
|
||||
for key, value in engine_settings.items():
|
||||
try:
|
||||
if hasattr(cycles, key):
|
||||
setattr(cycles, key, value)
|
||||
print(f" Set Cycles.{key} = {value}")
|
||||
else:
|
||||
print(f" Warning: Cycles has no attribute '{key}'")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not set Cycles.{key} = {value}: {e}")
|
||||
elif current_engine.upper() in ['EEVEE', 'EEVEE_NEXT']:
|
||||
eevee = scene.eevee
|
||||
print("Applying EEVEE render settings from job metadata...")
|
||||
for key, value in engine_settings.items():
|
||||
try:
|
||||
if hasattr(eevee, key):
|
||||
setattr(eevee, key, value)
|
||||
print(f" Set EEVEE.{key} = {value}")
|
||||
else:
|
||||
print(f" Warning: EEVEE has no attribute '{key}'")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not set EEVEE.{key} = {value}: {e}")
|
||||
|
||||
# Apply resolution if specified
|
||||
if 'resolution_x' in render_settings_override:
|
||||
try:
|
||||
scene.render.resolution_x = render_settings_override['resolution_x']
|
||||
print(f"Set resolution_x = {render_settings_override['resolution_x']}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set resolution_x: {e}")
|
||||
if 'resolution_y' in render_settings_override:
|
||||
try:
|
||||
scene.render.resolution_y = render_settings_override['resolution_y']
|
||||
print(f"Set resolution_y = {render_settings_override['resolution_y']}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not set resolution_y: {e}")
|
||||
|
||||
# Only override device selection if using Cycles (other engines handle GPU differently)
|
||||
if current_engine == 'CYCLES':
|
||||
# Check if CPU rendering is forced
|
||||
force_cpu = False
|
||||
if render_settings_override and render_settings_override.get('force_cpu'):
|
||||
force_cpu = render_settings_override.get('force_cpu', False)
|
||||
print("Force CPU rendering is enabled - skipping GPU detection")
|
||||
|
||||
# Ensure Cycles addon is enabled
|
||||
try:
|
||||
if 'cycles' not in bpy.context.preferences.addons:
|
||||
bpy.ops.preferences.addon_enable(module='cycles')
|
||||
print("Enabled Cycles addon")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not enable Cycles addon: {e}")
|
||||
|
||||
# If CPU is forced, skip GPU detection and set CPU directly
|
||||
if force_cpu:
|
||||
scene.cycles.device = 'CPU'
|
||||
print("Forced CPU rendering (skipping GPU detection)")
|
||||
else:
|
||||
# Access Cycles preferences
|
||||
prefs = bpy.context.preferences
|
||||
try:
|
||||
cycles_prefs = prefs.addons['cycles'].preferences
|
||||
except (KeyError, AttributeError):
|
||||
try:
|
||||
cycles_addon = prefs.addons.get('cycles')
|
||||
if cycles_addon:
|
||||
cycles_prefs = cycles_addon.preferences
|
||||
else:
|
||||
raise Exception("Cycles addon not found")
|
||||
except Exception as e:
|
||||
print(f"ERROR: Could not access Cycles preferences: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
# Check all devices and choose the best GPU type
|
||||
# Device type preference order (most performant first)
|
||||
device_type_preference = ['OPTIX', 'CUDA', 'HIP', 'ONEAPI', 'METAL']
|
||||
gpu_available = False
|
||||
best_device_type = None
|
||||
best_gpu_devices = []
|
||||
devices_by_type = {} # {device_type: [devices]}
|
||||
seen_device_ids = set() # Track device IDs to avoid duplicates
|
||||
|
||||
print("Checking for GPU availability...")
|
||||
|
||||
# Try to get all devices - try each device type to see what's available
|
||||
for device_type in device_type_preference:
|
||||
try:
|
||||
cycles_prefs.compute_device_type = device_type
|
||||
cycles_prefs.refresh_devices()
|
||||
|
||||
# Get devices for this type
|
||||
devices = None
|
||||
if hasattr(cycles_prefs, 'devices'):
|
||||
try:
|
||||
devices_prop = cycles_prefs.devices
|
||||
if devices_prop:
|
||||
devices = list(devices_prop) if hasattr(devices_prop, '__iter__') else [devices_prop]
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
if not devices or len(devices) == 0:
|
||||
try:
|
||||
devices = cycles_prefs.get_devices()
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
if devices and len(devices) > 0:
|
||||
# Categorize devices by their type attribute, avoiding duplicates
|
||||
for device in devices:
|
||||
if hasattr(device, 'type'):
|
||||
device_type_str = str(device.type).upper()
|
||||
device_id = getattr(device, 'id', None)
|
||||
|
||||
# Use device ID to avoid duplicates (same device appears when checking different compute_device_types)
|
||||
if device_id and device_id in seen_device_ids:
|
||||
continue
|
||||
|
||||
if device_id:
|
||||
seen_device_ids.add(device_id)
|
||||
|
||||
if device_type_str not in devices_by_type:
|
||||
devices_by_type[device_type_str] = []
|
||||
devices_by_type[device_type_str].append(device)
|
||||
except (ValueError, AttributeError, KeyError, TypeError):
|
||||
# Device type not supported, continue
|
||||
continue
|
||||
except Exception as e:
|
||||
# Other errors - log but continue
|
||||
print(f" Error checking {device_type}: {e}")
|
||||
continue
|
||||
|
||||
# Print what we found
|
||||
print(f"Found devices by type: {list(devices_by_type.keys())}")
|
||||
for dev_type, dev_list in devices_by_type.items():
|
||||
print(f" {dev_type}: {len(dev_list)} device(s)")
|
||||
for device in dev_list:
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" - {device_name}")
|
||||
|
||||
# Choose the best GPU type based on preference
|
||||
for preferred_type in device_type_preference:
|
||||
if preferred_type in devices_by_type:
|
||||
gpu_devices = [d for d in devices_by_type[preferred_type] if preferred_type in ['CUDA', 'OPENCL', 'OPTIX', 'HIP', 'METAL', 'ONEAPI']]
|
||||
if gpu_devices:
|
||||
best_device_type = preferred_type
|
||||
best_gpu_devices = [(d, preferred_type) for d in gpu_devices]
|
||||
print(f"Selected {preferred_type} as best GPU type with {len(gpu_devices)} device(s)")
|
||||
break
|
||||
|
||||
# Second pass: Enable the best GPU we found
|
||||
if best_device_type and best_gpu_devices:
|
||||
print(f"\nEnabling GPU devices for {best_device_type}...")
|
||||
try:
|
||||
# Set the device type again
|
||||
cycles_prefs.compute_device_type = best_device_type
|
||||
cycles_prefs.refresh_devices()
|
||||
|
||||
# First, disable all CPU devices to ensure only GPU is used
|
||||
print(f" Disabling CPU devices...")
|
||||
all_devices = cycles_prefs.devices if hasattr(cycles_prefs, 'devices') else cycles_prefs.get_devices()
|
||||
if all_devices:
|
||||
for device in all_devices:
|
||||
if hasattr(device, 'type') and str(device.type).upper() == 'CPU':
|
||||
try:
|
||||
device.use = False
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" Disabled CPU: {device_name}")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not disable CPU device {getattr(device, 'name', 'Unknown')}: {e}")
|
||||
|
||||
# Enable all GPU devices
|
||||
enabled_count = 0
|
||||
for device, device_type in best_gpu_devices:
|
||||
try:
|
||||
device.use = True
|
||||
enabled_count += 1
|
||||
device_name = getattr(device, 'name', 'Unknown')
|
||||
print(f" Enabled: {device_name}")
|
||||
except Exception as e:
|
||||
print(f" Warning: Could not enable device {getattr(device, 'name', 'Unknown')}: {e}")
|
||||
|
||||
# Enable ray tracing acceleration for supported device types
|
||||
try:
|
||||
if best_device_type == 'HIP':
|
||||
# HIPRT (HIP Ray Tracing) for AMD GPUs
|
||||
if hasattr(cycles_prefs, 'use_hiprt'):
|
||||
cycles_prefs.use_hiprt = True
|
||||
print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering")
|
||||
elif hasattr(scene.cycles, 'use_hiprt'):
|
||||
scene.cycles.use_hiprt = True
|
||||
print(f" Enabled HIPRT (HIP Ray Tracing) for faster rendering")
|
||||
else:
|
||||
print(f" HIPRT not available (requires Blender 4.0+)")
|
||||
elif best_device_type == 'OPTIX':
|
||||
# OptiX is already enabled when using OPTIX device type
|
||||
# But we can check if there are any OptiX-specific settings
|
||||
if hasattr(scene.cycles, 'use_optix_denoising'):
|
||||
scene.cycles.use_optix_denoising = True
|
||||
print(f" Enabled OptiX denoising")
|
||||
print(f" OptiX ray tracing is active (using OPTIX device type)")
|
||||
elif best_device_type == 'CUDA':
|
||||
# CUDA can use OptiX if available, but it's usually automatic
|
||||
# Check if we can prefer OptiX over CUDA
|
||||
if hasattr(scene.cycles, 'use_optix_denoising'):
|
||||
scene.cycles.use_optix_denoising = True
|
||||
print(f" Enabled OptiX denoising (if OptiX available)")
|
||||
print(f" CUDA ray tracing active")
|
||||
elif best_device_type == 'METAL':
|
||||
# MetalRT for Apple Silicon (if available)
|
||||
if hasattr(scene.cycles, 'use_metalrt'):
|
||||
scene.cycles.use_metalrt = True
|
||||
print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering")
|
||||
elif hasattr(cycles_prefs, 'use_metalrt'):
|
||||
cycles_prefs.use_metalrt = True
|
||||
print(f" Enabled MetalRT (Metal Ray Tracing) for faster rendering")
|
||||
else:
|
||||
print(f" MetalRT not available")
|
||||
elif best_device_type == 'ONEAPI':
|
||||
# Intel oneAPI - Embree might be available
|
||||
if hasattr(scene.cycles, 'use_embree'):
|
||||
scene.cycles.use_embree = True
|
||||
print(f" Enabled Embree for faster CPU ray tracing")
|
||||
print(f" oneAPI ray tracing active")
|
||||
except Exception as e:
|
||||
print(f" Could not enable ray tracing acceleration: {e}")
|
||||
|
||||
print(f"SUCCESS: Enabled {enabled_count} GPU device(s) for {best_device_type}")
|
||||
gpu_available = True
|
||||
except Exception as e:
|
||||
print(f"ERROR: Failed to enable GPU devices: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Set device based on availability (prefer GPU, fallback to CPU)
|
||||
if gpu_available:
|
||||
scene.cycles.device = 'GPU'
|
||||
print(f"Using GPU for rendering (blend file had: {current_device})")
|
||||
else:
|
||||
scene.cycles.device = 'CPU'
|
||||
print(f"GPU not available, using CPU for rendering (blend file had: {current_device})")
|
||||
|
||||
# Verify device setting
|
||||
if current_engine == 'CYCLES':
|
||||
final_device = scene.cycles.device
|
||||
print(f"Final Cycles device: {final_device}")
|
||||
else:
|
||||
# For other engines (EEVEE, etc.), respect blend file settings
|
||||
print(f"Using {current_engine} engine - respecting blend file settings")
|
||||
|
||||
# Enable GPU acceleration for EEVEE viewport rendering (if using EEVEE)
|
||||
if current_engine == 'EEVEE' or current_engine == 'EEVEE_NEXT':
|
||||
try:
|
||||
if hasattr(bpy.context.preferences.system, 'gpu_backend'):
|
||||
bpy.context.preferences.system.gpu_backend = 'OPENGL'
|
||||
print("Enabled OpenGL GPU backend for EEVEE")
|
||||
except Exception as e:
|
||||
print(f"Could not set EEVEE GPU backend: {e}")
|
||||
|
||||
# Enable GPU acceleration for compositing (if compositing is enabled)
|
||||
try:
|
||||
if scene.use_nodes and hasattr(scene, 'node_tree') and scene.node_tree:
|
||||
if hasattr(scene.node_tree, 'use_gpu_compositing'):
|
||||
scene.node_tree.use_gpu_compositing = True
|
||||
print("Enabled GPU compositing")
|
||||
except Exception as e:
|
||||
print(f"Could not enable GPU compositing: {e}")
|
||||
|
||||
# CRITICAL: Initialize headless rendering to prevent black images
|
||||
# This ensures the render engine is properly initialized before rendering
|
||||
print("Initializing headless rendering context...")
|
||||
try:
|
||||
# Ensure world exists and has proper settings
|
||||
if not scene.world:
|
||||
# Create a default world if none exists
|
||||
world = bpy.data.worlds.new("World")
|
||||
scene.world = world
|
||||
print("Created default world")
|
||||
|
||||
# Ensure world has a background shader (not just black)
|
||||
if scene.world:
|
||||
# Enable nodes if not already enabled
|
||||
if not scene.world.use_nodes:
|
||||
scene.world.use_nodes = True
|
||||
print("Enabled world nodes")
|
||||
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
# Find or create background shader
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
|
||||
if not bg_shader:
|
||||
bg_shader = world_nodes.nodes.new(type='ShaderNodeBackground')
|
||||
# Connect to output
|
||||
output = world_nodes.nodes.get('World Output')
|
||||
if not output:
|
||||
output = world_nodes.nodes.new(type='ShaderNodeOutputWorld')
|
||||
output.name = 'World Output'
|
||||
if output and bg_shader:
|
||||
# Connect background to surface input
|
||||
if 'Surface' in output.inputs and 'Background' in bg_shader.outputs:
|
||||
world_nodes.links.new(bg_shader.outputs['Background'], output.inputs['Surface'])
|
||||
print("Created background shader for world")
|
||||
|
||||
# Ensure background has some color (not pure black)
|
||||
if bg_shader:
|
||||
# Only set if it's pure black (0,0,0)
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
# Set to a very dark gray instead of pure black
|
||||
bg_shader.inputs['Color'].default_value = (0.01, 0.01, 0.01, 1.0)
|
||||
print("Adjusted world background color to prevent black renders")
|
||||
else:
|
||||
# Fallback: use legacy world color if nodes aren't working
|
||||
if hasattr(scene.world, 'color'):
|
||||
color = scene.world.color
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
scene.world.color = (0.01, 0.01, 0.01)
|
||||
print("Adjusted legacy world color to prevent black renders")
|
||||
|
||||
# For EEVEE, force viewport update to initialize render engine
|
||||
if current_engine in ['EEVEE', 'EEVEE_NEXT']:
|
||||
# Force EEVEE to update its internal state
|
||||
try:
|
||||
# Update depsgraph to ensure everything is initialized
|
||||
depsgraph = bpy.context.evaluated_depsgraph_get()
|
||||
if depsgraph:
|
||||
# Force update
|
||||
depsgraph.update()
|
||||
print("Forced EEVEE depsgraph update for headless rendering")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not force EEVEE update: {e}")
|
||||
|
||||
# Ensure EEVEE settings are applied
|
||||
try:
|
||||
# Force a material update to ensure shaders are compiled
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'MESH' and obj.data.materials:
|
||||
for mat in obj.data.materials:
|
||||
if mat and mat.use_nodes:
|
||||
# Touch the material to force update
|
||||
mat.use_nodes = mat.use_nodes
|
||||
print("Forced material updates for EEVEE")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not update materials: {e}")
|
||||
|
||||
# For Cycles, ensure proper initialization
|
||||
if current_engine == 'CYCLES':
|
||||
# Ensure samples are set (even if 1 for preview)
|
||||
if not hasattr(scene.cycles, 'samples') or scene.cycles.samples < 1:
|
||||
scene.cycles.samples = 1
|
||||
print("Set minimum Cycles samples")
|
||||
|
||||
# Check for lights in the scene
|
||||
lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
|
||||
print(f"Found {len(lights)} light(s) in scene")
|
||||
if len(lights) == 0:
|
||||
print("WARNING: No lights found in scene - rendering may be black!")
|
||||
print(" Consider adding lights or ensuring world background emits light")
|
||||
|
||||
# Ensure world background emits light (critical for Cycles)
|
||||
if scene.world and scene.world.use_nodes:
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
|
||||
if bg_shader:
|
||||
# Check and set strength - Cycles needs this to emit light!
|
||||
if hasattr(bg_shader.inputs, 'Strength'):
|
||||
strength = bg_shader.inputs['Strength'].default_value
|
||||
if strength <= 0.0:
|
||||
bg_shader.inputs['Strength'].default_value = 1.0
|
||||
print("Set world background strength to 1.0 for Cycles lighting")
|
||||
else:
|
||||
print(f"World background strength: {strength}")
|
||||
# Also ensure color is not pure black
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
if len(color) >= 3 and color[0] == 0.0 and color[1] == 0.0 and color[2] == 0.0:
|
||||
bg_shader.inputs['Color'].default_value = (1.0, 1.0, 1.0, 1.0)
|
||||
print("Set world background color to white for Cycles lighting")
|
||||
|
||||
# Check film_transparent setting - if enabled, background will be transparent/black
|
||||
if hasattr(scene.cycles, 'film_transparent') and scene.cycles.film_transparent:
|
||||
print("WARNING: film_transparent is enabled - background will be transparent")
|
||||
print(" If you see black renders, try disabling film_transparent")
|
||||
|
||||
# Force Cycles to update/compile materials and shaders
|
||||
try:
|
||||
# Update depsgraph to ensure everything is initialized
|
||||
depsgraph = bpy.context.evaluated_depsgraph_get()
|
||||
if depsgraph:
|
||||
depsgraph.update()
|
||||
print("Forced Cycles depsgraph update")
|
||||
|
||||
# Force material updates to ensure shaders are compiled
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'MESH' and obj.data.materials:
|
||||
for mat in obj.data.materials:
|
||||
if mat and mat.use_nodes:
|
||||
# Force material update
|
||||
mat.use_nodes = mat.use_nodes
|
||||
print("Forced Cycles material updates")
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not force Cycles updates: {e}")
|
||||
|
||||
# Verify device is actually set correctly
|
||||
if hasattr(scene.cycles, 'device'):
|
||||
actual_device = scene.cycles.device
|
||||
print(f"Cycles device setting: {actual_device}")
|
||||
if actual_device == 'GPU':
|
||||
# Try to verify GPU is actually available
|
||||
try:
|
||||
prefs = bpy.context.preferences
|
||||
cycles_prefs = prefs.addons['cycles'].preferences
|
||||
devices = cycles_prefs.devices
|
||||
enabled_devices = [d for d in devices if d.use]
|
||||
if len(enabled_devices) == 0:
|
||||
print("WARNING: GPU device set but no GPU devices are enabled!")
|
||||
print(" Falling back to CPU may cause issues")
|
||||
except Exception as e:
|
||||
print(f"Could not verify GPU devices: {e}")
|
||||
|
||||
# Ensure camera exists and is active
|
||||
if scene.camera is None:
|
||||
# Find first camera in scene
|
||||
for obj in scene.objects:
|
||||
if obj.type == 'CAMERA':
|
||||
scene.camera = obj
|
||||
print(f"Set active camera: {obj.name}")
|
||||
break
|
||||
|
||||
# Fix objects and collections hidden from render
|
||||
vl = bpy.context.view_layer
|
||||
|
||||
# 1. Objects hidden in view layer
|
||||
for obj in bpy.data.objects:
|
||||
if obj.hide_get(view_layer=vl):
|
||||
if any(k in obj.name.lower() for k in ["scrotum|","cage","genital","penis","dick","collision","body.001","couch"]):
|
||||
obj.hide_set(False, view_layer=vl)
|
||||
print("Enabled object:", obj.name)
|
||||
|
||||
# 2. Collections disabled in renders OR set to Holdout (the final killer)
|
||||
for col in bpy.data.collections:
|
||||
if col.hide_render or (vl.layer_collection.children.get(col.name) and not vl.layer_collection.children[col.name].exclude == False):
|
||||
if any(k in col.name.lower() for k in ["genital","nsfw","dick","private","hidden","cage","scrotum","collision","dick"]):
|
||||
col.hide_render = False
|
||||
if col.name in vl.layer_collection.children:
|
||||
vl.layer_collection.children[col.name].exclude = False
|
||||
vl.layer_collection.children[col.name].holdout = False
|
||||
vl.layer_collection.children[col.name].indirect_only = False
|
||||
print("Enabled collection:", col.name)
|
||||
|
||||
print("Headless rendering initialization complete")
|
||||
except Exception as e:
|
||||
print(f"Warning: Headless rendering initialization had issues: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Final verification before rendering
|
||||
print("\n=== Pre-render verification ===")
|
||||
try:
|
||||
scene = bpy.context.scene
|
||||
print(f"Render engine: {scene.render.engine}")
|
||||
print(f"Active camera: {scene.camera.name if scene.camera else 'None'}")
|
||||
|
||||
if scene.render.engine == 'CYCLES':
|
||||
print(f"Cycles device: {scene.cycles.device}")
|
||||
print(f"Cycles samples: {scene.cycles.samples}")
|
||||
lights = [obj for obj in scene.objects if obj.type == 'LIGHT']
|
||||
print(f"Lights in scene: {len(lights)}")
|
||||
if scene.world:
|
||||
if scene.world.use_nodes:
|
||||
world_nodes = scene.world.node_tree
|
||||
if world_nodes:
|
||||
bg_shader = None
|
||||
for node in world_nodes.nodes:
|
||||
if node.type == 'BACKGROUND':
|
||||
bg_shader = node
|
||||
break
|
||||
if bg_shader:
|
||||
if hasattr(bg_shader.inputs, 'Strength'):
|
||||
strength = bg_shader.inputs['Strength'].default_value
|
||||
print(f"World background strength: {strength}")
|
||||
if hasattr(bg_shader.inputs, 'Color'):
|
||||
color = bg_shader.inputs['Color'].default_value
|
||||
print(f"World background color: ({color[0]:.2f}, {color[1]:.2f}, {color[2]:.2f})")
|
||||
else:
|
||||
print("World exists but nodes are disabled")
|
||||
else:
|
||||
print("WARNING: No world in scene!")
|
||||
|
||||
print("=== Verification complete ===\n")
|
||||
except Exception as e:
|
||||
print(f"Warning: Verification failed: {e}")
|
||||
|
||||
print("Device configuration complete - blend file settings preserved, device optimized")
|
||||
sys.stdout.flush()
|
||||
`
|
||||
|
||||
// Check if unhide_objects is enabled
|
||||
unhideObjects := false
|
||||
if jobMetadata != nil && jobMetadata.UnhideObjects != nil && *jobMetadata.UnhideObjects {
|
||||
unhideObjects = true
|
||||
}
|
||||
|
||||
// Build unhide code conditionally from embedded script
|
||||
unhideCode := ""
|
||||
if unhideObjects {
|
||||
unhideCode = scripts.UnhideObjects
|
||||
}
|
||||
|
||||
// Load template and replace placeholders
|
||||
scriptContent := scripts.RenderBlenderTemplate
|
||||
scriptContent = strings.ReplaceAll(scriptContent, "{{UNHIDE_CODE}}", unhideCode)
|
||||
scriptContent = strings.ReplaceAll(scriptContent, "{{FORMAT_FILE_PATH}}", fmt.Sprintf("%q", formatFilePath))
|
||||
scriptContent = strings.ReplaceAll(scriptContent, "{{RENDER_SETTINGS_FILE}}", fmt.Sprintf("%q", renderSettingsFilePath))
|
||||
scriptPath := filepath.Join(workDir, "enable_gpu.py")
|
||||
if err := os.WriteFile(scriptPath, []byte(scriptContent), 0644); err != nil {
|
||||
errMsg := fmt.Sprintf("failed to create GPU enable script: %v", err)
|
||||
@@ -1765,23 +1141,30 @@ sys.stdout.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Check if execution should be enabled (defaults to false/off)
|
||||
enableExecution := false
|
||||
if jobMetadata != nil && jobMetadata.EnableExecution != nil && *jobMetadata.EnableExecution {
|
||||
enableExecution = true
|
||||
}
|
||||
|
||||
// Run Blender with GPU enabled via Python script
|
||||
// Use -s (start) and -e (end) for frame ranges, or -f for single frame
|
||||
var cmd *exec.Cmd
|
||||
args := []string{"-b", blendFile, "--python", scriptPath}
|
||||
if enableExecution {
|
||||
args = append(args, "--enable-autoexec")
|
||||
}
|
||||
if frameStart == frameEnd {
|
||||
// Single frame
|
||||
cmd = exec.Command("blender", "-b", blendFile,
|
||||
"--python", scriptPath,
|
||||
"-o", absOutputPattern,
|
||||
"-f", fmt.Sprintf("%d", frameStart))
|
||||
args = append(args, "-o", absOutputPattern, "-f", fmt.Sprintf("%d", frameStart))
|
||||
cmd = exec.Command("blender", args...)
|
||||
} else {
|
||||
// Frame range
|
||||
cmd = exec.Command("blender", "-b", blendFile,
|
||||
"--python", scriptPath,
|
||||
"-o", absOutputPattern,
|
||||
args = append(args, "-o", absOutputPattern,
|
||||
"-s", fmt.Sprintf("%d", frameStart),
|
||||
"-e", fmt.Sprintf("%d", frameEnd),
|
||||
"-a") // -a renders animation (all frames in range)
|
||||
cmd = exec.Command("blender", args...)
|
||||
}
|
||||
cmd.Dir = workDir
|
||||
|
||||
@@ -3261,8 +2644,11 @@ func (c *Client) getJobMetadata(jobID int64) (*types.BlendMetadata, error) {
|
||||
|
||||
// downloadFrameFile downloads a frame file for MP4 generation
|
||||
func (c *Client) downloadFrameFile(jobID int64, fileName, destPath string) error {
|
||||
path := fmt.Sprintf("/api/runner/files/%d/%s", jobID, fileName)
|
||||
resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
|
||||
// URL encode the fileName to handle special characters in filenames
|
||||
encodedFileName := url.PathEscape(fileName)
|
||||
path := fmt.Sprintf("/api/runner/files/%d/%s", jobID, encodedFileName)
|
||||
// Use long-running client for file downloads (no timeout) - EXR files can be large
|
||||
resp, err := c.doSignedRequestLong("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -3330,7 +2716,8 @@ func (c *Client) downloadFileToPath(filePath, destPath string) error {
|
||||
downloadPath += "/" + filepath.Base(filePath)
|
||||
}
|
||||
|
||||
resp, err := c.doSignedRequest("GET", downloadPath, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
|
||||
// Use long-running client for file downloads (no timeout)
|
||||
resp, err := c.doSignedRequestLong("GET", downloadPath, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to download file: %w", err)
|
||||
}
|
||||
@@ -3392,7 +2779,8 @@ func (c *Client) uploadFile(jobID int64, filePath string) (string, error) {
|
||||
req.Header.Set("Content-Type", formWriter.FormDataContentType())
|
||||
req.Header.Set("X-Runner-Secret", c.runnerSecret)
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
// Use long-running client for file uploads (no timeout)
|
||||
resp, err := c.longRunningClient.Do(req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to upload file: %w", err)
|
||||
}
|
||||
@@ -3424,7 +2812,7 @@ func (c *Client) getContextCacheKey(jobID int64) string {
|
||||
func (c *Client) getContextCachePath(cacheKey string) string {
|
||||
cacheDir := filepath.Join(c.getWorkspaceDir(), "cache", "contexts")
|
||||
os.MkdirAll(cacheDir, 0755)
|
||||
return filepath.Join(cacheDir, cacheKey+".tar.gz")
|
||||
return filepath.Join(cacheDir, cacheKey+".tar")
|
||||
}
|
||||
|
||||
// isContextCacheValid checks if a cached context file exists and is not expired (1 hour TTL)
|
||||
@@ -3437,7 +2825,7 @@ func (c *Client) isContextCacheValid(cachePath string) bool {
|
||||
return time.Since(info.ModTime()) < time.Hour
|
||||
}
|
||||
|
||||
// downloadJobContext downloads the job context tar.gz, using cache if available
|
||||
// downloadJobContext downloads the job context tar, using cache if available
|
||||
func (c *Client) downloadJobContext(jobID int64, destPath string) error {
|
||||
cacheKey := c.getContextCacheKey(jobID)
|
||||
cachePath := c.getContextCachePath(cacheKey)
|
||||
@@ -3464,9 +2852,9 @@ func (c *Client) downloadJobContext(jobID int64, destPath string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Download from manager
|
||||
path := fmt.Sprintf("/api/runner/jobs/%d/context.tar.gz", jobID)
|
||||
resp, err := c.doSignedRequest("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
|
||||
// Download from manager - use long-running client (no timeout) for large context files
|
||||
path := fmt.Sprintf("/api/runner/jobs/%d/context.tar", jobID)
|
||||
resp, err := c.doSignedRequestLong("GET", path, nil, fmt.Sprintf("runner_id=%d", c.runnerID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to download context: %w", err)
|
||||
}
|
||||
@@ -3517,24 +2905,17 @@ func (c *Client) downloadJobContext(jobID int64, destPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractTarGz extracts a tar.gz file to the destination directory
|
||||
func (c *Client) extractTarGz(tarGzPath, destDir string) error {
|
||||
// Open the tar.gz file
|
||||
file, err := os.Open(tarGzPath)
|
||||
// extractTar extracts a tar file to the destination directory
|
||||
func (c *Client) extractTar(tarPath, destDir string) error {
|
||||
// Open the tar file
|
||||
file, err := os.Open(tarPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open tar.gz file: %w", err)
|
||||
return fmt.Errorf("failed to open tar file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Create gzip reader
|
||||
gzReader, err := gzip.NewReader(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create gzip reader: %w", err)
|
||||
}
|
||||
defer gzReader.Close()
|
||||
|
||||
// Create tar reader
|
||||
tarReader := tar.NewReader(gzReader)
|
||||
tarReader := tar.NewReader(file)
|
||||
|
||||
// Extract files
|
||||
for {
|
||||
@@ -3635,16 +3016,16 @@ func (c *Client) processMetadataTask(task map[string]interface{}, jobID int64, i
|
||||
c.sendStepUpdate(taskID, "download", types.StepStatusRunning, "")
|
||||
c.sendLog(taskID, types.LogLevelInfo, "Downloading job context...", "download")
|
||||
|
||||
// Download context tar.gz
|
||||
contextPath := filepath.Join(workDir, "context.tar.gz")
|
||||
// Download context tar
|
||||
contextPath := filepath.Join(workDir, "context.tar")
|
||||
if err := c.downloadJobContext(jobID, contextPath); err != nil {
|
||||
c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
|
||||
return fmt.Errorf("failed to download context: %w", err)
|
||||
}
|
||||
|
||||
// Extract context tar.gz
|
||||
// Extract context tar
|
||||
c.sendLog(taskID, types.LogLevelInfo, "Extracting context...", "download")
|
||||
if err := c.extractTarGz(contextPath, workDir); err != nil {
|
||||
if err := c.extractTar(contextPath, workDir); err != nil {
|
||||
c.sendStepUpdate(taskID, "download", types.StepStatusFailed, err.Error())
|
||||
return fmt.Errorf("failed to extract context: %w", err)
|
||||
}
|
||||
@@ -3881,6 +3262,7 @@ sys.stdout.flush()
|
||||
}
|
||||
|
||||
// Execute Blender with Python script
|
||||
// Note: disable_execution flag is not applied to metadata extraction for safety
|
||||
cmd := exec.Command("blender", "-b", blendFile, "--python", scriptPath)
|
||||
cmd.Dir = workDir
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@ package storage
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -31,6 +31,7 @@ func (s *Storage) init() error {
|
||||
s.basePath,
|
||||
s.uploadsPath(),
|
||||
s.outputsPath(),
|
||||
s.tempPath(),
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
@@ -42,6 +43,28 @@ func (s *Storage) init() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// tempPath returns the path for temporary files
|
||||
func (s *Storage) tempPath() string {
|
||||
return filepath.Join(s.basePath, "temp")
|
||||
}
|
||||
|
||||
// BasePath returns the storage base path (for cleanup tasks)
|
||||
func (s *Storage) BasePath() string {
|
||||
return s.basePath
|
||||
}
|
||||
|
||||
// TempDir creates a temporary directory under the storage base path
|
||||
// Returns the path to the temporary directory
|
||||
func (s *Storage) TempDir(pattern string) (string, error) {
|
||||
// Ensure temp directory exists
|
||||
if err := os.MkdirAll(s.tempPath(), 0755); err != nil {
|
||||
return "", fmt.Errorf("failed to create temp directory: %w", err)
|
||||
}
|
||||
|
||||
// Create temp directory under storage base path
|
||||
return os.MkdirTemp(s.tempPath(), pattern)
|
||||
}
|
||||
|
||||
// uploadsPath returns the path for uploads
|
||||
func (s *Storage) uploadsPath() string {
|
||||
return filepath.Join(s.basePath, "uploads")
|
||||
@@ -142,6 +165,13 @@ func (s *Storage) GetFileSize(filePath string) (int64, error) {
|
||||
// ExtractZip extracts a ZIP file to the destination directory
|
||||
// Returns a list of all extracted file paths
|
||||
func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
|
||||
log.Printf("Extracting ZIP archive: %s -> %s", zipPath, destDir)
|
||||
|
||||
// Ensure destination directory exists
|
||||
if err := os.MkdirAll(destDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create destination directory: %w", err)
|
||||
}
|
||||
|
||||
r, err := zip.OpenReader(zipPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open ZIP file: %w", err)
|
||||
@@ -149,12 +179,20 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
|
||||
defer r.Close()
|
||||
|
||||
var extractedFiles []string
|
||||
fileCount := 0
|
||||
dirCount := 0
|
||||
|
||||
log.Printf("ZIP contains %d entries", len(r.File))
|
||||
|
||||
for _, f := range r.File {
|
||||
// Sanitize file path to prevent directory traversal
|
||||
destPath := filepath.Join(destDir, f.Name)
|
||||
if !strings.HasPrefix(destPath, filepath.Clean(destDir)+string(os.PathSeparator)) {
|
||||
return nil, fmt.Errorf("invalid file path in ZIP: %s", f.Name)
|
||||
|
||||
cleanDestPath := filepath.Clean(destPath)
|
||||
cleanDestDir := filepath.Clean(destDir)
|
||||
if !strings.HasPrefix(cleanDestPath, cleanDestDir+string(os.PathSeparator)) && cleanDestPath != cleanDestDir {
|
||||
log.Printf("ERROR: Invalid file path in ZIP - target: %s, destDir: %s", cleanDestPath, cleanDestDir)
|
||||
return nil, fmt.Errorf("invalid file path in ZIP: %s (target: %s, destDir: %s)", f.Name, cleanDestPath, cleanDestDir)
|
||||
}
|
||||
|
||||
// Create directory structure
|
||||
@@ -162,6 +200,7 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
|
||||
if err := os.MkdirAll(destPath, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create directory: %w", err)
|
||||
}
|
||||
dirCount++
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -191,8 +230,10 @@ func (s *Storage) ExtractZip(zipPath, destDir string) ([]string, error) {
|
||||
}
|
||||
|
||||
extractedFiles = append(extractedFiles, destPath)
|
||||
fileCount++
|
||||
}
|
||||
|
||||
log.Printf("ZIP extraction complete: %d files, %d directories extracted to %s", fileCount, dirCount, destDir)
|
||||
return extractedFiles, nil
|
||||
}
|
||||
|
||||
@@ -261,15 +302,15 @@ func isBlenderSaveFile(filename string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// CreateJobContext creates a tar.gz archive containing all job input files
|
||||
// CreateJobContext creates a tar archive containing all job input files
|
||||
// Filters out Blender save files (.blend1, .blend2, etc.)
|
||||
// Uses temporary directories and streaming to handle large files efficiently
|
||||
func (s *Storage) CreateJobContext(jobID int64) (string, error) {
|
||||
jobPath := s.JobPath(jobID)
|
||||
contextPath := filepath.Join(jobPath, "context.tar.gz")
|
||||
contextPath := filepath.Join(jobPath, "context.tar")
|
||||
|
||||
// Create temporary directory for staging
|
||||
tmpDir, err := os.MkdirTemp("", "fuego-context-*")
|
||||
tmpDir, err := os.MkdirTemp("", "jiggablend-context-*")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
@@ -320,17 +361,14 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
|
||||
return "", fmt.Errorf("no files found to include in context")
|
||||
}
|
||||
|
||||
// Create the tar.gz file using streaming
|
||||
// Create the tar file using streaming
|
||||
contextFile, err := os.Create(contextPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create context file: %w", err)
|
||||
}
|
||||
defer contextFile.Close()
|
||||
|
||||
gzWriter := gzip.NewWriter(contextFile)
|
||||
defer gzWriter.Close()
|
||||
|
||||
tarWriter := tar.NewWriter(gzWriter)
|
||||
tarWriter := tar.NewWriter(contextFile)
|
||||
defer tarWriter.Close()
|
||||
|
||||
// Add each file to the tar archive
|
||||
@@ -383,9 +421,6 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
|
||||
if err := tarWriter.Close(); err != nil {
|
||||
return "", fmt.Errorf("failed to close tar writer: %w", err)
|
||||
}
|
||||
if err := gzWriter.Close(); err != nil {
|
||||
return "", fmt.Errorf("failed to close gzip writer: %w", err)
|
||||
}
|
||||
if err := contextFile.Close(); err != nil {
|
||||
return "", fmt.Errorf("failed to close context file: %w", err)
|
||||
}
|
||||
@@ -393,12 +428,12 @@ func (s *Storage) CreateJobContext(jobID int64) (string, error) {
|
||||
return contextPath, nil
|
||||
}
|
||||
|
||||
// CreateJobContextFromDir creates a context archive (tar.gz) from files in a source directory
|
||||
// CreateJobContextFromDir creates a context archive (tar) from files in a source directory
|
||||
// This is used during upload to immediately create the context archive as the primary artifact
|
||||
// excludeFiles is a set of relative paths (from sourceDir) to exclude from the context
|
||||
func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, excludeFiles ...string) (string, error) {
|
||||
jobPath := s.JobPath(jobID)
|
||||
contextPath := filepath.Join(jobPath, "context.tar.gz")
|
||||
contextPath := filepath.Join(jobPath, "context.tar")
|
||||
|
||||
// Ensure job directory exists
|
||||
if err := os.MkdirAll(jobPath, 0755); err != nil {
|
||||
@@ -498,17 +533,14 @@ func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, exclude
|
||||
return "", fmt.Errorf("multiple .blend files found at root level in context archive (found %d, expected 1)", blendFilesAtRoot)
|
||||
}
|
||||
|
||||
// Create the tar.gz file using streaming
|
||||
// Create the tar file using streaming
|
||||
contextFile, err := os.Create(contextPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create context file: %w", err)
|
||||
}
|
||||
defer contextFile.Close()
|
||||
|
||||
gzWriter := gzip.NewWriter(contextFile)
|
||||
defer gzWriter.Close()
|
||||
|
||||
tarWriter := tar.NewWriter(gzWriter)
|
||||
tarWriter := tar.NewWriter(contextFile)
|
||||
defer tarWriter.Close()
|
||||
|
||||
// Add each file to the tar archive
|
||||
@@ -560,9 +592,6 @@ func (s *Storage) CreateJobContextFromDir(sourceDir string, jobID int64, exclude
|
||||
if err := tarWriter.Close(); err != nil {
|
||||
return "", fmt.Errorf("failed to close tar writer: %w", err)
|
||||
}
|
||||
if err := gzWriter.Close(); err != nil {
|
||||
return "", fmt.Errorf("failed to close gzip writer: %w", err)
|
||||
}
|
||||
if err := contextFile.Close(); err != nil {
|
||||
return "", fmt.Errorf("failed to close context file: %w", err)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user